From 95e309668471c2808d86d8dea1ec392b8729482b Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Sun, 1 Oct 2023 23:23:06 +0400 Subject: [PATCH 001/257] Added build on RedHat system to build & test RPM packages (#20134) * Added GHA workflow for RPM packages * Avoid rebuild for RPM / Debian packages * Removed conditional include headers * try only post-build * Beautification * Fixed testdata generation for mulit-config generators --- .github/workflows/fedora.yml | 224 ++++++++++++++++++ .github/workflows/linux.yml | 18 +- CMakeLists.txt | 36 ++- .../compile_flags/os_flags.cmake | 6 + cmake/developer_package/cpplint/cpplint.cmake | 9 +- .../frontends/frontends.cmake | 24 +- .../openvino/cmake/CythonConfig.cmake | 6 +- src/cmake/openvino.cmake | 2 +- src/core/tests/frontend/CMakeLists.txt | 6 +- src/frontends/paddle/tests/CMakeLists.txt | 4 +- src/frontends/tensorflow/tests/CMakeLists.txt | 4 +- .../tensorflow_lite/tests/CMakeLists.txt | 4 +- src/inference/CMakeLists.txt | 6 +- .../commit_slider/utils/cfg_samples/e2e.json | 2 +- .../src/kernel_selector/CMakeLists.txt | 4 +- .../tools/mo/utils/find_ie_version.py | 21 +- .../mock_mo_python_api/CMakeLists.txt | 4 +- 17 files changed, 296 insertions(+), 84 deletions(-) create mode 100644 .github/workflows/fedora.yml diff --git a/.github/workflows/fedora.yml b/.github/workflows/fedora.yml new file mode 100644 index 00000000000000..9602180ab66c4e --- /dev/null +++ b/.github/workflows/fedora.yml @@ -0,0 +1,224 @@ +name: Fedora (RHEL), Python 3.9 +on: + workflow_dispatch: + pull_request: + paths-ignore: + - '**/docs/**' + - 'docs/**' + - '**/**.md' + - '**.md' + - '**/layer_tests_summary/**' + - '**/conformance/**' + push: + paths-ignore: + - '**/docs/**' + - 'docs/**' + - '**/**.md' + - '**.md' + - '**/layer_tests_summary/**' + - '**/conformance/**' + branches: + - master + - 'releases/**' + +concurrency: + # github.ref is not unique in post-commit + group: ${{ github.event_name == 'push' && github.run_id || github.ref }}-fedora33 + cancel-in-progress: true + +jobs: + Build: + defaults: + run: + shell: bash + runs-on: aks-linux-16-cores + container: + image: fedora:33 + volumes: + - /mount/caches:/mount/caches + env: + CMAKE_BUILD_TYPE: 'Release' + CMAKE_GENERATOR: 'Ninja' + CMAKE_CXX_COMPILER_LAUNCHER: ccache + CMAKE_C_COMPILER_LAUNCHER: ccache + GITHUB_WORKSPACE: '/__w/openvino/openvino' + OPENVINO_REPO: /__w/openvino/openvino/openvino + INSTALL_DIR: /__w/openvino/openvino/openvino_install + INSTALL_TEST_DIR: /__w/openvino/openvino/tests_install + BUILD_DIR: /__w/openvino/openvino/openvino_build + CCACHE_DIR: /mount/caches/ccache/fedora33_x86_64_Release + CCACHE_TEMPDIR: /__w/openvino/openvino/ccache_temp + CCACHE_MAXSIZE: 50G + steps: + - name: Install git + run: yum update -y && yum install -y git + + - name: Clone OpenVINO + uses: actions/checkout@v4 + with: + path: ${{ env.OPENVINO_REPO }} + submodules: 'true' + + # + # Dependencies + # + + - name: Install build dependencies + run: bash ${OPENVINO_REPO}/install_build_dependencies.sh + + - name: Install python dependencies + run: | + python3 -m pip install -U pip + # For Python API: build and wheel packaging + python3 -m pip install -r ${OPENVINO_REPO}/src/bindings/python/wheel/requirements-dev.txt + python3 -m pip install -r ${OPENVINO_REPO}/src/bindings/python/src/compatibility/openvino/requirements-dev.txt + + # For running ONNX frontend unit tests + python3 -m pip install --force-reinstall -r ${OPENVINO_REPO}/src/frontends/onnx/tests/requirements.txt + + # For running TensorFlow frontend 
unit tests + python3 -m pip install -r ${OPENVINO_REPO}/src/frontends/tensorflow/tests/requirements.txt + + # For running TensorFlow Lite frontend unit tests + python3 -m pip install -r ${OPENVINO_REPO}/src/frontends/tensorflow_lite/tests/requirements.txt + + # For running Paddle frontend unit tests + python3 -m pip install -r ${OPENVINO_REPO}/src/frontends/paddle/tests/requirements.txt + + # + # Build + # + + - name: CMake configure - OpenVINO + run: | + cmake \ + -G "${{ env.CMAKE_GENERATOR }}" \ + -DENABLE_CPPLINT=OFF \ + -DENABLE_NCC_STYLE=OFF \ + -DENABLE_TESTS=ON \ + -DENABLE_STRICT_DEPENDENCIES=OFF \ + -DENABLE_SYSTEM_TBB=ON \ + -DENABLE_SYSTEM_OPENCL=ON \ + -DENABLE_SYSTEM_PUGIXML=ON \ + -DENABLE_PYTHON_PACKAGING=ON \ + -DCPACK_GENERATOR=TGZ \ + -DCMAKE_COMPILE_WARNING_AS_ERROR=ON \ + -DCMAKE_BUILD_TYPE=${{ env.CMAKE_BUILD_TYPE }} \ + -DCMAKE_CXX_COMPILER_LAUNCHER=${{ env.CMAKE_CXX_COMPILER_LAUNCHER }} \ + -DCMAKE_C_COMPILER_LAUNCHER=${{ env.CMAKE_C_COMPILER_LAUNCHER }} \ + -S ${OPENVINO_REPO} \ + -B ${BUILD_DIR} + + - name: Cmake build - OpenVINO + run: cmake --build ${BUILD_DIR} --parallel --verbose + + - name: Show ccache stats + run: ccache --show-stats + + - name: Cmake install - OpenVINO + run: | + cmake -DCMAKE_INSTALL_PREFIX=${INSTALL_DIR} -P ${BUILD_DIR}/cmake_install.cmake + cmake -DCMAKE_INSTALL_PREFIX=${INSTALL_TEST_DIR} -DCOMPONENT=tests -P ${BUILD_DIR}/cmake_install.cmake + cmake -DCMAKE_INSTALL_PREFIX=${INSTALL_DIR} -DCOMPONENT=python_wheels -P ${BUILD_DIR}/cmake_install.cmake + + - name: Pack Artifacts + run: | + pushd ${INSTALL_DIR} + tar -czvf ${BUILD_DIR}/openvino_package.tar.gz * + popd + + pushd ${INSTALL_TEST_DIR} + tar -czvf ${BUILD_DIR}/openvino_tests.tar.gz * + popd + + - name: Build RPM packages + run: | + cmake -DCPACK_GENERATOR=RPM \ + -DENABLE_TESTS=OFF \ + ${BUILD_DIR} + cmake --build ${BUILD_DIR} --parallel --target package --verbose + + # + # Upload build artifacts + # + + - name: Upload openvino package + if: ${{ always() }} + uses: actions/upload-artifact@v3 + with: + name: openvino_package + path: ${{ env.BUILD_DIR }}/openvino_package.tar.gz + if-no-files-found: 'error' + + - name: Upload openvino RPM packages + if: ${{ always() }} + uses: actions/upload-artifact@v3 + with: + name: openvino_rpm_packages + path: ${{ env.BUILD_DIR }}/*.rpm + if-no-files-found: 'error' + + - name: Upload openvino tests package + if: ${{ always() }} + uses: actions/upload-artifact@v3 + with: + name: openvino_tests + path: ${{ env.BUILD_DIR }}/openvino_tests.tar.gz + if-no-files-found: 'error' + + RPM_Packages: + needs: Build + defaults: + run: + shell: bash + runs-on: ubuntu-20.04 + container: + image: fedora:33 + env: + RPM_PACKAGES_DIR: /__w/openvino/packages/ + + steps: + - name: Create Directories + run: mkdir -p ${RPM_PACKAGES_DIR} + + - name: Download OpenVINO RPM packages + uses: actions/download-artifact@v3 + with: + name: openvino_rpm_packages + path: ${{ env.RPM_PACKAGES_DIR }} + + - name: Install RPM packages & check conflicts + run: | + tee > /tmp/openvino-2023.repo << EOF + [OpenVINO] + name=Intel(R) Distribution of OpenVINO 2023 + baseurl=https://yum.repos.intel.com/openvino/2023 + enabled=1 + gpgcheck=1 + repo_gpgcheck=1 + gpgkey=https://yum.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB + EOF + + # install previous release version + mv /tmp/openvino-2023.repo /etc/yum.repos.d + yum install -y openvino + + # install current version + yum install --allowerasing -y *.rpm + working-directory: ${{ env.RPM_PACKAGES_DIR }} + + - name: Test 
RPM packages + run: | + /usr/share/openvino/samples/cpp/build_samples.sh + /usr/share/openvino/samples/c/build_samples.sh + ~/openvino_cpp_samples_build/intel64/Release/hello_query_device + python3 /usr/share/openvino/samples/python/hello_query_device/hello_query_device.py + python3 -c 'from openvino import Core; Core().get_property("CPU", "AVAILABLE_DEVICES")' + python3 -c 'from openvino import Core; Core().get_property("GPU", "AVAILABLE_DEVICES")' + python3 -c 'from openvino import Core; Core().get_property("AUTO", "SUPPORTED_METRICS")' + python3 -c 'from openvino import Core; Core().get_property("MULTI", "SUPPORTED_METRICS")' + python3 -c 'from openvino import Core; Core().get_property("HETERO", "SUPPORTED_METRICS")' + python3 -c 'from openvino import Core; Core().get_property("BATCH", "SUPPORTED_METRICS")' + python3 -c 'from openvino.frontend import FrontEndManager; assert len(FrontEndManager().get_available_front_ends()) == 6' + benchmark_app --help + ovc --help diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index aefe5227b6a840..aae1d5936024a4 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -88,12 +88,7 @@ jobs: bash ${OPENVINO_REPO}/install_build_dependencies.sh # default-jdk - Java API # libssl1.1 - 'python3 -m pip' in self-hosted runner - # unzip - to download ninja - apt install --assume-yes --no-install-recommends default-jdk libssl1.1 unzip - - wget https://github.com/ninja-build/ninja/releases/download/v1.11.1/ninja-linux.zip - unzip ninja-linux.zip - cp -v ninja /usr/local/bin/ + apt install --assume-yes --no-install-recommends default-jdk libssl1.1 - uses: actions/setup-python@v4 with: @@ -126,9 +121,6 @@ jobs: # Build # - - name: Setup ccache dir - run: mkdir -p ${CCACHE_DIR} - - name: CMake configure - OpenVINO run: | cmake \ @@ -145,7 +137,6 @@ jobs: -DCMAKE_COMPILE_WARNING_AS_ERROR=ON \ -DCMAKE_CXX_COMPILER_LAUNCHER=${{ env.CMAKE_CXX_COMPILER_LAUNCHER }} \ -DCMAKE_C_COMPILER_LAUNCHER=${{ env.CMAKE_C_COMPILER_LAUNCHER }} \ - -DCMAKE_MINIMUM_REQUIRED_VERSION=3.20 \ -S ${OPENVINO_REPO} \ -B ${BUILD_DIR} @@ -180,12 +171,11 @@ jobs: /usr/bin/python3.8 -m pip install -r ${OPENVINO_REPO}/src/bindings/python/wheel/requirements-dev.txt /usr/bin/python3.8 -m pip install -r ${OPENVINO_REPO}/src/bindings/python/src/compatibility/openvino/requirements-dev.txt cmake -UPYTHON* \ - -DCPACK_GENERATOR=DEB \ -DENABLE_PYTHON_PACKAGING=ON \ - -DPython3_EXECUTABLE=/usr/bin/python3.8 \ -DENABLE_TESTS=OFF \ - -S ${OPENVINO_REPO} \ - -B ${BUILD_DIR} + -DPython3_EXECUTABLE=/usr/bin/python3.8 \ + -DCPACK_GENERATOR=DEB \ + ${BUILD_DIR} cmake --build ${BUILD_DIR} --parallel --config ${{ env.CMAKE_BUILD_TYPE }} --target package - name: Cmake & Build - OpenVINO Contrib diff --git a/CMakeLists.txt b/CMakeLists.txt index e4d31492a43f4e..e3630486535dda 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -2,32 +2,28 @@ # SPDX-License-Identifier: Apache-2.0 # -if(NOT DEFINED CMAKE_MINIMUM_REQUIRED_VERSION) - if(DEFINED BUILD_SHARED_LIBS AND NOT BUILD_SHARED_LIBS) - # 3.17: 'target_link_libraries' does not work correctly when called from - # different directory where 'add_library' is called: CMake generates - # incorrect OpenVINOConfig.cmake in this case - # 3.18: add_library cannot create ALIAS for non-GLOBAL targets - set(CMAKE_MINIMUM_REQUIRED_VERSION 3.18) +if(DEFINED BUILD_SHARED_LIBS AND NOT BUILD_SHARED_LIBS) + # 3.17: 'target_link_libraries' does not work correctly when called from + # different directory where 'add_library' is called: CMake 
generates + # incorrect OpenVINOConfig.cmake in this case + # 3.18: add_library cannot create ALIAS for non-GLOBAL targets + cmake_minimum_required(VERSION 3.18) +else() + if(CPACK_GENERATOR STREQUAL "DEB") + # we have to use CPACK_DEBIAN_PACKAGE_SHLIBDEPS_PRIVATE_DIRS variable + cmake_minimum_required(VERSION 3.20) else() - if(CPACK_GENERATOR STREQUAL "DEB") - # we have to use CPACK_DEBIAN_PACKAGE_SHLIBDEPS_PRIVATE_DIRS variable - set(CMAKE_MINIMUM_REQUIRED_VERSION 3.20) + if(WIN32) + # 3.16: FindPython3.cmake can find Python via -DPython3_EXECUTABLE + # 3.18: FindPython3.cmake can find Python automatically from virtualenv + cmake_minimum_required(VERSION 3.16) else() - if(WIN32) - # 3.16: FindPython3.cmake can find Python via -DPython3_EXECUTABLE - # 3.18: FindPython3.cmake can find Python automatically from virtualenv - set(CMAKE_MINIMUM_REQUIRED_VERSION 3.16) - else() - # 3.13: default choice - set(CMAKE_MINIMUM_REQUIRED_VERSION 3.13) - endif() + # 3.13: default choice + cmake_minimum_required(VERSION 3.13) endif() endif() endif() -cmake_minimum_required(VERSION ${CMAKE_MINIMUM_REQUIRED_VERSION}) - if(POLICY CMP0091) cmake_policy(SET CMP0091 NEW) # Enables use of MSVC_RUNTIME_LIBRARY endif() diff --git a/cmake/developer_package/compile_flags/os_flags.cmake b/cmake/developer_package/compile_flags/os_flags.cmake index db0e6a1a53fe8a..3202def116dbe9 100644 --- a/cmake/developer_package/compile_flags/os_flags.cmake +++ b/cmake/developer_package/compile_flags/os_flags.cmake @@ -299,7 +299,13 @@ if(NOT DEFINED CMAKE_CXX_STANDARD) else() set(CMAKE_CXX_STANDARD 11) endif() +endif() + +if(NOT DEFINED CMAKE_CXX_EXTENSIONS) set(CMAKE_CXX_EXTENSIONS OFF) +endif() + +if(NOT DEFINED CMAKE_CXX_STANDARD_REQUIRED) set(CMAKE_CXX_STANDARD_REQUIRED ON) endif() diff --git a/cmake/developer_package/cpplint/cpplint.cmake b/cmake/developer_package/cpplint/cpplint.cmake index d4da25ea31952f..e22c8f2e034b96 100644 --- a/cmake/developer_package/cpplint/cpplint.cmake +++ b/cmake/developer_package/cpplint/cpplint.cmake @@ -14,7 +14,6 @@ endif() if(ENABLE_CPPLINT AND NOT TARGET cpplint_all) add_custom_target(cpplint_all ALL) set_target_properties(cpplint_all PROPERTIES FOLDER cpplint) - set(CPPLINT_ALL_OUTPUT_FILES "" CACHE INTERNAL "All cpplint output files") endif() function(add_cpplint_target TARGET_NAME) @@ -58,6 +57,7 @@ function(add_cpplint_target TARGET_NAME) endif() file(RELATIVE_PATH source_file_relative "${CMAKE_CURRENT_SOURCE_DIR}" "${source_file}") + file(RELATIVE_PATH source_file_relative_root "${CMAKE_SOURCE_DIR}" "${source_file}") set(output_file "${CMAKE_CURRENT_BINARY_DIR}/cpplint/${source_file_relative}.cpplint") string(REPLACE ".." 
"__" output_file "${output_file}") get_filename_component(output_dir "${output_file}" DIRECTORY) @@ -81,17 +81,12 @@ function(add_cpplint_target TARGET_NAME) "${IEDevScripts_DIR}/cpplint/cpplint.py" "${IEDevScripts_DIR}/cpplint/cpplint_run.cmake" COMMENT - "[cpplint] ${source_file}" + "[cpplint] ${source_file_relative_root}" VERBATIM) list(APPEND all_output_files "${output_file}") endforeach() - set(CPPLINT_ALL_OUTPUT_FILES - ${CPPLINT_ALL_OUTPUT_FILES} ${all_output_files} - CACHE INTERNAL - "All cpplint output files") - add_custom_target(${TARGET_NAME} ALL DEPENDS ${all_output_files} COMMENT "[cpplint] ${TARGET_NAME}") diff --git a/cmake/developer_package/frontends/frontends.cmake b/cmake/developer_package/frontends/frontends.cmake index 964dc6da443e81..78e62101670425 100644 --- a/cmake/developer_package/frontends/frontends.cmake +++ b/cmake/developer_package/frontends/frontends.cmake @@ -127,17 +127,17 @@ macro(ov_add_frontend) # Generate protobuf file on build time for each '.proto' file in src/proto file(GLOB proto_files ${frontend_root_dir}/src/proto/*.proto) - foreach(INFILE IN LISTS proto_files) - get_filename_component(FILE_DIR ${INFILE} DIRECTORY) - get_filename_component(FILE_WE ${INFILE} NAME_WE) + foreach(proto_file IN LISTS proto_files) + file(RELATIVE_PATH proto_file_relative "${CMAKE_SOURCE_DIR}" "${proto_file}") + get_filename_component(FILE_DIR ${proto_file} DIRECTORY) + get_filename_component(FILE_WE ${proto_file} NAME_WE) set(OUTPUT_PB_SRC ${CMAKE_CURRENT_BINARY_DIR}/${FILE_WE}.pb.cc) set(OUTPUT_PB_HEADER ${CMAKE_CURRENT_BINARY_DIR}/${FILE_WE}.pb.h) - set(GENERATED_PROTO ${INFILE}) add_custom_command( OUTPUT "${OUTPUT_PB_SRC}" "${OUTPUT_PB_HEADER}" COMMAND ${PROTOC_EXECUTABLE} ARGS --cpp_out ${CMAKE_CURRENT_BINARY_DIR} -I ${FILE_DIR} ${FILE_WE}.proto - DEPENDS ${PROTOC_DEPENDENCY} ${GENERATED_PROTO} - COMMENT "Running C++ protocol buffer compiler (${PROTOC_EXECUTABLE}) on ${GENERATED_PROTO}" + DEPENDS ${PROTOC_DEPENDENCY} ${proto_file} + COMMENT "Running C++ protocol buffer compiler (${PROTOC_EXECUTABLE}) on ${proto_file_relative}" VERBATIM COMMAND_EXPAND_LISTS) list(APPEND PROTO_SRCS "${OUTPUT_PB_SRC}") @@ -145,15 +145,15 @@ macro(ov_add_frontend) endforeach() file(GLOB flatbuffers_schema_files ${frontend_root_dir}/src/schema/*.fbs) - foreach(INFILE IN LISTS flatbuffers_schema_files) - get_filename_component(FILE_WE ${INFILE} NAME_WE) + foreach(flatbuffers_schema_file IN LISTS flatbuffers_schema_files) + file(RELATIVE_PATH flatbuffers_schema_file_relative "${CMAKE_SOURCE_DIR}" "${flatbuffers_schema_file}") + get_filename_component(FILE_WE "${flatbuffers_schema_file}" NAME_WE) set(OUTPUT_FC_HEADER ${CMAKE_CURRENT_BINARY_DIR}/${FILE_WE}_generated.h) - set(GENERATED_PROTO ${INFILE}) add_custom_command( OUTPUT "${OUTPUT_FC_HEADER}" - COMMAND ${flatbuffers_COMPILER} ARGS -c --gen-mutable -o ${CMAKE_CURRENT_BINARY_DIR} ${INFILE} - DEPENDS ${flatbuffers_DEPENDENCY} ${GENERATED_PROTO} - COMMENT "Running C++ flatbuffers compiler (${flatbuffers_COMPILER}) on ${GENERATED_PROTO}" + COMMAND ${flatbuffers_COMPILER} ARGS -c --gen-mutable -o ${CMAKE_CURRENT_BINARY_DIR} ${flatbuffers_schema_file} + DEPENDS ${flatbuffers_DEPENDENCY} ${flatbuffers_schema_file} + COMMENT "Running C++ flatbuffers compiler (${flatbuffers_COMPILER}) on ${flatbuffers_schema_file_relative}" VERBATIM COMMAND_EXPAND_LISTS) list(APPEND PROTO_HDRS "${OUTPUT_FC_HEADER}") diff --git a/src/bindings/python/src/compatibility/openvino/cmake/CythonConfig.cmake 
b/src/bindings/python/src/compatibility/openvino/cmake/CythonConfig.cmake index b27dcbc7e84f2f..8d02cf9890a5be 100644 --- a/src/bindings/python/src/compatibility/openvino/cmake/CythonConfig.cmake +++ b/src/bindings/python/src/compatibility/openvino/cmake/CythonConfig.cmake @@ -68,7 +68,11 @@ if(CYTHON_EXIT_CODE EQUAL 0) string(REGEX REPLACE "^Cython version ([0-9]+\\.[0-9]+(\\.[0-9]+)?).*" "\\1" CYTHON_VERSION "${CYTHON_OUTPUT}") else() if(${CMAKE_FIND_PACKAGE_NAME}_FIND_QUIETLY) - set(CYTHON_MESSAGE_MODE TRACE) + if(CMAKE_VERSION VERSION_GREATER_EQUAL 3.15) + set(CYTHON_MESSAGE_MODE TRACE) + else() + set(CYTHON_MESSAGE_MODE WARNING) + endif() endif() if(${CMAKE_FIND_PACKAGE_NAME}_FIND_REQUIRED) set(CYTHON_MESSAGE_MODE FATAL_ERROR) diff --git a/src/cmake/openvino.cmake b/src/cmake/openvino.cmake index d187c007c75d2d..73df6bf480719d 100644 --- a/src/cmake/openvino.cmake +++ b/src/cmake/openvino.cmake @@ -284,7 +284,7 @@ if(ENABLE_PKGCONFIG_GEN) set(pkgconfig_option "--validate") endif() - add_custom_command(TARGET openvino PRE_BUILD + add_custom_command(TARGET openvino POST_BUILD COMMAND "${CMAKE_COMMAND}" --config $ -D PKG_CONFIG_IN_FILE=${pkgconfig_in} -D PKG_CONFIG_OUT_FILE=${pkgconfig_out} diff --git a/src/core/tests/frontend/CMakeLists.txt b/src/core/tests/frontend/CMakeLists.txt index 8f6d2a17b40b6f..dd096fed759a94 100644 --- a/src/core/tests/frontend/CMakeLists.txt +++ b/src/core/tests/frontend/CMakeLists.txt @@ -4,7 +4,7 @@ set(SRC ${CMAKE_CURRENT_SOURCE_DIR}/mock_frontend.cpp) set(MOCK1_FE_NAME openvino_mock1_frontend) -add_library(${MOCK1_FE_NAME} SHARED EXCLUDE_FROM_ALL ${SRC}) +add_library(${MOCK1_FE_NAME} SHARED ${SRC}) ov_add_library_version(${MOCK1_FE_NAME}) @@ -18,5 +18,5 @@ add_dependencies(ov_core_unit_tests ${MOCK1_FE_NAME}) ov_add_clang_format_target(${MOCK1_FE_NAME}_clang FOR_TARGETS ${MOCK1_FE_NAME}) install(TARGETS ${MOCK1_FE_NAME} - RUNTIME DESTINATION tests COMPONENT tests OPTIONAL EXCLUDE_FROM_ALL - LIBRARY DESTINATION tests COMPONENT tests OPTIONAL EXCLUDE_FROM_ALL) + RUNTIME DESTINATION tests COMPONENT tests EXCLUDE_FROM_ALL + LIBRARY DESTINATION tests COMPONENT tests EXCLUDE_FROM_ALL) diff --git a/src/frontends/paddle/tests/CMakeLists.txt b/src/frontends/paddle/tests/CMakeLists.txt index e9e1155381af87..236919d7dfda8f 100644 --- a/src/frontends/paddle/tests/CMakeLists.txt +++ b/src/frontends/paddle/tests/CMakeLists.txt @@ -44,7 +44,7 @@ endif() # PDPD 2.5.1 is not compatible with tests models we use set(paddlepaddle_FOUND OFF) -set(TEST_PADDLE_MODELS_DIRNAME test_model_zoo/paddle_test_models) +set(TEST_PADDLE_MODELS_DIRNAME ${TEST_MODEL_ZOO}/paddle_test_models) target_compile_definitions(${TARGET_NAME} PRIVATE -D TEST_PADDLE_MODELS_DIRNAME=\"${TEST_PADDLE_MODELS_DIRNAME}/\") set(PADDLEDET_OPS_URL "https://raw.githubusercontent.com/PaddlePaddle/PaddleDetection/release/2.1/ppdet/modeling/ops.py") @@ -58,7 +58,7 @@ DownloadAndCheck(${PADDLEDET_OPS_URL} ${PADDLEDET_DIRNAME}/ops.py PADDLEDET_FATA # This is done this way for 'code style' and check cases - cmake shall pass, but CI machine doesn't need to have # 'paddlepaddle' installed to check code style if(paddlepaddle_FOUND AND PADDLEDET_RESULT) - set(TEST_PADDLE_MODELS ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/${TEST_PADDLE_MODELS_DIRNAME}/) + set(TEST_PADDLE_MODELS ${TEST_MODEL_ZOO_OUTPUT_DIR}/paddle_test_models/) file(GLOB_RECURSE PADDLE_ALL_SCRIPTS ${CMAKE_CURRENT_SOURCE_DIR}/*.py) set(OUT_FILE ${TEST_PADDLE_MODELS}/generate_done.txt) diff --git a/src/frontends/tensorflow/tests/CMakeLists.txt 
b/src/frontends/tensorflow/tests/CMakeLists.txt index 4adb81882c2a42..ccffb195159b35 100644 --- a/src/frontends/tensorflow/tests/CMakeLists.txt +++ b/src/frontends/tensorflow/tests/CMakeLists.txt @@ -33,7 +33,7 @@ ov_check_pip_packages(REQUIREMENTS_FILE "${CMAKE_CURRENT_SOURCE_DIR}/requirement WARNING_MESSAGE "TensorFlow testing models weren't generated, some tests will fail due models not found" RESULT_VAR tensorflow_FOUND) -set(TEST_TENSORFLOW_MODELS_DIRNAME test_model_zoo/tensorflow_test_models) +set(TEST_TENSORFLOW_MODELS_DIRNAME ${TEST_MODEL_ZOO}/tensorflow_test_models) target_compile_definitions(${TARGET_NAME} PRIVATE -D TEST_TENSORFLOW_MODELS_DIRNAME=\"${TEST_TENSORFLOW_MODELS_DIRNAME}/\") # If 'tensorflow' is not found, code will still be compiled @@ -41,7 +41,7 @@ target_compile_definitions(${TARGET_NAME} PRIVATE -D TEST_TENSORFLOW_MODELS_DIRN # This is done this way for 'code style' and check cases - cmake shall pass, but CI machine doesn't need to have # 'tensorflow' installed to check code style if (tensorflow_FOUND) - set(TEST_TENSORFLOW_MODELS ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/${TEST_TENSORFLOW_MODELS_DIRNAME}/) + set(TEST_TENSORFLOW_MODELS ${TEST_MODEL_ZOO_OUTPUT_DIR}/tensorflow_test_models/) file(GLOB_RECURSE TENSORFLOW_GEN_SCRIPTS ${CMAKE_CURRENT_SOURCE_DIR}/test_models/gen_scripts/generate_*.py) file(GLOB_RECURSE TENSORFLOW_MODELS_PBTXT ${CMAKE_CURRENT_SOURCE_DIR}/test_models/models_pbtxt/*.pbtxt) diff --git a/src/frontends/tensorflow_lite/tests/CMakeLists.txt b/src/frontends/tensorflow_lite/tests/CMakeLists.txt index 8869e5b4907ec5..5e0b544db2620c 100644 --- a/src/frontends/tensorflow_lite/tests/CMakeLists.txt +++ b/src/frontends/tensorflow_lite/tests/CMakeLists.txt @@ -27,7 +27,7 @@ ov_check_pip_packages(REQUIREMENTS_FILE "${CMAKE_CURRENT_SOURCE_DIR}/requirement WARNING_MESSAGE "TensorFlow Lite testing models weren't generated, some tests will fail due models not found" RESULT_VAR tensorflow_FOUND) -set(TEST_TENSORFLOW_LITE_MODELS_DIRNAME test_model_zoo/tensorflow_lite_test_models) +set(TEST_TENSORFLOW_LITE_MODELS_DIRNAME ${TEST_MODEL_ZOO}/tensorflow_lite_test_models) target_compile_definitions(${TARGET_NAME} PRIVATE -D TEST_TENSORFLOW_LITE_MODELS_DIRNAME=\"${TEST_TENSORFLOW_LITE_MODELS_DIRNAME}/\") # If 'tensorflow' is not found, code will still be compiled @@ -35,7 +35,7 @@ target_compile_definitions(${TARGET_NAME} PRIVATE -D TEST_TENSORFLOW_LITE_MODELS # This is done this way for 'code style' and check cases - cmake shall pass, but CI machine doesn't need to have # 'tensorflow' installed to check code style if (tensorflow_FOUND) - set(TEST_TENSORFLOW_LITE_MODELS ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/${TEST_TENSORFLOW_LITE_MODELS_DIRNAME}/) + set(TEST_TENSORFLOW_LITE_MODELS "${TEST_MODEL_ZOO_OUTPUT_DIR}/tensorflow_lite_test_models") file(GLOB_RECURSE TENSORFLOW_GEN_SCRIPTS ${CMAKE_CURRENT_SOURCE_DIR}/test_models/gen_scripts/generate_*.py) file(GLOB_RECURSE TENSORFLOW_ALL_SCRIPTS ${CMAKE_CURRENT_SOURCE_DIR}/*.py) diff --git a/src/inference/CMakeLists.txt b/src/inference/CMakeLists.txt index 99f3670e4fa918..19bf83c27e18f9 100644 --- a/src/inference/CMakeLists.txt +++ b/src/inference/CMakeLists.txt @@ -23,7 +23,10 @@ file (GLOB LIBRARY_SRC set(IE_STATIC_DEPENDENT_FILES ${CMAKE_CURRENT_SOURCE_DIR}/src/file_utils.cpp) list(REMOVE_ITEM LIBRARY_SRC ${IE_STATIC_DEPENDENT_FILES}) -if(ENABLE_IR_V7_READER) +if(BUILD_SHARED_LIBS OR ENABLE_IR_V7_READER) + # TODO: remove together with GNA plugin + # we have unconditional adding of the ENABLE_IR_V7_READER compile definition for shared 
libs case + # to avoid rebuild, relink during work with build tree set_source_files_properties(${CMAKE_CURRENT_SOURCE_DIR}/src/ie_network_reader.cpp PROPERTIES COMPILE_DEFINITIONS "ENABLE_IR_V7_READER") endif() @@ -149,7 +152,6 @@ target_include_directories(${TARGET_NAME}_obj PRIVATE # for ov_plugins.hpp $,$>,${CMAKE_CURRENT_BINARY_DIR}/$,${CMAKE_CURRENT_BINARY_DIR}> # for ie_ir_version.hpp - $<$:$> "${OpenVINO_SOURCE_DIR}/src/plugins/intel_gna/legacy/include" $ $) diff --git a/src/plugins/intel_cpu/tools/commit_slider/utils/cfg_samples/e2e.json b/src/plugins/intel_cpu/tools/commit_slider/utils/cfg_samples/e2e.json index 23cffb03ba6325..0d3e87088d5d7a 100644 --- a/src/plugins/intel_cpu/tools/commit_slider/utils/cfg_samples/e2e.json +++ b/src/plugins/intel_cpu/tools/commit_slider/utils/cfg_samples/e2e.json @@ -2,7 +2,7 @@ "appPath" : "//e2e/frameworks.ai.openvino.tests/e2e_oss/", "appCmd" : "pytest test_dynamism.py ", "envVars" : [ - {"name" : "PYTHONPATH", "val" : "//bin/intel64/Release/python_api/python3.8/"}, + {"name" : "PYTHONPATH", "val" : "//bin/intel64/Release/python/"}, {"name" : "LD_LIBRARY_PATH", "val" : "//bin/intel64/Release/"}, {"name" : "MO_ROOT", "val" : "//tools/mo/openvino/tools/"}, {"name" : "OPENVINO_ROOT_DIR", "val" : "//"} diff --git a/src/plugins/intel_gpu/src/kernel_selector/CMakeLists.txt b/src/plugins/intel_gpu/src/kernel_selector/CMakeLists.txt index 81637375d811b3..e223e3ad1aa962 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/CMakeLists.txt +++ b/src/plugins/intel_gpu/src/kernel_selector/CMakeLists.txt @@ -81,8 +81,8 @@ elseif((NOT ANDROID) AND UNIX) target_link_libraries(${TARGET_NAME} PRIVATE pthread) endif() -if(WIN32) - set(TUNING_CACHE_PATH "${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/$") +if(OV_GENERATOR_MULTI_CONFIG) + set(TUNING_CACHE_PATH "${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/$") else() set(TUNING_CACHE_PATH "${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/") endif() diff --git a/tools/mo/openvino/tools/mo/utils/find_ie_version.py b/tools/mo/openvino/tools/mo/utils/find_ie_version.py index de9aef5d079e43..eb1886dbff1cd9 100644 --- a/tools/mo/openvino/tools/mo/utils/find_ie_version.py +++ b/tools/mo/openvino/tools/mo/utils/find_ie_version.py @@ -78,25 +78,21 @@ def find_ie_version(silent=False): if try_to_import_ie(silent=silent): return True - python_version = 'python{}.{}'.format(sys.version_info[0], sys.version_info[1]) - script_path = os.path.realpath(os.path.dirname(__file__)) # Windows bindings_paths_windows = [ # Local builds { - "module": os.path.join(script_path, '../../../../../../bin/intel64/Release/python_api/', python_version), + "module": os.path.join(script_path, '../../../../../../bin/intel64/Release/python/'), "libs": [ - os.path.join(script_path, '../../../../../../bin/intel64'), os.path.join(script_path, '../../../../../../bin/intel64/Release'), os.path.join(script_path, '../../../../../../temp/tbb/bin'), ] }, { - "module": os.path.join(script_path, '../../../../../../bin/intel64/Debug/python_api/', python_version), + "module": os.path.join(script_path, '../../../../../../bin/intel64/Debug/python/'), "libs": [ - os.path.join(script_path, '../../../../../../bin/intel64'), os.path.join(script_path, '../../../../../../bin/intel64/Debug'), os.path.join(script_path, '../../../../../../temp/tbb/bin'), ] @@ -107,22 +103,21 @@ def find_ie_version(silent=False): bindings_paths_linux = [ # Local builds { - "module": os.path.join(script_path, '../../../../../../bin/intel64/Release/lib/python_api/', python_version), + "module": os.path.join(script_path, 
'../../../../../../bin/intel64/Release/python/'), "libs": [ - os.path.join(script_path, '../../../../../../bin/intel64/Release/lib'), + os.path.join(script_path, '../../../../../../bin/intel64/Release'), ] }, - { - "module": os.path.join(script_path, '../../../../../../bin/intel64/RelWithDebInfo/lib/python_api/', python_version), + "module": os.path.join(script_path, '../../../../../../bin/intel64/RelWithDebInfo/python'), "libs": [ - os.path.join(script_path, '../../../../../../bin/intel64/RelWithDebInfo/lib'), + os.path.join(script_path, '../../../../../../bin/intel64/RelWithDebInfo'), ] }, { - "module": os.path.join(script_path, '../../../../../../bin/intel64/Debug/lib/python_api/', python_version), + "module": os.path.join(script_path, '../../../../../../bin/intel64/Debug/python'), "libs": [ - os.path.join(script_path, '../../../../../../bin/intel64/Debug/lib'), + os.path.join(script_path, '../../../../../../bin/intel64/Debug'), ] } ] diff --git a/tools/mo/unit_tests/mock_mo_frontend/mock_mo_python_api/CMakeLists.txt b/tools/mo/unit_tests/mock_mo_frontend/mock_mo_python_api/CMakeLists.txt index 9ae9e55a27a049..87e34e41fc2477 100644 --- a/tools/mo/unit_tests/mock_mo_frontend/mock_mo_python_api/CMakeLists.txt +++ b/tools/mo/unit_tests/mock_mo_frontend/mock_mo_python_api/CMakeLists.txt @@ -8,9 +8,9 @@ set(CMAKE_COMPILE_PDB_OUTPUT_DIRECTORY_OLD ${CMAKE_COMPILE_PDB_OUTPUT_DIRECTORY} set(CMAKE_PDB_OUTPUT_DIRECTORY_OLD ${CMAKE_PDB_OUTPUT_DIRECTORY}) if(OV_GENERATOR_MULTI_CONFIG) - set(PYTHON_BRIDGE_OUTPUT_DIRECTORY ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/$/python_api) + set(PYTHON_BRIDGE_OUTPUT_DIRECTORY ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/$/python/openvino) else() - set(PYTHON_BRIDGE_OUTPUT_DIRECTORY ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/python_api) + set(PYTHON_BRIDGE_OUTPUT_DIRECTORY ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/python/openvino) endif() set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${PYTHON_BRIDGE_OUTPUT_DIRECTORY}) From e2501a67d26e75a40d3f8e15b4c54a42bfb02769 Mon Sep 17 00:00:00 2001 From: Sergey Shlyapnikov Date: Mon, 2 Oct 2023 09:02:10 +0400 Subject: [PATCH 002/257] [GPU] Fix os_is_yx_osv16_isv4 weights format matching (#20132) --- .../intel_gpu/src/graph/impls/ocl/kernel_selector_helper.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/kernel_selector_helper.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/kernel_selector_helper.cpp index 5fd4a17728e0cd..99462d8c813e64 100644 --- a/src/plugins/intel_gpu/src/graph/impls/ocl/kernel_selector_helper.cpp +++ b/src/plugins/intel_gpu/src/graph/impls/ocl/kernel_selector_helper.cpp @@ -896,7 +896,7 @@ cldnn::format::type from_weights_layout(kernel_selector::weights_layout l) { case kernel_selector::weights_layout::g_os_is_zyx_isv16_osv16: return cldnn::format::g_os_is_zyx_isv16_osv16; case kernel_selector::weights_layout::os_is_yx_osv16_isv4: - return cldnn::format::g_os_is_yx_osv16_isv4; + return cldnn::format::os_is_yx_osv16_isv4; case kernel_selector::weights_layout::os_is_zyx_osv16_isv16: return cldnn::format::os_is_zyx_osv16_isv16; case kernel_selector::weights_layout::g_os_is_zyx_osv16_isv16: From b409ea19304f3fe128835cd786c976028ea6492d Mon Sep 17 00:00:00 2001 From: Roman Kazantsev Date: Mon, 2 Oct 2023 09:56:10 +0400 Subject: [PATCH 003/257] [TF FE] Support TF1 While Control flow (#20105) * [TF FE] Support TF1 While Control flow Signed-off-by: Kazantsev, Roman * Apply code-style fix * Update API for OpPlace to store back edge * Fix build: no rvalue by reference passing * Fix build issue: 
correct type * Fix TF FE unit-tests * Apply code-review feedback: remove unused vars * Fix fusing complicated case of TF1 While * Remove unused variable * Update MO unit test * Fix layer tests for While * Handle Switch and NextIteration nodes connected directly Signed-off-by: Kazantsev, Roman --------- Signed-off-by: Kazantsev, Roman --- src/frontends/tensorflow/src/input_model.cpp | 32 ++- src/frontends/tensorflow/src/op/enter.cpp | 34 +++ src/frontends/tensorflow/src/op/exit.cpp | 33 +++ src/frontends/tensorflow/src/op/loop_cond.cpp | 34 +++ .../tensorflow/src/op/next_iteration.cpp | 33 +++ .../tensorflow/src/op/partitioned_call.cpp | 13 +- src/frontends/tensorflow/src/op/while.cpp | 86 +------- src/frontends/tensorflow/src/op_table.cpp | 10 + src/frontends/tensorflow/src/tf_utils.cpp | 109 ++++++++++ src/frontends/tensorflow/src/tf_utils.hpp | 13 ++ .../tensorflow/src/translate_session.cpp | 197 +++++++++++++++--- .../tensorflow/src/translate_session.hpp | 5 - .../tensorflow/tests/convert_unsupported.cpp | 42 +--- .../include/helper_ops/enter.hpp | 47 +++++ .../include/helper_ops/exit.hpp | 37 ++++ .../include/helper_ops/loop_cond.hpp | 37 ++++ .../include/helper_ops/next_iteration.hpp | 54 +++++ .../tensorflow_common/include/place.hpp | 12 ++ .../include/tf_framework_node.hpp | 4 +- src/frontends/tensorflow_common/src/place.cpp | 18 +- .../tensorflow_tests/test_tf_While.py | 6 +- tools/mo/openvino/tools/mo/convert_impl.py | 3 +- .../moc_tf_fe/conversion_basic_models_test.py | 3 +- 23 files changed, 694 insertions(+), 168 deletions(-) create mode 100644 src/frontends/tensorflow/src/op/enter.cpp create mode 100644 src/frontends/tensorflow/src/op/exit.cpp create mode 100644 src/frontends/tensorflow/src/op/loop_cond.cpp create mode 100644 src/frontends/tensorflow/src/op/next_iteration.cpp create mode 100644 src/frontends/tensorflow_common/include/helper_ops/enter.hpp create mode 100644 src/frontends/tensorflow_common/include/helper_ops/exit.hpp create mode 100644 src/frontends/tensorflow_common/include/helper_ops/loop_cond.hpp create mode 100644 src/frontends/tensorflow_common/include/helper_ops/next_iteration.hpp diff --git a/src/frontends/tensorflow/src/input_model.cpp b/src/frontends/tensorflow/src/input_model.cpp index 6e2103ca463a10..65672ae13a6d1e 100644 --- a/src/frontends/tensorflow/src/input_model.cpp +++ b/src/frontends/tensorflow/src/input_model.cpp @@ -137,6 +137,10 @@ void InputModel::InputModelTFImpl::load_places() { auto op_name = node_decoder->get_op_name(); auto op_type = node_decoder->get_op_type(); + if (op_type == "Placeholder" && op_name.rfind("unused_control_flow_input", 0) != std::string::npos) { + continue; + } + if (m_telemetry) { op_statistics[op_type]++; } @@ -320,9 +324,6 @@ std::vector> InputModel::InputModelTFImpl::topologicall std::stack> ops_to_do; std::unordered_set> ops_done; - // TODO: implement logic to check direct cycles in the graph - // and break them - // probably not only NextIteration can generate cycles for (const auto& output_place : m_outputs) { FRONT_END_GENERAL_CHECK(output_place->get_names().size() > 0, "TensorPlace must have at least one name."); auto output_place_name = output_place->get_names()[0]; @@ -336,6 +337,23 @@ std::vector> InputModel::InputModelTFImpl::topologicall ops_to_do.push(output_operation_place); } + // walk through all NextIteration nodes and put their producers into ops_to_do + // this is needed to avoid missed nodes in the body graph of TF1 While operation + for (const auto& op_place : m_op_places) { + auto op_decoder 
= op_place->get_decoder(); + if (op_decoder->get_op_type() == "NextIteration") { + std::string producer_name; + std::string producer_output_port_name; + size_t producer_output_port_idx; + op_decoder->get_input_node(0, producer_name, producer_output_port_name, producer_output_port_idx); + FRONT_END_GENERAL_CHECK(m_op_places_map.count(producer_name), + "[TensorFlow Frontend] internal error or inconsistent model: producer of " + "NextIteration is not found among operation places " + + producer_name); + ops_to_do.push(m_op_places_map.at(producer_name)); + } + } + // the traversing algorithm to compute topologically sorted nodes is taken from topological_sort in // core/graph_util.hpp while (ops_to_do.size() > 0) { @@ -350,6 +368,14 @@ std::vector> InputModel::InputModelTFImpl::topologicall if (current_operation_type == "NextIteration") { // break the cycle created by NextIteration input_count = 0; + std::string producer_name; + std::string producer_output_port_name; + size_t producer_output_port_idx; + current_operation_decoder->get_input_node(0, + producer_name, + producer_output_port_name, + producer_output_port_idx); + current_operation_place->set_next_iteration_back_edge(producer_name, producer_output_port_idx); } for (size_t input_port_idx = 0; input_port_idx < input_count; ++input_port_idx) { diff --git a/src/frontends/tensorflow/src/op/enter.cpp b/src/frontends/tensorflow/src/op/enter.cpp new file mode 100644 index 00000000000000..c0719f83e36ccb --- /dev/null +++ b/src/frontends/tensorflow/src/op/enter.cpp @@ -0,0 +1,34 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "helper_ops/enter.hpp" + +#include "common_op_table.hpp" +#include "openvino/frontend/tensorflow/node_context.hpp" +#include "utils.hpp" + +using namespace std; +using namespace ov; +using namespace ov::frontend::tensorflow; + +namespace ov { +namespace frontend { +namespace tensorflow { +namespace op { + +OutputVector translate_enter_op(const NodeContext& node) { + default_op_checks(node, 1, {"Enter"}); + auto data = node.get_input(0); + auto frame_name = node.get_attribute("frame_name"); + + auto enter_node = make_shared(data, frame_name, node.get_decoder()); + set_node_name(node.get_name(), enter_node); + + return enter_node->outputs(); +} + +} // namespace op +} // namespace tensorflow +} // namespace frontend +} // namespace ov diff --git a/src/frontends/tensorflow/src/op/exit.cpp b/src/frontends/tensorflow/src/op/exit.cpp new file mode 100644 index 00000000000000..e85bddf21c6eb2 --- /dev/null +++ b/src/frontends/tensorflow/src/op/exit.cpp @@ -0,0 +1,33 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "helper_ops/exit.hpp" + +#include "common_op_table.hpp" +#include "openvino/frontend/tensorflow/node_context.hpp" +#include "utils.hpp" + +using namespace std; +using namespace ov; +using namespace ov::frontend::tensorflow; + +namespace ov { +namespace frontend { +namespace tensorflow { +namespace op { + +OutputVector translate_exit_op(const NodeContext& node) { + default_op_checks(node, 1, {"Exit"}); + auto data = node.get_input(0); + + auto exit_node = make_shared(data, node.get_decoder()); + set_node_name(node.get_name(), exit_node); + + return exit_node->outputs(); +} + +} // namespace op +} // namespace tensorflow +} // namespace frontend +} // namespace ov diff --git a/src/frontends/tensorflow/src/op/loop_cond.cpp b/src/frontends/tensorflow/src/op/loop_cond.cpp new file mode 100644 index 
00000000000000..286192a017f283 --- /dev/null +++ b/src/frontends/tensorflow/src/op/loop_cond.cpp @@ -0,0 +1,34 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "helper_ops/loop_cond.hpp" + +#include "common_op_table.hpp" +#include "openvino/frontend/tensorflow/node_context.hpp" +#include "utils.hpp" + +using namespace std; +using namespace ov; +using namespace ov::op; +using namespace ov::frontend::tensorflow; + +namespace ov { +namespace frontend { +namespace tensorflow { +namespace op { + +OutputVector translate_loop_cond_op(const NodeContext& node) { + default_op_checks(node, 1, {"LoopCond"}); + auto input = node.get_input(0); + + auto loop_cond_node = make_shared(input, node.get_decoder()); + set_node_name(node.get_name(), loop_cond_node); + + return loop_cond_node->outputs(); +} + +} // namespace op +} // namespace tensorflow +} // namespace frontend +} // namespace ov diff --git a/src/frontends/tensorflow/src/op/next_iteration.cpp b/src/frontends/tensorflow/src/op/next_iteration.cpp new file mode 100644 index 00000000000000..f977f277ad9de9 --- /dev/null +++ b/src/frontends/tensorflow/src/op/next_iteration.cpp @@ -0,0 +1,33 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "helper_ops/next_iteration.hpp" + +#include "common_op_table.hpp" +#include "helper_ops/merge.hpp" +#include "openvino/frontend/tensorflow/node_context.hpp" +#include "utils.hpp" + +using namespace std; +using namespace ov; +using namespace ov::frontend::tensorflow; + +namespace ov { +namespace frontend { +namespace tensorflow { +namespace op { + +OutputVector translate_next_iteration_op(const NodeContext& node) { + default_op_checks(node, 0, {"NextIteration"}); + + auto next_iteration_node = make_shared(node.get_decoder()); + set_node_name(node.get_name(), next_iteration_node); + + return next_iteration_node->outputs(); +} + +} // namespace op +} // namespace tensorflow +} // namespace frontend +} // namespace ov diff --git a/src/frontends/tensorflow/src/op/partitioned_call.cpp b/src/frontends/tensorflow/src/op/partitioned_call.cpp index 6228d80e86a438..635ea1a802d9b5 100644 --- a/src/frontends/tensorflow/src/op/partitioned_call.cpp +++ b/src/frontends/tensorflow/src/op/partitioned_call.cpp @@ -4,7 +4,7 @@ #include "common_op_table.hpp" #include "input_model.hpp" -#include "openvino/opsets/opset10.hpp" +#include "tf_utils.hpp" using namespace std; using namespace ov; @@ -18,7 +18,7 @@ OutputVector translate_partitioned_call_op(const NodeContext& node) { auto node_name = node.get_name(); auto translate_session = node.get_translate_session(); FRONT_END_GENERAL_CHECK(translate_session, "[TensorFlow Frontend] Internal error: Translate session is nullptr."); - auto operation_type = node.get_attribute("f"); + auto operation_type = node.get_attribute("f"); // prepare a vector of inputs OutputVector ov_inputs; @@ -33,9 +33,8 @@ OutputVector translate_partitioned_call_op(const NodeContext& node) { // of StatefulPartitionedCall. And because otherwise they will cause a duplicates. But we need to keep them // for "internal functions of Saved Model", which are named "__inference_signature_wrapper" or // "__inference_wrapped_model". 
- auto body_model = translate_session->get_body_ov_model(operation_type, - ov_inputs, - operation_type.find("wrappe") == std::string::npos); + auto body_model = + translate_session->get_body_ov_model(operation_type, ov_inputs, operation_type.find("wrappe") == string::npos); FRONT_END_OP_CONVERSION_CHECK( body_model, "[TensorFlow Frontend] Internal error or incorrect input model: body graph is not found for " + operation_type + @@ -43,11 +42,11 @@ OutputVector translate_partitioned_call_op(const NodeContext& node) { // inject the body graph into the parent graph OutputVector ov_outputs; - translate_session->inject_body_model(body_model, operation_type, ov_inputs, ov_outputs); + inject_body_model(body_model, operation_type, ov_inputs, ov_outputs); // set output tensor names for (size_t idx = 0; idx < ov_outputs.size(); ++idx) { - set_out_name({node_name + ":" + std::to_string(idx)}, ov_outputs[idx]); + set_out_name({node_name + ":" + to_string(idx)}, ov_outputs[idx]); } return ov_outputs; diff --git a/src/frontends/tensorflow/src/op/while.cpp b/src/frontends/tensorflow/src/op/while.cpp index 7bffea10a780cf..fa414b44550417 100644 --- a/src/frontends/tensorflow/src/op/while.cpp +++ b/src/frontends/tensorflow/src/op/while.cpp @@ -4,11 +4,10 @@ #include "common_op_table.hpp" #include "input_model.hpp" -#include "openvino/opsets/opset10.hpp" +#include "tf_utils.hpp" using namespace std; using namespace ov; -using namespace ov::opset10; namespace ov { namespace frontend { @@ -21,7 +20,7 @@ OutputVector translate_while_op(const NodeContext& node) { auto input_size_t = node.get_input_size(); auto input_size = static_cast(input_size_t); - ov::OutputVector ov_inputs; + OutputVector ov_inputs; for (int input_ind = 0; input_ind < input_size; ++input_ind) { ov_inputs.push_back(node.get_input(input_ind)); } @@ -30,8 +29,8 @@ OutputVector translate_while_op(const NodeContext& node) { translate_session, "[TensorFlow Frontend] Internal error: Translate session is nullptr."); // retrieve condition and body graphs - auto cond_type = node.get_attribute("cond"); - auto body_type = node.get_attribute("body"); + auto cond_type = node.get_attribute("cond"); + auto body_type = node.get_attribute("body"); auto cond_model = translate_session->get_body_ov_model(cond_type, ov_inputs); TENSORFLOW_OP_VALIDATION( node, @@ -43,82 +42,7 @@ OutputVector translate_while_op(const NodeContext& node) { body_model, "[TensorFlow Frontend] Internal error or incorrect input model. 
Cannot find body graph with name " + body_type); - // inject condition body graph prior to Loop node - // to check condition before to start iterations - auto cond_params = cond_model->get_parameters(); - // type setting for body graph parameters is needed for TensorList support since DT_VARIANT type is present - // also for more accurate execution_condition variable shape deducing we need shape inference for condition graph - for (int input_ind = 0; input_ind < input_size; ++input_ind) { - cond_params[input_ind]->set_element_type(node.get_input(input_ind).get_element_type()); - cond_params[input_ind]->set_partial_shape(node.get_input(input_ind).get_partial_shape()); - } - cond_model->validate_nodes_and_infer_types(); - - auto cond_prior = cond_model->clone(); - ov::OutputVector ov_outputs; - translate_session->inject_body_model(cond_prior, node.get_name() + "/cond", ov_inputs, ov_outputs); - TENSORFLOW_OP_VALIDATION( - node, - ov_outputs.size() == 1, - "[TensorFlow Frontend] Internal error or inconsistent model: condition body must contain one Result node."); - auto exec_cond = ov_outputs[0]; - auto trip_count = make_shared(element::i32, Shape{}, -1); - auto loop = make_shared(trip_count, exec_cond); - - // prepare body model to be set for the Loop node - // note that condition should be computed on the updated input - // because this is while(cond) {} construction, - // that is why condition graph is stitched to the body results - auto body_params = body_model->get_parameters(); - auto body_results = body_model->get_results(); - auto cond_results = cond_model->get_results(); - auto cond_params_size = cond_params.size(); - TENSORFLOW_OP_VALIDATION(node, - body_params.size() == input_size_t, - "[TensorFlow Frontend] Internal error or inconsistent model: body graph " - " must have the same number of Parameter nodes as a number of inputs to While."); - TENSORFLOW_OP_VALIDATION(node, - body_results.size() == input_size_t, - "[TensorFlow Frontend] Internal error or inconsistent model: body graphs " - " must have the same number of Result nodes as a number of inputs to While."); - TENSORFLOW_OP_VALIDATION(node, - cond_params.size() == input_size_t, - "[TensorFlow Frontend] Internal error or inconsistent model: condition graph " - " must have the same number of Parameter nodes as a number of inputs to While."); - for (size_t param_ind = 0; param_ind < cond_params_size; ++param_ind) { - cond_params[param_ind]->output(0).replace(body_results[param_ind]->input_value(0)); - } - - // update body model with the new result that corresponds to execution condition - TENSORFLOW_OP_VALIDATION( - node, - cond_results.size() == 1 && cond_results[0], - "[TensorFlow Frontend] Internal error or inconsistent model: condition body must contain one Result node."); - auto body_condition_output_idx = static_cast(body_results.size()); - body_model->add_results(cond_results); - - // type setting for body graph parameters is needed for TensorList support since DT_VARIANT type is present - for (int input_ind = 0; input_ind < input_size; ++input_ind) { - body_params[input_ind]->set_element_type(node.get_input(input_ind).get_element_type()); - } - - // set data for the Loop node - loop->set_function(body_model); - - for (int input_ind = 0; input_ind < input_size; ++input_ind) { - loop->set_merged_input(body_params[input_ind], - node.get_input(input_ind), - body_results[input_ind]->input_value(0)); - } - loop->set_special_body_ports({-1, body_condition_output_idx}); - - // set external outputs for Loop node - // do not get 
execution condition outside of the Loop node - for (size_t output_ind = 0; output_ind < input_size_t; ++output_ind) { - loop->get_iter_value(body_results[output_ind]); - } - loop->validate_and_infer_types(); - + auto loop = create_loop_for_tf_while(node.get_name(), body_model, cond_model, ov_inputs); set_node_name(node.get_name(), loop); return loop->outputs(); } diff --git a/src/frontends/tensorflow/src/op_table.cpp b/src/frontends/tensorflow/src/op_table.cpp index db2456ec50f3fe..b1313ec07ba826 100644 --- a/src/frontends/tensorflow/src/op_table.cpp +++ b/src/frontends/tensorflow/src/op_table.cpp @@ -22,14 +22,18 @@ namespace op { TF_OP_CONVERTER(translate_assignvariable_op); TF_OP_CONVERTER(translate_block_lstm_op); +TF_OP_CONVERTER(translate_enter_op); +TF_OP_CONVERTER(translate_exit_op); TF_OP_CONVERTER(translate_fifo_queue_op); TF_OP_CONVERTER(translate_gru_block_cell_op); TF_OP_CONVERTER(translate_hash_table_op); TF_OP_CONVERTER(translate_if_op); TF_OP_CONVERTER(translate_iterator_get_next_op); TF_OP_CONVERTER(translate_iterator_op); +TF_OP_CONVERTER(translate_loop_cond_op); TF_OP_CONVERTER(translate_merge_op); TF_OP_CONVERTER(translate_mergev2checkpoint_op); +TF_OP_CONVERTER(translate_next_iteration_op); TF_OP_CONVERTER(translate_partitioned_call_op); TF_OP_CONVERTER(translate_placeholder_linked_op); TF_OP_CONVERTER(translate_queue_dequeue_op); @@ -310,6 +314,12 @@ const std::map get_supported_ops() { // XLA operations {"XlaConvV2", CreatorFunction(translate_xla_conv_v2_op)}, {"XlaDotV2", CreatorFunction(translate_xla_dot_op)}, + + // TF1 Control Flow operations + {"Enter", CreatorFunction(translate_enter_op)}, + {"Exit", CreatorFunction(translate_exit_op)}, + {"LoopCond", CreatorFunction(translate_loop_cond_op)}, + {"NextIteration", CreatorFunction(translate_next_iteration_op)}, }; }; } // namespace op diff --git a/src/frontends/tensorflow/src/tf_utils.cpp b/src/frontends/tensorflow/src/tf_utils.cpp index ba4bce049a0c22..c72e8e7bb9080a 100644 --- a/src/frontends/tensorflow/src/tf_utils.cpp +++ b/src/frontends/tensorflow/src/tf_utils.cpp @@ -12,9 +12,12 @@ #include "helper_ops/switch.hpp" #include "openvino/core/type/element_type.hpp" #include "openvino/frontend/exception.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/parameter.hpp" #include "openvino/runtime/tensor.hpp" using namespace ov; +using namespace ov::op; using namespace ov::element; using namespace ov::frontend::tensorflow; using namespace std; @@ -369,6 +372,112 @@ bool propagate_conditional_flow(const OutputVector& ov_inputs, return to_propagate; } +// create Loop operation corresponding to TensorFlow While operation +shared_ptr create_loop_for_tf_while(const std::string& while_node_name, + const shared_ptr& body_model, + const shared_ptr& cond_model, + const OutputVector& ov_inputs) { + size_t input_size = ov_inputs.size(); + // inject condition body graph prior to Loop node + // to check condition before to start iterations + auto cond_params = cond_model->get_parameters(); + FRONT_END_GENERAL_CHECK(input_size == cond_params.size(), + "[TensorFlow Frontend] internal error: mismatch number of inputs to While and a number of " + "inputs in a conditional graph"); + // type setting for body graph parameters is needed for TensorList support since DT_VARIANT type is present + // also for more accurate execution_condition variable shape deducing we need shape inference for condition graph + for (size_t input_ind = 0; input_ind < input_size; ++input_ind) { + 
cond_params[input_ind]->set_element_type(ov_inputs[input_ind].get_element_type()); + cond_params[input_ind]->set_partial_shape(ov_inputs[input_ind].get_partial_shape()); + } + cond_model->validate_nodes_and_infer_types(); + + auto cond_prior = cond_model->clone(); + ov::OutputVector ov_outputs; + inject_body_model(cond_prior, while_node_name + "/cond", ov_inputs, ov_outputs); + FRONT_END_GENERAL_CHECK( + ov_outputs.size() == 1, + "[TensorFlow Frontend] Internal error or inconsistent model: condition body must contain one Result node."); + auto exec_cond = ov_outputs[0]; + auto trip_count = make_shared(element::i32, Shape{}, -1); + auto loop = make_shared(trip_count, exec_cond); + + // prepare body model to be set for the Loop node + // note that condition should be computed on the updated input + // because this is while(cond) {} construction, + // that is why condition graph is stitched to the body results + auto body_params = body_model->get_parameters(); + auto body_results = body_model->get_results(); + auto cond_results = cond_model->get_results(); + FRONT_END_GENERAL_CHECK(body_params.size() == input_size, + "[TensorFlow Frontend] Internal error or inconsistent model: body graph " + " must have the same number of Parameter nodes as a number of inputs to While."); + FRONT_END_GENERAL_CHECK(cond_params.size() == input_size, + "[TensorFlow Frontend] Internal error or inconsistent model: condition graph " + " must have the same number of Parameter nodes as a number of inputs to While."); + for (size_t param_ind = 0; param_ind < body_results.size(); ++param_ind) { + cond_params[param_ind]->output(0).replace(body_results[param_ind]->input_value(0)); + } + + // update body model with the new result that corresponds to execution condition + FRONT_END_GENERAL_CHECK( + cond_results.size() == 1 && cond_results[0], + "[TensorFlow Frontend] Internal error or inconsistent model: condition body must contain one Result node."); + auto body_condition_output_idx = static_cast(body_results.size()); + body_model->add_results(cond_results); + + // type setting for body graph parameters is needed for TensorList support since DT_VARIANT type is present + for (size_t input_ind = 0; input_ind < input_size; ++input_ind) { + body_params[input_ind]->set_element_type(ov_inputs[input_ind].get_element_type()); + } + + // set data for the Loop node + loop->set_function(body_model); + + // body_results may contain less nodes than body_params that means back edge exists not for all body_params + for (size_t input_ind = 0; input_ind < static_cast(body_condition_output_idx); ++input_ind) { + loop->set_merged_input(body_params[input_ind], ov_inputs[input_ind], body_results[input_ind]->input_value(0)); + } + loop->set_special_body_ports({-1, body_condition_output_idx}); + + // set external outputs for Loop node + // do not get execution condition outside of the Loop node + for (size_t output_ind = 0; output_ind < static_cast(body_condition_output_idx); ++output_ind) { + loop->get_iter_value(body_results[output_ind]); + } + loop->validate_and_infer_types(); + return loop; +} + +void inject_body_model(std::shared_ptr ov_model_to_inject, + const std::string& operation_type, + const ov::OutputVector& ov_inputs, + ov::OutputVector& ov_outputs) { + ov_outputs.clear(); + auto body_parameters = ov_model_to_inject->get_parameters(); + FRONT_END_GENERAL_CHECK(body_parameters.size() == ov_inputs.size(), + "[TensorFlow Error] Internal error or incorrect input models: number of " + "inputs and arguments to the function " + + 
operation_type + " do not match."); + for (size_t param_ind = 0; param_ind < body_parameters.size(); ++param_ind) { + auto orig_type = body_parameters[param_ind]->get_element_type(); + // avoid not needed tensor names from body graph Parameter node after replacing + body_parameters[param_ind]->output(0).set_names({}); + body_parameters[param_ind]->output(0).replace(ov_inputs[param_ind]); + if (auto ext_parameter = as_type_ptr(ov_inputs[param_ind].get_node_shared_ptr())) { + // save type of a Parameter as converted in the body + // this is important if the external conversion extension is applied to body graph node + // with setting its own type + if (orig_type != element::dynamic) { + ext_parameter->set_element_type(orig_type); + } + } + } + for (const auto& result_node : ov_model_to_inject->get_results()) { + ov_outputs.push_back(result_node->input_value(0)); + } +} + } // namespace tensorflow } // namespace frontend } // namespace ov diff --git a/src/frontends/tensorflow/src/tf_utils.hpp b/src/frontends/tensorflow/src/tf_utils.hpp index a7a80a522b70a3..5de9029a816e6c 100644 --- a/src/frontends/tensorflow/src/tf_utils.hpp +++ b/src/frontends/tensorflow/src/tf_utils.hpp @@ -12,6 +12,7 @@ #include "openvino/core/type.hpp" #include "openvino/core/type/element_type.hpp" #include "openvino/frontend/node_context.hpp" +#include "openvino/op/loop.hpp" #include "openvino/runtime/tensor.hpp" #include "tensor.pb.h" #include "tensor_shape.pb.h" @@ -102,6 +103,18 @@ bool propagate_conditional_flow(const ov::OutputVector& ov_inputs, // copy existing markers from copy_from to copy_to marker void copy_conditional_flow_marker(const CfMarkerType& copy_from, CfMarkerType& copy_to); + +// create Loop operation corresponding to TensorFlow While operation +std::shared_ptr create_loop_for_tf_while(const std::string& while_node_name, + const std::shared_ptr& body_model, + const std::shared_ptr& cond_model, + const ov::OutputVector& ov_inputs); + +// inject a graph by given inputs and return outputs of the injected graph +void inject_body_model(std::shared_ptr ov_model_to_inject, + const std::string& operation_type, + const ov::OutputVector& ov_inputs, + ov::OutputVector& ov_outputs); } // namespace tensorflow } // namespace frontend } // namespace ov diff --git a/src/frontends/tensorflow/src/translate_session.cpp b/src/frontends/tensorflow/src/translate_session.cpp index 933bc0d900eba5..51f0addb7c3ff5 100644 --- a/src/frontends/tensorflow/src/translate_session.cpp +++ b/src/frontends/tensorflow/src/translate_session.cpp @@ -4,6 +4,11 @@ #include "translate_session.hpp" +#include "helper_ops/enter.hpp" +#include "helper_ops/loop_cond.hpp" +#include "helper_ops/merge.hpp" +#include "helper_ops/next_iteration.hpp" +#include "helper_ops/switch.hpp" #include "input_model.hpp" #include "openvino/op/util/framework_node.hpp" #include "openvino/opsets/opset10.hpp" @@ -154,6 +159,148 @@ size_t get_flat_index_by_name_and_id(const ov::frontend::NamedOutputVector& outp return idx; } } + +// create Parameter node that will produce given tensor +std::shared_ptr create_parameter_node_for_tensor(ov::Output output_tensor) { + auto param = + std::make_shared(output_tensor.get_element_type(), output_tensor.get_partial_shape()); + param->output(0).set_names(output_tensor.get_names()); + output_tensor.replace(param->output(0)); + return param; +} + +void fuse_loop_cond(std::shared_ptr& loop_cond, + OpMap& ov_tensors_map, + const std::vector>& enter_ops) { + // ov_tensors_map maps a operation name to a vector of its output tensors + 
+    auto node_name = loop_cond->get_friendly_name();
+    // find key points for condition and body graphs
+    FRONT_END_GENERAL_CHECK(loop_cond, "[TensorFlow Frontend] internal error: pointer to LoopCond node is nullptr");
+
+    // extract condition and body graphs
+    // scan LoopCond node vicinity
+    // 1. LoopCond has just one output
+    // walk through all consuming inputs that are expected to be only for Switch nodes
+    std::vector<std::shared_ptr<Switch>> switch_nodes;
+    for (const auto& consuming_input : loop_cond->get_output_target_inputs(0)) {
+        auto switch_node = ov::as_type_ptr<Switch>(consuming_input.get_node()->shared_from_this());
+        FRONT_END_GENERAL_CHECK(switch_node,
+                                "[TensorFlow Frontend] internal error or inconsistent model: consumer of LoopCond "
+                                "output is not Switch operation");
+        switch_nodes.push_back(switch_node);
+    }
+
+    // collect all output tensors for Loop
+    // the created Loop node outputs will be connected with ov_outputs
+    size_t num_inputs = switch_nodes.size();
+    FRONT_END_GENERAL_CHECK(num_inputs > 0,
+                            "[TensorFlow Frontend] internal error: LoopCond node has no output Switch nodes");
+    ov::OutputVector ov_outputs(num_inputs);
+    // collect ov_inputs (a list of Tensors) that will provide input data for the created Loop node
+    ov::OutputVector ov_inputs(num_inputs);
+    ov::ParameterVector cond_params(num_inputs);
+    ov::ParameterVector body_params(num_inputs);
+    ov::OutputVector ov_body_outputs(num_inputs);
+    std::vector<std::string> output_tensor_names(num_inputs);
+    std::set<std::shared_ptr<Enter>> met_enter_ops;
+    std::string frame_name;
+    for (size_t ind = 0; ind < num_inputs; ++ind) {
+        // Switch node has two outputs:
+        // 0 (output_false) - interrupt the loop, 1 (output_true) - continue the loop
+        // check if Exit node exists
+        auto switch_node = switch_nodes[ind];
+        FRONT_END_GENERAL_CHECK(
+            switch_node->get_output_target_inputs(0).size() < 2,
+            "[TensorFlow Frontend] internal error or inconsistent model: Switch node has more than one Exit nodes");
+        if (switch_node->get_output_target_inputs(0).size() == 1) {
+            auto exit_node = (*switch_node->get_output_target_inputs(0).begin()).get_node();
+            ov_outputs[ind] = exit_node->output(0);
+            output_tensor_names[ind] = exit_node->get_friendly_name() + ":0";
+        }
+
+        auto merge_node = ov::as_type_ptr<Merge>(switch_node->input_value(0).get_node_shared_ptr());
+        FRONT_END_GENERAL_CHECK(merge_node,
+                                "[TensorFlow Frontend] internal error or inconsistent model: Data for Switch node is "
+                                "not produced by Merge node for While operation");
+
+        // create Parameter node for condition graph
+        cond_params[ind] = create_parameter_node_for_tensor(merge_node->output(0));
+        body_params[ind] = create_parameter_node_for_tensor(switch_node->output(1));
+
+        // check that Merge node has Enter and NextIteration producers
+        auto enter = ov::as_type_ptr<Enter>(merge_node->input_value(0).get_node_shared_ptr());
+        auto next_iteration = ov::as_type_ptr<NextIteration>(merge_node->input_value(0).get_node_shared_ptr());
+        if (!enter) {
+            enter = ov::as_type_ptr<Enter>(merge_node->input_value(1).get_node_shared_ptr());
+        }
+        if (!next_iteration) {
+            next_iteration = ov::as_type_ptr<NextIteration>(merge_node->input_value(1).get_node_shared_ptr());
+        }
+        FRONT_END_GENERAL_CHECK(enter && next_iteration,
+                                "[TensorFlow Frontend] internal error or inconsistent model: inputs of Merge node in "
+                                "While sub-graph are not Enter and NextIteration");
+        ov_inputs[ind] = enter->input_value(0);
+        met_enter_ops.insert(enter);
+        frame_name = enter->get_frame_name();
+
+        // retrieve output tensor for body graph that is an input to NextIteration node
+        std::string producer_name;
size_t producer_output_port_idx; + next_iteration->get_producer(producer_name, producer_output_port_idx); + FRONT_END_GENERAL_CHECK( + ov_tensors_map.count(producer_name) > 0, + "[TensorFlow Frontend] internal error: NextIteration producer is not found in the tensor map"); + auto producer_outputs = ov_tensors_map.at(producer_name); + FRONT_END_GENERAL_CHECK( + producer_output_port_idx < producer_outputs.size(), + "[TensorFlow Frontend] internal error: NextIteration producer has insufficient number of outputs"); + auto ov_body_output = producer_outputs[producer_output_port_idx].port; + if (ov_body_output.get_node_shared_ptr() == switch_node) { + // this is case when NextIteration node is connected with Switch node + ov_body_outputs[ind] = body_params[ind]->output(0); + } else { + ov_body_outputs[ind] = ov_body_output; + } + } + auto ov_cond_output = loop_cond->input_values(); + + // insert additional inputs for future Loop node + for (auto& enter : enter_ops) { + if (met_enter_ops.find(enter) == met_enter_ops.end() && enter->get_frame_name() == frame_name) { + ov_inputs.push_back(enter->input_value(0)); + auto additional_param = create_parameter_node_for_tensor(enter->output(0)); + cond_params.push_back(additional_param); + body_params.push_back(additional_param); + } + } + + // create a copy of conditional graph + auto cond_model = std::make_shared(ov_cond_output, cond_params); + auto body_model = std::make_shared(ov_body_outputs, body_params); + + auto loop_node = create_loop_for_tf_while(node_name, body_model, cond_model, ov_inputs); + + auto loop_model = std::make_shared(loop_node->outputs()); + + size_t loop_node_output_size = loop_node->get_output_size(); + FRONT_END_GENERAL_CHECK(loop_node_output_size == num_inputs, + "[TensorFlow Frontend] internal error: the created Loop node to replace TF1 While has " + "unexpected number of outputs"); + for (size_t output_ind = 0; output_ind < loop_node_output_size; ++output_ind) { + auto producer_node = ov_outputs[output_ind].get_node_shared_ptr(); + if (producer_node) { + std::string producer_name = producer_node->get_friendly_name(); + size_t producer_output_port_idx = ov_outputs[output_ind].get_index(); + // work only for non-empty ov::Output + ov_outputs[output_ind].replace(loop_node->output(output_ind)); + ov_outputs[output_ind].set_names({output_tensor_names[output_ind]}); + if (ov_tensors_map.count(producer_name) && + producer_output_port_idx < ov_tensors_map.at(producer_name).size()) { + ov_tensors_map.at(producer_name)[producer_output_port_idx] = ov_outputs[output_ind]; + } + } + } +} } // namespace TranslateSession::TranslateSession(const ov::frontend::InputModel::Ptr& input_model, @@ -173,37 +320,12 @@ std::shared_ptr TranslateSession::get_converted_model() { return m_ov_model; } -void TranslateSession::inject_body_model(std::shared_ptr body_model, - const std::string& operation_type, - const ov::OutputVector& ov_inputs, - ov::OutputVector& ov_outputs) { - ov_outputs.clear(); - auto body_parameters = body_model->get_parameters(); - FRONT_END_GENERAL_CHECK(body_parameters.size() == ov_inputs.size(), - "[TensorFlow Error] Internal error or incorrect input models: number of " - "inputs and arguments to the function " + - operation_type + " do not match."); - for (size_t param_ind = 0; param_ind < body_parameters.size(); ++param_ind) { - auto orig_type = body_parameters[param_ind]->get_element_type(); - body_parameters[param_ind]->output(0).replace(ov_inputs[param_ind]); - if (auto ext_parameter = 
as_type_ptr(ov_inputs[param_ind].get_node_shared_ptr())) { - // save type of a Parameter as converted in the body - // this is important if the external conversion extension is applied to body graph node - // with setting its own type - if (orig_type != element::dynamic) { - ext_parameter->set_element_type(orig_type); - } - } - } - for (const auto& result_node : body_model->get_results()) { - ov_outputs.push_back(result_node->input_value(0)); - } -} - void TranslateSession::translate_graph(const ov::frontend::InputModel::Ptr& input_model, std::shared_ptr& ov_model) { OpMap ng_op_map; ControlDepsMap control_deps_map; + std::vector> loop_cond_ops; + std::vector> enter_ops; ov::ParameterVector params; ov::ResultVector results; @@ -375,6 +497,19 @@ void TranslateSession::translate_graph(const ov::frontend::InputModel::Ptr& inpu ov_outputs = named_from_indexed(fw_node->outputs()); } + // save LoopCond operations in topological order for further fusing + if (ov_outputs.size() == 1 && as_type_ptr(ov_outputs[0].port.get_node_shared_ptr())) { + loop_cond_ops.push_back(as_type_ptr(ov_outputs[0].port.get_node_shared_ptr())); + } else if (ov_outputs.size() == 1 && as_type_ptr(ov_outputs[0].port.get_node_shared_ptr())) { + enter_ops.push_back(as_type_ptr(ov_outputs[0].port.get_node_shared_ptr())); + } else if (ov_outputs.size() == 1 && as_type_ptr(ov_outputs[0].port.get_node_shared_ptr())) { + std::string producer_name; + size_t producer_output_port_idx; + operation_place->get_next_iteration_back_edge(producer_name, producer_output_port_idx); + auto next_iteration = as_type_ptr(ov_outputs[0].port.get_node_shared_ptr()); + next_iteration->set_producer(producer_name, producer_output_port_idx); + } + // create input control dependencies set for the current operation node std::set> input_control_deps; for (const auto& control_dep_name : control_dependencies_names) { @@ -526,6 +661,14 @@ void TranslateSession::translate_graph(const ov::frontend::InputModel::Ptr& inpu ov::ParameterVector ordered_params = reorder_ops_by_names(input_names, params); ov::ResultVector ordered_results = reorder_ops_by_names(output_names, results); + // before adding Result nodes to terminal nodes + // it fuses TF1 Control flow based While operation to Loop operation + // it needs to perform this in the reverse order + std::reverse(loop_cond_ops.begin(), loop_cond_ops.end()); + for (auto& loop_cond_op : loop_cond_ops) { + fuse_loop_cond(loop_cond_op, ng_op_map, enter_ops); + } + ov_model = std::make_shared(ordered_results, ordered_params, m_model_name); } diff --git a/src/frontends/tensorflow/src/translate_session.hpp b/src/frontends/tensorflow/src/translate_session.hpp index 1bf7627a9ee8a1..9415b19a6e01dc 100644 --- a/src/frontends/tensorflow/src/translate_session.hpp +++ b/src/frontends/tensorflow/src/translate_session.hpp @@ -42,11 +42,6 @@ class TranslateSession { void translate_graph(const ov::frontend::InputModel::Ptr& input_model, std::shared_ptr& ov_model); - void inject_body_model(std::shared_ptr body_model, - const std::string& operation_type, - const ov::OutputVector& ov_inputs, - ov::OutputVector& ov_outputs); - std::shared_ptr get_body_ov_model(const std::string& body_graph_name, const ov::OutputVector& ov_inputs, bool clear_names = true); diff --git a/src/frontends/tensorflow/tests/convert_unsupported.cpp b/src/frontends/tensorflow/tests/convert_unsupported.cpp index 21d350cc2da00e..7d9a83045b5606 100644 --- a/src/frontends/tensorflow/tests/convert_unsupported.cpp +++ 
b/src/frontends/tensorflow/tests/convert_unsupported.cpp @@ -115,33 +115,6 @@ TEST(FrontEndConvertModelTest, test_unsupported_op) { ASSERT_NO_THROW(frontEnd->convert(model)); } -TEST(FrontEndConvertModelTest, test_unsupported_tf1_while) { - FrontEndManager fem; - FrontEnd::Ptr frontEnd; - InputModel::Ptr inputModel; - ASSERT_NO_THROW(frontEnd = fem.load_by_framework(TF_FE)); - ASSERT_NE(frontEnd, nullptr); - auto model_filename = FrontEndTestUtils::make_model_path(string(TEST_TENSORFLOW_MODELS_DIRNAME) + - string("model_tf1_while/model_tf1_while.pbtxt")); - ASSERT_NO_THROW(inputModel = frontEnd->load(model_filename)); - ASSERT_NE(inputModel, nullptr); - shared_ptr model; - - try { - model = frontEnd->convert(inputModel); - FAIL() << "TensorFlow 1 While is not supported in TF FE but conversion passed without errors. " - "OpConversionFailure is expected."; - } catch (const OpConversionFailure& error) { - string error_message = error.what(); - string ref_message = "[TensorFlow Frontend] Internal error, no translator found for operation(s): Enter, Exit, " - "LoopCond, Merge, NextIteration, Switch"; - ASSERT_TRUE(error_message.find(ref_message) != string::npos); - ASSERT_EQ(model, nullptr); - } catch (...) { - FAIL() << "Conversion of TensorFlow 1 While failed by wrong reason."; - } -} - TEST_F(FrontEndConversionWithReferenceTestsF, ModelWithDynamicType) { { model = convert_model_partially("dynamic_type_model/dynamic_type_model.pb"); } { @@ -169,11 +142,12 @@ TEST(FrontEndConvertModelTest, test_unsupported_tf1_while_and_incorrect_less_tra "OpConversionFailure is expected."; } catch (const OpConversionFailure& error) { string error_message = error.what(); - string ref_message = "Less expects ten inputs.\n" - "\n" - "[TensorFlow Frontend] Internal error, no translator found for operation(s): Enter, Exit, " - "LoopCond, Merge, NextIteration, Switch"; + string ref_message = "Less expects ten inputs.\n"; + string not_found_message = + "[TensorFlow Frontend] Internal error, no translator found for operation(s): Enter, Exit, " + "LoopCond, Merge, NextIteration, Switch"; ASSERT_TRUE(error_message.find(ref_message) != string::npos); + ASSERT_TRUE(error_message.find(not_found_message) == string::npos); ASSERT_EQ(model, nullptr); } catch (...) { FAIL() << "Conversion of TensorFlow 1 While failed by wrong reason."; @@ -191,14 +165,12 @@ TEST(FrontEndConvertModelTest, conversion_with_unknown_exception) { "OpConversionFailure is expected."; } catch (const OpConversionFailure& error) { string error_message = error.what(); - string ref_message = "Unknown exception type\n" - "[TensorFlow Frontend] Internal error, no translator found for operation(s): Enter, Exit, " - "LoopCond, Merge, NextIteration, Switch"; + string ref_message = "Unknown exception type\n"; string doc_message = "To facilitate the conversion of unsupported operations, refer to Frontend Extension documentation: " "https://docs.openvino.ai/latest/openvino_docs_Extensibility_UG_Frontend_Extensions.html"; ASSERT_TRUE(error_message.find(ref_message) != string::npos); - ASSERT_TRUE(error_message.find(doc_message) != string::npos); + ASSERT_TRUE(error_message.find(doc_message) == string::npos); ASSERT_EQ(model, nullptr); } catch (...) 
{ FAIL() << "Conversion of TensorFlow 1 While failed by wrong reason."; diff --git a/src/frontends/tensorflow_common/include/helper_ops/enter.hpp b/src/frontends/tensorflow_common/include/helper_ops/enter.hpp new file mode 100644 index 00000000000000..12beaaff8e6de5 --- /dev/null +++ b/src/frontends/tensorflow_common/include/helper_ops/enter.hpp @@ -0,0 +1,47 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +#include "internal_operation.hpp" +#include "tf_utils.hpp" + +namespace ov { +namespace frontend { +namespace tensorflow { + +// Internal operation for Enter that marks entry point for data going to Loop in the graph +// It is used along with Exit operation +class Enter : public InternalOperation { +public: + OPENVINO_OP("Enter", "ov::frontend::tensorflow", InternalOperation); + + Enter(const Output& data, + const std::string frame_name, + const std::shared_ptr& decoder = std::make_shared()) + : InternalOperation(decoder, OutputVector{data}, 1, "Enter"), + m_frame_name(frame_name) { + validate_and_infer_types(); + } + + void validate_and_infer_types() override { + auto data_type = get_input_element_type(0); + auto data_shape = get_input_partial_shape(0); + + set_output_type(0, data_type, data_shape); + } + + std::string get_frame_name() const { + return m_frame_name; + } + +private: + std::string m_frame_name; +}; + +} // namespace tensorflow +} // namespace frontend +} // namespace ov diff --git a/src/frontends/tensorflow_common/include/helper_ops/exit.hpp b/src/frontends/tensorflow_common/include/helper_ops/exit.hpp new file mode 100644 index 00000000000000..879a780d03008e --- /dev/null +++ b/src/frontends/tensorflow_common/include/helper_ops/exit.hpp @@ -0,0 +1,37 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +#include "internal_operation.hpp" +#include "tf_utils.hpp" + +namespace ov { +namespace frontend { +namespace tensorflow { + +// Internal operation for Exit that marks exit point for data going from Loop in the graph +// It is used along with Enter operation +class Exit : public InternalOperation { +public: + OPENVINO_OP("Exit", "ov::frontend::tensorflow", InternalOperation); + + Exit(const Output& data, const std::shared_ptr& decoder = std::make_shared()) + : InternalOperation(decoder, OutputVector{data}, 1, "Exit") { + validate_and_infer_types(); + } + + void validate_and_infer_types() override { + auto data_type = get_input_element_type(0); + auto data_shape = get_input_partial_shape(0); + + set_output_type(0, data_type, data_shape); + } +}; + +} // namespace tensorflow +} // namespace frontend +} // namespace ov diff --git a/src/frontends/tensorflow_common/include/helper_ops/loop_cond.hpp b/src/frontends/tensorflow_common/include/helper_ops/loop_cond.hpp new file mode 100644 index 00000000000000..c889fbd74b193f --- /dev/null +++ b/src/frontends/tensorflow_common/include/helper_ops/loop_cond.hpp @@ -0,0 +1,37 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +#include "internal_operation.hpp" +#include "tf_utils.hpp" + +namespace ov { +namespace frontend { +namespace tensorflow { + +// Internal operation for Loop that represents the loop termination condition +// by the pivot switches of a loop +class LoopCond : public InternalOperation { +public: + OPENVINO_OP("LoopCond", "ov::frontend::tensorflow", InternalOperation); + + LoopCond(const Output& input, 
const std::shared_ptr& decoder = std::make_shared()) + : InternalOperation(decoder, OutputVector{input}, 1, "LoopCond") { + validate_and_infer_types(); + } + + void validate_and_infer_types() override { + auto data_type = get_input_element_type(0); + auto data_shape = get_input_partial_shape(0); + + set_output_type(0, data_type, data_shape); + } +}; + +} // namespace tensorflow +} // namespace frontend +} // namespace ov diff --git a/src/frontends/tensorflow_common/include/helper_ops/next_iteration.hpp b/src/frontends/tensorflow_common/include/helper_ops/next_iteration.hpp new file mode 100644 index 00000000000000..eb262b4307af7f --- /dev/null +++ b/src/frontends/tensorflow_common/include/helper_ops/next_iteration.hpp @@ -0,0 +1,54 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +#include "internal_operation.hpp" +#include "merge.hpp" +#include "tf_utils.hpp" + +namespace ov { +namespace frontend { +namespace tensorflow { + +// Internal operation for NextIteration that makes its input available to the next iteration +// the output is going to Merge node. +class NextIteration : public InternalOperation { +public: + OPENVINO_OP("NextIteration", "ov::frontend::tensorflow", InternalOperation); + + NextIteration(const std::shared_ptr& decoder = std::make_shared()) + : InternalOperation(decoder, OutputVector{}, 1, "NextIteration"), + m_back_edge_set(false) { + validate_and_infer_types(); + } + + void validate_and_infer_types() override { + set_output_type(0, ov::element::dynamic, ov::PartialShape::dynamic()); + } + + void set_producer(const std::string& producer_name, size_t producer_output_port_idx) { + m_producer_name = producer_name; + m_producer_output_port_idx = producer_output_port_idx; + m_back_edge_set = true; + } + + void get_producer(std::string& producer_name, size_t& producer_output_port_idx) const { + FRONT_END_GENERAL_CHECK(m_back_edge_set, + "[TensorFlow Frontend] internal error: back edge for NextIteration is not set"); + producer_name = m_producer_name; + producer_output_port_idx = m_producer_output_port_idx; + } + +private: + bool m_back_edge_set; + std::string m_producer_name; + size_t m_producer_output_port_idx; +}; + +} // namespace tensorflow +} // namespace frontend +} // namespace ov diff --git a/src/frontends/tensorflow_common/include/place.hpp b/src/frontends/tensorflow_common/include/place.hpp index ba946e59466a48..0559f53c6c10bf 100644 --- a/src/frontends/tensorflow_common/include/place.hpp +++ b/src/frontends/tensorflow_common/include/place.hpp @@ -131,10 +131,22 @@ class OpPlace : public Place { Ptr get_target_tensor() const override; Ptr get_target_tensor(int outputPortIndex) const override; + // set back edge for OpPlace of NextIteration operation + // this is needed since we break a cycle in a graph + void set_next_iteration_back_edge(const std::string& next_iteration_producer_name, + size_t next_iteration_producer_output_port_idx); + void get_next_iteration_back_edge(std::string& next_iteration_producer_name, + size_t& next_iteration_producer_output_port_idx) const; + private: std::shared_ptr m_op_decoder; std::map>> m_input_ports; std::vector> m_output_ports; + + // flag if back edge is set + bool m_back_edge_set; + std::string m_next_iteration_producer_name; + size_t m_next_iteration_producer_output_port_idx; }; class TensorPlace : public Place { diff --git a/src/frontends/tensorflow_common/include/tf_framework_node.hpp b/src/frontends/tensorflow_common/include/tf_framework_node.hpp 
index cffba769751a7d..fa2e949706dad0 100644 --- a/src/frontends/tensorflow_common/include/tf_framework_node.hpp +++ b/src/frontends/tensorflow_common/include/tf_framework_node.hpp @@ -35,7 +35,9 @@ class FrameworkNode : public ov::op::util::FrameworkNode { } std::shared_ptr clone_with_new_inputs(const OutputVector& inputs) const override { - return std::make_shared(m_decoder, inputs, get_output_size()); + auto fw_node = std::make_shared(m_decoder, inputs, get_output_size()); + fw_node->set_attrs(get_attrs()); + return fw_node; } std::string get_op_type() const { diff --git a/src/frontends/tensorflow_common/src/place.cpp b/src/frontends/tensorflow_common/src/place.cpp index c1fddd0fc4c885..91dfb2a2cc1619 100644 --- a/src/frontends/tensorflow_common/src/place.cpp +++ b/src/frontends/tensorflow_common/src/place.cpp @@ -29,7 +29,23 @@ bool Place::is_output() const { OpPlace::OpPlace(const ov::frontend::InputModel& input_model, std::shared_ptr op_decoder) : Place(input_model, {op_decoder->get_op_name()}), - m_op_decoder(op_decoder) {} + m_op_decoder(op_decoder), + m_back_edge_set(false) {} + +void OpPlace::set_next_iteration_back_edge(const std::string& next_iteration_producer_name, + size_t next_iteration_producer_output_port_idx) { + m_next_iteration_producer_name = next_iteration_producer_name; + m_next_iteration_producer_output_port_idx = next_iteration_producer_output_port_idx; + m_back_edge_set = true; +} + +void OpPlace::get_next_iteration_back_edge(std::string& next_iteration_producer_name, + size_t& next_iteration_producer_output_port_idx) const { + FRONT_END_GENERAL_CHECK(m_back_edge_set, + "[TensorFlow Frontend] internal error: back edge for NextIteration is not set"); + next_iteration_producer_name = m_next_iteration_producer_name; + next_iteration_producer_output_port_idx = m_next_iteration_producer_output_port_idx; +} const std::vector>& OpPlace::get_output_ports() const { return m_output_ports; diff --git a/tests/layer_tests/tensorflow_tests/test_tf_While.py b/tests/layer_tests/tensorflow_tests/test_tf_While.py index 6ca7fc5c195fcf..3a6f9b7f291029 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_While.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_While.py @@ -49,8 +49,7 @@ def body(x, y): test_data_basic = [ dict(y_shape=[2, 3], data_type=np.int32, lower_control_flow=False), dict(y_shape=[2, 1, 4], data_type=np.int32, lower_control_flow=False), - pytest.param(dict(y_shape=[2, 1, 4], data_type=np.int32, lower_control_flow=True), - marks=pytest.mark.xfail(reason="105670")) + dict(y_shape=[2, 1, 4], data_type=np.int32, lower_control_flow=True) ] @pytest.mark.parametrize("params", test_data_basic) @@ -108,8 +107,7 @@ def body(x, y): test_data_basic = [ dict(y_shape=[2, 3], lower_control_flow=False), dict(y_shape=[2, 1, 4], lower_control_flow=False), - pytest.param(dict(y_shape=[2, 1, 4], lower_control_flow=True), - marks=pytest.mark.xfail(reason="105670")) + dict(y_shape=[2, 1, 4], lower_control_flow=True) ] @pytest.mark.parametrize("params", test_data_basic) diff --git a/tools/mo/openvino/tools/mo/convert_impl.py b/tools/mo/openvino/tools/mo/convert_impl.py index d0d2cd10f77846..ae6c39a144b0a3 100644 --- a/tools/mo/openvino/tools/mo/convert_impl.py +++ b/tools/mo/openvino/tools/mo/convert_impl.py @@ -312,9 +312,8 @@ def update_fallback_with_conversion_error(use_new_frontend: bool, is_tf: bool, e conversion_error_re = r"^(\[TensorFlow\ Frontend\]\ Internal\ error\,\ no\ translator\ found\ for\ operation\(s\)\:\ )((\w+)(\,\ \w+)*)$" conversion_error_match = 
re.findall(conversion_error_re, ex_msg, re.MULTILINE) all_fallback_operations = [ - # corresponds to TF1 While operation + # corresponds to TF1 TensorList operation "TensorArrayScatterV3", "TensorArrayV3", "TensorArraySizeV3", "TensorArrayGatherV3", - "LoopCond", "Enter", "NextIteration", "Exit", # corresponds to operations with complex tensors "FFT", "FFT2D", "FFT3D", "IFFT", "IFFT2D", "IFFT3D", "RFFT", "RFFT2D", "RFFT3D", "IRFFT", "IRFFT2D", "IRFFT3D", diff --git a/tools/mo/unit_tests/moc_tf_fe/conversion_basic_models_test.py b/tools/mo/unit_tests/moc_tf_fe/conversion_basic_models_test.py index 4ceb2fc3c15c34..3e5c46ecb3ac90 100644 --- a/tools/mo/unit_tests/moc_tf_fe/conversion_basic_models_test.py +++ b/tools/mo/unit_tests/moc_tf_fe/conversion_basic_models_test.py @@ -243,8 +243,7 @@ def test_conversion_failure_fallback_default(self): def test_conversion_failure_fallback_use_new_frontend(self): with self.assertRaisesRegex(Exception, "\[TensorFlow Frontend\] Internal error, no translator found for operation\(s\)\: " - "Enter\, Exit\, LoopCond\, Merge\, NextIteration\, Switch\, TensorArrayGatherV3\, " - "TensorArraySizeV3\, TensorArrayV3"): + "TensorArrayGatherV3\, TensorArrayReadV3\, TensorArraySizeV3\, TensorArrayV3\, TensorArrayWriteV3"): self.basic("ctc_model_based.pbtxt", None, None, None, None, None, None, True, True, True, False) From 641743efb18d7b0f436ebb1158cbe7450138b9c2 Mon Sep 17 00:00:00 2001 From: Sebastian Golebiewski Date: Mon, 2 Oct 2023 09:22:40 +0200 Subject: [PATCH 004/257] Changing file structure of Attributes of Low Precision Transformations section (#20170) --- .../lpt_attributes}/avg_pool_precision_preserved.md | 0 .../lpt_attributes}/intervals_alignment.md | 0 .../lpt_attributes}/precision_preserved.md | 0 .../low_precision_transformations/lpt_attributes}/precisions.md | 0 .../lpt_attributes}/quantization_alignment.md | 0 .../lpt_attributes}/quantization_granularity.md | 0 6 files changed, 0 insertions(+), 0 deletions(-) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/attributes => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/lpt_attributes}/avg_pool_precision_preserved.md (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/attributes => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/lpt_attributes}/intervals_alignment.md (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/attributes => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/lpt_attributes}/precision_preserved.md (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/attributes => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/lpt_attributes}/precisions.md (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/attributes => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/lpt_attributes}/quantization_alignment.md (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/attributes => 
articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/lpt_attributes}/quantization_granularity.md (100%) diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/attributes/avg_pool_precision_preserved.md b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/lpt_attributes/avg_pool_precision_preserved.md similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/attributes/avg_pool_precision_preserved.md rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/lpt_attributes/avg_pool_precision_preserved.md diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/attributes/intervals_alignment.md b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/lpt_attributes/intervals_alignment.md similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/attributes/intervals_alignment.md rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/lpt_attributes/intervals_alignment.md diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/attributes/precision_preserved.md b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/lpt_attributes/precision_preserved.md similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/attributes/precision_preserved.md rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/lpt_attributes/precision_preserved.md diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/attributes/precisions.md b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/lpt_attributes/precisions.md similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/attributes/precisions.md rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/lpt_attributes/precisions.md diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/attributes/quantization_alignment.md b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/lpt_attributes/quantization_alignment.md similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/attributes/quantization_alignment.md rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/lpt_attributes/quantization_alignment.md diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/attributes/quantization_granularity.md b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/lpt_attributes/quantization_granularity.md similarity index 100% 
rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/attributes/quantization_granularity.md rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/lpt_attributes/quantization_granularity.md From 319954e19de3a3f010775c601c342330b669255b Mon Sep 17 00:00:00 2001 From: Sebastian Golebiewski Date: Mon, 2 Oct 2023 10:19:22 +0200 Subject: [PATCH 005/257] Changing file structure of Main Transformations (#20174) --- .../low_precision_transformations}/step3_main/activation/clamp.md | 0 .../low_precision_transformations}/step3_main/activation/prelu.md | 0 .../low_precision_transformations}/step3_main/activation/relu.md | 0 .../low_precision_transformations}/step3_main/arithmetic/add.md | 0 .../step3_main/arithmetic/img/add.common.png | 0 .../step3_main/arithmetic/img/add.common.svg | 0 .../step3_main/arithmetic/img/add.transformed.png | 0 .../step3_main/arithmetic/img/add.transformed.svg | 0 .../step3_main/arithmetic/multiply.md | 0 .../step3_main/arithmetic/subtract.md | 0 .../step3_main/convolution/convolution.md | 0 .../step3_main/convolution/convolution_backprop_data.md | 0 .../step3_main/convolution/group_convolution.md | 0 .../step3_main/convolution/img/fq_and_convolution.common.png | 0 .../step3_main/convolution/img/fq_and_convolution.common.svg | 0 .../step3_main/convolution/img/fq_and_convolution.transformed.png | 0 .../step3_main/convolution/img/fq_and_convolution.transformed.svg | 0 .../step3_main/convolution/img/fq_fq_and_convolution.common.png | 0 .../step3_main/convolution/img/fq_fq_and_convolution.common.svg | 0 .../step3_main/image/interpolate.md | 0 .../low_precision_transformations}/step3_main/matrix/mat_mul.md | 0 .../low_precision_transformations}/step3_main/movement/concat.md | 0 .../step3_main/movement/depth_to_space.md | 0 .../low_precision_transformations}/step3_main/movement/gather.md | 0 .../low_precision_transformations}/step3_main/movement/pad.md | 0 .../step3_main/movement/shuffle_channels.md | 0 .../low_precision_transformations}/step3_main/movement/split.md | 0 .../step3_main/movement/strided_slice.md | 0 .../step3_main/movement/transpose.md | 0 .../step3_main/movement/variadic_split.md | 0 .../step3_main/normalization/mvn.md | 0 .../step3_main/normalization/normalize_l2.md | 0 .../low_precision_transformations}/step3_main/pooling/avg_pool.md | 0 .../low_precision_transformations}/step3_main/pooling/max_pool.md | 0 .../step3_main/quantization/fake_quantize.md | 0 .../step3_main/quantization/fold_fake_quantize.md | 0 .../step3_main/reduction/reduce_max.md | 0 .../step3_main/reduction/reduce_mean.md | 0 .../step3_main/reduction/reduce_min.md | 0 .../step3_main/reduction/reduce_sum.md | 0 .../step3_main/shape/batch_to_space.md | 0 .../low_precision_transformations}/step3_main/shape/reshape.md | 0 .../step3_main/shape/space_to_batch.md | 0 .../low_precision_transformations}/step3_main/shape/squeeze.md | 0 .../low_precision_transformations}/step3_main/shape/unsqueeze.md | 0 45 files changed, 0 insertions(+), 0 deletions(-) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step3_main/activation/clamp.md (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => 
articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step3_main/activation/prelu.md (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step3_main/activation/relu.md (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step3_main/arithmetic/add.md (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step3_main/arithmetic/img/add.common.png (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step3_main/arithmetic/img/add.common.svg (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step3_main/arithmetic/img/add.transformed.png (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step3_main/arithmetic/img/add.transformed.svg (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step3_main/arithmetic/multiply.md (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step3_main/arithmetic/subtract.md (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step3_main/convolution/convolution.md (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step3_main/convolution/convolution_backprop_data.md (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step3_main/convolution/group_convolution.md (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step3_main/convolution/img/fq_and_convolution.common.png (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => 
articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step3_main/convolution/img/fq_and_convolution.common.svg (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step3_main/convolution/img/fq_and_convolution.transformed.png (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step3_main/convolution/img/fq_and_convolution.transformed.svg (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step3_main/convolution/img/fq_fq_and_convolution.common.png (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step3_main/convolution/img/fq_fq_and_convolution.common.svg (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step3_main/image/interpolate.md (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step3_main/matrix/mat_mul.md (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step3_main/movement/concat.md (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step3_main/movement/depth_to_space.md (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step3_main/movement/gather.md (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step3_main/movement/pad.md (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step3_main/movement/shuffle_channels.md (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step3_main/movement/split.md (100%) rename 
docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step3_main/movement/strided_slice.md (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step3_main/movement/transpose.md (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step3_main/movement/variadic_split.md (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step3_main/normalization/mvn.md (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step3_main/normalization/normalize_l2.md (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step3_main/pooling/avg_pool.md (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step3_main/pooling/max_pool.md (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step3_main/quantization/fake_quantize.md (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step3_main/quantization/fold_fake_quantize.md (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step3_main/reduction/reduce_max.md (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step3_main/reduction/reduce_mean.md (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step3_main/reduction/reduce_min.md (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step3_main/reduction/reduce_sum.md (100%) rename 
docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step3_main/shape/batch_to_space.md (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step3_main/shape/reshape.md (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step3_main/shape/space_to_batch.md (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step3_main/shape/squeeze.md (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step3_main/shape/unsqueeze.md (100%) diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/activation/clamp.md b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/activation/clamp.md similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/activation/clamp.md rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/activation/clamp.md diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/activation/prelu.md b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/activation/prelu.md similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/activation/prelu.md rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/activation/prelu.md diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/activation/relu.md b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/activation/relu.md similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/activation/relu.md rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/activation/relu.md diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/arithmetic/add.md b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/arithmetic/add.md similarity index 100% rename from 
docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/arithmetic/add.md rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/arithmetic/add.md diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/arithmetic/img/add.common.png b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/arithmetic/img/add.common.png similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/arithmetic/img/add.common.png rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/arithmetic/img/add.common.png diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/arithmetic/img/add.common.svg b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/arithmetic/img/add.common.svg similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/arithmetic/img/add.common.svg rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/arithmetic/img/add.common.svg diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/arithmetic/img/add.transformed.png b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/arithmetic/img/add.transformed.png similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/arithmetic/img/add.transformed.png rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/arithmetic/img/add.transformed.png diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/arithmetic/img/add.transformed.svg b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/arithmetic/img/add.transformed.svg similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/arithmetic/img/add.transformed.svg rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/arithmetic/img/add.transformed.svg diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/arithmetic/multiply.md b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/arithmetic/multiply.md similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/arithmetic/multiply.md rename to 
docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/arithmetic/multiply.md diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/arithmetic/subtract.md b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/arithmetic/subtract.md similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/arithmetic/subtract.md rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/arithmetic/subtract.md diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/convolution/convolution.md b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/convolution/convolution.md similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/convolution/convolution.md rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/convolution/convolution.md diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/convolution/convolution_backprop_data.md b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/convolution/convolution_backprop_data.md similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/convolution/convolution_backprop_data.md rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/convolution/convolution_backprop_data.md diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/convolution/group_convolution.md b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/convolution/group_convolution.md similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/convolution/group_convolution.md rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/convolution/group_convolution.md diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/convolution/img/fq_and_convolution.common.png b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/convolution/img/fq_and_convolution.common.png similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/convolution/img/fq_and_convolution.common.png rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/convolution/img/fq_and_convolution.common.png diff --git 
a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/convolution/img/fq_and_convolution.common.svg b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/convolution/img/fq_and_convolution.common.svg similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/convolution/img/fq_and_convolution.common.svg rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/convolution/img/fq_and_convolution.common.svg diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/convolution/img/fq_and_convolution.transformed.png b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/convolution/img/fq_and_convolution.transformed.png similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/convolution/img/fq_and_convolution.transformed.png rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/convolution/img/fq_and_convolution.transformed.png diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/convolution/img/fq_and_convolution.transformed.svg b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/convolution/img/fq_and_convolution.transformed.svg similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/convolution/img/fq_and_convolution.transformed.svg rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/convolution/img/fq_and_convolution.transformed.svg diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/convolution/img/fq_fq_and_convolution.common.png b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/convolution/img/fq_fq_and_convolution.common.png similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/convolution/img/fq_fq_and_convolution.common.png rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/convolution/img/fq_fq_and_convolution.common.png diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/convolution/img/fq_fq_and_convolution.common.svg b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/convolution/img/fq_fq_and_convolution.common.svg similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/convolution/img/fq_fq_and_convolution.common.svg rename to 
docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/convolution/img/fq_fq_and_convolution.common.svg diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/image/interpolate.md b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/image/interpolate.md similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/image/interpolate.md rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/image/interpolate.md diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/matrix/mat_mul.md b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/matrix/mat_mul.md similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/matrix/mat_mul.md rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/matrix/mat_mul.md diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/movement/concat.md b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/movement/concat.md similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/movement/concat.md rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/movement/concat.md diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/movement/depth_to_space.md b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/movement/depth_to_space.md similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/movement/depth_to_space.md rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/movement/depth_to_space.md diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/movement/gather.md b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/movement/gather.md similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/movement/gather.md rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/movement/gather.md diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/movement/pad.md 
b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/movement/pad.md similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/movement/pad.md rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/movement/pad.md diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/movement/shuffle_channels.md b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/movement/shuffle_channels.md similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/movement/shuffle_channels.md rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/movement/shuffle_channels.md diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/movement/split.md b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/movement/split.md similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/movement/split.md rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/movement/split.md diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/movement/strided_slice.md b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/movement/strided_slice.md similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/movement/strided_slice.md rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/movement/strided_slice.md diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/movement/transpose.md b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/movement/transpose.md similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/movement/transpose.md rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/movement/transpose.md diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/movement/variadic_split.md b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/movement/variadic_split.md similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/movement/variadic_split.md rename to 
docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/movement/variadic_split.md diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/normalization/mvn.md b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/normalization/mvn.md similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/normalization/mvn.md rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/normalization/mvn.md diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/normalization/normalize_l2.md b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/normalization/normalize_l2.md similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/normalization/normalize_l2.md rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/normalization/normalize_l2.md diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/pooling/avg_pool.md b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/pooling/avg_pool.md similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/pooling/avg_pool.md rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/pooling/avg_pool.md diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/pooling/max_pool.md b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/pooling/max_pool.md similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/pooling/max_pool.md rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/pooling/max_pool.md diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/quantization/fake_quantize.md b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/quantization/fake_quantize.md similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/quantization/fake_quantize.md rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/quantization/fake_quantize.md diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/quantization/fold_fake_quantize.md 
b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/quantization/fold_fake_quantize.md similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/quantization/fold_fake_quantize.md rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/quantization/fold_fake_quantize.md diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/reduction/reduce_max.md b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/reduction/reduce_max.md similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/reduction/reduce_max.md rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/reduction/reduce_max.md diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/reduction/reduce_mean.md b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/reduction/reduce_mean.md similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/reduction/reduce_mean.md rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/reduction/reduce_mean.md diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/reduction/reduce_min.md b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/reduction/reduce_min.md similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/reduction/reduce_min.md rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/reduction/reduce_min.md diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/reduction/reduce_sum.md b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/reduction/reduce_sum.md similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/reduction/reduce_sum.md rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/reduction/reduce_sum.md diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/shape/batch_to_space.md b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/shape/batch_to_space.md similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/shape/batch_to_space.md rename 
to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/shape/batch_to_space.md diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/shape/reshape.md b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/shape/reshape.md similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/shape/reshape.md rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/shape/reshape.md diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/shape/space_to_batch.md b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/shape/space_to_batch.md similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/shape/space_to_batch.md rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/shape/space_to_batch.md diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/shape/squeeze.md b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/shape/squeeze.md similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/shape/squeeze.md rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/shape/squeeze.md diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/shape/unsqueeze.md b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/shape/unsqueeze.md similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step3_main/shape/unsqueeze.md rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/shape/unsqueeze.md From e4cd6c5c44a0bdaa08a40c0d6afbc79480c31b01 Mon Sep 17 00:00:00 2001 From: Sebastian Golebiewski Date: Mon, 2 Oct 2023 10:48:52 +0200 Subject: [PATCH 006/257] Changing file structure of Cleanup Transformations (#20176) --- .../step4_cleanup/eliminate_fake_quantize.md | 0 .../step4_cleanup/fake_quantize_decomposition.md | 0 .../low_precision_transformations}/step4_cleanup/fold_convert.md | 0 .../low_precision_transformations}/step4_cleanup/fuse_convert.md | 0 .../step4_cleanup/fuse_multiply_to_fake_quantize.md | 0 .../step4_cleanup/fuse_subtract_to_fake_quantize.md | 0 .../step4_cleanup/multiply_to_group_convolution.md | 0 7 files changed, 0 insertions(+), 0 deletions(-) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => 
articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step4_cleanup/eliminate_fake_quantize.md (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step4_cleanup/fake_quantize_decomposition.md (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step4_cleanup/fold_convert.md (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step4_cleanup/fuse_convert.md (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step4_cleanup/fuse_multiply_to_fake_quantize.md (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step4_cleanup/fuse_subtract_to_fake_quantize.md (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step4_cleanup/multiply_to_group_convolution.md (100%) diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step4_cleanup/eliminate_fake_quantize.md b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step4_cleanup/eliminate_fake_quantize.md similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step4_cleanup/eliminate_fake_quantize.md rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step4_cleanup/eliminate_fake_quantize.md diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step4_cleanup/fake_quantize_decomposition.md b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step4_cleanup/fake_quantize_decomposition.md similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step4_cleanup/fake_quantize_decomposition.md rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step4_cleanup/fake_quantize_decomposition.md diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step4_cleanup/fold_convert.md b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step4_cleanup/fold_convert.md similarity index 100% rename from 
docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step4_cleanup/fold_convert.md rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step4_cleanup/fold_convert.md diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step4_cleanup/fuse_convert.md b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step4_cleanup/fuse_convert.md similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step4_cleanup/fuse_convert.md rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step4_cleanup/fuse_convert.md diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step4_cleanup/fuse_multiply_to_fake_quantize.md b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step4_cleanup/fuse_multiply_to_fake_quantize.md similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step4_cleanup/fuse_multiply_to_fake_quantize.md rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step4_cleanup/fuse_multiply_to_fake_quantize.md diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step4_cleanup/fuse_subtract_to_fake_quantize.md b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step4_cleanup/fuse_subtract_to_fake_quantize.md similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step4_cleanup/fuse_subtract_to_fake_quantize.md rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step4_cleanup/fuse_subtract_to_fake_quantize.md diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step4_cleanup/multiply_to_group_convolution.md b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step4_cleanup/multiply_to_group_convolution.md similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step4_cleanup/multiply_to_group_convolution.md rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step4_cleanup/multiply_to_group_convolution.md From 172a50bb3c89ebce6a20c925cf53ff0d3afe0c24 Mon Sep 17 00:00:00 2001 From: Sebastian Golebiewski Date: Mon, 2 Oct 2023 10:49:13 +0200 Subject: [PATCH 007/257] Changing file structure of Plugin API Reference (#20177) --- .../openvino_plugin_library}/dev_api_references.md | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename docs/{IE_PLUGIN_DG => articles_en/documentation/openvino_extensibility/openvino_plugin_library}/dev_api_references.md (100%) diff --git a/docs/IE_PLUGIN_DG/dev_api_references.md 
b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/dev_api_references.md similarity index 100% rename from docs/IE_PLUGIN_DG/dev_api_references.md rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/dev_api_references.md From affceaa32bdc98a75e848b28f49095eaaa2e0b39 Mon Sep 17 00:00:00 2001 From: Sebastian Golebiewski Date: Mon, 2 Oct 2023 11:00:14 +0200 Subject: [PATCH 008/257] Changing file structure of Prerequisites and Markup Transformations section (#20173) --- .../step1_prerequisites/convert_subtract_constant.md | 0 .../step1_prerequisites/lin_op_sequence_fusion.md | 0 .../step1_prerequisites/pull_reshape_through_dequantization.md | 0 .../step1_prerequisites/pull_transpose_through_dequantization.md | 0 .../step2_markup/align_quantization_intervals.md | 0 .../step2_markup/align_quantization_parameters.md | 0 .../step2_markup/create_attribute.md | 0 .../step2_markup/create_precisions_dependent_attribute.md | 0 .../step2_markup/markup_avg_pool_precision_preserved.md | 0 .../low_precision_transformations}/step2_markup/markup_bias.md | 0 .../step2_markup/markup_can_be_quantized.md | 0 .../step2_markup/markup_per_tensor_quantization.md | 0 .../step2_markup/markup_precisions.md | 0 .../step2_markup/propagate_precisions.md | 0 .../step2_markup/propagate_shared_value.md | 0 .../step2_markup/propagate_through_precision_preserved.md | 0 .../step2_markup/propagate_to_input.md | 0 .../step2_markup/update_shared_precision_preserved.md | 0 18 files changed, 0 insertions(+), 0 deletions(-) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step1_prerequisites/convert_subtract_constant.md (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step1_prerequisites/lin_op_sequence_fusion.md (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step1_prerequisites/pull_reshape_through_dequantization.md (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step1_prerequisites/pull_transpose_through_dequantization.md (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step2_markup/align_quantization_intervals.md (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step2_markup/align_quantization_parameters.md (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step2_markup/create_attribute.md (100%) rename 
docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step2_markup/create_precisions_dependent_attribute.md (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step2_markup/markup_avg_pool_precision_preserved.md (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step2_markup/markup_bias.md (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step2_markup/markup_can_be_quantized.md (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step2_markup/markup_per_tensor_quantization.md (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step2_markup/markup_precisions.md (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step2_markup/propagate_precisions.md (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step2_markup/propagate_shared_value.md (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step2_markup/propagate_through_precision_preserved.md (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step2_markup/propagate_to_input.md (100%) rename docs/{IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations => articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations}/step2_markup/update_shared_precision_preserved.md (100%) diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step1_prerequisites/convert_subtract_constant.md b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step1_prerequisites/convert_subtract_constant.md similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step1_prerequisites/convert_subtract_constant.md rename to 
docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step1_prerequisites/convert_subtract_constant.md diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step1_prerequisites/lin_op_sequence_fusion.md b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step1_prerequisites/lin_op_sequence_fusion.md similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step1_prerequisites/lin_op_sequence_fusion.md rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step1_prerequisites/lin_op_sequence_fusion.md diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step1_prerequisites/pull_reshape_through_dequantization.md b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step1_prerequisites/pull_reshape_through_dequantization.md similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step1_prerequisites/pull_reshape_through_dequantization.md rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step1_prerequisites/pull_reshape_through_dequantization.md diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step1_prerequisites/pull_transpose_through_dequantization.md b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step1_prerequisites/pull_transpose_through_dequantization.md similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step1_prerequisites/pull_transpose_through_dequantization.md rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step1_prerequisites/pull_transpose_through_dequantization.md diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step2_markup/align_quantization_intervals.md b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step2_markup/align_quantization_intervals.md similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step2_markup/align_quantization_intervals.md rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step2_markup/align_quantization_intervals.md diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step2_markup/align_quantization_parameters.md b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step2_markup/align_quantization_parameters.md similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step2_markup/align_quantization_parameters.md rename to 
docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step2_markup/align_quantization_parameters.md diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step2_markup/create_attribute.md b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step2_markup/create_attribute.md similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step2_markup/create_attribute.md rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step2_markup/create_attribute.md diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step2_markup/create_precisions_dependent_attribute.md b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step2_markup/create_precisions_dependent_attribute.md similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step2_markup/create_precisions_dependent_attribute.md rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step2_markup/create_precisions_dependent_attribute.md diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step2_markup/markup_avg_pool_precision_preserved.md b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step2_markup/markup_avg_pool_precision_preserved.md similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step2_markup/markup_avg_pool_precision_preserved.md rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step2_markup/markup_avg_pool_precision_preserved.md diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step2_markup/markup_bias.md b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step2_markup/markup_bias.md similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step2_markup/markup_bias.md rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step2_markup/markup_bias.md diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step2_markup/markup_can_be_quantized.md b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step2_markup/markup_can_be_quantized.md similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step2_markup/markup_can_be_quantized.md rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step2_markup/markup_can_be_quantized.md diff --git 
a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step2_markup/markup_per_tensor_quantization.md b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step2_markup/markup_per_tensor_quantization.md similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step2_markup/markup_per_tensor_quantization.md rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step2_markup/markup_per_tensor_quantization.md diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step2_markup/markup_precisions.md b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step2_markup/markup_precisions.md similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step2_markup/markup_precisions.md rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step2_markup/markup_precisions.md diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step2_markup/propagate_precisions.md b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step2_markup/propagate_precisions.md similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step2_markup/propagate_precisions.md rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step2_markup/propagate_precisions.md diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step2_markup/propagate_shared_value.md b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step2_markup/propagate_shared_value.md similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step2_markup/propagate_shared_value.md rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step2_markup/propagate_shared_value.md diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step2_markup/propagate_through_precision_preserved.md b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step2_markup/propagate_through_precision_preserved.md similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step2_markup/propagate_through_precision_preserved.md rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step2_markup/propagate_through_precision_preserved.md diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step2_markup/propagate_to_input.md 
b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step2_markup/propagate_to_input.md similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step2_markup/propagate_to_input.md rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step2_markup/propagate_to_input.md diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step2_markup/update_shared_precision_preserved.md b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step2_markup/update_shared_precision_preserved.md similarity index 100% rename from docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/transformations/step2_markup/update_shared_precision_preserved.md rename to docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step2_markup/update_shared_precision_preserved.md From 7e709a0ecceb3348d399505abfe4c389a9e3ca6f Mon Sep 17 00:00:00 2001 From: Zlobin Vladimir Date: Mon, 2 Oct 2023 13:50:49 +0400 Subject: [PATCH 009/257] benchmark: remove deprecation notice (#20175) Python version didn't mark -api as deprecated --- samples/cpp/benchmark_app/README.md | 2 +- samples/cpp/benchmark_app/benchmark_app.hpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/samples/cpp/benchmark_app/README.md b/samples/cpp/benchmark_app/README.md index 76a1697aec447f..168d3b85ba898a 100644 --- a/samples/cpp/benchmark_app/README.md +++ b/samples/cpp/benchmark_app/README.md @@ -190,7 +190,7 @@ Running the application with the ``-h`` or ``--help`` option yields the followin -c Required for GPU custom kernels. Absolute path to an .xml file with the kernels description. -cache_dir Optional. Enables caching of loaded models to specified directory. List of devices which support caching is shown at the end of this message. -load_from_file Optional. Loads model from file directly without read_model. All CNNNetwork options (like re-shape) will be ignored - -api Optional (deprecated). Enable Sync/Async API. Default value is "async". + -api Optional. Enable Sync/Async API. Default value is "async". -nireq Optional. Number of infer requests. Default value is determined automatically for device. -nstreams Optional. Number of streams to use for inference on the CPU or GPU devices (for HETERO and MULTI device cases use format :, : or just ). Default value is determined automatically for a device.Please note that although the automatic selection usually provides a reasonable performance, it still may be non - optimal for some cases, especially for very small models. See sample's README for more details. Also, using nstreams>1 is inherently throughput-oriented option, while for the best-latency estimations the number of streams should be set to 1. -inference_only Optional. Measure only inference stage. Default option for static models. Dynamic models are measured in full mode which includes inputs setup stage, inference only mode available for them with single input data shape only. To enable full mode for static models pass "false" value to this argument: ex. "-inference_only=false". 
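Note on the help-text change above: the patch only drops the "(deprecated)" wording for -api; the flag still switches benchmark_app between synchronous and asynchronous inference. As a rough illustration (not part of the patch), the two modes map onto the public OpenVINO 2.0 C++ API roughly as follows; the model path and device name below are placeholders:

```cpp
// Illustrative sketch only: what benchmark_app's -api flag selects,
// expressed with the public OpenVINO 2.0 C++ runtime API.
#include <openvino/openvino.hpp>

int main() {
    ov::Core core;
    auto compiled = core.compile_model("model.xml", "CPU");  // placeholder model/device
    auto request = compiled.create_infer_request();

    // -api sync: one blocking inference per iteration.
    request.infer();

    // -api async (the default): enqueue work and wait for completion,
    // typically with several requests in flight (-nireq controls how many).
    request.start_async();
    request.wait();
    return 0;
}
```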
diff --git a/samples/cpp/benchmark_app/benchmark_app.hpp b/samples/cpp/benchmark_app/benchmark_app.hpp index e168445c61b0c4..6ae1ec072c3914 100644 --- a/samples/cpp/benchmark_app/benchmark_app.hpp +++ b/samples/cpp/benchmark_app/benchmark_app.hpp @@ -96,7 +96,7 @@ static const char layout_message[] = "For example, \"input1[NCHW],input2[NC]\" or \"[NCHW]\" in case of one input size."; /// @brief message for execution mode -static const char api_message[] = "Optional (deprecated). Enable Sync/Async API. Default value is \"async\"."; +static const char api_message[] = "Optional. Enable Sync/Async API. Default value is \"async\"."; /// @brief message for #streams for CPU inference static const char infer_num_streams_message[] = From 78ef7e85c97bc2a655c43fedffcc25ebeacba2ae Mon Sep 17 00:00:00 2001 From: Vitaliy Urusovskij Date: Mon, 2 Oct 2023 14:01:18 +0400 Subject: [PATCH 010/257] Return missed `return pmc;` (#20118) --- tests/memory_tests/src/memory_tests_helper/memory_counter.cpp | 1 + tests/stress_tests/common/utils.cpp | 1 + 2 files changed, 2 insertions(+) diff --git a/tests/memory_tests/src/memory_tests_helper/memory_counter.cpp b/tests/memory_tests/src/memory_tests_helper/memory_counter.cpp index b0e47f0048884b..a8e03e9b1dd0a7 100644 --- a/tests/memory_tests/src/memory_tests_helper/memory_counter.cpp +++ b/tests/memory_tests/src/memory_tests_helper/memory_counter.cpp @@ -29,6 +29,7 @@ namespace MemoryTest { pmc.cb = sizeof(PROCESS_MEMORY_COUNTERS); if (!GetProcessMemoryInfo(GetCurrentProcess(), &pmc, pmc.cb)) throw std::runtime_error("Can't get system memory values"); + return pmc; } size_t getVmSizeInKB() { diff --git a/tests/stress_tests/common/utils.cpp b/tests/stress_tests/common/utils.cpp index 2dbbbb05895b66..0f3bcbcdecbfc0 100644 --- a/tests/stress_tests/common/utils.cpp +++ b/tests/stress_tests/common/utils.cpp @@ -39,6 +39,7 @@ static PROCESS_MEMORY_COUNTERS getMemoryInfo() { pmc.cb = sizeof(PROCESS_MEMORY_COUNTERS); if (!GetProcessMemoryInfo(GetCurrentProcess(), &pmc, pmc.cb)) throw std::runtime_error("Can't get system memory values"); + return pmc; } size_t getVmSizeInKB() { From ea37126ea56071b5be7b2fd682efa4bada626172 Mon Sep 17 00:00:00 2001 From: Ilya Churaev Date: Mon, 2 Oct 2023 14:02:14 +0400 Subject: [PATCH 011/257] Removed ie:: namespace (#20172) --- src/core/src/runtime/blob_allocator.hpp | 9 ++-- src/inference/dev_api/remote_utils.hpp | 28 +++++++------ .../include/openvino/runtime/common.hpp | 3 -- src/inference/src/core.cpp | 2 +- src/inference/src/dev/converter_utils.cpp | 2 +- src/inference/src/dev/core_impl.cpp | 2 +- src/inference/src/dev/core_impl_ie.cpp | 2 +- src/inference/src/dev/make_tensor.cpp | 42 ++++++++++--------- src/inference/src/dev/plugin.cpp | 8 ++-- src/inference/src/dev/plugin.hpp | 2 +- src/inference/src/infer_request.cpp | 4 +- 11 files changed, 53 insertions(+), 51 deletions(-) diff --git a/src/core/src/runtime/blob_allocator.hpp b/src/core/src/runtime/blob_allocator.hpp index 12c4f8cc1eca2a..0513fdfa02d085 100644 --- a/src/core/src/runtime/blob_allocator.hpp +++ b/src/core/src/runtime/blob_allocator.hpp @@ -47,7 +47,7 @@ struct BlobAllocator : public IAllocator { namespace ov { struct BlobAllocator { - BlobAllocator() : _impl{std::make_shared()} {} + BlobAllocator() : _impl{std::make_shared()} {} void* allocate(const size_t bytes, const size_t alignment) { OPENVINO_ASSERT(alignment == alignof(max_align_t), @@ -70,14 +70,15 @@ struct BlobAllocator { bool is_equal(const BlobAllocator& other) const { if (other._impl == _impl) return true; 
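For reference, the Windows helper restored by the `return pmc;` change above reads roughly as follows. This is a sketch based on the hunks in that patch, not the full source files; falling off the end of a non-void function is undefined behavior, so before the fix callers received an uninitialized PROCESS_MEMORY_COUNTERS structure.

```cpp
// Sketch of the corrected helper from the `return pmc;` fix above.
// Windows-only; requires <psapi.h> and linking against Psapi.
#include <windows.h>
#include <psapi.h>
#include <stdexcept>

static PROCESS_MEMORY_COUNTERS getMemoryInfo() {
    PROCESS_MEMORY_COUNTERS pmc;
    pmc.cb = sizeof(PROCESS_MEMORY_COUNTERS);
    if (!GetProcessMemoryInfo(GetCurrentProcess(), &pmc, pmc.cb))
        throw std::runtime_error("Can't get system memory values");
    return pmc;  // the line this patch adds back
}
```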
- auto other_system_memory_allocator = dynamic_cast(other._impl.get()); - auto system_allocator = dynamic_cast(_impl.get()); + auto other_system_memory_allocator = + dynamic_cast(other._impl.get()); + auto system_allocator = dynamic_cast(_impl.get()); if (system_allocator != nullptr && other_system_memory_allocator != nullptr) return true; return false; } - std::shared_ptr _impl; + std::shared_ptr _impl; }; } // namespace ov IE_SUPPRESS_DEPRECATED_END diff --git a/src/inference/dev_api/remote_utils.hpp b/src/inference/dev_api/remote_utils.hpp index 6dc389981218fd..95dbeb48191b9a 100644 --- a/src/inference/dev_api/remote_utils.hpp +++ b/src/inference/dev_api/remote_utils.hpp @@ -14,7 +14,7 @@ namespace legacy_convert { INFERENCE_ENGINE_API_CPP(ov::SoPtr) convert_remote_context(const std::shared_ptr& context); -INFERENCE_ENGINE_API_CPP(ie::Blob*) get_hardware_blob(ie::Blob* blob); +INFERENCE_ENGINE_API_CPP(InferenceEngine::Blob*) get_hardware_blob(InferenceEngine::Blob* blob); class INFERENCE_ENGINE_API_CLASS(TensorHolder) { public: @@ -42,7 +42,7 @@ class INFERENCE_ENGINE_API_CLASS(RemoteBlobTensor) : public IRemoteTensor { mutable std::string m_dev_name; public: - std::shared_ptr blob; + std::shared_ptr blob; RemoteBlobTensor(const InferenceEngine::RemoteBlob::Ptr& blob) : blob{blob} { OPENVINO_ASSERT(blob); @@ -99,10 +99,12 @@ class INFERENCE_ENGINE_API_CLASS(RemoteBlobTensor) : public IRemoteTensor { /** * @brief Create InferenceEngine::RemoteBlob from the Tensor */ -class INFERENCE_ENGINE_API_CLASS(TensorRemoteBlob) : public ie::RemoteBlob, public ov::legacy_convert::TensorHolder { +class INFERENCE_ENGINE_API_CLASS(TensorRemoteBlob) + : public InferenceEngine::RemoteBlob, + public ov::legacy_convert::TensorHolder { public: - TensorRemoteBlob(const ov::SoPtr& tensor, ie::TensorDesc desc) - : ie::RemoteBlob{desc}, + TensorRemoteBlob(const ov::SoPtr& tensor, InferenceEngine::TensorDesc desc) + : InferenceEngine::RemoteBlob{desc}, ov::legacy_convert::TensorHolder(tensor) { OPENVINO_ASSERT(this->get_tensor()); } @@ -121,7 +123,7 @@ class INFERENCE_ENGINE_API_CLASS(TensorRemoteBlob) : public ie::RemoteBlob, publ return {}; } } - std::shared_ptr getContext() const noexcept override { + std::shared_ptr getContext() const noexcept override { return {}; } @@ -129,22 +131,22 @@ class INFERENCE_ENGINE_API_CLASS(TensorRemoteBlob) : public ie::RemoteBlob, publ bool deallocate() noexcept override { return true; } - ie::LockedMemory buffer() noexcept override { + InferenceEngine::LockedMemory buffer() noexcept override { return {nullptr, nullptr, 0}; } - ie::LockedMemory cbuffer() const noexcept override { + InferenceEngine::LockedMemory cbuffer() const noexcept override { return {nullptr, nullptr, 0}; } - ie::LockedMemory rwmap() noexcept override { + InferenceEngine::LockedMemory rwmap() noexcept override { return {nullptr, nullptr, 0}; } - ie::LockedMemory rmap() const noexcept override { + InferenceEngine::LockedMemory rmap() const noexcept override { return {nullptr, nullptr, 0}; } - ie::LockedMemory wmap() noexcept override { + InferenceEngine::LockedMemory wmap() noexcept override { return {nullptr, nullptr, 0}; } - const std::shared_ptr& getAllocator() const noexcept override { + const std::shared_ptr& getAllocator() const noexcept override { return m_allocator; } void* getHandle() const noexcept override { @@ -154,7 +156,7 @@ class INFERENCE_ENGINE_API_CLASS(TensorRemoteBlob) : public ie::RemoteBlob, publ using TensorHolder::get_tensor; private: - std::shared_ptr m_allocator; + std::shared_ptr 
m_allocator; }; } // namespace ov diff --git a/src/inference/include/openvino/runtime/common.hpp b/src/inference/include/openvino/runtime/common.hpp index 6815f52a61871d..e78df4ccedb10b 100644 --- a/src/inference/include/openvino/runtime/common.hpp +++ b/src/inference/include/openvino/runtime/common.hpp @@ -39,10 +39,7 @@ # define OPENVINO_PLUGIN_API OPENVINO_EXTERN_C #endif -namespace InferenceEngine {} - namespace ov { -namespace ie = InferenceEngine; /** * @brief This type of map is used for result of Core::query_model diff --git a/src/inference/src/core.cpp b/src/inference/src/core.cpp index 16ab7822c1c8fd..fd05fbaec54a78 100644 --- a/src/inference/src/core.cpp +++ b/src/inference/src/core.cpp @@ -160,7 +160,7 @@ CompiledModel Core::compile_model(const std::shared_ptr& model, }); } -void Core::add_extension(const ie::IExtensionPtr& extension) { +void Core::add_extension(const InferenceEngine::IExtensionPtr& extension) { OV_CORE_CALL_STATEMENT(_impl->AddExtension(extension);); } diff --git a/src/inference/src/dev/converter_utils.cpp b/src/inference/src/dev/converter_utils.cpp index a957c032611990..47b2ec6a155e0b 100644 --- a/src/inference/src/dev/converter_utils.cpp +++ b/src/inference/src/dev/converter_utils.cpp @@ -346,7 +346,7 @@ class IInferencePluginWrapper : public InferenceEngine::IInferencePlugin { const std::map& config) const override { auto res = m_plugin->query_model(ov::legacy_convert::convert_model(network, m_plugin->is_new_api()), ov::any_copy(config)); - ie::QueryNetworkResult ret; + InferenceEngine::QueryNetworkResult ret; if (!network.getFunction() || res.empty()) { ret.rc = InferenceEngine::GENERAL_ERROR; return ret; diff --git a/src/inference/src/dev/core_impl.cpp b/src/inference/src/dev/core_impl.cpp index 77e5514ea9999c..a9fe260e385dc0 100644 --- a/src/inference/src/dev/core_impl.cpp +++ b/src/inference/src/dev/core_impl.cpp @@ -944,7 +944,7 @@ std::vector ov::CoreImpl::get_available_devices() const { try { const ov::Any p = GetMetric(deviceName, propertyName); devicesIDs = p.as>(); - } catch (const ie::Exception&) { + } catch (const InferenceEngine::Exception&) { // plugin is not created by e.g. invalid env } catch (const ov::Exception&) { // plugin is not created by e.g. 
invalid env diff --git a/src/inference/src/dev/core_impl_ie.cpp b/src/inference/src/dev/core_impl_ie.cpp index e689db3ee7113b..88dd55a595f17a 100644 --- a/src/inference/src/dev/core_impl_ie.cpp +++ b/src/inference/src/dev/core_impl_ie.cpp @@ -143,7 +143,7 @@ InferenceEngine::QueryNetworkResult ov::CoreImpl::QueryNetwork(const InferenceEn const std::string& deviceName, const std::map& config) const { OV_ITT_SCOPED_TASK(ov::itt::domains::OV, "Core::QueryNetwork"); - ie::QueryNetworkResult ret; + InferenceEngine::QueryNetworkResult ret; if (!network.getFunction()) { ret.rc = InferenceEngine::GENERAL_ERROR; return ret; diff --git a/src/inference/src/dev/make_tensor.cpp b/src/inference/src/dev/make_tensor.cpp index e250f640e9a3e1..1d23c62f86d957 100644 --- a/src/inference/src/dev/make_tensor.cpp +++ b/src/inference/src/dev/make_tensor.cpp @@ -307,7 +307,7 @@ class BlobTensor : public ITensor { } public: - std::shared_ptr blob; + std::shared_ptr blob; BlobTensor(const InferenceEngine::Blob::Ptr& blob) : blob{blob} { auto remote_impl = dynamic_cast(blob.get()); @@ -349,7 +349,7 @@ class BlobTensor : public ITensor { void* data(const element::Type& element_type) const override { OPENVINO_ASSERT(blob != nullptr, "Tensor was not initialized."); -#define TYPE_CHECK(TYPE) (dynamic_cast*>(blob.get()) != nullptr) +#define TYPE_CHECK(TYPE) (dynamic_cast*>(blob.get()) != nullptr) auto host_accesable_implementation = TYPE_CHECK(bool) || TYPE_CHECK(int8_t) || TYPE_CHECK(uint8_t) || TYPE_CHECK(int16_t) || TYPE_CHECK(uint16_t) || TYPE_CHECK(int32_t) || TYPE_CHECK(uint32_t) || TYPE_CHECK(int64_t) || TYPE_CHECK(uint64_t) || @@ -379,10 +379,10 @@ class BlobTensor : public ITensor { * @tparam T Blob data type */ template -class TensorMemoryBlob : public ie::TBlob { +class TensorMemoryBlob : public InferenceEngine::TBlob { public: ~TensorMemoryBlob() override = default; - explicit TensorMemoryBlob(const ov::SoPtr& tensor_, ie::TensorDesc desc) try : ie + explicit TensorMemoryBlob(const ov::SoPtr& tensor_, InferenceEngine::TensorDesc desc) try : InferenceEngine ::TBlob{desc, static_cast(tensor_->data()), tensor_->get_byte_size()}, tensor{tensor_} { OPENVINO_ASSERT(!std::dynamic_pointer_cast(tensor._ptr)); } @@ -390,24 +390,24 @@ class TensorMemoryBlob : public ie::TBlob { OPENVINO_THROW(ex.what()); } - void setShape(const ie::SizeVector& dims) override { + void setShape(const InferenceEngine::SizeVector& dims) override { tensor->set_shape(dims); - ie::TBlob::getTensorDesc().setDims(dims); + InferenceEngine::TBlob::getTensorDesc().setDims(dims); allocate(); } void allocate() noexcept override { - if (ie::TBlob::buffer() != tensor->data()) { - ie::TBlob::_allocator = - ie::details::make_pre_allocator(static_cast(tensor->data()), tensor->get_byte_size()); - ie::TBlob::allocate(); + if (InferenceEngine::TBlob::buffer() != tensor->data()) { + InferenceEngine::TBlob::_allocator = + InferenceEngine::details::make_pre_allocator(static_cast(tensor->data()), tensor->get_byte_size()); + InferenceEngine::TBlob::allocate(); } } ov::SoPtr tensor; }; -ov::SoPtr make_tensor(const std::shared_ptr& blob, bool unwrap) { +ov::SoPtr make_tensor(const std::shared_ptr& blob, bool unwrap) { #define ELSE_IF(type) \ else if (auto tblob = dynamic_cast*>(blob.get())) { \ return tblob->tensor; \ @@ -440,7 +440,7 @@ ov::SoPtr make_tensor(const std::shared_ptr& blob, bool unwra #undef IF } -ie::Blob* get_hardware_blob(ie::Blob* blob) { +InferenceEngine::Blob* get_hardware_blob(InferenceEngine::Blob* blob) { #ifdef PROXY_PLUGIN_ENABLED if (auto 
remote_blob = dynamic_cast(blob)) { const auto& tensor = ov::proxy::get_hardware_tensor(remote_blob->get_tensor()); @@ -455,7 +455,7 @@ ie::Blob* get_hardware_blob(ie::Blob* blob) { return blob; } -const ie::Blob* get_hardware_blob(const ie::Blob* blob) { +const InferenceEngine::Blob* get_hardware_blob(const InferenceEngine::Blob* blob) { #ifdef PROXY_PLUGIN_ENABLED if (auto remote_blob = dynamic_cast(blob)) { const auto& tensor = ov::proxy::get_hardware_tensor(remote_blob->get_tensor()); @@ -470,7 +470,9 @@ const ie::Blob* get_hardware_blob(const ie::Blob* blob) { return blob; } -ie::Blob::Ptr tensor_to_blob(const ov::SoPtr& orig_tensor, bool unwrap, InferenceEngine::TensorDesc desc) { +InferenceEngine::Blob::Ptr tensor_to_blob(const ov::SoPtr& orig_tensor, + bool unwrap, + InferenceEngine::TensorDesc desc) { auto create_desc = [](const ov::SoPtr& tensor, const InferenceEngine::TensorDesc& desc) -> InferenceEngine::TensorDesc { if (desc.getLayout() != InferenceEngine::ANY || @@ -479,10 +481,10 @@ ie::Blob::Ptr tensor_to_blob(const ov::SoPtr& orig_tensor, bool unwrap, } auto element_type = tensor->get_element_type(); auto shape = tensor->get_shape(); - ie::SizeVector blk_order(shape.size()); + InferenceEngine::SizeVector blk_order(shape.size()); std::iota(blk_order.begin(), blk_order.end(), 0); - ie::SizeVector dim_offset(shape.size(), 0); - ie::SizeVector blk_strides; + InferenceEngine::SizeVector dim_offset(shape.size(), 0); + InferenceEngine::SizeVector blk_strides; auto byte_strides = element_type.bitwidth() >= 8 ? tensor->get_strides() : Strides{}; if (byte_strides.empty()) { blk_strides = ov::row_major_strides(shape); @@ -500,9 +502,9 @@ ie::Blob::Ptr tensor_to_blob(const ov::SoPtr& orig_tensor, bool unwrap, return byte_stride / element_type.size(); }); } - return ie::TensorDesc{ie::details::convertPrecision(element_type), - shape, - ie::BlockingDesc{shape, blk_order, 0, dim_offset, blk_strides}}; + return InferenceEngine::TensorDesc{InferenceEngine::details::convertPrecision(element_type), + shape, + InferenceEngine::BlockingDesc{shape, blk_order, 0, dim_offset, blk_strides}}; }; #ifdef PROXY_PLUGIN_ENABLED const auto& tensor = unwrap ? 
ov::proxy::get_hardware_tensor(orig_tensor) : orig_tensor; diff --git a/src/inference/src/dev/plugin.cpp b/src/inference/src/dev/plugin.cpp index f3effb552e20b4..b2c2475b85f37b 100644 --- a/src/inference/src/dev/plugin.cpp +++ b/src/inference/src/dev/plugin.cpp @@ -52,7 +52,7 @@ const ov::Version ov::Plugin::get_version() const { OV_PLUGIN_CALL_STATEMENT(return m_ptr->get_version()); } -void ov::Plugin::add_extension(const ie::IExtensionPtr& extension) { +void ov::Plugin::add_extension(const InferenceEngine::IExtensionPtr& extension) { OPENVINO_SUPPRESS_DEPRECATED_START OV_PLUGIN_CALL_STATEMENT(m_ptr->add_extension(extension)); OPENVINO_SUPPRESS_DEPRECATED_END @@ -116,7 +116,7 @@ ov::Any ov::Plugin::get_property(const std::string& name, const AnyMap& argument if (ov::supported_properties == name) { try { return {m_ptr->get_property(name, arguments), {m_so}}; - } catch (const ie::Exception&) { + } catch (const InferenceEngine::Exception&) { std::vector supported_properties; try { auto ro_properties = @@ -128,7 +128,7 @@ ov::Any ov::Plugin::get_property(const std::string& name, const AnyMap& argument } } } catch (const ov::Exception&) { - } catch (const ie::Exception&) { + } catch (const InferenceEngine::Exception&) { } try { auto rw_properties = m_ptr->get_property(METRIC_KEY(SUPPORTED_CONFIG_KEYS), arguments) @@ -137,7 +137,7 @@ ov::Any ov::Plugin::get_property(const std::string& name, const AnyMap& argument supported_properties.emplace_back(rw_property, PropertyMutability::RW); } } catch (const ov::Exception&) { - } catch (const ie::Exception&) { + } catch (const InferenceEngine::Exception&) { } supported_properties.emplace_back(ov::supported_properties.name(), PropertyMutability::RO); return supported_properties; diff --git a/src/inference/src/dev/plugin.hpp b/src/inference/src/dev/plugin.hpp index 67a9d59b5e6cd4..639ae1caa9ec55 100644 --- a/src/inference/src/dev/plugin.hpp +++ b/src/inference/src/dev/plugin.hpp @@ -41,7 +41,7 @@ class Plugin { const ov::Version get_version() const; - void add_extension(const ie::IExtensionPtr& extension); + void add_extension(const InferenceEngine::IExtensionPtr& extension); void set_property(const ov::AnyMap& config); diff --git a/src/inference/src/infer_request.cpp b/src/inference/src/infer_request.cpp index 89f283f162045d..18b97cdf7b7eb8 100644 --- a/src/inference/src/infer_request.cpp +++ b/src/inference/src/infer_request.cpp @@ -248,7 +248,7 @@ void InferRequest::wait() { _impl->wait(); } catch (const ov::Cancelled&) { throw; - } catch (const ie::InferCancelled& e) { + } catch (const InferenceEngine::InferCancelled& e) { Cancelled::create(e.what()); } catch (const std::exception& ex) { OPENVINO_THROW(ex.what()); @@ -263,7 +263,7 @@ bool InferRequest::wait_for(const std::chrono::milliseconds timeout) { OPENVINO_SUPPRESS_DEPRECATED_START try { return _impl->wait_for(timeout); - } catch (const ie::InferCancelled& e) { + } catch (const InferenceEngine::InferCancelled& e) { Cancelled::create(e.what()); } catch (const std::exception& ex) { OPENVINO_THROW(ex.what()); From 850bf3d87f34326c40f989bcca5200c75d0c0ec8 Mon Sep 17 00:00:00 2001 From: "Alessandro de Oliveira Faria (A.K.A.CABELO)" Date: Mon, 2 Oct 2023 07:13:35 -0300 Subject: [PATCH 012/257] Opensuse (#20166) * compatibility with opensuse linux * compatibility with opensuse linux --- install_build_dependencies.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/install_build_dependencies.sh b/install_build_dependencies.sh index bbcbb34d476ac3..680e2027a49b1a 100755 --- 
a/install_build_dependencies.sh +++ b/install_build_dependencies.sh @@ -135,6 +135,7 @@ elif [ -f /etc/os-release ] && grep -q "SUSE" /etc/os-release ; then zypper install -y \ file \ `# build tools` \ + patterns-devel-C-C++-devel_C_C++ \ cmake \ ccache \ ninja \ From 32b4ae75705c57feb9423f6ffad7d6aa7e979d55 Mon Sep 17 00:00:00 2001 From: River Li Date: Mon, 2 Oct 2023 18:24:20 +0800 Subject: [PATCH 013/257] [C API] support string size for char pointer (#19931) * [C API] support string size for char pointer * rename function name * Add deprecated flag * Add macro to ignore deprecated * Fix build error in windows --------- Co-authored-by: Ilya Churaev --- src/bindings/c/include/openvino/c/ov_core.h | 29 +++++++++++++++++-- src/bindings/c/src/ov_core.cpp | 23 ++++++++++----- src/bindings/c/tests/ov_core_test.cpp | 31 +++++++++++++++++++++ 3 files changed, 74 insertions(+), 9 deletions(-) diff --git a/src/bindings/c/include/openvino/c/ov_core.h b/src/bindings/c/include/openvino/c/ov_core.h index 22123c2a9e34f4..d0c1fcf30533ad 100644 --- a/src/bindings/c/include/openvino/c/ov_core.h +++ b/src/bindings/c/include/openvino/c/ov_core.h @@ -10,6 +10,7 @@ #pragma once +#include "openvino/c/deprecated.h" #include "openvino/c/ov_common.h" #include "openvino/c/ov_compiled_model.h" #include "openvino/c/ov_model.h" @@ -173,8 +174,9 @@ ov_core_read_model_unicode(const ov_core_t* core, /** * @brief Reads models from IR / ONNX / PDPD / TF / TFLite formats. * @ingroup ov_core_c_api + * @deprecated Use ov_core_read_model_from_memory_buffer instead. * @param core A pointer to the ie_core_t instance. - * @param model_str String with a model in IR / ONNX / PDPD / TF / TFLite format. + * @param model_str String with a model in IR / ONNX / PDPD / TF / TFLite format, string is null-terminated. * @param weights Shared pointer to a constant tensor with weights. * @param model A pointer to the newly created model. * Reading ONNX / PDPD / TF / TFLite models does not support loading weights from the @p weights tensors. @@ -183,12 +185,35 @@ ov_core_read_model_unicode(const ov_core_t* core, * constant data will point to an invalid memory. * @return Status code of the operation: OK(0) for success. */ -OPENVINO_C_API(ov_status_e) +OPENVINO_C_API(OPENVINO_DEPRECATED( + "This API is deprecated and will be replaced by ov_core_read_model_from_memory_buffer") ov_status_e) ov_core_read_model_from_memory(const ov_core_t* core, const char* model_str, const ov_tensor_t* weights, ov_model_t** model); +/** + * @brief Reads models from IR / ONNX / PDPD / TF / TFLite formats with models string size. + * @ingroup ov_core_c_api + * @param core A pointer to the ie_core_t instance. + * @param model_str String with a model in IR / ONNX / PDPD / TF / TFLite format, support model string containing + * several null chars. + * @param str_len The length of model string. + * @param weights Shared pointer to a constant tensor with weights. + * @param model A pointer to the newly created model. + * Reading ONNX / PDPD / TF / TFLite models does not support loading weights from the @p weights tensors. + * @note Created model object shares the weights with the @p weights object. + * Thus, do not create @p weights on temporary data that can be freed later, since the model + * constant data will point to an invalid memory. + * @return Status code of the operation: OK(0) for success. 
+ */ +OPENVINO_C_API(ov_status_e) +ov_core_read_model_from_memory_buffer(const ov_core_t* core, + const char* model_str, + const size_t str_len, + const ov_tensor_t* weights, + ov_model_t** model); + /** * @brief Creates a compiled model from a source model object. * Users can create as many compiled models as they need and use diff --git a/src/bindings/c/src/ov_core.cpp b/src/bindings/c/src/ov_core.cpp index b24b1987636e14..a97d6a51a4a5d0 100644 --- a/src/bindings/c/src/ov_core.cpp +++ b/src/bindings/c/src/ov_core.cpp @@ -89,20 +89,22 @@ ov_status_e ov_core_read_model(const ov_core_t* core, return ov_status_e::OK; } -ov_status_e ov_core_read_model_from_memory(const ov_core_t* core, - const char* model_str, - const ov_tensor_t* weights, - ov_model_t** model) { - if (!core || !model_str || !model) { +ov_status_e ov_core_read_model_from_memory_buffer(const ov_core_t* core, + const char* model_str, + const size_t str_size, + const ov_tensor_t* weights, + ov_model_t** model) { + if (!core || !model_str || !model || !str_size) { return ov_status_e::INVALID_C_PARAM; } try { std::unique_ptr _model(new ov_model_t); + std::string model_string(model_str, str_size); if (weights) { - _model->object = core->object->read_model(model_str, *(weights->object)); + _model->object = core->object->read_model(model_string, *(weights->object)); } else { - _model->object = core->object->read_model(model_str, ov::Tensor()); + _model->object = core->object->read_model(model_string, ov::Tensor()); } *model = _model.release(); } @@ -110,6 +112,13 @@ ov_status_e ov_core_read_model_from_memory(const ov_core_t* core, return ov_status_e::OK; } +ov_status_e ov_core_read_model_from_memory(const ov_core_t* core, + const char* model_str, + const ov_tensor_t* weights, + ov_model_t** model) { + return ov_core_read_model_from_memory_buffer(core, model_str, strlen(model_str), weights, model); +} + ov_status_e ov_core_compile_model(const ov_core_t* core, const ov_model_t* model, const char* device_name, diff --git a/src/bindings/c/tests/ov_core_test.cpp b/src/bindings/c/tests/ov_core_test.cpp index f96fdace23f056..f6203da682ff97 100644 --- a/src/bindings/c/tests/ov_core_test.cpp +++ b/src/bindings/c/tests/ov_core_test.cpp @@ -77,6 +77,7 @@ TEST_P(ov_core_test, ov_core_read_model_no_bin) { ov_core_free(core); } +OPENVINO_SUPPRESS_DEPRECATED_START TEST_P(ov_core_test, ov_core_read_model_from_memory) { ov_core_t* core = nullptr; OV_EXPECT_OK(ov_core_create(&core)); @@ -102,6 +103,36 @@ TEST_P(ov_core_test, ov_core_read_model_from_memory) { ov_model_free(model); ov_core_free(core); } +OPENVINO_SUPPRESS_DEPRECATED_END + +TEST_P(ov_core_test, ov_core_read_model_from_memory_buffer_with_size) { + ov_core_t* core = nullptr; + OV_EXPECT_OK(ov_core_create(&core)); + EXPECT_NE(nullptr, core); + + std::vector weights_content(content_from_file(bin_file_name.c_str(), true)); + + ov_tensor_t* tensor = nullptr; + ov_shape_t shape; + int64_t dims[2] = {1, (int64_t)weights_content.size()}; + ov_shape_create(2, dims, &shape); + OV_EXPECT_OK(ov_tensor_create_from_host_ptr(ov_element_type_e::U8, shape, weights_content.data(), &tensor)); + EXPECT_NE(nullptr, tensor); + + std::vector xml_content(content_from_file(xml_file_name.c_str(), false)); + ov_model_t* model = nullptr; + OV_EXPECT_OK(ov_core_read_model_from_memory_buffer(core, + reinterpret_cast(xml_content.data()), + xml_content.size(), + tensor, + &model)); + EXPECT_NE(nullptr, model); + + ov_shape_free(&shape); + ov_tensor_free(tensor); + ov_model_free(model); + ov_core_free(core); +} 
TEST_P(ov_core_test, ov_core_compile_model) { auto device_name = GetParam(); From 7452656e5ae1d17f1a67d2486cbd74d8d933ef44 Mon Sep 17 00:00:00 2001 From: Sebastian Golebiewski Date: Mon, 2 Oct 2023 12:40:50 +0200 Subject: [PATCH 014/257] Changing file structure of Tool Ecosystem section (#20178) --- .../documentation}/openvino_ecosystem.md | 0 .../documentation/openvino_ecosystem}/datumaro.md | 0 .../openvino_ecosystem}/openvino_training_extensions.md | 0 .../documentation/openvino_ecosystem}/ovsa_get_started.md | 0 4 files changed, 0 insertions(+), 0 deletions(-) rename docs/{Documentation => articles_en/documentation}/openvino_ecosystem.md (100%) rename docs/{Documentation => articles_en/documentation/openvino_ecosystem}/datumaro.md (100%) rename docs/{Documentation => articles_en/documentation/openvino_ecosystem}/openvino_training_extensions.md (100%) rename docs/{ovsa => articles_en/documentation/openvino_ecosystem}/ovsa_get_started.md (100%) diff --git a/docs/Documentation/openvino_ecosystem.md b/docs/articles_en/documentation/openvino_ecosystem.md similarity index 100% rename from docs/Documentation/openvino_ecosystem.md rename to docs/articles_en/documentation/openvino_ecosystem.md diff --git a/docs/Documentation/datumaro.md b/docs/articles_en/documentation/openvino_ecosystem/datumaro.md similarity index 100% rename from docs/Documentation/datumaro.md rename to docs/articles_en/documentation/openvino_ecosystem/datumaro.md diff --git a/docs/Documentation/openvino_training_extensions.md b/docs/articles_en/documentation/openvino_ecosystem/openvino_training_extensions.md similarity index 100% rename from docs/Documentation/openvino_training_extensions.md rename to docs/articles_en/documentation/openvino_ecosystem/openvino_training_extensions.md diff --git a/docs/ovsa/ovsa_get_started.md b/docs/articles_en/documentation/openvino_ecosystem/ovsa_get_started.md similarity index 100% rename from docs/ovsa/ovsa_get_started.md rename to docs/articles_en/documentation/openvino_ecosystem/ovsa_get_started.md From ae00b3e2abfbf5502abcf5c2645ffd353061c739 Mon Sep 17 00:00:00 2001 From: Egor Duplenskii Date: Mon, 2 Oct 2023 13:24:43 +0200 Subject: [PATCH 015/257] [CPU][ARM] Correct execution of the Reorder via Transpose (#20018) --- .../nodes/executors/common/ref_transpose.hpp | 2 +- src/plugins/intel_cpu/src/nodes/reorder.cpp | 20 +++++++++---------- src/plugins/intel_cpu/src/nodes/transpose.cpp | 10 +++++----- 3 files changed, 16 insertions(+), 16 deletions(-) diff --git a/src/plugins/intel_cpu/src/nodes/executors/common/ref_transpose.hpp b/src/plugins/intel_cpu/src/nodes/executors/common/ref_transpose.hpp index 0edabdff028a30..07910df06892dc 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/common/ref_transpose.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/common/ref_transpose.hpp @@ -37,4 +37,4 @@ class RefTransposeExecutorBuilder : public TransposeExecutorBuilder { }; } // namespace intel_cpu -} // namespace ov \ No newline at end of file +} // namespace ov diff --git a/src/plugins/intel_cpu/src/nodes/reorder.cpp b/src/plugins/intel_cpu/src/nodes/reorder.cpp index 1cd1257f3318f0..85b58413273693 100644 --- a/src/plugins/intel_cpu/src/nodes/reorder.cpp +++ b/src/plugins/intel_cpu/src/nodes/reorder.cpp @@ -119,8 +119,8 @@ void Reorder::executeDynamicImpl(dnnl::stream strm) { #if defined(OV_CPU_ARM_ENABLE_FP16) void Reorder::prepareReorderAsTranspose(MemoryDescPtr parentDesc, MemoryDescPtr childDesc) { - auto getOrder = [](const MemoryDesc& lhs, const MemoryDesc& rhs) -> std::pair, 
std::vector> { - const auto& in = lhs.getShape().getStaticDims(); + auto getOrderAndBlockedDims = [](const MemoryDesc& lhs, const MemoryDesc& rhs) -> std::pair, std::vector> { + const auto& in = lhs.as()->getBlockDims(); const auto rank = lhs.getShape().getRank(); if (lhs.hasLayoutType(LayoutType::ncsp) && rhs.hasLayoutType(LayoutType::nspc)) { @@ -142,17 +142,17 @@ void Reorder::prepareReorderAsTranspose(MemoryDescPtr parentDesc, MemoryDescPtr } }; - auto order = getOrder(*parentDesc, *childDesc); + auto order = getOrderAndBlockedDims(*parentDesc, *childDesc); const auto& transposeOrder = order.first; - const auto& transposedDims = order.second; + const auto& transposedBlockDims = order.second; - auto transposedDesc = std::make_shared(parentDesc->getPrecision(), Shape{transposedDims}); + auto transposedDesc = std::make_shared(parentDesc->getPrecision(), Shape{transposedBlockDims}); TransposeParams transposeParams; transposeParams.permuteParams.src_block_dims = parentDesc->as()->getBlockDims(); transposeParams.permuteParams.src_block_order = parentDesc->as()->getOrder(); - transposeParams.permuteParams.dst_block_dims = transposedDesc->as()->getBlockDims(); - transposeParams.permuteParams.dst_block_order = transposedDesc->as()->getOrder(); + transposeParams.permuteParams.dst_block_dims = transposedBlockDims; + transposeParams.permuteParams.dst_block_order = transposeParams.permuteParams.src_block_order; transposeParams.permuteParams.order = transposeOrder; transposeParams.permuteParams.data_size = parentDesc->getPrecision().size(); @@ -163,9 +163,9 @@ void Reorder::prepareReorderAsTranspose(MemoryDescPtr parentDesc, MemoryDescPtr transpose_context); dnnl::primitive_attr attr; transposeExecutor = factory->makeExecutor(transposeParams, - {parentDesc}, - {transposedDesc}, - attr); + {parentDesc}, + {transposedDesc}, + attr); getSelectedPrimitiveDescriptor()->setImplementationType(transposeExecutor->getImplType()); return; } diff --git a/src/plugins/intel_cpu/src/nodes/transpose.cpp b/src/plugins/intel_cpu/src/nodes/transpose.cpp index 2379680e763028..923fcced1ef229 100644 --- a/src/plugins/intel_cpu/src/nodes/transpose.cpp +++ b/src/plugins/intel_cpu/src/nodes/transpose.cpp @@ -176,11 +176,11 @@ void Transpose::prepareParams() { auto builder = [&srcDesc, &dstDesc, this](const PermuteParams& key) -> std::shared_ptr { dnnl::primitive_attr attr; auto selectedPD = getSelectedPrimitiveDescriptor(); - auto jitExec = selectedPD->getExecutorFactoryAs()->makeExecutor(transposeParams, - {srcDesc}, - {dstDesc}, - attr); - return jitExec; + auto executor = selectedPD->getExecutorFactoryAs()->makeExecutor(transposeParams, + {srcDesc}, + {dstDesc}, + attr); + return executor; }; auto cache = context->getParamsCache(); From a96aceb31ea9e2b26db70913b8247cda746e0cb0 Mon Sep 17 00:00:00 2001 From: Aleksandr Voron Date: Mon, 2 Oct 2023 13:25:44 +0200 Subject: [PATCH 016/257] [CPU][ARM] Skip tests affected by in-completed tests migration to new API (#20115) --- .../shared_tests_instances/skip_tests_config.cpp | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp index 8d7d3ec43462ac..063424fa1bb22d 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp @@ -215,6 +215,13 @@ std::vector disabledTestPatterns() { 
retVector.emplace_back(R"(.*IEClassBasicTestP.*)"); #elif defined(OPENVINO_ARCH_ARM64) || defined(OPENVINO_ARCH_ARM) { + // Issue: 121709 + retVector.emplace_back(R"(smoke_ConversionLayerTest/ConversionLayerTest.Inference/conversionOpType=Convert_IS.*_inputPRC=f16_targetPRC=(u|i)8_trgDev=CPU.*)"); + // Issue: 121710 + retVector.emplace_back(R"(smoke_GRUCellCommon/GRUCellTest.Inference/decomposition0_batch=5_.*WType=CONSTANT_RType=CONSTANT_BType=CONSTANT_netPRC=f16_targetDevice=CPU_.*)"); + // Issue: 121715 + retVector.emplace_back(R"(smoke_CompareWithRefs_static/EltwiseLayerTest.Inference/IS.*_eltwise_op_type=Div_secondary_input_type=PARAMETER_opType=VECTOR_model_type=i32_InType=undefined_OutType=undefined_trgDev=CPU.*)"); + retVector.emplace_back(R"(smoke_CompareWithRefs_static_check_collapsing/EltwiseLayerTest.Inference/IS.*_eltwise_op_type=Div_secondary_input_type=PARAMETER_opType=VECTOR_model_type=i32_InType=undefined_OutType=undefined_trgDev=CPU.*)"); // TODO: enable once streams / tput mode is supported retVector.emplace_back(R"(OVClassConfigTestCPU.smoke_CpuExecNetworkCheck(Model|Core)StreamsHasHigherPriorityThanLatencyHint.*)"); retVector.emplace_back(R"(smoke_BehaviorTests/CorrectConfigCheck.canSetConfigAndCheckGetConfig.*CPU_THROUGHPUT_STREAMS=8.*)"); From f1db726559840c191c0cc97113aaf1b07f6bc205 Mon Sep 17 00:00:00 2001 From: Sebastian Golebiewski Date: Mon, 2 Oct 2023 13:46:39 +0200 Subject: [PATCH 017/257] Changing file structure of Legacy Features section (#20182) --- .../documentation}/openvino_legacy_features.md | 0 .../openvino_legacy_features}/--installing-model-dev-tools.md | 0 .../documentation/openvino_legacy_features}/MO_OVC_transition.md | 0 .../openvino_legacy_features/api_2_0_transition_guide.md} | 0 .../documentation/openvino_legacy_features}/model_zoo.md | 0 .../documentation}/openvino_legacy_features/mxnet_caffe_kaldi.md | 0 .../documentation/openvino_legacy_features/pot_introduction.md | 0 7 files changed, 0 insertions(+), 0 deletions(-) rename docs/{Documentation => articles_en/documentation}/openvino_legacy_features.md (100%) rename docs/{install_guides => articles_en/documentation/openvino_legacy_features}/--installing-model-dev-tools.md (100%) rename docs/{OV_Converter_UG/prepare_model/convert_model => articles_en/documentation/openvino_legacy_features}/MO_OVC_transition.md (100%) rename docs/{OV_Runtime_UG/migration_ov_2_0/intro.md => articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide.md} (100%) rename docs/{ => articles_en/documentation/openvino_legacy_features}/model_zoo.md (100%) rename docs/{Documentation => articles_en/documentation}/openvino_legacy_features/mxnet_caffe_kaldi.md (100%) rename tools/pot/docs/Introduction.md => docs/articles_en/documentation/openvino_legacy_features/pot_introduction.md (100%) diff --git a/docs/Documentation/openvino_legacy_features.md b/docs/articles_en/documentation/openvino_legacy_features.md similarity index 100% rename from docs/Documentation/openvino_legacy_features.md rename to docs/articles_en/documentation/openvino_legacy_features.md diff --git a/docs/install_guides/--installing-model-dev-tools.md b/docs/articles_en/documentation/openvino_legacy_features/--installing-model-dev-tools.md similarity index 100% rename from docs/install_guides/--installing-model-dev-tools.md rename to docs/articles_en/documentation/openvino_legacy_features/--installing-model-dev-tools.md diff --git a/docs/OV_Converter_UG/prepare_model/convert_model/MO_OVC_transition.md 
b/docs/articles_en/documentation/openvino_legacy_features/MO_OVC_transition.md similarity index 100% rename from docs/OV_Converter_UG/prepare_model/convert_model/MO_OVC_transition.md rename to docs/articles_en/documentation/openvino_legacy_features/MO_OVC_transition.md diff --git a/docs/OV_Runtime_UG/migration_ov_2_0/intro.md b/docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide.md similarity index 100% rename from docs/OV_Runtime_UG/migration_ov_2_0/intro.md rename to docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide.md diff --git a/docs/model_zoo.md b/docs/articles_en/documentation/openvino_legacy_features/model_zoo.md similarity index 100% rename from docs/model_zoo.md rename to docs/articles_en/documentation/openvino_legacy_features/model_zoo.md diff --git a/docs/Documentation/openvino_legacy_features/mxnet_caffe_kaldi.md b/docs/articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi.md similarity index 100% rename from docs/Documentation/openvino_legacy_features/mxnet_caffe_kaldi.md rename to docs/articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi.md diff --git a/tools/pot/docs/Introduction.md b/docs/articles_en/documentation/openvino_legacy_features/pot_introduction.md similarity index 100% rename from tools/pot/docs/Introduction.md rename to docs/articles_en/documentation/openvino_legacy_features/pot_introduction.md From 98153f8be1f2fca335f60770394df6e80b764ef2 Mon Sep 17 00:00:00 2001 From: Maciej Smyk Date: Mon, 2 Oct 2023 14:11:57 +0200 Subject: [PATCH 018/257] Workflow (#20181) --- docs/{Documentation => articles_en}/openvino_workflow.md | 0 .../openvino_workflow}/deployment_intro.md | 0 .../openvino_workflow}/model_introduction.md | 0 .../openvino_workflow}/model_optimization_guide.md | 0 .../openvino_workflow}/openvino_intro.md | 0 .../openvino_workflow}/torch_compile.md | 0 6 files changed, 0 insertions(+), 0 deletions(-) rename docs/{Documentation => articles_en}/openvino_workflow.md (100%) rename docs/{OV_Runtime_UG/deployment => articles_en/openvino_workflow}/deployment_intro.md (100%) rename docs/{Documentation => articles_en/openvino_workflow}/model_introduction.md (100%) rename docs/{optimization_guide => articles_en/openvino_workflow}/model_optimization_guide.md (100%) rename docs/{OV_Runtime_UG => articles_en/openvino_workflow}/openvino_intro.md (100%) rename docs/{Documentation => articles_en/openvino_workflow}/torch_compile.md (100%) diff --git a/docs/Documentation/openvino_workflow.md b/docs/articles_en/openvino_workflow.md similarity index 100% rename from docs/Documentation/openvino_workflow.md rename to docs/articles_en/openvino_workflow.md diff --git a/docs/OV_Runtime_UG/deployment/deployment_intro.md b/docs/articles_en/openvino_workflow/deployment_intro.md similarity index 100% rename from docs/OV_Runtime_UG/deployment/deployment_intro.md rename to docs/articles_en/openvino_workflow/deployment_intro.md diff --git a/docs/Documentation/model_introduction.md b/docs/articles_en/openvino_workflow/model_introduction.md similarity index 100% rename from docs/Documentation/model_introduction.md rename to docs/articles_en/openvino_workflow/model_introduction.md diff --git a/docs/optimization_guide/model_optimization_guide.md b/docs/articles_en/openvino_workflow/model_optimization_guide.md similarity index 100% rename from docs/optimization_guide/model_optimization_guide.md rename to docs/articles_en/openvino_workflow/model_optimization_guide.md diff --git 
a/docs/OV_Runtime_UG/openvino_intro.md b/docs/articles_en/openvino_workflow/openvino_intro.md similarity index 100% rename from docs/OV_Runtime_UG/openvino_intro.md rename to docs/articles_en/openvino_workflow/openvino_intro.md diff --git a/docs/Documentation/torch_compile.md b/docs/articles_en/openvino_workflow/torch_compile.md similarity index 100% rename from docs/Documentation/torch_compile.md rename to docs/articles_en/openvino_workflow/torch_compile.md From f94445668be4e2993c17287a63237ec186367f66 Mon Sep 17 00:00:00 2001 From: Sofya Balandina Date: Mon, 2 Oct 2023 13:27:36 +0100 Subject: [PATCH 019/257] [template] Fix log_level, model_priority, full_device_name tests (#20133) --- src/plugins/template/src/config.cpp | 8 ++++++++ src/plugins/template/src/config.hpp | 3 +++ src/plugins/template/src/plugin.cpp | 3 ++- .../shared/src/behavior/ov_plugin/properties_tests.cpp | 4 ++++ 4 files changed, 17 insertions(+), 1 deletion(-) diff --git a/src/plugins/template/src/config.cpp b/src/plugins/template/src/config.cpp index 683439d97018e5..39e51ad262b465 100644 --- a/src/plugins/template/src/config.cpp +++ b/src/plugins/template/src/config.cpp @@ -54,6 +54,10 @@ Configuration::Configuration(const ov::AnyMap& config, const Configuration& defa num_requests = tmp_i; else OPENVINO_THROW("Incorrect value, it should be unsigned integer: ", key); + } else if (ov::log::level == key) { + log_level = value.as(); + } else if (ov::hint::model_priority == key) { + model_priority = value.as(); } else if (throwOnUnsupported) { OPENVINO_THROW("Property was not found: ", key); } @@ -90,6 +94,10 @@ ov::Any Configuration::Get(const std::string& name) const { return execution_mode; } else if (name == ov::hint::num_requests) { return num_requests; + } else if (name == ov::log::level) { + return log_level; + } else if (name == ov::hint::model_priority) { + return model_priority; } else { OPENVINO_THROW("Property was not found: ", name); } diff --git a/src/plugins/template/src/config.hpp b/src/plugins/template/src/config.hpp index d891fd472deddc..918e2b514f724a 100644 --- a/src/plugins/template/src/config.hpp +++ b/src/plugins/template/src/config.hpp @@ -41,6 +41,9 @@ struct Configuration { // unused ov::element::Type inference_precision = ov::element::undefined; ov::hint::ExecutionMode execution_mode = ov::hint::ExecutionMode::ACCURACY; + ov::log::Level log_level = ov::log::Level::NO; + + ov::hint::Priority model_priority = ov::hint::Priority::DEFAULT; }; // ! 
[configuration:header] diff --git a/src/plugins/template/src/plugin.cpp b/src/plugins/template/src/plugin.cpp index fde640e7f016d5..d96ea739d58c03 100644 --- a/src/plugins/template/src/plugin.cpp +++ b/src/plugins/template/src/plugin.cpp @@ -237,7 +237,8 @@ ov::Any ov::template_plugin::Plugin::get_property(const std::string& name, const ov::hint::inference_precision, ov::hint::execution_mode, ov::num_streams, - ov::template_plugin::disable_transformations}; + ov::template_plugin::disable_transformations, + ov::log::level}; return rw_properties; }; if (ov::supported_properties == name) { diff --git a/src/tests/functional/plugin/shared/src/behavior/ov_plugin/properties_tests.cpp b/src/tests/functional/plugin/shared/src/behavior/ov_plugin/properties_tests.cpp index ac66fea4559951..6bb177b77ccbd1 100644 --- a/src/tests/functional/plugin/shared/src/behavior/ov_plugin/properties_tests.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/ov_plugin/properties_tests.cpp @@ -367,6 +367,10 @@ TEST_P(OVCheckChangePropComplieModleGetPropTests_DEVICE_ID, ChangeCorrectDeviceP auto device_ids = core->get_available_devices(); for (auto&& device_name_with_id : device_ids) { + if (device_name_with_id.find(target_device) == std::string::npos) { + continue; + } + std::string device_name = device_name_with_id; std::string device_id = ""; auto pos = device_name_with_id.find('.'); From a6e5f6bdb45897758cd720489de29350d415f1bd Mon Sep 17 00:00:00 2001 From: Sebastian Golebiewski Date: Mon, 2 Oct 2023 14:42:13 +0200 Subject: [PATCH 020/257] Changing file structure of Legacy Conversion API (#20184) --- .../mo_ovc_transition}/Customize_Model_Optimizer.md | 0 .../mo_ovc_transition/Legacy_Conversion_API.md} | 0 .../legacy_conversion_api/Convert_Python_Model_Objects.md} | 0 .../mo_ovc_transition/legacy_conversion_api}/Cutting_Model.md | 0 .../legacy_conversion_api/Embedding_Preprocessing_Computation.md} | 0 .../mo_ovc_transition/legacy_conversion_api}/FP16_Compression.md | 0 .../legacy_conversion_api}/Model_Optimizer_FAQ.md | 0 .../legacy_conversion_api/Setting_Input_Shapes.md} | 0 .../legacy_conversion_api}/supported_model_formats.md | 0 9 files changed, 0 insertions(+), 0 deletions(-) rename docs/{MO_DG/prepare_model/customize_model_optimizer => articles_en/documentation/openvino_legacy_features/mo_ovc_transition}/Customize_Model_Optimizer.md (100%) rename docs/{MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md => articles_en/documentation/openvino_legacy_features/mo_ovc_transition/Legacy_Conversion_API.md} (100%) rename docs/{MO_DG/prepare_model/MO_Python_API.md => articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/Convert_Python_Model_Objects.md} (100%) rename docs/{MO_DG/prepare_model/convert_model => articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api}/Cutting_Model.md (100%) rename docs/{MO_DG/prepare_model/Additional_Optimizations.md => articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/Embedding_Preprocessing_Computation.md} (100%) rename docs/{MO_DG/prepare_model => articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api}/FP16_Compression.md (100%) rename docs/{MO_DG/prepare_model => articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api}/Model_Optimizer_FAQ.md (100%) rename docs/{MO_DG/prepare_model/convert_model/Converting_Model.md => 
articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/Setting_Input_Shapes.md} (100%) rename docs/{MO_DG/prepare_model/convert_model => articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api}/supported_model_formats.md (100%) diff --git a/docs/MO_DG/prepare_model/customize_model_optimizer/Customize_Model_Optimizer.md b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/Customize_Model_Optimizer.md similarity index 100% rename from docs/MO_DG/prepare_model/customize_model_optimizer/Customize_Model_Optimizer.md rename to docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/Customize_Model_Optimizer.md diff --git a/docs/MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/Legacy_Conversion_API.md similarity index 100% rename from docs/MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md rename to docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/Legacy_Conversion_API.md diff --git a/docs/MO_DG/prepare_model/MO_Python_API.md b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/Convert_Python_Model_Objects.md similarity index 100% rename from docs/MO_DG/prepare_model/MO_Python_API.md rename to docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/Convert_Python_Model_Objects.md diff --git a/docs/MO_DG/prepare_model/convert_model/Cutting_Model.md b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/Cutting_Model.md similarity index 100% rename from docs/MO_DG/prepare_model/convert_model/Cutting_Model.md rename to docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/Cutting_Model.md diff --git a/docs/MO_DG/prepare_model/Additional_Optimizations.md b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/Embedding_Preprocessing_Computation.md similarity index 100% rename from docs/MO_DG/prepare_model/Additional_Optimizations.md rename to docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/Embedding_Preprocessing_Computation.md diff --git a/docs/MO_DG/prepare_model/FP16_Compression.md b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/FP16_Compression.md similarity index 100% rename from docs/MO_DG/prepare_model/FP16_Compression.md rename to docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/FP16_Compression.md diff --git a/docs/MO_DG/prepare_model/Model_Optimizer_FAQ.md b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/Model_Optimizer_FAQ.md similarity index 100% rename from docs/MO_DG/prepare_model/Model_Optimizer_FAQ.md rename to docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/Model_Optimizer_FAQ.md diff --git a/docs/MO_DG/prepare_model/convert_model/Converting_Model.md b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/Setting_Input_Shapes.md similarity index 100% rename from docs/MO_DG/prepare_model/convert_model/Converting_Model.md rename to docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/Setting_Input_Shapes.md diff --git 
a/docs/MO_DG/prepare_model/convert_model/supported_model_formats.md b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats.md similarity index 100% rename from docs/MO_DG/prepare_model/convert_model/supported_model_formats.md rename to docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats.md From b1c0a1a299f1793fdbc15d06ae9733fd05790313 Mon Sep 17 00:00:00 2001 From: Sebastian Golebiewski Date: Mon, 2 Oct 2023 15:41:12 +0200 Subject: [PATCH 021/257] Changing file structure of Supported Model Formats section (#20189) --- .../supported_model_formats}/Convert_Model_From_ONNX.md | 0 .../supported_model_formats}/Convert_Model_From_Paddle.md | 0 .../supported_model_formats}/Convert_Model_From_PyTorch.md | 0 .../supported_model_formats}/Convert_Model_From_TensorFlow.md | 0 .../Convert_Model_From_TensorFlow_Lite.md | 0 5 files changed, 0 insertions(+), 0 deletions(-) rename docs/{MO_DG/prepare_model/convert_model => articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats}/Convert_Model_From_ONNX.md (100%) rename docs/{MO_DG/prepare_model/convert_model => articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats}/Convert_Model_From_Paddle.md (100%) rename docs/{MO_DG/prepare_model/convert_model => articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats}/Convert_Model_From_PyTorch.md (100%) rename docs/{MO_DG/prepare_model/convert_model => articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats}/Convert_Model_From_TensorFlow.md (100%) rename docs/{MO_DG/prepare_model/convert_model => articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats}/Convert_Model_From_TensorFlow_Lite.md (100%) diff --git a/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_ONNX.md b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/Convert_Model_From_ONNX.md similarity index 100% rename from docs/MO_DG/prepare_model/convert_model/Convert_Model_From_ONNX.md rename to docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/Convert_Model_From_ONNX.md diff --git a/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_Paddle.md b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/Convert_Model_From_Paddle.md similarity index 100% rename from docs/MO_DG/prepare_model/convert_model/Convert_Model_From_Paddle.md rename to docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/Convert_Model_From_Paddle.md diff --git a/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_PyTorch.md b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/Convert_Model_From_PyTorch.md similarity index 100% rename from docs/MO_DG/prepare_model/convert_model/Convert_Model_From_PyTorch.md rename to docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/Convert_Model_From_PyTorch.md diff --git 
a/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_TensorFlow.md b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/Convert_Model_From_TensorFlow.md similarity index 100% rename from docs/MO_DG/prepare_model/convert_model/Convert_Model_From_TensorFlow.md rename to docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/Convert_Model_From_TensorFlow.md diff --git a/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_TensorFlow_Lite.md b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/Convert_Model_From_TensorFlow_Lite.md similarity index 100% rename from docs/MO_DG/prepare_model/convert_model/Convert_Model_From_TensorFlow_Lite.md rename to docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/Convert_Model_From_TensorFlow_Lite.md From 97a830965f7b9998a36a79d190cf2ce597e81ade Mon Sep 17 00:00:00 2001 From: Sebastian Golebiewski Date: Mon, 2 Oct 2023 16:18:45 +0200 Subject: [PATCH 022/257] Changing file structure of Model Conversion Tutorials in Documenation section (#20193) --- .../supported_model_formats/convert_model_tutorials.md} | 0 .../Convert_AttentionOCR_From_Tensorflow.md | 0 .../convert_model_tutorials}/Convert_BERT_From_Tensorflow.md | 0 .../convert_model_tutorials}/Convert_Bert_ner.md | 0 .../convert_model_tutorials}/Convert_CRNN_From_Tensorflow.md | 0 .../convert_model_tutorials}/Convert_Cascade_RCNN_res101.md | 0 .../Convert_DeepSpeech_From_Tensorflow.md | 0 .../convert_model_tutorials}/Convert_EfficientDet_Models.md | 0 .../convert_model_tutorials}/Convert_F3Net.md | 0 .../convert_model_tutorials}/Convert_FaceNet_From_Tensorflow.md | 0 .../convert_model_tutorials}/Convert_Faster_RCNN.md | 0 .../convert_model_tutorials}/Convert_GNMT_From_Tensorflow.md | 0 .../convert_model_tutorials}/Convert_GPT2.md | 0 .../convert_model_tutorials}/Convert_Mask_RCNN.md | 0 .../convert_model_tutorials}/Convert_NCF_From_Tensorflow.md | 0 .../Convert_Object_Detection_API_Models.md | 0 .../convert_model_tutorials}/Convert_QuartzNet.md | 0 .../convert_model_tutorials}/Convert_RCAN.md | 0 .../convert_model_tutorials}/Convert_RNNT.md | 0 .../convert_model_tutorials}/Convert_RetinaNet_From_Tensorflow.md | 0 .../convert_model_tutorials}/Convert_Slim_Library_Models.md | 0 .../convert_model_tutorials}/Convert_WideAndDeep_Family_Models.md | 0 .../convert_model_tutorials}/Convert_XLNet_From_Tensorflow.md | 0 .../convert_model_tutorials}/Convert_YOLACT.md | 0 .../convert_model_tutorials}/Convert_YOLO_From_Tensorflow.md | 0 .../convert_model_tutorials}/Convert_lm_1b_From_Tensorflow.md | 0 26 files changed, 0 insertions(+), 0 deletions(-) rename docs/{MO_DG/prepare_model/convert_model/Convert_Model_Tutorials.md => articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials.md} (100%) rename docs/{MO_DG/prepare_model/convert_model/tf_specific => articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials}/Convert_AttentionOCR_From_Tensorflow.md (100%) rename docs/{MO_DG/prepare_model/convert_model/tf_specific => 
articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials}/Convert_BERT_From_Tensorflow.md (100%) rename docs/{MO_DG/prepare_model/convert_model/pytorch_specific => articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials}/Convert_Bert_ner.md (100%) rename docs/{MO_DG/prepare_model/convert_model/tf_specific => articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials}/Convert_CRNN_From_Tensorflow.md (100%) rename docs/{MO_DG/prepare_model/convert_model/pytorch_specific => articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials}/Convert_Cascade_RCNN_res101.md (100%) rename docs/{MO_DG/prepare_model/convert_model/tf_specific => articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials}/Convert_DeepSpeech_From_Tensorflow.md (100%) rename docs/{MO_DG/prepare_model/convert_model/tf_specific => articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials}/Convert_EfficientDet_Models.md (100%) rename docs/{MO_DG/prepare_model/convert_model/pytorch_specific => articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials}/Convert_F3Net.md (100%) rename docs/{MO_DG/prepare_model/convert_model/tf_specific => articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials}/Convert_FaceNet_From_Tensorflow.md (100%) rename docs/{MO_DG/prepare_model/convert_model/onnx_specific => articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials}/Convert_Faster_RCNN.md (100%) rename docs/{MO_DG/prepare_model/convert_model/tf_specific => articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials}/Convert_GNMT_From_Tensorflow.md (100%) rename docs/{MO_DG/prepare_model/convert_model/onnx_specific => articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials}/Convert_GPT2.md (100%) rename docs/{MO_DG/prepare_model/convert_model/onnx_specific => articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials}/Convert_Mask_RCNN.md (100%) rename docs/{MO_DG/prepare_model/convert_model/tf_specific => articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials}/Convert_NCF_From_Tensorflow.md (100%) rename docs/{MO_DG/prepare_model/convert_model/tf_specific => articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials}/Convert_Object_Detection_API_Models.md (100%) rename docs/{MO_DG/prepare_model/convert_model/pytorch_specific => articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials}/Convert_QuartzNet.md (100%) rename 
docs/{MO_DG/prepare_model/convert_model/pytorch_specific => articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials}/Convert_RCAN.md (100%) rename docs/{MO_DG/prepare_model/convert_model/pytorch_specific => articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials}/Convert_RNNT.md (100%) rename docs/{MO_DG/prepare_model/convert_model/tf_specific => articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials}/Convert_RetinaNet_From_Tensorflow.md (100%) rename docs/{MO_DG/prepare_model/convert_model/tf_specific => articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials}/Convert_Slim_Library_Models.md (100%) rename docs/{MO_DG/prepare_model/convert_model/tf_specific => articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials}/Convert_WideAndDeep_Family_Models.md (100%) rename docs/{MO_DG/prepare_model/convert_model/tf_specific => articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials}/Convert_XLNet_From_Tensorflow.md (100%) rename docs/{MO_DG/prepare_model/convert_model/pytorch_specific => articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials}/Convert_YOLACT.md (100%) rename docs/{MO_DG/prepare_model/convert_model/tf_specific => articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials}/Convert_YOLO_From_Tensorflow.md (100%) rename docs/{MO_DG/prepare_model/convert_model/tf_specific => articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials}/Convert_lm_1b_From_Tensorflow.md (100%) diff --git a/docs/MO_DG/prepare_model/convert_model/Convert_Model_Tutorials.md b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials.md similarity index 100% rename from docs/MO_DG/prepare_model/convert_model/Convert_Model_Tutorials.md rename to docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials.md diff --git a/docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_AttentionOCR_From_Tensorflow.md b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_AttentionOCR_From_Tensorflow.md similarity index 100% rename from docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_AttentionOCR_From_Tensorflow.md rename to docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_AttentionOCR_From_Tensorflow.md diff --git a/docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_BERT_From_Tensorflow.md b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_BERT_From_Tensorflow.md similarity index 100% rename from 
docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_BERT_From_Tensorflow.md rename to docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_BERT_From_Tensorflow.md diff --git a/docs/MO_DG/prepare_model/convert_model/pytorch_specific/Convert_Bert_ner.md b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_Bert_ner.md similarity index 100% rename from docs/MO_DG/prepare_model/convert_model/pytorch_specific/Convert_Bert_ner.md rename to docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_Bert_ner.md diff --git a/docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_CRNN_From_Tensorflow.md b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_CRNN_From_Tensorflow.md similarity index 100% rename from docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_CRNN_From_Tensorflow.md rename to docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_CRNN_From_Tensorflow.md diff --git a/docs/MO_DG/prepare_model/convert_model/pytorch_specific/Convert_Cascade_RCNN_res101.md b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_Cascade_RCNN_res101.md similarity index 100% rename from docs/MO_DG/prepare_model/convert_model/pytorch_specific/Convert_Cascade_RCNN_res101.md rename to docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_Cascade_RCNN_res101.md diff --git a/docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_DeepSpeech_From_Tensorflow.md b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_DeepSpeech_From_Tensorflow.md similarity index 100% rename from docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_DeepSpeech_From_Tensorflow.md rename to docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_DeepSpeech_From_Tensorflow.md diff --git a/docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_EfficientDet_Models.md b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_EfficientDet_Models.md similarity index 100% rename from docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_EfficientDet_Models.md rename to docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_EfficientDet_Models.md diff --git a/docs/MO_DG/prepare_model/convert_model/pytorch_specific/Convert_F3Net.md b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_F3Net.md similarity index 100% rename from docs/MO_DG/prepare_model/convert_model/pytorch_specific/Convert_F3Net.md rename to 
docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_F3Net.md diff --git a/docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_FaceNet_From_Tensorflow.md b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_FaceNet_From_Tensorflow.md similarity index 100% rename from docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_FaceNet_From_Tensorflow.md rename to docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_FaceNet_From_Tensorflow.md diff --git a/docs/MO_DG/prepare_model/convert_model/onnx_specific/Convert_Faster_RCNN.md b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_Faster_RCNN.md similarity index 100% rename from docs/MO_DG/prepare_model/convert_model/onnx_specific/Convert_Faster_RCNN.md rename to docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_Faster_RCNN.md diff --git a/docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_GNMT_From_Tensorflow.md b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_GNMT_From_Tensorflow.md similarity index 100% rename from docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_GNMT_From_Tensorflow.md rename to docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_GNMT_From_Tensorflow.md diff --git a/docs/MO_DG/prepare_model/convert_model/onnx_specific/Convert_GPT2.md b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_GPT2.md similarity index 100% rename from docs/MO_DG/prepare_model/convert_model/onnx_specific/Convert_GPT2.md rename to docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_GPT2.md diff --git a/docs/MO_DG/prepare_model/convert_model/onnx_specific/Convert_Mask_RCNN.md b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_Mask_RCNN.md similarity index 100% rename from docs/MO_DG/prepare_model/convert_model/onnx_specific/Convert_Mask_RCNN.md rename to docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_Mask_RCNN.md diff --git a/docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_NCF_From_Tensorflow.md b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_NCF_From_Tensorflow.md similarity index 100% rename from docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_NCF_From_Tensorflow.md rename to docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_NCF_From_Tensorflow.md diff --git 
a/docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_Object_Detection_API_Models.md b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_Object_Detection_API_Models.md similarity index 100% rename from docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_Object_Detection_API_Models.md rename to docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_Object_Detection_API_Models.md diff --git a/docs/MO_DG/prepare_model/convert_model/pytorch_specific/Convert_QuartzNet.md b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_QuartzNet.md similarity index 100% rename from docs/MO_DG/prepare_model/convert_model/pytorch_specific/Convert_QuartzNet.md rename to docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_QuartzNet.md diff --git a/docs/MO_DG/prepare_model/convert_model/pytorch_specific/Convert_RCAN.md b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_RCAN.md similarity index 100% rename from docs/MO_DG/prepare_model/convert_model/pytorch_specific/Convert_RCAN.md rename to docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_RCAN.md diff --git a/docs/MO_DG/prepare_model/convert_model/pytorch_specific/Convert_RNNT.md b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_RNNT.md similarity index 100% rename from docs/MO_DG/prepare_model/convert_model/pytorch_specific/Convert_RNNT.md rename to docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_RNNT.md diff --git a/docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_RetinaNet_From_Tensorflow.md b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_RetinaNet_From_Tensorflow.md similarity index 100% rename from docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_RetinaNet_From_Tensorflow.md rename to docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_RetinaNet_From_Tensorflow.md diff --git a/docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_Slim_Library_Models.md b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_Slim_Library_Models.md similarity index 100% rename from docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_Slim_Library_Models.md rename to docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_Slim_Library_Models.md diff --git a/docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_WideAndDeep_Family_Models.md 
b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_WideAndDeep_Family_Models.md similarity index 100% rename from docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_WideAndDeep_Family_Models.md rename to docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_WideAndDeep_Family_Models.md diff --git a/docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_XLNet_From_Tensorflow.md b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_XLNet_From_Tensorflow.md similarity index 100% rename from docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_XLNet_From_Tensorflow.md rename to docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_XLNet_From_Tensorflow.md diff --git a/docs/MO_DG/prepare_model/convert_model/pytorch_specific/Convert_YOLACT.md b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_YOLACT.md similarity index 100% rename from docs/MO_DG/prepare_model/convert_model/pytorch_specific/Convert_YOLACT.md rename to docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_YOLACT.md diff --git a/docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_YOLO_From_Tensorflow.md b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_YOLO_From_Tensorflow.md similarity index 100% rename from docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_YOLO_From_Tensorflow.md rename to docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_YOLO_From_Tensorflow.md diff --git a/docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_lm_1b_From_Tensorflow.md b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_lm_1b_From_Tensorflow.md similarity index 100% rename from docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_lm_1b_From_Tensorflow.md rename to docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_lm_1b_From_Tensorflow.md From 33881542c5b6bcd76a733560cb4d147759a389b0 Mon Sep 17 00:00:00 2001 From: Taylor Yeonbok Lee Date: Mon, 2 Oct 2023 09:44:22 -0700 Subject: [PATCH 023/257] [GPU] Fix gpu memory handling (#19930) * [GPU] Added more debug print for prepare input/output Skip enqueuemempcy for zero count tensor * Fix mem alloc limitation * Simplify memory check condition * Fix not to wait for null event * Additional fixes - apply review comments (disable 0 size memcpy for other mem types) && return user event instead of nullptr - fixd get_max_memory to return cpu memory instead of global memorysize (Because in windows, max_global_memory returns shared gpu memory instead of physical memory size) * Fixed functest failure --- .../include/intel_gpu/runtime/engine.hpp | 3 + 
.../src/plugin/sync_infer_request.cpp | 10 ++- src/plugins/intel_gpu/src/runtime/engine.cpp | 10 ++- .../intel_gpu/src/runtime/ocl/ocl_engine.cpp | 37 +++++++---- .../intel_gpu/src/runtime/ocl/ocl_engine.hpp | 1 + .../intel_gpu/src/runtime/ocl/ocl_memory.cpp | 64 +++++++++++++++++++ 6 files changed, 109 insertions(+), 16 deletions(-) diff --git a/src/plugins/intel_gpu/include/intel_gpu/runtime/engine.hpp b/src/plugins/intel_gpu/include/intel_gpu/runtime/engine.hpp index 71b8a11a209ed5..ee8d10bb580f9e 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/runtime/engine.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/runtime/engine.hpp @@ -125,6 +125,9 @@ class engine { /// Returns the size of the larger of the GPU memory and CPU memory. uint64_t get_max_memory_size() const; + /// Returns the size of CPU memory. + uint64_t get_host_memory_size() const; + /// Create stream object for current engine virtual stream_ptr create_stream(const ExecutionConfig& config) const = 0; diff --git a/src/plugins/intel_gpu/src/plugin/sync_infer_request.cpp b/src/plugins/intel_gpu/src/plugin/sync_infer_request.cpp index d5ee5d423fcd13..cb25b369b2e336 100644 --- a/src/plugins/intel_gpu/src/plugin/sync_infer_request.cpp +++ b/src/plugins/intel_gpu/src/plugin/sync_infer_request.cpp @@ -676,6 +676,9 @@ std::vector SyncInferRequest::prepare_input(const std::string auto element_type = user_tensor->get_element_type(); auto remote_ptr = std::dynamic_pointer_cast(user_tensor); bool is_remote = remote_ptr != nullptr; + GPU_DEBUG_TRACE_DETAIL << "Prepare input for " << name << " ( is_remote ? " << is_remote << ")" << std::endl; + GPU_DEBUG_TRACE_DETAIL << " port shape : " << pshape.to_string() << std::endl; + GPU_DEBUG_TRACE_DETAIL << " user_tensor shape: " << user_tensor->get_shape().to_string() << std::endl; auto network = m_graph->get_network(); auto& engine = m_graph->get_engine(); @@ -709,6 +712,7 @@ std::vector SyncInferRequest::prepare_input(const std::string if (device_tensor->get_original_memory()->size() < user_tensor->get_byte_size()) { auto& shape_predictor = network->get_shape_predictor(); auto actual_shape = predict_shape(name, user_tensor->get_shape(), device_tensor_et, shape_predictor); + GPU_DEBUG_TRACE_DETAIL << " actual memory shape: " << actual_shape.to_string() << std::endl; auto new_tensor = create_device_tensor(actual_shape, device_tensor_et, false); new_tensor->set_shape(user_tensor->get_shape()); m_plugin_inputs[name] = { new_tensor, TensorOwner::PLUGIN }; @@ -759,7 +763,7 @@ std::vector SyncInferRequest::prepare_input(const std::string const cldnn::primitive_id internal_name = "parameter:" + name; network->set_input_data(internal_name, memory); - if (ret_event) + if (ret_event && !ret_event->is_set()) return { ret_event }; else return {}; @@ -776,6 +780,10 @@ std::vector SyncInferRequest::prepare_output(const std::strin auto remote_ptr = std::dynamic_pointer_cast(user_tensor); bool is_remote = remote_ptr != nullptr; + GPU_DEBUG_TRACE_DETAIL << "Prepare output for " << name << std::endl; + GPU_DEBUG_TRACE_DETAIL << " port shape : " << pshape.to_string() << std::endl; + GPU_DEBUG_TRACE_DETAIL << " user_tensor shape: " << user_tensor->get_shape().to_string() << std::endl; + if (user_tensor->get_size() > 0) { OPENVINO_ASSERT(pshape.compatible(ov::PartialShape(user_tensor->get_shape())), "[GPU] The output tensor size is not equal to model port shape, can't handle output tensor with name: ", diff --git a/src/plugins/intel_gpu/src/runtime/engine.cpp b/src/plugins/intel_gpu/src/runtime/engine.cpp index 
746f05e857a416..38855de5035e9a 100644 --- a/src/plugins/intel_gpu/src/runtime/engine.cpp +++ b/src/plugins/intel_gpu/src/runtime/engine.cpp @@ -82,10 +82,18 @@ bool engine::use_unified_shared_memory() const { } uint64_t engine::get_max_memory_size() const { - static uint64_t max_device_mem = (std::max)(get_device_info().max_global_mem_size, static_cast(get_cpu_ram_size())); + static uint64_t max_device_mem = get_host_memory_size(); + const auto& dev_type = get_device_info().dev_type; + if (dev_type == device_type::discrete_gpu) { + max_device_mem += get_device_info().max_global_mem_size; + } return max_device_mem; } +uint64_t engine::get_host_memory_size() const { + return static_cast(get_cpu_ram_size()); +} + bool engine::supports_allocation(allocation_type type) const { if (memory_capabilities::is_usm_type(type) && !use_unified_shared_memory()) return false; diff --git a/src/plugins/intel_gpu/src/runtime/ocl/ocl_engine.cpp b/src/plugins/intel_gpu/src/runtime/ocl/ocl_engine.cpp index 181c3baa0c5786..325bb3aa5816bc 100644 --- a/src/plugins/intel_gpu/src/runtime/ocl/ocl_engine.cpp +++ b/src/plugins/intel_gpu/src/runtime/ocl/ocl_engine.cpp @@ -125,23 +125,29 @@ allocation_type ocl_engine::detect_usm_allocation_type(const void* memory) const : allocation_type::unknown; } -memory::ptr ocl_engine::allocate_memory(const layout& layout, allocation_type type, bool reset) { - OPENVINO_ASSERT(!layout.is_dynamic() || layout.has_upper_bound(), "[GPU] Can't allocate memory for dynamic layout"); - - OPENVINO_ASSERT(layout.bytes_count() <= get_device_info().max_alloc_mem_size, - "[GPU] Exceeded max size of memory object allocation: ", - "Requested ", layout.bytes_count(), " bytes " - "but max alloc size is ", get_device_info().max_alloc_mem_size, " bytes"); - +bool ocl_engine::check_allocatable(const layout& layout, allocation_type type) const { + OPENVINO_ASSERT(supports_allocation(type) || type == allocation_type::cl_mem, "[GPU] Unsupported allocation type: ", type); auto used_mem = get_used_device_memory(allocation_type::usm_device) + get_used_device_memory(allocation_type::usm_host); +#ifdef __unix__ + // Prevent from being killed by Ooo Killer of Linux OPENVINO_ASSERT(layout.bytes_count() + used_mem <= get_max_memory_size(), - "[GPU] Exceeded max size of memory allocation: ", - "Required ", (layout.bytes_count() + used_mem), " bytes " - "but memory size is ", get_max_memory_size(), " bytes"); + "[GPU] Exceeded max size of memory allocation: ", + "Required ", layout.bytes_count(), " bytes, already occupied : ", used_mem, " bytes, ", + "but available memory size is ", get_max_memory_size(), " bytes"); +#else + if (layout.bytes_count() + used_mem > get_max_memory_size()) { + GPU_DEBUG_COUT << "[Warning] [GPU] Exceeded max size of memory allocation: " << "Required " << layout.bytes_count() << " bytes, already occupied : " + << used_mem << " bytes, but available memory size is " << get_max_memory_size() << " bytes" << std::endl; + GPU_DEBUG_COUT << "Please note that performance might drop due to memory swap." 
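// [Editor's illustration - not part of the patch] A self-contained sketch of the memory-budget
// behaviour the two hunks above introduce: the budget is host RAM, plus the device's global
// memory only for discrete GPUs, and exceeding it is a hard error on Linux (to stay clear of
// the OOM killer) but only a warning elsewhere. All names below are hypothetical stand-ins for
// the real engine / ocl_engine members.
#include <cstdint>
#include <iostream>
#include <stdexcept>

uint64_t max_memory_size(uint64_t host_ram_bytes, uint64_t device_global_mem_bytes, bool is_discrete_gpu) {
    uint64_t budget = host_ram_bytes;          // integrated GPUs share host memory
    if (is_discrete_gpu)
        budget += device_global_mem_bytes;     // discrete GPUs contribute their own global memory
    return budget;
}

bool check_allocatable(uint64_t requested_bytes, uint64_t used_bytes, uint64_t budget_bytes) {
    if (requested_bytes + used_bytes > budget_bytes) {
#ifdef __unix__
        throw std::runtime_error("[GPU] Exceeded max size of memory allocation");
#else
        std::cerr << "[Warning] [GPU] Exceeded max size of memory allocation; "
                     "performance might drop due to memory swap." << std::endl;
#endif
    }
    return true;
}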
<< std::endl; + } +#endif + return true; +} - OPENVINO_ASSERT(supports_allocation(type) || type == allocation_type::cl_mem, - "[GPU] Unsupported allocation type: ", type); +memory::ptr ocl_engine::allocate_memory(const layout& layout, allocation_type type, bool reset) { + OPENVINO_ASSERT(!layout.is_dynamic() || layout.has_upper_bound(), "[GPU] Can't allocate memory for dynamic layout"); + check_allocatable(layout, type); try { memory::ptr res = nullptr; if (layout.format.is_image_2d()) { @@ -153,7 +159,10 @@ memory::ptr ocl_engine::allocate_memory(const layout& layout, allocation_type ty } if (reset || res->is_memory_reset_needed(layout)) { - get_service_stream().wait_for_events({res->fill(get_service_stream())}); + auto ev = res->fill(get_service_stream()); + if (ev) { + get_service_stream().wait_for_events({ev}); + } } return res; diff --git a/src/plugins/intel_gpu/src/runtime/ocl/ocl_engine.hpp b/src/plugins/intel_gpu/src/runtime/ocl/ocl_engine.hpp index 20fa06f350124c..6d4141396518e6 100644 --- a/src/plugins/intel_gpu/src/runtime/ocl/ocl_engine.hpp +++ b/src/plugins/intel_gpu/src/runtime/ocl/ocl_engine.hpp @@ -28,6 +28,7 @@ class ocl_engine : public engine { memory_ptr reinterpret_handle(const layout& new_layout, shared_mem_params params) override; memory_ptr reinterpret_buffer(const memory& memory, const layout& new_layout) override; bool is_the_same_buffer(const memory& mem1, const memory& mem2) override; + bool check_allocatable(const layout& layout, allocation_type type) const; void* get_user_context() const override; diff --git a/src/plugins/intel_gpu/src/runtime/ocl/ocl_memory.cpp b/src/plugins/intel_gpu/src/runtime/ocl/ocl_memory.cpp index 67d3e65dc08c5d..bbdb9dd4cccde8 100644 --- a/src/plugins/intel_gpu/src/runtime/ocl/ocl_memory.cpp +++ b/src/plugins/intel_gpu/src/runtime/ocl/ocl_memory.cpp @@ -64,10 +64,18 @@ void gpu_buffer::unlock(const stream& stream) { } event::ptr gpu_buffer::fill(stream& stream) { + if (_bytes_count == 0) { + GPU_DEBUG_TRACE_DETAIL << "Skip EnqueueMemcpy for 0 size tensor" << std::endl; + return stream.create_user_event(true); + } return fill(stream, 0); } event::ptr gpu_buffer::fill(stream& stream, unsigned char pattern) { + if (_bytes_count == 0) { + GPU_DEBUG_TRACE_DETAIL << "Skip EnqueueMemcpy for 0 size tensor" << std::endl; + return stream.create_user_event(true); + } auto& cl_stream = downcast(stream); auto ev = stream.create_base_event(); cl::Event& ev_ocl = downcast(ev.get())->get(); @@ -89,6 +97,10 @@ shared_mem_params gpu_buffer::get_internal_params() const { } event::ptr gpu_buffer::copy_from(stream& stream, const memory& other, bool blocking) { + if (_bytes_count == 0) { + GPU_DEBUG_TRACE_DETAIL << "Skip EnqueueMemcpy for 0 size tensor" << std::endl; + return stream.create_user_event(true); + } switch (other.get_allocation_type()) { case allocation_type::usm_host: case allocation_type::usm_shared: @@ -118,6 +130,10 @@ event::ptr gpu_buffer::copy_from(stream& stream, const memory& other, bool block } event::ptr gpu_buffer::copy_from(stream& stream, const void* host_ptr, bool blocking) { + if (_bytes_count == 0) { + GPU_DEBUG_TRACE_DETAIL << "Skip EnqueueMemcpy for 0 size tensor" << std::endl; + return stream.create_user_event(true); + } auto& cl_stream = downcast(stream); auto ev = blocking ? stream.create_user_event(true) : stream.create_base_event(); cl::Event* ev_ocl = blocking ? 
nullptr : &downcast(ev.get())->get(); @@ -127,6 +143,10 @@ event::ptr gpu_buffer::copy_from(stream& stream, const void* host_ptr, bool bloc } event::ptr gpu_buffer::copy_to(stream& stream, void* host_ptr, bool blocking) { + if (_bytes_count == 0) { + GPU_DEBUG_TRACE_DETAIL << "Skip EnqueueMemcpy for 0 size tensor" << std::endl; + return stream.create_user_event(true); + } auto& cl_stream = downcast(stream); auto ev = blocking ? stream.create_user_event(true) : stream.create_base_event(); cl::Event* ev_ocl = blocking ? nullptr : &downcast(ev.get())->get(); @@ -211,10 +231,18 @@ gpu_image2d::gpu_image2d(ocl_engine* engine, } event::ptr gpu_image2d::fill(stream& stream) { + if (_bytes_count == 0) { + GPU_DEBUG_TRACE_DETAIL << "Skip EnqueueMemcpy for 0 size tensor" << std::endl; + return stream.create_user_event(true); + } return fill(stream, 0); } event::ptr gpu_image2d::fill(stream& stream, unsigned char pattern) { + if (_bytes_count == 0) { + GPU_DEBUG_TRACE_DETAIL << "Skip EnqueueMemcpy for 0 size tensor" << std::endl; + return stream.create_user_event(true); + } auto& cl_stream = downcast(stream); auto ev = stream.create_base_event(); cl::Event& ev_ocl = downcast(ev.get())->get(); @@ -268,6 +296,10 @@ shared_mem_params gpu_image2d::get_internal_params() const { } event::ptr gpu_image2d::copy_from(stream& stream, const memory& other, bool blocking) { + if (_bytes_count == 0) { + GPU_DEBUG_TRACE_DETAIL << "Skip EnqueueMemcpy for 0 size tensor" << std::endl; + return stream.create_user_event(true); + } auto& cl_stream = downcast(stream); auto& casted = downcast(other); auto ev = stream.create_base_event(); @@ -283,6 +315,10 @@ event::ptr gpu_image2d::copy_from(stream& stream, const memory& other, bool bloc } event::ptr gpu_image2d::copy_from(stream& stream, const void* host_ptr, bool blocking) { + if (_bytes_count == 0) { + GPU_DEBUG_TRACE_DETAIL << "Skip EnqueueMemcpy for 0 size tensor" << std::endl; + return stream.create_user_event(true); + } auto& cl_stream = downcast(stream); auto ev = blocking ? stream.create_user_event(true) : stream.create_base_event(); cl::Event* ev_ocl = blocking ? nullptr : &downcast(ev.get())->get(); @@ -293,6 +329,10 @@ event::ptr gpu_image2d::copy_from(stream& stream, const void* host_ptr, bool blo } event::ptr gpu_image2d::copy_to(stream& stream, memory& other, bool blocking) { + if (_bytes_count == 0) { + GPU_DEBUG_TRACE_DETAIL << "Skip EnqueueMemcpy for 0 size tensor" << std::endl; + return stream.create_user_event(true); + } auto& cl_stream = downcast(stream); auto& casted = downcast(other); auto ev = stream.create_base_event(); @@ -308,6 +348,10 @@ event::ptr gpu_image2d::copy_to(stream& stream, memory& other, bool blocking) { } event::ptr gpu_image2d::copy_to(stream& stream, void* host_ptr, bool blocking) { + if (_bytes_count == 0) { + GPU_DEBUG_TRACE_DETAIL << "Skip EnqueueMemcpy for 0 size tensor" << std::endl; + return stream.create_user_event(true); + } auto& cl_stream = downcast(stream); auto ev = blocking ? stream.create_user_event(true) : stream.create_base_event(); cl::Event* ev_ocl = blocking ? 
nullptr : &downcast(ev.get())->get(); @@ -414,6 +458,10 @@ void gpu_usm::unlock(const stream& /* stream */) { } event::ptr gpu_usm::fill(stream& stream, unsigned char pattern) { + if (_bytes_count == 0) { + GPU_DEBUG_TRACE_DETAIL << "Skip gpu_usm::fill for 0 size tensor" << std::endl; + return stream.create_user_event(true); + } auto& cl_stream = downcast(stream); auto ev = stream.create_base_event(); cl::Event& ev_ocl = downcast(ev.get())->get(); @@ -435,10 +483,18 @@ event::ptr gpu_usm::fill(stream& stream) { // ev->wait(); // [WA] + if (_bytes_count == 0) { + GPU_DEBUG_TRACE_DETAIL << "Skip EnqueueMemcpy for 0 size tensor" << std::endl; + return stream.create_user_event(true); + } return fill(stream, 0); } event::ptr gpu_usm::copy_from(stream& stream, const memory& other, bool blocking) { + if (_bytes_count == 0) { + GPU_DEBUG_TRACE_DETAIL << "Skip EnqueueMemcpy for 0 size tensor" << std::endl; + return stream.create_user_event(true); + } auto& cl_stream = downcast(stream); auto ev = blocking ? stream.create_user_event(true) : stream.create_base_event(); cl::Event* ev_ocl = blocking ? nullptr : &downcast(ev.get())->get(); @@ -462,6 +518,10 @@ event::ptr gpu_usm::copy_from(stream& stream, const memory& other, bool blocking } event::ptr gpu_usm::copy_from(stream& stream, const void* host_ptr, bool blocking) { + if (_bytes_count == 0) { + GPU_DEBUG_TRACE_DETAIL << "Skip EnqueueMemcpy for 0 size tensor" << std::endl; + return stream.create_user_event(true); + } auto& cl_stream = downcast(stream); auto dst_ptr = get_buffer().get(); auto ev = blocking ? stream.create_user_event(true) : stream.create_base_event(); @@ -477,6 +537,10 @@ event::ptr gpu_usm::copy_from(stream& stream, const void* host_ptr, bool blockin } event::ptr gpu_usm::copy_to(stream& stream, void* host_ptr, bool blocking) { + if (_bytes_count == 0) { + GPU_DEBUG_TRACE_DETAIL << "Skip EnqueueMemcpy for 0 size tensor" << std::endl; + return stream.create_user_event(true); + } auto& cl_stream = downcast(stream); auto ev = blocking ? stream.create_user_event(true) : stream.create_base_event(); cl::Event* ev_ocl = blocking ? 
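// [Editor's illustration - not part of the patch] The guard repeated throughout ocl_memory.cpp
// above: for zero-byte tensors every fill/copy helper now skips the OpenCL enqueue entirely and
// returns an already-signalled user event, so callers never block on work that was never
// submitted. The stream/event types below are minimal hypothetical stand-ins for the real ones.
#include <cstddef>
#include <memory>

struct event {
    bool set = false;
};

struct stream {
    std::shared_ptr<event> create_user_event(bool signalled) {
        auto ev = std::make_shared<event>();
        ev->set = signalled;   // already complete, nothing to wait for
        return ev;
    }
    std::shared_ptr<event> enqueue_copy(const void* /*src*/, void* /*dst*/, std::size_t /*bytes*/) {
        // a real implementation would enqueue an OpenCL copy here and return its event
        return std::make_shared<event>();
    }
};

std::shared_ptr<event> copy_from(stream& s, const void* host_ptr, void* device_ptr, std::size_t bytes_count) {
    if (bytes_count == 0) {
        // nothing to enqueue: hand back an event that is already complete
        return s.create_user_event(true);
    }
    return s.enqueue_copy(host_ptr, device_ptr, bytes_count);
}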
nullptr : &downcast(ev.get())->get(); From 86dfacbc5134c3b15cfc9b7343b026c3f8daee60 Mon Sep 17 00:00:00 2001 From: Alina Kladieva Date: Mon, 2 Oct 2023 19:34:08 +0200 Subject: [PATCH 024/257] [build_samples_msvc] Exit properly on error (#20191) --- samples/cpp/build_samples_msvc.bat | 1 + 1 file changed, 1 insertion(+) diff --git a/samples/cpp/build_samples_msvc.bat b/samples/cpp/build_samples_msvc.bat index 40f2f67a3bcb10..43d69e00fe9a59 100644 --- a/samples/cpp/build_samples_msvc.bat +++ b/samples/cpp/build_samples_msvc.bat @@ -72,3 +72,4 @@ exit /b :errorHandling echo Error +exit /b %ERRORLEVEL% From bcd331d145e5c1c34190d603e554e1dd6e0535e2 Mon Sep 17 00:00:00 2001 From: Andrey Kashchikhin Date: Mon, 2 Oct 2023 18:45:17 +0100 Subject: [PATCH 025/257] [CI] [GHA] Transfer Linux ONNX Runtime to AKS runners (#20157) * transition to aks runners * correct name * incorporate onnxruntime into linux pipeline * add missing python installation * install build dependencies * add path * rm unused * mv stage * correct dir * use 16-core for onnxruntime job * rm obsolete * rm logs * rm unnecessary step * rm unnecessary dir creation --- .github/workflows/linux.yml | 134 +++++++++++++++++ .github/workflows/linux_onnxruntime.yml | 182 ------------------------ 2 files changed, 134 insertions(+), 182 deletions(-) delete mode 100644 .github/workflows/linux_onnxruntime.yml diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index aae1d5936024a4..052f8ebfcd6081 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -59,6 +59,7 @@ jobs: CCACHE_DIR: /mount/caches/ccache/ubuntu20_x86_64_Release CCACHE_TEMPDIR: /__w/openvino/openvino/ccache_temp CCACHE_MAXSIZE: 50G + ONNX_RUNTIME_UTILS: /__w/openvino/openvino/openvino/.ci/azure/ci_utils/onnxruntime steps: - name: Install git @@ -157,6 +158,12 @@ jobs: - name: Pack Artifacts run: | + + # Add the ONNX Runtime version and skip tests list to the archive to use in the ONNX Runtime Job + # w/o the need to checkout repository + + cp -R ${ONNX_RUNTIME_UTILS} ${INSTALL_DIR} + pushd ${INSTALL_DIR} tar -czvf ${BUILD_DIR}/openvino_package.tar.gz * popd @@ -377,6 +384,133 @@ jobs: path: ${{ env.INSTALL_TEST_DIR }}/TEST*.xml if-no-files-found: 'error' + ONNX_Runtime: + needs: Build + defaults: + run: + shell: bash + runs-on: aks-linux-16-cores + container: + image: openvinogithubactions.azurecr.io/dockerhub/ubuntu:20.04 + volumes: + - /mount/caches:/mount/caches + env: + DEBIAN_FRONTEND: noninteractive # to prevent apt-get from waiting user input + CMAKE_GENERATOR: 'Ninja Multi-Config' + CMAKE_CXX_COMPILER_LAUNCHER: ccache + CMAKE_C_COMPILER_LAUNCHER: ccache + OPENVINO_REPO: /__w/openvino/openvino/openvino + INSTALL_DIR: /__w/openvino/openvino/install + CCACHE_DIR: /mount/caches/ccache/ubuntu20_x86_64_onnxruntime + CCACHE_TEMPDIR: /__w/openvino/openvino/ccache_temp + CCACHE_MAXSIZE: 50G + ONNX_RUNTIME_REPO: /__w/openvino/openvino/onnxruntime + ONNX_RUNTIME_UTILS: /__w/openvino/openvino/install/onnxruntime + ONNX_RUNTIME_BUILD_DIR: /__w/openvino/openvino/onnxruntime/build + + steps: + + - name: Fetch install_build_dependencies.sh + uses: actions/checkout@v4 + with: + sparse-checkout: | + install_build_dependencies.sh + sparse-checkout-cone-mode: false + path: ${{ env.OPENVINO_REPO }} + + - name: Install git + run: | + apt-get update + apt-get install --assume-yes --no-install-recommends git ca-certificates + + - uses: actions/setup-python@v4 + with: + python-version: ${{ env.PYTHON_VERSION }} + + # + # Initialize OpenVINO + # + + - name: 
Download OpenVINO package + uses: actions/download-artifact@v3 + with: + name: openvino_package + path: ${{ env.INSTALL_DIR }} + + - name: Extract OpenVINO package + run: | + pushd ${INSTALL_DIR} + tar -xzf openvino_package.tar.gz -C ${INSTALL_DIR} && rm openvino_package.tar.gz + popd + + - name: Install OpenVINO dependencies + run: ${INSTALL_DIR}/install_dependencies/install_openvino_dependencies.sh -c=core -c=dev -y + + - name: Clone ONNX Runtime + run: | + branch=`tr -s '\n ' < ${ONNX_RUNTIME_UTILS}/version` + git clone --branch $branch --single-branch --recursive https://github.com/microsoft/onnxruntime.git ${ONNX_RUNTIME_REPO} + + # + # Tests + # + + - name: Install Build Dependencies + run: bash ${OPENVINO_REPO}/install_build_dependencies.sh + + - name: Build Lin ONNX Runtime + run: | + source ${INSTALL_DIR}/setupvars.sh + + ${ONNX_RUNTIME_REPO}/build.sh \ + --config RelWithDebInfo \ + --use_openvino CPU_FP32 \ + --build_shared_lib \ + --parallel \ + --skip_tests \ + --compile_no_warning_as_error \ + --build_dir ${ONNX_RUNTIME_BUILD_DIR} + env: + CXXFLAGS: "-Wno-error=deprecated-declarations" + + - name: Run onnxruntime_test_all + run: | + source ${INSTALL_DIR}/setupvars.sh + skip_tests=$(tr -s '\n ' ':' < ${ONNX_RUNTIME_UTILS}/skip_tests) + + ./onnxruntime_test_all --gtest_filter=-$skip_tests + working-directory: ${{ env.ONNX_RUNTIME_BUILD_DIR }}/RelWithDebInfo/RelWithDebInfo + + - name: Run onnxruntime_shared_lib_test + run: | + source ${INSTALL_DIR}/setupvars.sh + ./onnxruntime_shared_lib_test --gtest_filter=-CApiTest.test_custom_op_openvino_wrapper_library + working-directory: ${{ env.ONNX_RUNTIME_BUILD_DIR }}/RelWithDebInfo/RelWithDebInfo + + - name: Run onnxruntime_global_thread_pools_test + run: | + source ${INSTALL_DIR}/setupvars.sh + ./onnxruntime_global_thread_pools_test + working-directory: ${{ env.ONNX_RUNTIME_BUILD_DIR }}/RelWithDebInfo/RelWithDebInfo + + - name: Run onnxruntime_api_tests_without_env + run: | + source ${INSTALL_DIR}/setupvars.sh + ./onnxruntime_api_tests_without_env + working-directory: ${{ env.ONNX_RUNTIME_BUILD_DIR }}/RelWithDebInfo/RelWithDebInfo + + - name: Run pytorch-converted tests + run: | + source ${INSTALL_DIR}/setupvars.sh + ./onnx_test_runner "${ONNX_RUNTIME_REPO}/cmake/external/onnx/onnx/backend/test/data/pytorch-converted" + working-directory: ${{ env.ONNX_RUNTIME_BUILD_DIR }}/RelWithDebInfo/RelWithDebInfo + + - name: Run pytorch-operator tests + run: | + source ${INSTALL_DIR}/setupvars.sh + ./onnx_test_runner "${ONNX_RUNTIME_REPO}/cmake/external/onnx/onnx/backend/test/data/pytorch-operator" + working-directory: ${{ env.ONNX_RUNTIME_BUILD_DIR }}/RelWithDebInfo/RelWithDebInfo + CXX_Unit_Tests: needs: Build defaults: diff --git a/.github/workflows/linux_onnxruntime.yml b/.github/workflows/linux_onnxruntime.yml deleted file mode 100644 index 178aae4fefac6b..00000000000000 --- a/.github/workflows/linux_onnxruntime.yml +++ /dev/null @@ -1,182 +0,0 @@ -name: Linux ONNX Runtime (Ubuntu 20.04, Python 3.11) -on: - workflow_dispatch: - schedule: - # run daily at 00:00 - - cron: '0 0 * * *' -# pull_request: -# paths-ignore: -# - '**/docs/**' -# - 'docs/**' -# - '**/**.md' -# - '**.md' -# - '**/layer_tests_summary/**' -# - '**/conformance/**' -# push: -# paths-ignore: -# - '**/docs/**' -# - 'docs/**' -# - '**/**.md' -# - '**.md' -# - '**/layer_tests_summary/**' -# - '**/conformance/**' -# branches: -# - master - -concurrency: - group: ${{ github.head_ref || github.run_id }}-linux-onnx-runtime - cancel-in-progress: true - -jobs: - Build: - # TODO: 
remove. Temporary measure to prevent the workflow from scheduling on forks. - if: ${{ github.repository_owner == 'openvinotoolkit' }} - defaults: - run: - shell: bash - runs-on: ubuntu-20.04-8-cores - env: - CMAKE_BUILD_TYPE: 'Release' - CMAKE_GENERATOR: 'Ninja' - CMAKE_CXX_COMPILER_LAUNCHER: ccache - CMAKE_C_COMPILER_LAUNCHER: ccache - CMAKE_CXX_LINKER_LAUNCHER: ccache - CMAKE_C_LINKER_LAUNCHER: ccache - BUILD_TYPE: Release - OPENVINO_REPO: ${{ github.workspace }}/openvino - ONNX_RUNTIME_REPO: ${{ github.workspace }}/onnxruntime - ONNX_RUNTIME_UTILS: ${{ github.workspace }}/openvino/.ci/azure/ci_utils/onnxruntime - ONNX_RUNTIME_BUILD_DIR: ${{ github.workspace }}/onnxruntime/build - BUILD_DIR: ${{ github.workspace }}/build - INSTALL_DIR: ${{ github.workspace }}/install/openvino - steps: - - name: Clone OpenVINO - uses: actions/checkout@v4 - with: - path: 'openvino' - submodules: 'true' - - - name: Clone ONNX Runtime - run: | - branch=`tr -s '\n ' < ${{ env.ONNX_RUNTIME_UTILS }}/version` - git clone --branch $branch --single-branch --recursive https://github.com/microsoft/onnxruntime.git ${{ env.ONNX_RUNTIME_REPO }} - - - name: Create Directories - run: | - mkdir -p ${{ env.BUILD_DIR }} - mkdir -p ${{ env.INSTALL_DIR }} - - - name: Setup Python 3.11 - uses: actions/setup-python@v4 - with: - python-version: '3.11' - - # - # Dependencies - # - - - name: Install build dependencies - run: | - sudo -E ${{ env.OPENVINO_REPO }}/install_build_dependencies.sh - - - name: Setup ccache - uses: hendrikmuhs/ccache-action@v1.2 - with: - max-size: "2000M" - # Should save cache only if run in the master branch of the base repo - # github.ref_name is 'ref/PR_#' in case of the PR, and 'branch_name' when executed on push - save: ${{ github.ref_name == 'master' && 'true' || 'false' }} - verbose: 2 - key: ${{ github.job }}-linux-onnx-runtime - restore-keys: | - ${{ github.job }}-linux-onnx-runtime - - # - # Build - # - - - name: Get number of CPU cores - uses: SimenB/github-actions-cpu-cores@v2 - id: cpu-cores - - - name: CMake configure - run: | - cmake \ - -GNinja \ - -DCMAKE_BUILD_TYPE=${{ env.BUILD_TYPE }} \ - -DCMAKE_COMPILE_WARNING_AS_ERROR=OFF \ - -DENABLE_INTEL_GNA=OFF \ - -DENABLE_INTEL_GPU=OFF \ - -DENABLE_CPPLINT=OFF \ - -DENABLE_PROFILING_ITT=OFF \ - -DENABLE_SAMPLES=OFF \ - -DENABLE_OV_TF_FRONTEND=OFF \ - -DENABLE_OV_TF_LITE=OFF \ - -DENABLE_OV_PADDLE_FRONTEND=OFF \ - -DENABLE_OV_PYTORCH_FRONTEND=OFF \ - -S ${{ env.OPENVINO_REPO }} \ - -B ${{ env.BUILD_DIR }} - - - name: Clean ccache stats - run: ccache --zero-stats --show-config - - - name: Build - run: cmake --build ${{ env.BUILD_DIR }} --parallel ${{ steps.cpu-cores.outputs.count }} --config ${{ env.BUILD_TYPE }} - - - name: Show ccache stats - run: ccache --show-stats - - - name: Install OpenVINO - run: cmake -DCMAKE_INSTALL_PREFIX=${{ env.INSTALL_DIR }} -P ${{ env.BUILD_DIR }}/cmake_install.cmake - - - name: Build Lin ONNX Runtime - run: | - source ${{ env.INSTALL_DIR }}/setupvars.sh - - ${{ env.ONNX_RUNTIME_REPO }}/build.sh \ - --config RelWithDebInfo \ - --use_openvino CPU_FP32 \ - --build_shared_lib \ - --parallel \ - --skip_tests \ - --compile_no_warning_as_error \ - --build_dir ${{ env.ONNX_RUNTIME_BUILD_DIR }} - env: - CXXFLAGS: "-Wno-error=deprecated-declarations" - - - name: Run onnxruntime_test_all - run: | - source ${{ env.INSTALL_DIR }}/setupvars.sh - skip_tests=$(tr -s '\n ' ':' < ${{ env.ONNX_RUNTIME_UTILS }}/skip_tests) - ./onnxruntime_test_all --gtest_filter=-$skip_tests - working-directory: ${{ env.ONNX_RUNTIME_BUILD_DIR 
}}/RelWithDebInfo - - - name: Run onnxruntime_shared_lib_test - run: | - source ${{ env.INSTALL_DIR }}/setupvars.sh - ./onnxruntime_shared_lib_test --gtest_filter=-CApiTest.test_custom_op_openvino_wrapper_library - working-directory: ${{ env.ONNX_RUNTIME_BUILD_DIR }}/RelWithDebInfo - - - name: Run onnxruntime_global_thread_pools_test - run: | - source ${{ env.INSTALL_DIR }}/setupvars.sh - ./onnxruntime_global_thread_pools_test - working-directory: ${{ env.ONNX_RUNTIME_BUILD_DIR }}/RelWithDebInfo - - - name: Run onnxruntime_api_tests_without_env - run: | - source ${{ env.INSTALL_DIR }}/setupvars.sh - ./onnxruntime_api_tests_without_env - working-directory: ${{ env.ONNX_RUNTIME_BUILD_DIR }}/RelWithDebInfo - - - name: Run pytorch-converted tests - run: | - source ${{ env.INSTALL_DIR }}/setupvars.sh - ./onnx_test_runner "${{ env.ONNX_RUNTIME_REPO }}/cmake/external/onnx/onnx/backend/test/data/pytorch-converted" - working-directory: ${{ env.ONNX_RUNTIME_BUILD_DIR }}/RelWithDebInfo - - - name: Run pytorch-operator tests - run: | - source ${{ env.INSTALL_DIR }}/setupvars.sh - ./onnx_test_runner "${{ env.ONNX_RUNTIME_REPO }}/cmake/external/onnx/onnx/backend/test/data/pytorch-operator" - working-directory: ${{ env.ONNX_RUNTIME_BUILD_DIR }}/RelWithDebInfo From 9dd5fe97cb0f837028aeae0a2af573ff41ffff9b Mon Sep 17 00:00:00 2001 From: Przemyslaw Wysocki Date: Mon, 2 Oct 2023 19:55:14 +0200 Subject: [PATCH 026/257] Add field for JIRA ticket in Good First Issue template (#20187) * Add gfi * Minor change * Fix linter * fix typo * Fix typo' * Add new field * Minor change --- .github/ISSUE_TEMPLATE/good_first_issue.yml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/.github/ISSUE_TEMPLATE/good_first_issue.yml b/.github/ISSUE_TEMPLATE/good_first_issue.yml index 658e1bb00a9739..0b7edcbaecd1a4 100644 --- a/.github/ISSUE_TEMPLATE/good_first_issue.yml +++ b/.github/ISSUE_TEMPLATE/good_first_issue.yml @@ -57,3 +57,12 @@ body: placeholder: GitHub users validations: required: true + + - type: textarea + id: ticket + attributes: + label: Ticket + description: | + Provide the ticket number, if available. + validations: + required: false From 3bdd12b6c5564589ce99b20f69ffb05c7ec5bfe9 Mon Sep 17 00:00:00 2001 From: Anastasia Kuporosova Date: Tue, 3 Oct 2023 06:59:57 +0200 Subject: [PATCH 027/257] [PyOV][Docs] Update docs with new way of using properties (#20116) --- docs/snippets/cpu/Bfloat16Inference.py | 6 ++--- docs/snippets/cpu/ov_execution_mode.py | 4 +-- docs/snippets/gpu/compile_model.py | 2 +- docs/snippets/ov_auto.py | 32 ++++++++++++------------ docs/snippets/ov_auto_batching.py | 18 ++++++------- docs/snippets/ov_caching.py | 6 ++--- docs/snippets/ov_hetero.py | 8 +++--- docs/snippets/ov_multi.py | 14 +++++------ docs/snippets/ov_properties_api.py | 20 +++++++-------- docs/snippets/ov_properties_migration.py | 18 ++++++------- 10 files changed, 64 insertions(+), 64 deletions(-) diff --git a/docs/snippets/cpu/Bfloat16Inference.py b/docs/snippets/cpu/Bfloat16Inference.py index 7d6516ea7d46fb..69479e8c3fb5cb 100644 --- a/docs/snippets/cpu/Bfloat16Inference.py +++ b/docs/snippets/cpu/Bfloat16Inference.py @@ -12,16 +12,16 @@ #! [part0] core = ov.Core() -cpu_optimization_capabilities = core.get_property("CPU", device.capabilities()) +cpu_optimization_capabilities = core.get_property("CPU", device.capabilities) #! [part0] #! 
[part1] core = ov.Core() compiled_model = core.compile_model(model, "CPU") -inference_precision = core.get_property("CPU", hints.inference_precision()) +inference_precision = core.get_property("CPU", hints.inference_precision) #! [part1] #! [part2] core = ov.Core() -core.set_property("CPU", {hints.inference_precision(): ov.Type.f32}) +core.set_property("CPU", {hints.inference_precision: ov.Type.f32}) #! [part2] diff --git a/docs/snippets/cpu/ov_execution_mode.py b/docs/snippets/cpu/ov_execution_mode.py index 5476426e77aa7f..511e2a28dd6a35 100644 --- a/docs/snippets/cpu/ov_execution_mode.py +++ b/docs/snippets/cpu/ov_execution_mode.py @@ -9,11 +9,11 @@ # in case of Accuracy core.set_property( "CPU", - {hints.execution_mode(): hints.ExecutionMode.ACCURACY}, + {hints.execution_mode: hints.ExecutionMode.ACCURACY}, ) # in case of Performance core.set_property( "CPU", - {hints.execution_mode(): hints.ExecutionMode.PERFORMANCE}, + {hints.execution_mode: hints.ExecutionMode.PERFORMANCE}, ) #! [ov:execution_mode:part0] diff --git a/docs/snippets/gpu/compile_model.py b/docs/snippets/gpu/compile_model.py index 152de661c1f7ab..733f162dbc1119 100644 --- a/docs/snippets/gpu/compile_model.py +++ b/docs/snippets/gpu/compile_model.py @@ -46,7 +46,7 @@ def main(): model, "GPU", { - hints.performance_mode(): hints.PerformanceMode.THROUGHPUT, + hints.performance_mode: hints.PerformanceMode.THROUGHPUT, }, ) #! [compile_model_auto_batch] diff --git a/docs/snippets/ov_auto.py b/docs/snippets/ov_auto.py index 35454e0527b6f1..a665b509713f6e 100644 --- a/docs/snippets/ov_auto.py +++ b/docs/snippets/ov_auto.py @@ -31,13 +31,13 @@ def part0(): compiled_model = core.compile_model( model=model, device_name="AUTO", - config={device.priorities(): "GPU,CPU"}, + config={device.priorities: "GPU,CPU"}, ) # Optional # the AUTO plugin is pre-configured (globally) with the explicit option: core.set_property( - device_name="AUTO", properties={device.priorities(): "GPU,CPU"} + device_name="AUTO", properties={device.priorities: "GPU,CPU"} ) #! [part0] @@ -60,13 +60,13 @@ def part1(): exec_net = ie.load_network( network=net, device_name="AUTO", - config={device.priorities(): "GPU,CPU"}, + config={"MULTI_DEVICE_PRIORITIES": "GPU,CPU"}, ) # Optional # the AUTO plugin is pre-configured (globally) with the explicit option: ie.set_config( - config={device.priorities(): "GPU,CPU"}, device_name="AUTO" + config={"MULTI_DEVICE_PRIORITIES": "GPU,CPU"}, device_name="AUTO" ) #! [part1] @@ -81,7 +81,7 @@ def part3(): model=model, device_name="AUTO", config={ - hints.performance_mode(): hints.PerformanceMode.THROUGHPUT + hints.performance_mode: hints.PerformanceMode.THROUGHPUT }, ) # To use the “LATENCY” mode: @@ -89,7 +89,7 @@ def part3(): model=model, device_name="AUTO", config={ - hints.performance_mode(): hints.PerformanceMode.LATENCY + hints.performance_mode: hints.PerformanceMode.LATENCY }, ) # To use the “CUMULATIVE_THROUGHPUT” mode: @@ -97,7 +97,7 @@ def part3(): model=model, device_name="AUTO", config={ - hints.performance_mode(): hints.PerformanceMode.CUMULATIVE_THROUGHPUT + hints.performance_mode: hints.PerformanceMode.CUMULATIVE_THROUGHPUT }, ) #! 
[part3] @@ -111,19 +111,19 @@ def part4(): compiled_model0 = core.compile_model( model=model, device_name="AUTO", - config={hints.model_priority(): hints.Priority.HIGH}, + config={hints.model_priority: hints.Priority.HIGH}, ) compiled_model1 = core.compile_model( model=model, device_name="AUTO", config={ - hints.model_priority(): hints.Priority.MEDIUM + hints.model_priority: hints.Priority.MEDIUM }, ) compiled_model2 = core.compile_model( model=model, device_name="AUTO", - config={hints.model_priority(): hints.Priority.LOW}, + config={hints.model_priority: hints.Priority.LOW}, ) # Assume that all the devices (CPU and GPUs) can support all the networks. # Result: compiled_model0 will use GPU.1, compiled_model1 will use GPU.0, compiled_model2 will use CPU. @@ -132,19 +132,19 @@ def part4(): compiled_model3 = core.compile_model( model=model, device_name="AUTO", - config={hints.model_priority(): hints.Priority.HIGH}, + config={hints.model_priority: hints.Priority.HIGH}, ) compiled_model4 = core.compile_model( model=model, device_name="AUTO", config={ - hints.model_priority(): hints.Priority.MEDIUM + hints.model_priority: hints.Priority.MEDIUM }, ) compiled_model5 = core.compile_model( model=model, device_name="AUTO", - config={hints.model_priority(): hints.Priority.LOW}, + config={hints.model_priority: hints.Priority.LOW}, ) # Assume that all the devices (CPU ang GPUs) can support all the networks. # Result: compiled_model3 will use GPU.1, compiled_model4 will use GPU.1, compiled_model5 will use GPU.0. @@ -169,12 +169,12 @@ def part6(): compiled_model = core.compile_model( model=model, device_name="AUTO", - config={log.level(): log.Level.DEBUG}, + config={log.level: log.Level.DEBUG}, ) # set log level with set_property and compile model core.set_property( device_name="AUTO", - properties={log.level(): log.Level.DEBUG}, + properties={log.level: log.Level.DEBUG}, ) compiled_model = core.compile_model(model=model, device_name="AUTO") #! [part6] @@ -187,7 +187,7 @@ def part7(): # compile a model on AUTO and set log level to debug compiled_model = core.compile_model(model=model, device_name="AUTO") # query the runtime target devices on which the inferences are being executed - execution_devices = compiled_model.get_property(properties.execution_devices()) + execution_devices = compiled_model.get_property(properties.execution_devices) #! 
[part7] diff --git a/docs/snippets/ov_auto_batching.py b/docs/snippets/ov_auto_batching.py index 7508c90337c240..54a0c8accddac4 100644 --- a/docs/snippets/ov_auto_batching.py +++ b/docs/snippets/ov_auto_batching.py @@ -18,37 +18,37 @@ def main(): import openvino.properties as props import openvino.properties.hint as hints - config = {hints.performance_mode(): hints.PerformanceMode.THROUGHPUT} + config = {hints.performance_mode: hints.PerformanceMode.THROUGHPUT} compiled_model = core.compile_model(model, "GPU", config) # [compile_model] # [compile_model_no_auto_batching] # disabling the automatic batching # leaving intact other configurations options that the device selects for the 'throughput' hint - config = {hints.performance_mode(): hints.PerformanceMode.THROUGHPUT, - hints.allow_auto_batching(): False} + config = {hints.performance_mode: hints.PerformanceMode.THROUGHPUT, + hints.allow_auto_batching: False} compiled_model = core.compile_model(model, "GPU", config) # [compile_model_no_auto_batching] # [query_optimal_num_requests] # when the batch size is automatically selected by the implementation # it is important to query/create and run the sufficient requests - config = {hints.performance_mode(): hints.PerformanceMode.THROUGHPUT} + config = {hints.performance_mode: hints.PerformanceMode.THROUGHPUT} compiled_model = core.compile_model(model, "GPU", config) - num_requests = compiled_model.get_property(props.optimal_number_of_infer_requests()) + num_requests = compiled_model.get_property(props.optimal_number_of_infer_requests) # [query_optimal_num_requests] # [hint_num_requests] - config = {hints.performance_mode(): hints.PerformanceMode.THROUGHPUT, - hints.num_requests(): "4"} + config = {hints.performance_mode: hints.PerformanceMode.THROUGHPUT, + hints.num_requests: "4"} # limiting the available parallel slack for the 'throughput' # so that certain parameters (like selected batch size) are automatically accommodated accordingly compiled_model = core.compile_model(model, "GPU", config) # [hint_num_requests] # [hint_plus_low_level] - config = {hints.performance_mode(): hints.PerformanceMode.THROUGHPUT, - props.inference_num_threads(): "4"} + config = {hints.performance_mode: hints.PerformanceMode.THROUGHPUT, + props.inference_num_threads: "4"} # limiting the available parallel slack for the 'throughput' # so that certain parameters (like selected batch size) are automatically accommodated accordingly compiled_model = core.compile_model(model, "CPU", config) diff --git a/docs/snippets/ov_caching.py b/docs/snippets/ov_caching.py index a30ec968e7d2cf..b6b4c6e3f8eed7 100644 --- a/docs/snippets/ov_caching.py +++ b/docs/snippets/ov_caching.py @@ -12,7 +12,7 @@ path_to_cache_dir = get_temp_dir() # ! [ov:caching:part0] core = ov.Core() -core.set_property({props.cache_dir(): path_to_cache_dir}) +core.set_property({props.cache_dir: path_to_cache_dir}) model = core.read_model(model=model_path) compiled_model = core.compile_model(model=model, device_name=device_name) # ! [ov:caching:part0] @@ -28,7 +28,7 @@ # ! [ov:caching:part2] core = ov.Core() -core.set_property({props.cache_dir(): path_to_cache_dir}) +core.set_property({props.cache_dir: path_to_cache_dir}) compiled_model = core.compile_model(model=model_path, device_name=device_name) # ! 
[ov:caching:part2] @@ -38,5 +38,5 @@ import openvino.properties.device as device # Find 'EXPORT_IMPORT' capability in supported capabilities -caching_supported = 'EXPORT_IMPORT' in core.get_property(device_name, device.capabilities()) +caching_supported = 'EXPORT_IMPORT' in core.get_property(device_name, device.capabilities) # ! [ov:caching:part3] diff --git a/docs/snippets/ov_hetero.py b/docs/snippets/ov_hetero.py index aa0eac7784fe4f..7f338081f69c48 100644 --- a/docs/snippets/ov_hetero.py +++ b/docs/snippets/ov_hetero.py @@ -41,15 +41,15 @@ def main(): compiled_model = core.compile_model(model, device_name="HETERO:GPU,CPU") # device priorities via configuration property compiled_model = core.compile_model( - model, device_name="HETERO", config={device.priorities(): "GPU,CPU"} + model, device_name="HETERO", config={device.priorities: "GPU,CPU"} ) #! [compile_model] #! [configure_fallback_devices] import openvino.hint as hints - core.set_property("HETERO", {device.priorities(): "GPU,CPU"}) - core.set_property("GPU", {properties.enable_profiling(): True}) - core.set_property("CPU", {hints.inference_precision(): ov.Type.f32}) + core.set_property("HETERO", {device.priorities: "GPU,CPU"}) + core.set_property("GPU", {properties.enable_profiling: True}) + core.set_property("CPU", {hints.inference_precision: ov.Type.f32}) compiled_model = core.compile_model(model=model, device_name="HETERO") #! [configure_fallback_devices] diff --git a/docs/snippets/ov_multi.py b/docs/snippets/ov_multi.py index bae82aa3d47713..1f852faea94c9c 100644 --- a/docs/snippets/ov_multi.py +++ b/docs/snippets/ov_multi.py @@ -17,7 +17,7 @@ def MULTI_0(): # Pre-configure MULTI globally with explicitly defined devices, # and compile the model on MULTI using the newly specified default device list. core.set_property( - device_name="MULTI", properties={device.priorities(): "GPU,CPU"} + device_name="MULTI", properties={device.priorities: "GPU,CPU"} ) compiled_model = core.compile_model(model=model, device_name="MULTI") @@ -28,7 +28,7 @@ def MULTI_0(): compiled_model = core.compile_model( model=model, device_name="MULTI", - config={device.priorities(): "GPU,CPU"}, + config={device.priorities: "GPU,CPU"}, ) #! [MULTI_0] @@ -38,22 +38,22 @@ def MULTI_1(): core = ov.Core() core.set_property( - device_name="MULTI", properties={device.priorities(): "CPU,GPU"} + device_name="MULTI", properties={device.priorities: "CPU,GPU"} ) # Once the priority list is set, you can alter it on the fly: # reverse the order of priorities core.set_property( - device_name="MULTI", properties={device.priorities(): "GPU,CPU"} + device_name="MULTI", properties={device.priorities: "GPU,CPU"} ) # exclude some devices (in this case, CPU) core.set_property( - device_name="MULTI", properties={device.priorities(): "GPU"} + device_name="MULTI", properties={device.priorities: "GPU"} ) # bring back the excluded devices core.set_property( - device_name="MULTI", properties={device.priorities(): "GPU,CPU"} + device_name="MULTI", properties={device.priorities: "GPU,CPU"} ) # You cannot add new devices on the fly! @@ -109,7 +109,7 @@ def MULTI_4(): # Optionally, query the optimal number of requests: nireq = compiled_model.get_property( - properties.optimal_number_of_infer_requests() + properties.optimal_number_of_infer_requests ) #! 
[MULTI_4] diff --git a/docs/snippets/ov_properties_api.py b/docs/snippets/ov_properties_api.py index 369e88ad572de3..2c07ca67741c45 100644 --- a/docs/snippets/ov_properties_api.py +++ b/docs/snippets/ov_properties_api.py @@ -17,48 +17,48 @@ def main(): # [get_available_devices] # [hetero_priorities] - device_priorites = core.get_property("HETERO", device.priorities()) + device_priorites = core.get_property("HETERO", device.priorities) # [hetero_priorities] # [cpu_device_name] - cpu_device_name = core.get_property("CPU", device.full_name()) + cpu_device_name = core.get_property("CPU", device.full_name) # [cpu_device_name] model = get_model() # [compile_model_with_property] - config = {hints.performance_mode(): hints.PerformanceMode.THROUGHPUT, - hints.inference_precision(): ov.Type.f32} + config = {hints.performance_mode: hints.PerformanceMode.THROUGHPUT, + hints.inference_precision: ov.Type.f32} compiled_model = core.compile_model(model, "CPU", config) # [compile_model_with_property] # [optimal_number_of_infer_requests] compiled_model = core.compile_model(model, "CPU") - nireq = compiled_model.get_property(props.optimal_number_of_infer_requests()) + nireq = compiled_model.get_property(props.optimal_number_of_infer_requests) # [optimal_number_of_infer_requests] # [core_set_property_then_compile] # latency hint is a default for CPU - core.set_property("CPU", {hints.performance_mode(): hints.PerformanceMode.LATENCY}) + core.set_property("CPU", {hints.performance_mode: hints.PerformanceMode.LATENCY}) # compiled with latency configuration hint compiled_model_latency = core.compile_model(model, "CPU") # compiled with overriden performance hint value - config = {hints.performance_mode(): hints.PerformanceMode.THROUGHPUT} + config = {hints.performance_mode: hints.PerformanceMode.THROUGHPUT} compiled_model_thrp = core.compile_model(model, "CPU", config) # [core_set_property_then_compile] # [inference_num_threads] compiled_model = core.compile_model(model, "CPU") - nthreads = compiled_model.get_property(props.inference_num_threads()) + nthreads = compiled_model.get_property(props.inference_num_threads) # [inference_num_threads] if "GPU" not in available_devices: return 0 # [multi_device] - config = {device.priorities(): "CPU,GPU"} + config = {device.priorities: "CPU,GPU"} compiled_model = core.compile_model(model, "MULTI", config) # change the order of priorities - compiled_model.set_property({device.priorities(): "GPU,CPU"}) + compiled_model.set_property({device.priorities: "GPU,CPU"}) # [multi_device] diff --git a/docs/snippets/ov_properties_migration.py b/docs/snippets/ov_properties_migration.py index 88addb6a72d925..8d5ac86b2088f0 100644 --- a/docs/snippets/ov_properties_migration.py +++ b/docs/snippets/ov_properties_migration.py @@ -14,7 +14,7 @@ def main(): core = ov.Core() # ! [core_set_property] - core.set_property(device_name="CPU", properties={props.enable_profiling(): True}) + core.set_property(device_name="CPU", properties={props.enable_profiling: True}) # ! [core_set_property] model = get_model() @@ -25,31 +25,31 @@ def main(): # ! [core_compile_model] compiled_model = core.compile_model(model=model, device_name="MULTI", config= { - device.priorities(): "GPU,CPU", - hints.performance_mode(): hints.PerformanceMode.THROUGHPUT, - hints.inference_precision(): ov.Type.f32 + device.priorities: "GPU,CPU", + hints.performance_mode: hints.PerformanceMode.THROUGHPUT, + hints.inference_precision: ov.Type.f32 }) # ! [core_compile_model] # ! 
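# [Editor's illustration - not one of the patched snippets] The single mechanical change this
# patch applies across the docs: OpenVINO properties are referenced as plain attributes instead
# of callables, so the trailing "()" is dropped wherever a property key is used. A minimal
# before/after sketch, assuming a recent openvino Python package is installed:
import openvino as ov
import openvino.properties as props
import openvino.properties.hint as hints

core = ov.Core()

# old style (pre-patch):  core.set_property("CPU", {hints.performance_mode(): ...})
# new style (post-patch): plain attribute access
core.set_property("CPU", {hints.performance_mode: hints.PerformanceMode.LATENCY})
core.set_property({props.cache_dir: "model_cache"})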
[compiled_model_set_property] # turn CPU off for multi-device execution - compiled_model.set_property(properties={device.priorities(): "GPU"}) + compiled_model.set_property(properties={device.priorities: "GPU"}) # ! [compiled_model_set_property] # ! [core_get_rw_property] - num_streams = core.get_property("CPU", streams.num()) + num_streams = core.get_property("CPU", streams.num) # ! [core_get_rw_property] # ! [core_get_ro_property] - full_device_name = core.get_property("CPU", device.full_name()) + full_device_name = core.get_property("CPU", device.full_name) # ! [core_get_ro_property] # ! [compiled_model_get_rw_property] - perf_mode = compiled_model.get_property(hints.performance_mode()) + perf_mode = compiled_model.get_property(hints.performance_mode) # ! [compiled_model_get_rw_property] # ! [compiled_model_get_ro_property] - nireq = compiled_model.get_property(props.optimal_number_of_infer_requests()) + nireq = compiled_model.get_property(props.optimal_number_of_infer_requests) # ! [compiled_model_get_ro_property] import ngraph as ng From 23e5964c5db4ad5e1ab2557556bb19c448da4466 Mon Sep 17 00:00:00 2001 From: Ilya Churaev Date: Tue, 3 Oct 2023 09:13:18 +0400 Subject: [PATCH 028/257] Remove HostTensor from ov namespace (#20168) * Remove HostTensor from ov namespace * Fixed build --- .../include/ov_ops/type_relaxed.hpp | 41 ++++++----- .../include/ngraph/runtime/host_tensor.hpp | 6 ++ .../include/openvino/core/core_visibility.hpp | 4 +- .../openvino/core/descriptor/tensor.hpp | 1 - src/core/include/openvino/core/node.hpp | 12 +--- src/core/include/openvino/op/broadcast.hpp | 10 +-- .../openvino/op/util/broadcast_base.hpp | 12 ++-- .../include/tensor_data_accessor.hpp | 6 +- src/core/shape_inference/include/utils.hpp | 4 +- .../src/tensor_data_accessor.cpp | 2 +- src/core/src/op/broadcast.cpp | 8 +-- src/core/src/op/eye.cpp | 6 +- src/core/src/op/grid_sample.cpp | 18 ++--- src/core/src/op/scatter_elements_update.cpp | 40 +++++------ src/core/src/op/util/broadcast_base.cpp | 68 ++++++++----------- src/core/src/runtime/host_tensor.cpp | 14 ++-- src/core/tests/type_prop/eye.cpp | 8 +-- src/core/tests/visitors/visitors.hpp | 8 +-- .../subgraph_tests/src/denormal_check.cpp | 2 +- src/plugins/template/backend/ops/if.cpp | 2 +- .../subgraph_reference/preprocess.cpp | 8 +-- 21 files changed, 128 insertions(+), 152 deletions(-) diff --git a/src/common/transformations/include/ov_ops/type_relaxed.hpp b/src/common/transformations/include/ov_ops/type_relaxed.hpp index 1748cdc586f45e..5fdd9ba4bb87c4 100644 --- a/src/common/transformations/include/ov_ops/type_relaxed.hpp +++ b/src/common/transformations/include/ov_ops/type_relaxed.hpp @@ -12,6 +12,7 @@ #include "openvino/op/convert.hpp" #include "openvino/op/parameter.hpp" +#include "openvino/runtime/tensor.hpp" #include "transformations_visibility.hpp" namespace ov { @@ -178,9 +179,6 @@ class TemporaryReplaceOutputType { } }; -// TODO: remove once FusedOp is removed -OPENVINO_SUPPRESS_DEPRECATED_START - /// Relaxes tensor element type requirements for BaseOp inputs and outputs /// This class template should be used with Node descendant class. 
Defines a new operation by extending the /// original BaseOp operation with ability to accept inputs and provide outputs with element type that is @@ -221,9 +219,7 @@ class TypeRelaxed : public BaseOp, public TypeRelaxedBase { } void validate_and_infer_types() override; - OPENVINO_SUPPRESS_DEPRECATED_START - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - OPENVINO_SUPPRESS_DEPRECATED_END + bool evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const override; bool evaluate_lower(TensorVector& outputs) const override; bool evaluate_upper(TensorVector& outputs) const override; @@ -241,15 +237,14 @@ class TypeRelaxed : public BaseOp, public TypeRelaxedBase { init_rt_result init_rt = init_rt_info(*this); }; -OPENVINO_SUPPRESS_DEPRECATED_START template -bool TypeRelaxed::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { +bool TypeRelaxed::evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const { std::shared_ptr convert; - HostTensorVector casted_inputs(BaseOp::get_input_size()); + ov::TensorVector casted_inputs(BaseOp::get_input_size()); for (size_t i = 0; i < BaseOp::get_input_size(); ++i) { const auto expected_input_type = get_origin_input_type(i); - if (inputs[i]->get_element_type() == expected_input_type || expected_input_type == element::undefined) { + if (inputs[i].get_element_type() == expected_input_type || expected_input_type == element::undefined) { casted_inputs[i] = inputs[i]; } else { if (convert == nullptr) { @@ -257,21 +252,25 @@ bool TypeRelaxed::evaluate(const HostTensorVector& outputs, const HostTe } convert->set_destination_type(expected_input_type); - casted_inputs[i] = std::make_shared(expected_input_type, inputs[i]->get_shape()); - if (!convert->evaluate({casted_inputs[i]}, {inputs[i]})) { + casted_inputs[i] = ov::Tensor(expected_input_type, inputs[i].get_shape()); + ov::TensorVector outs = {casted_inputs[i]}; + ov::TensorVector ins = {inputs[i]}; + + if (!convert->evaluate(outs, ins)) { return false; } } } - HostTensorVector original_outputs(BaseOp::get_output_size()); + ov::TensorVector original_outputs(BaseOp::get_output_size()); for (size_t i = 0; i < BaseOp::get_output_size(); ++i) { const auto expected_output_type = get_overridden_output_type(i); if (expected_output_type == element::undefined || expected_output_type == m_original_output_data_types[i]) { original_outputs[i] = outputs[i]; } else { - original_outputs[i] = - std::make_shared(m_original_output_data_types[i], BaseOp::get_output_partial_shape(i)); + auto partial_shape = BaseOp::get_output_partial_shape(i); + auto shape = partial_shape.is_dynamic() ? 
ov::Shape{0} : partial_shape.to_shape(); + original_outputs[i] = ov::Tensor(m_original_output_data_types[i], shape); } } @@ -283,15 +282,16 @@ bool TypeRelaxed::evaluate(const HostTensorVector& outputs, const HostTe const auto expected_output_type = get_overridden_output_type(i); if (expected_output_type != element::undefined && - original_outputs[i]->get_element_type() != expected_output_type) { + original_outputs[i].get_element_type() != expected_output_type) { if (convert == nullptr) { convert = std::make_shared(); } convert->set_destination_type(expected_output_type); - const auto casted_output = - std::make_shared(expected_output_type, original_outputs[i]->get_shape()); - if (!convert->evaluate({outputs[i]}, {original_outputs[i]})) { + const auto casted_output = ov::Tensor(expected_output_type, original_outputs[i].get_shape()); + ov::TensorVector outs = {outputs[i]}; + ov::TensorVector ins = {original_outputs[i]}; + if (!convert->evaluate(outs, ins)) { return false; } } @@ -299,7 +299,6 @@ bool TypeRelaxed::evaluate(const HostTensorVector& outputs, const HostTe return true; } -OPENVINO_SUPPRESS_DEPRECATED_END std::unordered_map> OPENVINO_API convert_input_types(OutputVector& inputs, const element::TypeVector& types); @@ -384,7 +383,5 @@ bool TypeRelaxed::visit_attributes(AttributeVisitor& visitor) { return true; } -OPENVINO_SUPPRESS_DEPRECATED_END - } // namespace op } // namespace ov diff --git a/src/core/include/ngraph/runtime/host_tensor.hpp b/src/core/include/ngraph/runtime/host_tensor.hpp index 1d5a03305c1e85..2f8374a577cf8e 100644 --- a/src/core/include/ngraph/runtime/host_tensor.hpp +++ b/src/core/include/ngraph/runtime/host_tensor.hpp @@ -32,6 +32,12 @@ class Constant; } // namespace op } // namespace ov namespace ngraph { + +namespace runtime { +class HostTensor; +} + +using HostTensorPtr = std::shared_ptr; namespace op { namespace v0 { using ov::op::v0::Constant; diff --git a/src/core/include/openvino/core/core_visibility.hpp b/src/core/include/openvino/core/core_visibility.hpp index 705bdd28e17476..b9d60aafc5b96e 100644 --- a/src/core/include/openvino/core/core_visibility.hpp +++ b/src/core/include/openvino/core/core_visibility.hpp @@ -48,11 +48,11 @@ namespace ov {} // namespace ov # pragma warning(disable : 4275) #endif -#ifdef OPENVINO_STATIC_LIBRARY // defined if we are building or calling NGRAPH as a static library +#ifdef OPENVINO_STATIC_LIBRARY // defined if we are building or calling OpenVINO as a static library # define OPENVINO_API # define OPENVINO_API_C(...) __VA_ARGS__ #else -# ifdef IMPLEMENT_OPENVINO_API // defined if we are building the NGRAPH DLL (instead of using it) +# ifdef IMPLEMENT_OPENVINO_API // defined if we are building the OpenVINO DLL (instead of using it) # define OPENVINO_API OPENVINO_CORE_EXPORTS # define OPENVINO_API_C(...) 
OPENVINO_EXTERN_C OPENVINO_CORE_EXPORTS __VA_ARGS__ OPENVINO_CDECL # else diff --git a/src/core/include/openvino/core/descriptor/tensor.hpp b/src/core/include/openvino/core/descriptor/tensor.hpp index d264d568ab4db5..73b34a32ea53a0 100644 --- a/src/core/include/openvino/core/descriptor/tensor.hpp +++ b/src/core/include/openvino/core/descriptor/tensor.hpp @@ -22,7 +22,6 @@ namespace ngraph { namespace runtime { class HostTensor; } -using HostTensorPtr = std::shared_ptr; } // namespace ngraph namespace ov { diff --git a/src/core/include/openvino/core/node.hpp b/src/core/include/openvino/core/node.hpp index d41da823d2418f..860290617709a7 100644 --- a/src/core/include/openvino/core/node.hpp +++ b/src/core/include/openvino/core/node.hpp @@ -38,14 +38,6 @@ #include "openvino/op/util/variable_value.hpp" #include "openvino/runtime/tensor.hpp" -namespace ngraph { - -namespace runtime { -class HostTensor; -} // namespace runtime - -} // namespace ngraph - namespace ov { namespace op { namespace v0 { @@ -62,9 +54,7 @@ class Matcher; } // namespace pattern } // namespace pass OPENVINO_SUPPRESS_DEPRECATED_START -using HostTensor = ngraph::runtime::HostTensor; -using HostTensorPtr = std::shared_ptr; -using HostTensorVector = std::vector; +using HostTensorVector = std::vector; OPENVINO_SUPPRESS_DEPRECATED_END template diff --git a/src/core/include/openvino/op/broadcast.hpp b/src/core/include/openvino/op/broadcast.hpp index fccffc5ed1d22e..8442f22c7ac597 100644 --- a/src/core/include/openvino/op/broadcast.hpp +++ b/src/core/include/openvino/op/broadcast.hpp @@ -65,13 +65,11 @@ class OPENVINO_API Broadcast : public util::BroadcastBase { /// \return true and the AxisSet if broadcast axes can be fully determined. std::pair get_broadcast_axes() const override; - OPENVINO_SUPPRESS_DEPRECATED_START - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - OPENVINO_SUPPRESS_DEPRECATED_END + bool evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const override; bool has_evaluate() const override; private: - bool broadcast_evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const; + bool broadcast_evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const; }; } // namespace v3 @@ -126,9 +124,7 @@ class OPENVINO_API Broadcast : public util::BroadcastBase { } void validate_and_infer_types() override; - OPENVINO_SUPPRESS_DEPRECATED_START - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - OPENVINO_SUPPRESS_DEPRECATED_END + bool evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const override; bool has_evaluate() const override; protected: diff --git a/src/core/include/openvino/op/util/broadcast_base.hpp b/src/core/include/openvino/op/util/broadcast_base.hpp index 669c0713655d10..92f283a89638b3 100644 --- a/src/core/include/openvino/op/util/broadcast_base.hpp +++ b/src/core/include/openvino/op/util/broadcast_base.hpp @@ -45,9 +45,7 @@ class OPENVINO_API BroadcastBase : public Op { /// \return true and the AxisSet if broadcast axes can be fully determined. 
virtual std::pair get_broadcast_axes() const; - OPENVINO_SUPPRESS_DEPRECATED_START - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - OPENVINO_SUPPRESS_DEPRECATED_END + bool evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const override; const BroadcastModeSpec& get_broadcast_spec() const { return m_mode; @@ -56,12 +54,12 @@ class OPENVINO_API BroadcastBase : public Op { protected: BroadcastModeSpec m_mode; - bool evaluate_broadcast(const HostTensorPtr& arg0, - const HostTensorPtr& out, + bool evaluate_broadcast(const ov::Tensor& arg0, + ov::Tensor& out, const std::pair& pair_broadcast_axes, const Shape& output_shape) const; - bool evaluate_broadcast(const HostTensorPtr& arg0, const HostTensorPtr& out, const AxisSet& broadcast_axes) const; + bool evaluate_broadcast(const ov::Tensor& arg0, ov::Tensor& out, const AxisSet& broadcast_axes) const; bool evaluate_lower(TensorVector& outputs) const override; bool evaluate_upper(TensorVector& outputs) const override; @@ -83,7 +81,7 @@ class OPENVINO_API BroadcastBase : public Op { const AxisVector& axes_mapping_val, const PartialShape& target_shape) const; - Shape get_target_shape(const HostTensorPtr& input1) const; + Shape get_target_shape(const ov::Tensor& input1) const; }; } // namespace util } // namespace op diff --git a/src/core/shape_inference/include/tensor_data_accessor.hpp b/src/core/shape_inference/include/tensor_data_accessor.hpp index 300d859561b6da..8f017e05f1be42 100644 --- a/src/core/shape_inference/include/tensor_data_accessor.hpp +++ b/src/core/shape_inference/include/tensor_data_accessor.hpp @@ -30,8 +30,8 @@ class ITensorAccessor { * This accessor not take ownership of tensors container. * Supports following containers: * - ov::TensorVector - * - ov::HostTensorVector - * - std::map + * - ngraph::HostTensorVector + * - std::map * * @tparam TContainer Type of tensor container. */ @@ -68,7 +68,7 @@ template <> Tensor TensorAccessor>::operator()(size_t port) const; template <> -Tensor TensorAccessor>::operator()(size_t port) const; +Tensor TensorAccessor>::operator()(size_t port) const; template <> Tensor TensorAccessor::operator()(size_t port) const; diff --git a/src/core/shape_inference/include/utils.hpp b/src/core/shape_inference/include/utils.hpp index 2b6b17f06b336e..32e53766ba0d60 100644 --- a/src/core/shape_inference/include/utils.hpp +++ b/src/core/shape_inference/include/utils.hpp @@ -76,13 +76,13 @@ OPENVINO_SUPPRESS_DEPRECATED_START * \return Object of TResult with data from host tensor. 
*/ template , class UnaryOperation> -TResult get_tensor_data_as(HostTensor& tv, UnaryOperation&& func) { +TResult get_tensor_data_as(ngraph::HostTensor& tv, UnaryOperation&& func) { auto t = Tensor(tv.get_element_type(), tv.get_shape(), tv.get_data_ptr()); return get_tensor_data_as(t, std::forward(func)); } template , class UnaryOperation> -TResult get_tensor_data_as(HostTensor* tv, UnaryOperation&& func) { +TResult get_tensor_data_as(ngraph::HostTensor* tv, UnaryOperation&& func) { return get_tensor_data_as(*tv, std::forward(func)); } OPENVINO_SUPPRESS_DEPRECATED_END diff --git a/src/core/shape_inference/src/tensor_data_accessor.cpp b/src/core/shape_inference/src/tensor_data_accessor.cpp index 59fb83eb025a02..1eeb4e6c949e68 100644 --- a/src/core/shape_inference/src/tensor_data_accessor.cpp +++ b/src/core/shape_inference/src/tensor_data_accessor.cpp @@ -38,7 +38,7 @@ Tensor TensorAccessor>::operator()(size_t por } template <> -Tensor TensorAccessor>::operator()(size_t port) const { +Tensor TensorAccessor>::operator()(size_t port) const { const auto t_iter = m_tensors->find(port); if (t_iter != m_tensors->cend()) { auto ptr = t_iter->second.get(); diff --git a/src/core/src/op/broadcast.cpp b/src/core/src/op/broadcast.cpp index 23661efdfd5ab9..a793164570ad9c 100644 --- a/src/core/src/op/broadcast.cpp +++ b/src/core/src/op/broadcast.cpp @@ -114,9 +114,9 @@ ov::PartialShape get_result_shape_bidirectional(const Node* this_ptr, } } // namespace -bool op::v3::Broadcast::broadcast_evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { +bool op::v3::Broadcast::broadcast_evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const { if (get_broadcast_spec().m_type == op::BroadcastType::BIDIRECTIONAL) { - auto arg_shape = inputs[0]->get_shape(); + auto arg_shape = inputs[0].get_shape(); ov::Shape target_shape = op::util::BroadcastBase::get_target_shape(inputs[1]); ov::PartialShape result_shape = get_result_shape_bidirectional(this, ov::PartialShape{arg_shape}, ov::PartialShape{target_shape}); @@ -193,7 +193,7 @@ bool op::v3::Broadcast::visit_attributes(AttributeVisitor& visitor) { return true; } -bool op::v3::Broadcast::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { +bool op::v3::Broadcast::evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const { OV_OP_SCOPE(v3_Broadcast_evaluate); return broadcast_evaluate(outputs, inputs); } @@ -303,7 +303,7 @@ bool op::v1::Broadcast::visit_attributes(AttributeVisitor& visitor) { return true; } -bool op::v1::Broadcast::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { +bool op::v1::Broadcast::evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const { OV_OP_SCOPE(v1_Broadcast_evaluate); return op::util::BroadcastBase::evaluate(outputs, inputs); } diff --git a/src/core/src/op/eye.cpp b/src/core/src/op/eye.cpp index 18c3de19c19344..551fd6b1be13cf 100644 --- a/src/core/src/op/eye.cpp +++ b/src/core/src/op/eye.cpp @@ -15,12 +15,12 @@ namespace op { namespace eye { namespace { template -bool evaluate(const ov::HostTensorPtr& out, const int64_t diagonal_index) { +bool evaluate(const ngraph::HostTensorPtr& out, const int64_t diagonal_index) { ov::reference::eye(out->get_data_ptr(), out->get_shape(), diagonal_index); return true; } -bool evaluate_eye(const ov::HostTensorPtr& out, const int64_t diagonal_index) { +bool evaluate_eye(const ngraph::HostTensorPtr& out, const int64_t diagonal_index) { bool rc = true; switch 
(out->get_element_type()) { NGRAPH_TYPE_CASE(evaluate, i8, out, diagonal_index); @@ -113,7 +113,7 @@ bool ov::op::v9::Eye::has_evaluate() const { return false; } -bool ov::op::v9::Eye::evaluate(const ov::HostTensorVector& outputs, const ov::HostTensorVector& inputs) const { +bool ov::op::v9::Eye::evaluate(const ngraph::HostTensorVector& outputs, const ngraph::HostTensorVector& inputs) const { OV_OP_SCOPE(v9_Eye_evaluate); OPENVINO_SUPPRESS_DEPRECATED_START OPENVINO_ASSERT(ngraph::validate_host_tensor_vector(inputs, get_input_size()), "Invalid Eye input TensorVector."); diff --git a/src/core/src/op/grid_sample.cpp b/src/core/src/op/grid_sample.cpp index 80226db5ce97a2..0df903f8b54f7e 100644 --- a/src/core/src/op/grid_sample.cpp +++ b/src/core/src/op/grid_sample.cpp @@ -77,9 +77,9 @@ OPENVINO_SUPPRESS_DEPRECATED_START namespace { template -bool evaluate_exec(const HostTensorPtr& output, - const HostTensorPtr& data, - const HostTensorPtr& grid, +bool evaluate_exec(const ngraph::HostTensorPtr& output, + const ngraph::HostTensorPtr& data, + const ngraph::HostTensorPtr& grid, const op::v9::GridSample::Attributes& attributes) { ov::reference::grid_sample(output->get_data_ptr(), data->get_data_ptr(), @@ -99,9 +99,9 @@ bool evaluate_exec(const HostTensorPtr& output, } break template -bool evaluate(const HostTensorPtr& output, - const HostTensorPtr& data, - const HostTensorPtr& grid, +bool evaluate(const ngraph::HostTensorPtr& output, + const ngraph::HostTensorPtr& data, + const ngraph::HostTensorPtr& grid, const op::v9::GridSample::Attributes& attributes) { auto rc = true; switch (grid->get_element_type()) { @@ -113,9 +113,9 @@ bool evaluate(const HostTensorPtr& output, return rc; } -bool evaluate_grid_sample(const HostTensorPtr& output, - const HostTensorPtr& data, - const HostTensorPtr& grid, +bool evaluate_grid_sample(const ngraph::HostTensorPtr& output, + const ngraph::HostTensorPtr& data, + const ngraph::HostTensorPtr& grid, const op::v9::GridSample::Attributes& attributes) { auto rc = true; switch (output->get_element_type()) { diff --git a/src/core/src/op/scatter_elements_update.cpp b/src/core/src/op/scatter_elements_update.cpp index 8fc46e66c171c2..fd6c52951cb34b 100644 --- a/src/core/src/op/scatter_elements_update.cpp +++ b/src/core/src/op/scatter_elements_update.cpp @@ -95,11 +95,11 @@ OPENVINO_SUPPRESS_DEPRECATED_START namespace scatter_elements_update { namespace { template -bool evaluate(const HostTensorPtr& data, - const HostTensorPtr& indices, - const HostTensorPtr& updates, - const HostTensorPtr& axis, - const HostTensorPtr& out, +bool evaluate(const ngraph::HostTensorPtr& data, + const ngraph::HostTensorPtr& indices, + const ngraph::HostTensorPtr& updates, + const ngraph::HostTensorPtr& axis, + const ngraph::HostTensorPtr& out, const int64_t normalized_axis, const op::v12::ScatterElementsUpdate::Reduction reduction_type, const bool use_init_value) { @@ -128,11 +128,11 @@ bool evaluate(const HostTensorPtr& data, } break; template -bool evaluate(const HostTensorPtr& arg0, - const HostTensorPtr& arg1, - const HostTensorPtr& arg2, - const HostTensorPtr& arg3, - const HostTensorPtr& out, +bool evaluate(const ngraph::HostTensorPtr& arg0, + const ngraph::HostTensorPtr& arg1, + const ngraph::HostTensorPtr& arg2, + const ngraph::HostTensorPtr& arg3, + const ngraph::HostTensorPtr& out, const int64_t normalized_axis, const op::v12::ScatterElementsUpdate::Reduction reduction_type, const bool use_init_value) { @@ -164,11 +164,11 @@ bool evaluate(const HostTensorPtr& arg0, } break; template 
-bool evaluate(const HostTensorPtr& arg0, - const HostTensorPtr& arg1, - const HostTensorPtr& arg2, - const HostTensorPtr& arg3, - const HostTensorPtr& out, +bool evaluate(const ngraph::HostTensorPtr& arg0, + const ngraph::HostTensorPtr& arg1, + const ngraph::HostTensorPtr& arg2, + const ngraph::HostTensorPtr& arg3, + const ngraph::HostTensorPtr& out, const int64_t normalized_axis, const op::v12::ScatterElementsUpdate::Reduction reduction_type, const bool use_init_value) { @@ -194,11 +194,11 @@ bool evaluate(const HostTensorPtr& arg0, } bool evaluate_scatter_elements_update( - const HostTensorPtr& arg0, - const HostTensorPtr& arg1, - const HostTensorPtr& arg2, - const HostTensorPtr& arg3, - const HostTensorPtr& out, + const ngraph::HostTensorPtr& arg0, + const ngraph::HostTensorPtr& arg1, + const ngraph::HostTensorPtr& arg2, + const ngraph::HostTensorPtr& arg3, + const ngraph::HostTensorPtr& out, const int64_t normalized_axis, const op::v12::ScatterElementsUpdate::Reduction reduction_type = op::v12::ScatterElementsUpdate::Reduction::NONE, const bool use_init_value = false) { diff --git a/src/core/src/op/util/broadcast_base.cpp b/src/core/src/op/util/broadcast_base.cpp index e37836ad33bded..86abeed1455868 100644 --- a/src/core/src/op/util/broadcast_base.cpp +++ b/src/core/src/op/util/broadcast_base.cpp @@ -8,15 +8,11 @@ #include "bound_evaluate.hpp" #include "itt.hpp" -#include "ngraph/runtime/host_tensor.hpp" -#include "ngraph/validation_util.hpp" #include "openvino/core/validation_util.hpp" #include "openvino/op/concat.hpp" #include "openvino/op/util/precision_sensitive_attribute.hpp" #include "openvino/reference/broadcast.hpp" -using namespace std; - ov::op::util::BroadcastBase::BroadcastBase(const Output& arg, const Output& target_shape, const Output& axes_mapping, @@ -87,7 +83,7 @@ void ov::op::util::BroadcastBase::validate_target_shape_numpy(const PartialShape " than arg shape ", arg_rank_length); for (auto i = start_axis; i < target_rank_length; i++) { - stringstream ss; + std::stringstream ss; ss << " or " << target_shape[i]; NODE_VALIDATION_CHECK(this, arg_shape[i - start_axis].is_dynamic() || target_shape[i].is_dynamic() || @@ -203,7 +199,7 @@ void ov::op::util::BroadcastBase::validate_and_infer_types() { if (!output_shape_defined && concat->get_output_partial_shape(0).is_static() && concat->get_shape().size() == 1 && concat_inputs.size() == shape_size(concat->get_shape())) { - auto output_partial_shape = vector{}; + auto output_partial_shape = std::vector{}; for (const auto& concat_input : concat_inputs) { auto source_node_ptr = concat_input.get_source_output().get_node_shared_ptr(); if (auto source_const_ptr = ov::as_type_ptr(source_node_ptr)) { @@ -318,31 +314,30 @@ std::pair ov::op::util::BroadcastBase::get_broadcast_axes() c return std::make_pair(axes_known, broadcast_axes); } -OPENVINO_SUPPRESS_DEPRECATED_START -bool ov::op::util::BroadcastBase::evaluate_broadcast(const HostTensorPtr& arg0, - const HostTensorPtr& out, +bool ov::op::util::BroadcastBase::evaluate_broadcast(const ov::Tensor& arg0, + ov::Tensor& out, const AxisSet& broadcast_axes) const { OV_OP_SCOPE(util_BroadcastBase_evaluate_axes); - auto arg0_shape = arg0->get_shape(); + auto arg0_shape = arg0.get_shape(); if (arg0_shape.size() == 0) { arg0_shape = Shape{1}; } - ov::reference::broadcast(arg0->get_data_ptr(), - out->get_data_ptr(), + ov::reference::broadcast(static_cast(arg0.data()), + static_cast(out.data()), arg0_shape, - out->get_shape(), + out.get_shape(), broadcast_axes, - 
arg0->get_element_type().size()); + arg0.get_element_type().size()); return true; } namespace { template -void get_axis_vector_from_hosttensor(const ngraph::HostTensorPtr& arg, ov::AxisVector& axes_vector) { +void get_axis_vector_from_hosttensor(const ov::Tensor& arg, ov::AxisVector& axes_vector) { using T = typename ov::element_type_traits::value_type; - auto rank = arg->get_shape().at(0); + auto rank = arg.get_shape().at(0); std::vector axes_vec(rank); - arg->read(axes_vec.data(), rank * sizeof(T)); + std::memcpy(axes_vec.data(), arg.data(), rank * sizeof(T)); axes_vector = ov::AxisVector(axes_vec.begin(), axes_vec.end()); } @@ -350,10 +345,8 @@ void get_axis_vector_from_hosttensor(const ngraph::HostTensorPtr& arg, ov::AxisV case ov::element::Type_t::a: \ get_axis_vector_from_hosttensor -void get_axis_vector_from_ht(const ngraph::HostTensorPtr& arg, - ov::AxisVector& axis_vector, - const ov::Shape& arg_shape) { - switch (arg->get_element_type()) { +void get_axis_vector_from_ht(const ov::Tensor& arg, ov::AxisVector& axis_vector, const ov::Shape& arg_shape) { + switch (arg.get_element_type()) { GET_AXIS_VECTOR(i8)(arg, axis_vector); break; GET_AXIS_VECTOR(i16)(arg, axis_vector); @@ -383,11 +376,11 @@ void get_axis_vector_from_ht(const ngraph::HostTensorPtr& arg, } template -void get_shape_from_hosttensor(const ngraph::HostTensorPtr& input1, ov::Shape& target_shape) { +void get_shape_from_hosttensor(const ov::Tensor& input1, ov::Shape& target_shape) { using T = typename ov::element_type_traits::value_type; - auto rank = input1->get_shape().at(0); + auto rank = input1.get_shape().at(0); std::vector target_shape_vec(rank); - input1->read(target_shape_vec.data(), rank * sizeof(T)); + std::memcpy(target_shape_vec.data(), input1.data(), rank * sizeof(T)); target_shape = ov::Shape(target_shape_vec.begin(), target_shape_vec.end()); } @@ -395,9 +388,9 @@ void get_shape_from_hosttensor(const ngraph::HostTensorPtr& input1, ov::Shape& t case ov::element::Type_t::a: \ get_shape_from_hosttensor -ov::Shape get_target_shape_from_ht(const ngraph::HostTensorPtr& input1) { +ov::Shape get_target_shape_from_ht(const ov::Tensor& input1) { ov::Shape target_shape; - switch (input1->get_element_type()) { + switch (input1.get_element_type()) { CASE_GET_SHAPE(i8)(input1, target_shape); break; CASE_GET_SHAPE(i16)(input1, target_shape); @@ -422,36 +415,33 @@ ov::Shape get_target_shape_from_ht(const ngraph::HostTensorPtr& input1) { } } // namespace -bool ov::op::util::BroadcastBase::evaluate_broadcast(const HostTensorPtr& arg0, - const HostTensorPtr& out, +bool ov::op::util::BroadcastBase::evaluate_broadcast(const ov::Tensor& arg0, + ov::Tensor& out, const std::pair& pair_broadcast_axes, const Shape& output_shape) const { if (!pair_broadcast_axes.first) { // broadcast_axes not known deterministically return false; } - Shape in_shape = arg0->get_shape(); - out->set_shape(output_shape); - out->set_element_type(arg0->get_element_type()); + Shape in_shape = arg0.get_shape(); + out.set_shape(output_shape); return evaluate_broadcast(arg0, out, pair_broadcast_axes.second); } -ov::Shape ov::op::util::BroadcastBase::get_target_shape(const HostTensorPtr& input1) const { +ov::Shape ov::op::util::BroadcastBase::get_target_shape(const ov::Tensor& input1) const { return get_target_shape_from_ht(input1); } -bool ov::op::util::BroadcastBase::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { +bool ov::op::util::BroadcastBase::evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const { 
OV_OP_SCOPE(util_BroadcastBase_evaluate); - OPENVINO_SUPPRESS_DEPRECATED_START - OPENVINO_ASSERT(ngraph::validate_host_tensor_vector(inputs, 2) || ngraph::validate_host_tensor_vector(inputs, 3)); - OPENVINO_ASSERT(ngraph::validate_host_tensor_vector(outputs, 1)); - OPENVINO_SUPPRESS_DEPRECATED_END + OPENVINO_ASSERT(inputs.size() == 2 || inputs.size() == 3); + OPENVINO_ASSERT(outputs.size(), 1); Shape target_shape = get_target_shape(inputs[1]); PartialShape result_shape; std::pair pair_broadcast_axes; - auto arg_shape = inputs[0]->get_shape(); + auto arg_shape = inputs[0].get_shape(); if (m_mode.m_type == BroadcastType::NONE) { AxisVector axes_mapping_val; @@ -459,7 +449,7 @@ bool ov::op::util::BroadcastBase::evaluate(const HostTensorVector& outputs, cons get_axis_vector_from_ht(inputs[2], axes_mapping_val, arg_shape); pair_broadcast_axes = get_broadcast_axes_none(axes_mapping_val, target_shape.size()); - validate_target_shape_none(inputs[0]->get_shape(), axes_mapping_val, target_shape); + validate_target_shape_none(inputs[0].get_shape(), axes_mapping_val, target_shape); result_shape = target_shape; } else if (m_mode.m_type == BroadcastType::PDPD) { result_shape = get_result_shape_pdpd(arg_shape, target_shape, m_mode); diff --git a/src/core/src/runtime/host_tensor.cpp b/src/core/src/runtime/host_tensor.cpp index 28ad2526b82529..000e8de2f645d3 100644 --- a/src/core/src/runtime/host_tensor.cpp +++ b/src/core/src/runtime/host_tensor.cpp @@ -139,13 +139,13 @@ void runtime::HostTensor::set_element_type(const element::Type& element_type) { } void runtime::HostTensor::set_shape(const Shape& shape) { - NGRAPH_CHECK( - PartialShape(shape).refines(get_partial_shape()) || - (m_descriptor->m_partial_shape.is_static() && m_descriptor->m_partial_shape.to_shape() == ov::Shape{0}), - "Allocation shape ", - shape, - " must be compatible with the partial shape: ", - get_partial_shape()); + NGRAPH_CHECK(PartialShape(shape).refines(get_partial_shape()) || + (m_descriptor->get_partial_shape().is_static() && + m_descriptor->get_partial_shape().to_shape() == ov::Shape{0}), + "Allocation shape ", + shape, + " must be compatible with the partial shape: ", + get_partial_shape()); m_descriptor->m_partial_shape = shape; m_descriptor->m_shape_changed = true; } diff --git a/src/core/tests/type_prop/eye.cpp b/src/core/tests/type_prop/eye.cpp index f69d0e135e11cb..b3325d5f12735e 100644 --- a/src/core/tests/type_prop/eye.cpp +++ b/src/core/tests/type_prop/eye.cpp @@ -363,10 +363,10 @@ TEST_F(TypePropEyeV9Test, default_ctor_no_arguments) { int64_t rows = 8, cols = 5; auto batch = std::array{2, 4, 1}; - const auto constant_map = std::map{ - {0, std::make_shared(element::i64, Shape{}, &rows)}, - {1, std::make_shared(element::i64, Shape{}, &cols)}, - {3, std::make_shared(element::i32, Shape{batch.size()}, batch.data())}}; + const auto constant_map = std::map{ + {0, std::make_shared(element::i64, Shape{}, &rows)}, + {1, std::make_shared(element::i64, Shape{}, &cols)}, + {3, std::make_shared(element::i32, Shape{batch.size()}, batch.data())}}; const auto output_shapes = op::v9::shape_infer(op.get(), PartialShapes{{}, {}, {}, {3}}, make_tensor_accessor(constant_map)); diff --git a/src/core/tests/visitors/visitors.hpp b/src/core/tests/visitors/visitors.hpp index 4cd336ae9dacd6..838eade854181b 100644 --- a/src/core/tests/visitors/visitors.hpp +++ b/src/core/tests/visitors/visitors.hpp @@ -96,7 +96,7 @@ class ValueHolder { virtual operator std::vector&() { OPENVINO_THROW("Invalid type access"); } - virtual operator HostTensorPtr&() { 
+ virtual operator ngraph::HostTensorPtr&() { OPENVINO_THROW("Invalid type access"); } virtual operator std::shared_ptr&() { @@ -219,7 +219,7 @@ class DeserializeAttributeVisitor : public AttributeVisitor { void on_adapter(const std::string& name, ValueAccessor& adapter) override { OPENVINO_SUPPRESS_DEPRECATED_START if (auto a = ::ov::as_type<::ov::AttributeAdapter>>(&adapter)) { - auto& data = m_values.get(name); + auto& data = m_values.get(name); data->read(a->get()->get_ptr(), a->get()->size()); } else if (auto a = ov::as_type< ov::AttributeAdapter>>>( @@ -291,7 +291,7 @@ class DeserializeAttributeVisitor : public AttributeVisitor { } void on_adapter(const std::string& name, ValueAccessor& adapter) override { OPENVINO_SUPPRESS_DEPRECATED_START - HostTensorPtr& data = m_values.get(name); + ngraph::HostTensorPtr& data = m_values.get(name); data->read(adapter.get_ptr(), adapter.size()); OPENVINO_SUPPRESS_DEPRECATED_END } @@ -311,7 +311,7 @@ class SerializeAttributeVisitor : public AttributeVisitor { void on_adapter(const std::string& name, ValueAccessor& adapter) override { OPENVINO_SUPPRESS_DEPRECATED_START if (auto a = ::ov::as_type<::ov::AttributeAdapter>>(&adapter)) { - HostTensorPtr data = std::make_shared(element::u8, Shape{a->get()->size()}); + ngraph::HostTensorPtr data = std::make_shared(element::u8, Shape{a->get()->size()}); data->write(a->get()->get_ptr(), a->get()->size()); m_values.insert(name, data); } else if (auto a = ov::as_type< diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/denormal_check.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/denormal_check.cpp index 4777d7c3475254..1e70b5a0277bb2 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/denormal_check.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/denormal_check.cpp @@ -66,7 +66,7 @@ void SetUp() override { ov::ParameterVector params {std::make_shared(rtPrc, ov::Shape(inpShape))}; pConstStorage.reset(new AlignedBufferWrapper(elemsCount, alignment)); - auto constTensor = std::make_shared(rtPrc, inpShape, pConstStorage->get_ptr()); + auto constTensor = std::make_shared(rtPrc, inpShape, pConstStorage->get_ptr()); auto constNode = std::make_shared(constTensor); ov::NodeVector input = {params[0], constNode}; auto concat = std::make_shared(input, 1); diff --git a/src/plugins/template/backend/ops/if.cpp b/src/plugins/template/backend/ops/if.cpp index dc3a8074996f16..2164d83bd58c04 100644 --- a/src/plugins/template/backend/ops/if.cpp +++ b/src/plugins/template/backend/ops/if.cpp @@ -23,7 +23,7 @@ bool call(ov::TensorVector& func_outputs, } std::unordered_map, size_t> results_map; - // map function outputs -> ov::HostTensor + // map function outputs -> ov::Tensor for (size_t output_count = 0; output_count < function->get_results().size(); ++output_count) { auto output = function->get_results()[output_count]; results_map[output] = output_count; diff --git a/src/plugins/template/tests/functional/subgraph_reference/preprocess.cpp b/src/plugins/template/tests/functional/subgraph_reference/preprocess.cpp index b4c35bb936a43f..cdd0cf5c2318dc 100644 --- a/src/plugins/template/tests/functional/subgraph_reference/preprocess.cpp +++ b/src/plugins/template/tests/functional/subgraph_reference/preprocess.cpp @@ -351,7 +351,7 @@ static RefPreprocessParams resize_to_network_width_height() { return f; }; - auto result = std::make_shared(); + auto result = std::make_shared(); // clang-format off std::vector input = {0., 1., 2., 3., 4., 1., 2., 3., 4., 5., @@ 
-380,7 +380,7 @@ static RefPreprocessParams resize_to_specified_width_height() { return f; }; - auto result = std::make_shared(); + auto result = std::make_shared(); // clang-format off std::vector input = {0., 1., 2., 3., 4., 1., 2., 3., 4., 5., @@ -747,7 +747,7 @@ static RefPreprocessParams resize_and_convert_layout() { return f; }; - auto result = std::make_shared(); + auto result = std::make_shared(); // clang-format off std::vector input = { 1., 1., 1., 1., // channel 1 @@ -857,7 +857,7 @@ static RefPreprocessParams convert_color_nv12_layout_resize() { return f; }; - auto result = std::make_shared(); + auto result = std::make_shared(); // clang-format off auto input = std::vector {81, 81, 145, 145, // RRGG 81, 81, 145, 145, // RRGG From d37326b37eec21501fe0177deaa71239ede656ef Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Tue, 3 Oct 2023 09:16:56 +0400 Subject: [PATCH 029/257] Disable pkg-config search on Android via CMAKE_DISABLE_FIND_PACKAGE_PkgConfig (#20179) --- .../developer_package/IEDevScriptsConfig.cmake | 8 ++++++++ cmake/features.cmake | 6 +----- docs/snippets/CMakeLists.txt | 12 +++++------- samples/cpp/CMakeLists.txt | 8 ++++++++ samples/cpp/benchmark_app/CMakeLists.txt | 5 +---- samples/cpp/speech_sample/CMakeLists.txt | 5 +---- src/cmake/ov_parallel.cmake | 4 +--- .../intel_gpu/tests/functional/CMakeLists.txt | 8 +++----- thirdparty/dependencies.cmake | 17 ++++++----------- 9 files changed, 34 insertions(+), 39 deletions(-) diff --git a/cmake/developer_package/IEDevScriptsConfig.cmake b/cmake/developer_package/IEDevScriptsConfig.cmake index e1ccba489fcacf..6a3bd84b46516a 100644 --- a/cmake/developer_package/IEDevScriptsConfig.cmake +++ b/cmake/developer_package/IEDevScriptsConfig.cmake @@ -8,6 +8,14 @@ if(NOT DEFINED IEDevScripts_DIR) message(FATAL_ERROR "IEDevScripts_DIR is not defined") endif() +# disable FindPkgConfig.cmake for Android +if(ANDROID) + # Android toolchain does not provide pkg-config file. So, cmake mistakenly uses + # build system pkg-config executable, which finds packages on build system. Such + # libraries cannot be linked into Android binaries. 
+ set(CMAKE_DISABLE_FIND_PACKAGE_PkgConfig ON) +endif() + macro(ov_set_if_not_defined var value) if(NOT DEFINED ${var}) set(${var} ${value}) diff --git a/cmake/features.cmake b/cmake/features.cmake index 5f5a58d2daf435..455db56d28ab32 100644 --- a/cmake/features.cmake +++ b/cmake/features.cmake @@ -60,11 +60,7 @@ Usage: -DSELECTIVE_BUILD=ON -DSELECTIVE_BUILD_STAT=/path/*.csv" OFF ie_option (ENABLE_DOCS "Build docs using Doxygen" OFF) -if(NOT ANDROID) - # on Android build FindPkgConfig.cmake finds host system pkg-config, which is not appropriate - find_package(PkgConfig QUIET) -endif() - +find_package(PkgConfig QUIET) ie_dependent_option (ENABLE_PKGCONFIG_GEN "Enable openvino.pc pkg-config file generation" ON "LINUX OR APPLE;PkgConfig_FOUND;BUILD_SHARED_LIBS" OFF) # diff --git a/docs/snippets/CMakeLists.txt b/docs/snippets/CMakeLists.txt index 273f5a4ffae087..d9c87955266f3f 100644 --- a/docs/snippets/CMakeLists.txt +++ b/docs/snippets/CMakeLists.txt @@ -26,13 +26,11 @@ if(TARGET OpenCL::OpenCL) endif() # try to find VA libraries -if(NOT ANDROID) - find_package(PkgConfig QUIET) - if(PkgConfig_FOUND) - pkg_search_module(libva QUIET IMPORTED_TARGET libva) - if(libva_FOUND) - message(STATUS "${PKG_CONFIG_EXECUTABLE}: libva (${libva_VERSION}) is found at ${libva_PREFIX}") - endif() +find_package(PkgConfig QUIET) +if(PkgConfig_FOUND) + pkg_search_module(libva QUIET IMPORTED_TARGET libva) + if(libva_FOUND) + message(STATUS "${PKG_CONFIG_EXECUTABLE}: libva (${libva_VERSION}) is found at ${libva_PREFIX}") endif() endif() diff --git a/samples/cpp/CMakeLists.txt b/samples/cpp/CMakeLists.txt index 714cc0a3016923..ea3448522dea90 100644 --- a/samples/cpp/CMakeLists.txt +++ b/samples/cpp/CMakeLists.txt @@ -21,6 +21,14 @@ elseif(NOT OV_GENERATOR_MULTI_CONFIG) set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS "Release;Debug;RelWithDebInfo;MinSizeRel") endif() +# disable FindPkgConfig.cmake for Android +if(ANDROID) + # Android toolchain does not provide pkg-config file. So, cmake mistakenly uses + # build system pkg-config executable, which finds packages on build system. Such + # libraries cannot be linked into Android binaries. 
+ set(CMAKE_DISABLE_FIND_PACKAGE_PkgConfig ON) +endif() + set_property(GLOBAL PROPERTY USE_FOLDERS ON) if (NOT BIN_FOLDER) diff --git a/samples/cpp/benchmark_app/CMakeLists.txt b/samples/cpp/benchmark_app/CMakeLists.txt index 1ec22b20cb0cd2..c79615130c268a 100644 --- a/samples/cpp/benchmark_app/CMakeLists.txt +++ b/samples/cpp/benchmark_app/CMakeLists.txt @@ -14,11 +14,8 @@ ie_add_sample(NAME ${TARGET_NAME} # Required nlohmann_json dependency -if(NOT ANDROID) - find_package(PkgConfig QUIET) -endif() - if(NOT TARGET nlohmann_json::nlohmann_json) + find_package(PkgConfig QUIET) find_package(nlohmann_json QUIET # exception for Ubuntu 18.04, where cmake files for nlohmann_json # are located in a wrong directory diff --git a/samples/cpp/speech_sample/CMakeLists.txt b/samples/cpp/speech_sample/CMakeLists.txt index f37ef003341527..092ebf14807680 100644 --- a/samples/cpp/speech_sample/CMakeLists.txt +++ b/samples/cpp/speech_sample/CMakeLists.txt @@ -8,13 +8,10 @@ file (GLOB HDR ${CMAKE_CURRENT_SOURCE_DIR}/*.hpp) # Required zlib and cnpy dependencies -if(NOT ANDROID) - find_package(PkgConfig QUIET) -endif() - find_package(ZLIB QUIET) if(NOT TARGET ZLIB::ZLIB) + find_package(PkgConfig QUIET) if(PkgConfig_FOUND) pkg_search_module(zlib QUIET IMPORTED_TARGET GLOBAL diff --git a/src/cmake/ov_parallel.cmake b/src/cmake/ov_parallel.cmake index 2d66146c6994a8..f669b7b1562d9e 100644 --- a/src/cmake/ov_parallel.cmake +++ b/src/cmake/ov_parallel.cmake @@ -2,9 +2,7 @@ # SPDX-License-Identifier: Apache-2.0 # -if(NOT ANDROID) - find_package(PkgConfig QUIET) -endif() +find_package(PkgConfig QUIET) function(_ov_get_tbb_location tbb_target _tbb_lib_location_var) if(NOT TBB_FOUND) diff --git a/src/plugins/intel_gpu/tests/functional/CMakeLists.txt b/src/plugins/intel_gpu/tests/functional/CMakeLists.txt index d37f2abb20d672..e784d57d291c08 100644 --- a/src/plugins/intel_gpu/tests/functional/CMakeLists.txt +++ b/src/plugins/intel_gpu/tests/functional/CMakeLists.txt @@ -39,11 +39,9 @@ if(ENABLE_PROXY) endif() # try to find VA libraries -if(NOT ANDROID) - find_package(PkgConfig QUIET) - if(PkgConfig_FOUND) - pkg_search_module(libva QUIET IMPORTED_TARGET libva) - endif() +find_package(PkgConfig QUIET) +if(PkgConfig_FOUND) + pkg_search_module(libva QUIET IMPORTED_TARGET libva) endif() if(libva_FOUND) diff --git a/thirdparty/dependencies.cmake b/thirdparty/dependencies.cmake index eb4cd25aaf58bc..8ef6c5bf1c51ea 100644 --- a/thirdparty/dependencies.cmake +++ b/thirdparty/dependencies.cmake @@ -11,17 +11,12 @@ endif() set(_old_CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS}) set(_old_CMAKE_INTERPROCEDURAL_OPTIMIZATION_RELEASE ${CMAKE_INTERPROCEDURAL_OPTIMIZATION_RELEASE}) -# Android toolchain does not provide pkg-config file. So, cmake mistakenly uses -# build system pkg-config executable, which finds packages on build system. Such -# libraries cannot be linked into Android binaries. 
-if(NOT ANDROID) - find_package(PkgConfig QUIET) - # see https://cmake.org/cmake/help/latest/command/add_library.html#alias-libraries - # cmake older than 3.18 cannot create an alias for imported non-GLOBAL targets - # so, we have to use 'IMPORTED_GLOBAL' property - if(CMAKE_VERSION VERSION_LESS 3.18) - set(OV_PkgConfig_VISILITY GLOBAL) - endif() +find_package(PkgConfig QUIET) +# see https://cmake.org/cmake/help/latest/command/add_library.html#alias-libraries +# cmake older than 3.18 cannot create an alias for imported non-GLOBAL targets +# so, we have to use 'IMPORTED_GLOBAL' property +if(CMAKE_VERSION VERSION_LESS 3.18) + set(OV_PkgConfig_VISILITY GLOBAL) endif() if(SUGGEST_OVERRIDE_SUPPORTED) From eec30ec50d25482c86e9c8e7d40f123b7931bcd7 Mon Sep 17 00:00:00 2001 From: Sebastian Golebiewski Date: Tue, 3 Oct 2023 08:26:35 +0200 Subject: [PATCH 030/257] Changing file structure of Legacy Model Optimizer Extensibility section (#20198) --- ...OVC_transition.md => mo_ovc_transition.md} | 0 ...imizer.md => customize_model_optimizer.md} | 0 ...odel_optimizer_with_caffe_python_layers.md | 112 ++++ .../model_optimizer_extensions.md | 62 ++ .../model_optimizer_extractor.md | 115 ++++ .../model_optimizer_operation.md | 112 ++++ ...del_optimizer_transformation_extensions.md | 607 ++++++++++++++++++ .../model_optimizer_ports_connections.md | 188 ++++++ ...ersion_API.md => legacy_conversion_api.md} | 0 ...cts.md => convert_python_model_objects.md} | 0 .../{Cutting_Model.md => cutting_model.md} | 0 ...=> embedding_preprocessing_computation.md} | 0 ...P16_Compression.md => fp16_compression.md} | 0 ...ptimizer_FAQ.md => model_optimizer_faq.md} | 0 ...nput_Shapes.md => setting_input_shapes.md} | 0 15 files changed, 1196 insertions(+) rename docs/articles_en/documentation/openvino_legacy_features/{MO_OVC_transition.md => mo_ovc_transition.md} (100%) rename docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/{Customize_Model_Optimizer.md => customize_model_optimizer.md} (100%) create mode 100644 docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/customize_model_optimizer/extending_model_optimizer_with_caffe_python_layers.md create mode 100644 docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/customize_model_optimizer/model_optimizer_extensions.md create mode 100644 docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/customize_model_optimizer/model_optimizer_extensions/model_optimizer_extractor.md create mode 100644 docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/customize_model_optimizer/model_optimizer_extensions/model_optimizer_operation.md create mode 100644 docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/customize_model_optimizer/model_optimizer_extensions/model_optimizer_transformation_extensions.md create mode 100644 docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/customize_model_optimizer/model_optimizer_ports_connections.md rename docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/{Legacy_Conversion_API.md => legacy_conversion_api.md} (100%) rename docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/{Convert_Python_Model_Objects.md => convert_python_model_objects.md} (100%) rename docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/{Cutting_Model.md => cutting_model.md} (100%) rename 
docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/{Embedding_Preprocessing_Computation.md => embedding_preprocessing_computation.md} (100%) rename docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/{FP16_Compression.md => fp16_compression.md} (100%) rename docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/{Model_Optimizer_FAQ.md => model_optimizer_faq.md} (100%) rename docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/{Setting_Input_Shapes.md => setting_input_shapes.md} (100%) diff --git a/docs/articles_en/documentation/openvino_legacy_features/MO_OVC_transition.md b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition.md similarity index 100% rename from docs/articles_en/documentation/openvino_legacy_features/MO_OVC_transition.md rename to docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition.md diff --git a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/Customize_Model_Optimizer.md b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/customize_model_optimizer.md similarity index 100% rename from docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/Customize_Model_Optimizer.md rename to docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/customize_model_optimizer.md diff --git a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/customize_model_optimizer/extending_model_optimizer_with_caffe_python_layers.md b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/customize_model_optimizer/extending_model_optimizer_with_caffe_python_layers.md new file mode 100644 index 00000000000000..f04835ead4f519 --- /dev/null +++ b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/customize_model_optimizer/extending_model_optimizer_with_caffe_python_layers.md @@ -0,0 +1,112 @@ +# [LEGACY] Extending Model Optimizer with Caffe Python Layers {#openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Extending_Model_Optimizer_With_Caffe_Python_Layers} + +@sphinxdirective + +.. meta:: + :description: Learn how to extract operator attributes in Model Optimizer to + support a custom Caffe operation written only in Python. + +.. danger:: + + The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. + + This guide describes a deprecated TensorFlow conversion method. The guide on the new and recommended method, using a new frontend, can be found in the :doc:`Frontend Extensions ` article. + +This article provides instructions on how to support a custom Caffe operation written only in Python. For example, the +`Faster-R-CNN model `__ implemented in +Caffe contains a custom proposal layer written in Python. The layer is described in the +`Faster-R-CNN prototxt `__ in the following way: + +.. code-block:: sh + + layer { + name: 'proposal' + type: 'Python' + bottom: 'rpn_cls_prob_reshape' + bottom: 'rpn_bbox_pred' + bottom: 'im_info' + top: 'rois' + python_param { + module: 'rpn.proposal_layer' + layer: 'ProposalLayer' + param_str: "'feat_stride': 16" + } + } + + +This article describes only a procedure on how to extract operator attributes in Model Optimizer. 
The rest of the +operation enabling pipeline and information on how to support other Caffe operations (written in C++) is described in +the :doc:`Customize Model Optimizer ` guide. + +======================================== +Writing Extractor for Caffe Python Layer +======================================== + +Custom Caffe Python layers have an attribute ``type`` (defining the type of the operation) equal to ``Python`` and two +mandatory attributes ``module`` and ``layer`` in the ``python_param`` dictionary. The ``module`` defines the Python module name +with the layer implementation, while ``layer`` value is an operation type defined by a user. In order to extract +attributes for such an operation it is necessary to implement extractor class inherited from the +``CaffePythonFrontExtractorOp`` class instead of ``FrontExtractorOp`` class, used for standard framework layers. The ``op`` +class attribute value should be set to the ``module + "." + layer`` value so the extractor is triggered for this kind of +operation. + +Below is a simplified example of the extractor for the custom operation Proposal from the mentioned Faster-R-CNN model. +The full code with additional checks can be found `here `__. + +The sample code uses operation ``ProposalOp`` which corresponds to ``Proposal`` operation described in the :doc:`Available Operations Sets ` +page. For a detailed explanation of the extractor, refer to the source code below. + +.. code-block:: py + :force: + + from openvino.tools.mo.ops.proposal import ProposalOp + from openvino.tools.mo.front.extractor import CaffePythonFrontExtractorOp + + + class ProposalPythonFrontExtractor(CaffePythonFrontExtractorOp): + op = 'rpn.proposal_layer.ProposalLayer' # module + "." + layer + enabled = True # extractor is enabled + + @staticmethod + def extract_proposal_params(node, defaults): + param = node.pb.python_param # get the protobuf message representation of the layer attributes + # parse attributes from the layer protobuf message to a Python dictionary + attrs = CaffePythonFrontExtractorOp.parse_param_str(param.param_str) + update_attrs = defaults + + # the operation expects ratio and scale values to be called "ratio" and "scale" while Caffe uses different names + if 'ratios' in attrs: + attrs['ratio'] = attrs['ratios'] + del attrs['ratios'] + if 'scales' in attrs: + attrs['scale'] = attrs['scales'] + del attrs['scales'] + + update_attrs.update(attrs) + ProposalOp.update_node_stat(node, update_attrs) # update the node attributes + + @classmethod + def extract(cls, node): + # define default values for the Proposal layer attributes + defaults = { + 'feat_stride': 16, + 'base_size': 16, + 'min_size': 16, + 'ratio': [0.5, 1, 2], + 'scale': [8, 16, 32], + 'pre_nms_topn': 6000, + 'post_nms_topn': 300, + 'nms_thresh': 0.7 + } + cls.extract_proposal_params(node, defaults) + return cls.enabled + +==================== +Additional Resources +==================== + +* :doc:`Model Optimizer Extensibility ` +* :doc:`Graph Traversal and Modification Using Ports and Connections ` +* :doc:`Model Optimizer Extensions ` + +@endsphinxdirective diff --git a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/customize_model_optimizer/model_optimizer_extensions.md b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/customize_model_optimizer/model_optimizer_extensions.md new file mode 100644 index 00000000000000..6da9bbe4c78e87 --- /dev/null +++ 
b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/customize_model_optimizer/model_optimizer_extensions.md @@ -0,0 +1,62 @@ +# [LEGACY] Model Optimizer Extensions {#openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Model_Optimizer_Extensions} + +@sphinxdirective + +.. meta:: + :description: Learn about deprecated extensions, which enable injecting logic + to the model conversion pipeline without changing the Model + Optimizer core code. + +.. toctree:: + :maxdepth: 1 + :hidden: + + openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Model_Optimizer_Extensions_Model_Optimizer_Operation + openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Model_Optimizer_Extensions_Model_Optimizer_Extractor + openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Model_Optimizer_Extensions_Model_Optimizer_Transformation_Extensions + +.. danger:: + + The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. + + This guide describes a deprecated TensorFlow conversion method. The guide on the new and recommended method, using a new frontend, can be found in the :doc:`Frontend Extensions ` article. + +Model Optimizer extensions enable you to inject some logic to the model conversion pipeline without changing the Model +Optimizer core code. There are three types of the Model Optimizer extensions: + +1. :doc:`Model Optimizer operation `. +2. A :doc:`framework operation extractor `. +3. A :doc:`model transformation `, which can be executed during front, middle or back phase of the model conversion. + +An extension is just a plain text file with a Python code. The file should contain a class (or classes) inherited from +one of extension base classes. Extension files should be saved to a directory with the following structure: + +.. code-block:: sh + + .// + ops/ - custom operations + front/ - framework independent front transformations + / - front transformations for models only and extractors for operations + / - front transformations for models only and extractors for operations + ... + middle/ - middle transformations + back/ - back transformations + +Model Optimizer uses the same layout internally to keep built-in extensions. The only exception is that the +``mo/ops/`` directory is also used as a source of the Model Optimizer operations due to historical reasons. + +.. note:: + The name of a root directory with extensions should not be equal to "extensions" because it will result in a name conflict with the built-in Model Optimizer extensions. + +.. note:: + Model Optimizer itself is built by using these extensions, so there is a huge number of examples of their usage in the Model Optimizer code. 
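+For illustration, a minimal operation extension — the kind of file that would be placed in the ``ops/`` directory of the layout above — might look like the following sketch. The operation name ``MyCustomOp`` and its identity-style shape inference are hypothetical and used only for this example; the import paths follow the ``openvino.tools.mo`` package layout used by the other examples in this section.
+
+.. code-block:: py
+
+   from openvino.tools.mo.graph.graph import Graph, Node
+   from openvino.tools.mo.ops.op import Op
+
+
+   class MyCustomOp(Op):
+       op = 'MyCustomOp'  # hypothetical operation name, used only in this example
+
+       def __init__(self, graph: Graph, attrs: dict):
+           super().__init__(graph, {
+               'type': __class__.op,
+               'op': __class__.op,
+               'version': 'extension',  # any custom opset name; if omitted, Model Optimizer falls back to "experimental"
+               'infer': __class__.infer,
+               'in_ports_count': 1,
+               'out_ports_count': 1,
+           }, attrs)
+
+       @staticmethod
+       def infer(node: Node):
+           # Identity-style shape inference: propagate the input shape to the output unchanged.
+           node.out_port(0).data.set_shape(node.in_port(0).data.get_shape())
+
+The directory containing such extension files is typically passed to Model Optimizer with the ``--extensions`` command-line option, so the custom classes are registered without modifying the Model Optimizer code itself.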
+ +==================== +Additional Resources +==================== + +* :doc:`Model Optimizer Extensibility ` +* :doc:`Graph Traversal and Modification Using Ports and Connections ` +* :doc:`Extending Model Optimizer with Caffe Python Layers ` + +@endsphinxdirective diff --git a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/customize_model_optimizer/model_optimizer_extensions/model_optimizer_extractor.md b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/customize_model_optimizer/model_optimizer_extensions/model_optimizer_extractor.md new file mode 100644 index 00000000000000..71b291553973a9 --- /dev/null +++ b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/customize_model_optimizer/model_optimizer_extensions/model_optimizer_extractor.md @@ -0,0 +1,115 @@ +# [LEGACY] Operation Extractor {#openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Model_Optimizer_Extensions_Model_Optimizer_Extractor} + +@sphinxdirective + +.. meta:: + :description: Learn about a deprecated generic extension in Model Optimizer, + which provides the operation extractor usable for all model + frameworks. + + +.. danger:: + + The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. + + This guide describes a deprecated TensorFlow conversion method. The guide on the new and recommended method, using a new frontend, can be found in the :doc:`Frontend Extensions ` article. + +Model Optimizer runs specific extractor for each operation in the model during the model loading. + +There are several types of Model Optimizer extractor extensions: + +1. The generic one, which is described in this article. +2. The special extractor for Caffe models with Python layers. This kind of extractor is described in the :doc:`Extending Model Optimizer with Caffe Python Layers ` guide. + +Generic extension provides a generic mechanism for the operation extractor applicable for all frameworks. Model Optimizer provides the ``mo.front.extractor.FrontExtractorOp`` class as a base class to implement the extractor. It has the ``extract`` class method, which gets the only parameter ``Node``, which corresponds to the graph node to extract data from. The operation description in the original framework format is stored in the attribute ``pb`` of the node. The extractor goal is to parse this attribute and save necessary attributes to the corresponding node of the graph. Consider the extractor for the ``Const`` TensorFlow operation (refer to the ``extensions/front/tf/const_ext.py`` file): + +.. code-block:: py + :force: + + from openvino.tools.mo.front.extractor import FrontExtractorOp + from openvino.tools.mo.front.tf.extractors.utils import tf_dtype_extractor, tf_tensor_shape, tf_tensor_content + from openvino.tools.mo.ops.const import Const + + + class ConstExtractor(FrontExtractorOp): + # The "op" class attribute defines a type of the operation in the framework (in this case it is a TensorFlow), + # for which the extractor should be triggered. + op = 'Const' + enabled = True # The flag that indicates that this extractor is enabled. + + @classmethod + def extract(cls, node): # The entry point of the extractor. + # The `node.pb` attribute stores the TensorFlow representation of the operation, which is a Protobuf message of the + # specific format. 
In particular, the message contains the attribute called "value" containing the description of + # the constant. The string "pb.attr["value"].tensor" is just a Python binding for Protobuf message parsing. + pb_tensor = node.pb.attr["value"].tensor + # Get the shape of the tensor from the protobuf message, using the helper function "tf_tensor_shape". + shape = tf_tensor_shape(pb_tensor.tensor_shape) + # Create a dictionary with necessary attributes. + attrs = { + 'shape': shape, + # Get the tensor value, using "tf_tensor_content" helper function. + 'value': tf_tensor_content(pb_tensor.dtype, shape, pb_tensor), + # Get the tensor data type, using "tf_dtype_extractor" helper function. + 'data_type': tf_dtype_extractor(pb_tensor.dtype), + } + # Update the node attributes, using default attributes from the "Const" operation and attributes saved to the + # "attrs" dictionary. + Const.update_node_stat(node, attrs) + return cls.enabled + +Consider another example with an extractor of the ``Constant`` ONNX operation (refer to the ``extensions/front/onnx/const_ext.py`` file): + +.. code-block:: py + :force: + + from onnx import numpy_helper + from onnx.numpy_helper import to_array + + from openvino.tools.mo.front.extractor import FrontExtractorOp + from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr + from openvino.tools.mo.ops.const import Const + + + class ConstantExtractor(FrontExtractorOp): + op = 'Constant' + enabled = True + + @classmethod + def extract(cls, node): + # Use "onnx_attr" helper method, which parses the Protobuf representation of the operation saved in the "node". + # Gets the value of the attribute with name "value" as "TensorProto" type (specified with a keyword "t"). + pb_value = onnx_attr(node, 'value', 't') + # Use "numpy_helper.to_array()" ONNX helper method to convert "TensorProto" object to a numpy array. + value = numpy_helper.to_array(pb_value) + + attrs = { + 'data_type': value.dtype, + 'value': value, + } + # Update the node attributes, using default attributes from the "Const" operation and attributes saved to the + # "attrs" dictionary. + Const.update_node_stat(node, attrs) + return cls.enabled + +The extractors for operations from different frameworks work similarly. The only difference is in the helper methods used to parse operation attributes encoded with a framework-specific representation. + +A common practice is to use ``update_node_stat()`` method of the dedicated ``Op`` class to update the node attributes. This method does the following: + +1. Sets values for common attributes like ``op``, ``type``, ``infer``, ``in_ports_count``, ``out_ports_count``, ``version`` to values specific to the dedicated operation (``Const`` operation in this case). +2. Uses ``supported_attrs()`` and ``backend_attrs()`` methods, defined in the ``Op`` class to update specific node attribute ``IE``. The IR emitter uses the value stored in the ``IE`` attribute to pre-process attribute values and save them to IR. +3. Optionally sets additional attributes provided to the ``update_node_stat()`` function as a second parameter. Usually these attributes are parsed from the particular instance of the operation. + +.. note:: + Model Optimizer uses numpy arrays to store values and numpy arrays of ``np.int64`` type to store shapes in the graph. 
+ +==================== +Additional Resources +==================== + +* :doc:`Model Optimizer Extensibility ` +* :doc:`Graph Traversal and Modification Using Ports and Connections ` +* :doc:`Model Optimizer Extensions ` +* :doc:`Extending Model Optimizer with Caffe Python Layers ` + +@endsphinxdirective diff --git a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/customize_model_optimizer/model_optimizer_extensions/model_optimizer_operation.md b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/customize_model_optimizer/model_optimizer_extensions/model_optimizer_operation.md new file mode 100644 index 00000000000000..0de74fbd6ff073 --- /dev/null +++ b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/customize_model_optimizer/model_optimizer_extensions/model_optimizer_operation.md @@ -0,0 +1,112 @@ +# [LEGACY] Model Optimizer Operation {#openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Model_Optimizer_Extensions_Model_Optimizer_Operation} + +@sphinxdirective + +.. meta:: + :description: Learn about the Op class, that contains operation attributes, + which are set to a node of the graph created during model + conversion with Model Optimizer. + +.. danger:: + + The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. + + This guide describes a deprecated TensorFlow conversion method. The guide on the new and recommended method, using a new frontend, can be found in the :doc:`Frontend Extensions ` article. + +Model Optimizer defines a ``mo.ops.Op`` class (``Op`` will be used later in the document to be short), which is a base class +for an operation used in the Model Optimizer. The instance of the ``Op`` class serves several purposes: + +1. Stores the operation attributes. +2. Stores the operation shape/value and type inference functions. +3. Defines operation attributes to be saved to the corresponding IR section. +4. Contains convenient methods to create a graph node from an ``Op`` object instance and connect it with the existing graph. +5. Used in the extractors to store parsed attributes and operation specific attributes in the dedicated graph node. + +It is important to mention that there is no connection between the instance of the ``Op`` class and the ``Node`` object +created from it. The ``Op`` class is just a container for attributes describing the operation. Model Optimizer uses the ``Op`` +class during a model conversion to create a node of the graph with attributes copied from the ``Op`` class instance. Graph +manipulations are performed with graph ``Nodes`` and their attributes and does not involve ``Ops``. + +There are a number of common attributes used in the operations. Below is the list of these attributes with description. + +* ``id`` — **(Mandatory)** — unique identifier of a node in a graph. Generated automatically, equal to the number of nodes in the graph plus 1 if not specified. +* ``name`` — **(Mandatory)** — name of the operation. Generated automatically, equal to the ``id`` if not specified. +* ``type`` — **(Mandatory)** — type of the operation according to the :doc:`opset specification `. For the internal Model Optimizer operations, this attribute should be set to ``None``. The model conversion fails if an operation with ``type`` equal to ``None`` comes to the IR emitting phase. 
+* ``version`` — **(Mandatory)** — the operation set (opset) name the operation belongs to. If not specified, Model Optimizer sets it equal to ``experimental``. For more information about operation sets, refer to :doc:`OpenVINO Model Representation ` section. +* ``op`` — Model Optimizer type of the operation. In many cases, the value of ``type`` is equal to the value of ``op``. However, when Model Optimizer cannot instantiate the opset operation during model loading, it creates an instance of an internal operation. Thus, the attribute ``op`` is used as a type of this internal operation. Later in the pipeline, the node created from an internal operation will be replaced during front, middle or back phase with node(s) created from the opset. +* ``infer`` — the attribute defines a function calculating output tensor(s) shape and optional value(s). The attribute may be set to ``None`` for the internal Model Optimizer operations used during the front phase only. For more information about the shape inference function, refer to the :ref:`Partial Inference `. +* ``type_infer`` — the attribute defines a function calculating output tensor(s) data type. If the attribute is not defined, the default function is used. The function checks if the ``data_type`` node attribute is set and then propagates this type to the output tensor from the **port 0**. Otherwise, it propagates the data type of the tensor coming into the input **port 0** to the output tensor from the **port 0**. +* ``in_ports_count`` — default number of input ports to be created for the operation. Additional ports can be created or redundant ports can be removed using dedicated ``Node`` class API methods. +* ``out_ports_count`` — default number of output ports to be created for the operation. Additional ports can be created or redundant ports can be removed using dedicated ``Node`` class API methods. + +Below is an example of the Model Optimizer class for the :doc:`SoftMax ` operation from +the ``mo/ops/softmax.py`` file with the comments in code. + +.. code-block:: py + + class Softmax(Op): + # The class attribute defines a name of the operation so the operation class can be obtained using the + # "Op.get_op_class_by_name()" static method + op = 'SoftMax' + + # The operation works as an extractor by default. This is a legacy behavior, currently not recommended for use, + # thus "enabled" class attribute is set to False. The recommended approach is to use dedicated extractor extension. + enabled = False + + def __init__(self, graph: Graph, attrs: dict): + super().__init__(graph, { # The constructor of the base class Op is called with additional default attributes. + 'type': __class__.op, # The operation is from the opset so the type is set to 'SoftMax'. + 'op': __class__.op, # Internal Model Optimizer operation has the same type. + 'version': 'opset1', # The operation corresponds to opset1. + 'infer': Softmax.infer, # Shape inference function is defined below. + 'axis': 1, # Default value for the "axis" attribute of the operation SoftMax. + 'in_ports_count': 1, # The operation has one input. + 'out_ports_count': 1, # The operation produces one output. + }, attrs) + + # The method returns operation specific attributes list. This method is important when implementing + # extractor inherited from CaffePythonFrontExtractorOp class to extract attribute for Caffe Python operation. + # However, it is currently used interchangeably with the "backend_attrs()" method. 
If the "backend_attrs()" is not used, + # then the "supported_attrs()" is used instead. In this particular case, the operation has just one attribute "axis". + def supported_attrs(self): + return ['axis'] + + @staticmethod + def infer(node: Node): + "some code calculating output shape and values" + +There is a dedicated method called ``backend_attrs()`` defining a list of attributes to be saved to the IR. Consider an +example from the ``mo/ops/pooling.py`` file: + +.. code-block:: py + + def backend_attrs(self): + return [ + ('strides', lambda node: ','.join(map(str, node['stride'][node.spatial_dims]))), + ('kernel', lambda node: ','.join(map(str, node['window'][node.spatial_dims]))), + + ('pads_begin', lambda node: ','.join(map(str, get_backend_pad(node.pad, node.spatial_dims, 0)))), + ('pads_end', lambda node: ','.join(map(str, get_backend_pad(node.pad, node.spatial_dims, 1)))), + + ('pool-method', 'pool_method'), + ('exclude-pad', 'exclude_pad'), + + 'rounding_type', + 'auto_pad', + ] + +The ``backend_attrs()`` function returns a list of records. A record can be of one of the following formats: +1. A string defining the attribute to be saved to the IR. If the value of the attribute is ``None``, the attribute is not saved. Examples of this case are ``rounding_type`` and ``auto_pad``. +2. A tuple, where the first element is a string defining the name of the attribute as it will appear in the IR and the second element is a function to produce the value for this attribute. The function gets an instance of the ``Node`` as the only parameter and returns a string with the value to be saved to the IR. Examples of this case are ``strides``, ``kernel``, ``pads_begin`` and ``pads_end``. +3. A tuple, where the first element is a string defining the name of the attribute as it will appear in the IR and the second element is the name of the ``Node`` attribute to get the value from. Examples of this case are ``pool-method`` and ``exclude-pad``. + +==================== +Additional Resources +==================== + +* :doc:`Model Optimizer Extensibility ` +* :doc:`Graph Traversal and Modification Using Ports and Connections ` +* :doc:`Model Optimizer Extensions ` +* :doc:`Extending Model Optimizer with Caffe Python Layers ` + +@endsphinxdirective diff --git a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/customize_model_optimizer/model_optimizer_extensions/model_optimizer_transformation_extensions.md b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/customize_model_optimizer/model_optimizer_extensions/model_optimizer_transformation_extensions.md new file mode 100644 index 00000000000000..cc198f3a40660b --- /dev/null +++ b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/customize_model_optimizer/model_optimizer_extensions/model_optimizer_transformation_extensions.md @@ -0,0 +1,607 @@ +# [LEGACY] Graph Transformation Extensions {#openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Model_Optimizer_Extensions_Model_Optimizer_Transformation_Extensions} + +@sphinxdirective + +.. meta:: + :description: Learn about various base classes for front, middle and back phase + transformations applied during model conversion with Model Optimizer. + +.. danger:: + + The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. 
+
+   This guide describes a deprecated TensorFlow conversion method. The guide on the new and recommended method, using a new frontend, can be found in the :doc:`Frontend Extensions ` article.
+
+Model Optimizer provides various base classes to implement :ref:`Front Phase Transformations `,
+:ref:`Middle Phase Transformations `, and :ref:`Back Phase Transformations `.
+All classes have the following common class attributes and methods:
+
+1. The ``enabled`` attribute specifies whether the transformation is enabled or not. The value can be changed at runtime to enable or disable execution of the transformation during a model conversion. Default value is ``True``.
+2. The ``id`` attribute specifies a unique transformation string identifier. This transformation identifier can be used to enable (disable) the transformation by setting the ``MO_ENABLED_TRANSFORMS`` (``MO_DISABLED_TRANSFORMS``) environment variable to a comma-separated list of ``ids``. The environment variables override the value of the ``enabled`` attribute of the transformation. Instead of the ``id`` attribute value, you can add the fully qualified class name to the ``MO_ENABLED_TRANSFORMS`` (``MO_DISABLED_TRANSFORMS``) variable, for example ``extensions.back.NormalizeToNormalizeL2.NormalizeToNormalizeL2``. It is an optional attribute.
+3. The ``run_not_recursively`` attribute specifies whether the transformation should be executed in the sub-graphs, for example, the body of the :doc:`TensorIterator ` and the :doc:`Loop `. Default value is ``True``.
+4. The ``force_clean_up`` attribute specifies whether the graph cleanup should be executed after the transformation. The graph cleanup removes nodes of the graph not reachable from the model inputs. Default value is ``False``.
+5. The ``force_shape_inference`` attribute specifies whether the nodes marked with the ``need_shape_inference`` attribute equal to ``True`` should be re-inferred after the transformation. Model Optimizer sets this attribute automatically for nodes whose input(s) were changed during the transformation, or you can set this attribute manually in the transformation for the specific nodes. Default value is ``False``.
+6. The ``graph_condition`` attribute specifies a list of functions, each taking a single ``Graph`` object parameter. The transformation is executed if and only if all functions return ``True``. If the attribute is not set, no check is performed.
+7. The ``run_before()`` method returns a list of transformation classes before which this transformation should be executed.
+8. The ``run_after()`` method returns a list of transformation classes after which this transformation should be executed.
+
+.. note::
+   Some of the transformation types have specific class attributes and methods, which are explained in the corresponding sections of this document.
+
+Model Optimizer builds a graph of dependencies between registered transformations and executes them in the topological
+order. To execute the transformation during a proper model conversion phase, Model Optimizer defines several
+anchor transformations that do nothing. All transformations are ordered with respect to these anchor transformations.
+The diagram below shows anchor transformations, some of the built-in transformations, and dependencies between them:
+
+.. 
image:: _static/images/MO_transformations_graph.svg + +User-defined transformations are executed after the corresponding ``Start`` and before the corresponding ``Finish`` anchor +transformations by default (if ``run_before()`` and ``run_after()`` methods have not been overridden). + +.. note:: + The ``PreMiddleStart`` and ``PostMiddleStart`` anchors were introduced due to historical reasons to refactor the Model Optimizer pipeline, which initially had a hardcoded order of transformations. + +.. _mo_front_phase_transformations: + +=========================== +Front Phase Transformations +=========================== + +There are several types of a front phase transformation: + +1. :ref:`Pattern-Defined Front Phase Transformations ` triggered for each sub-graph of the original graph isomorphic to the specified pattern. +2. :ref:`Specific Operation Front Phase Transformations ` triggered for the node with a specific ``op`` attribute value. +3. :ref:`Generic Front Phase Transformations `. +4. Manually enabled transformation, defined with a JSON configuration file (for TensorFlow, ONNX, Apache MXNet, and PaddlePaddle models), specified using the ``--transformations_config`` command-line parameter: + + 1. :ref:`Node Name Pattern Front Phase Transformations `. + 2. :ref:`Front Phase Transformations Using Start and End Points `. + 3. :ref:`Generic Front Phase Transformations Enabled with Transformations Configuration File `. + +.. _pattern_defined_front_phase_transformations: + +Pattern-Defined Front Phase Transformations +########################################### + +This type of transformation is implemented using ``mo.front.common.replacement.FrontReplacementSubgraph`` and +``mo.front.common.replacement.FrontReplacementPattern`` as base classes and works as follows: + +1. Define a sub-graph to be matched, using a list of nodes with attributes and edges connecting them (edges may also have attributes). +2. Model Optimizer searches for all sub-graphs of the original graph, isomorphic to the specified sub-graph (pattern). +3. Model Optimizer executes the defined function performing graph transformation for each instance of a matched sub-graph. You can override different functions in the base transformation class so the Model Optimizer works differently: + + 1. The ``replace_sub_graph(self, graph, match)`` override the method. In this case Model Optimizer only executes the overridden function, pass the ``graph`` object and a dictionary describing the matched sub-graph. You are required to write the transformation and connect the newly created nodes to the rest of the graph. + 2. The ``generate_sub_graph(self, graph, match)`` override the method. This case is not recommended for use because it is the most complicated approach. It can be effectively replaced with one of two previous approaches. + +The sub-graph pattern is defined in the ``pattern()`` function. This function should return a dictionary with two keys: +``nodes`` and ``edges``: + +* The value for the ``nodes`` key is a list of tuples with two elements. + + * The first element is an alias name for a node that will be used to define edges between nodes and in the transformation function. + * The second element is a dictionary with attributes. The key is a name of an attribute that should exist in the node. The value for the attribute can be some specific value to match or a function that gets a single parameter - the attribute value from the node. The function should return the result of attribute comparison with a dedicated value. 
+ +* The value for the ``edges`` key is a list of tuples with two or three elements. + + * The first element is the alias name of the node producing a tensor. + * The second element is the alias name of the node consuming the tensor. + * The third element (optional) is the dictionary with expected edge attributes. This dictionary usually contains attributes like ``in`` and ``out``, defining input and output ports. + +Consider the example of a front transformation implemented in the ``extensions/front/Mish_fusion.py`` file performing +fusing of the sub-graph defining the :doc:`Mish ` activation function into a single +operation: + +.. code-block:: py + :force: + + from openvino.tools.mo.front.Softplus_fusion import SoftplusFusion + from openvino.tools.mo.ops.activation_ops import Mish + from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph + from openvino.tools.mo.front.subgraph_matcher import SubgraphMatch + from openvino.tools.mo.graph.graph import Graph, rename_nodes + + + class MishFusion(FrontReplacementSubgraph): + """ + The transformation looks for the pattern with Softplus defining the Mish function: Mish(x) = x * tanh(SoftPlus(x)). + """ + enabled = True # Transformation is enabled. + + def run_after(self): # Run this transformation after "SoftplusFusion" transformation. + return [SoftplusFusion] + + def pattern(self): # Define pattern according to formulae x * tanh(SoftPlus(x)). + return dict( + nodes=[ + ('mul', dict(op='Mul')), + ('tanh', dict(op='Tanh')), + ('softplus', dict(op='SoftPlus')), + ], + edges=[ + ('softplus', 'tanh'), + ('tanh', 'mul'), + ]) + + def replace_sub_graph(self, graph: Graph, match: [dict, SubgraphMatch]): # Entry point for the transformation. + mul = match['mul'] # Get the Node corresponding to matched "mul" node. + mul_name = mul.soft_get('name', mul.id) + softplus = match['softplus'] # Get the Node corresponding to the matched "softplus" node. + + # Determine the input port of Mul which gets the 'input' node output. + input_port_idx = int(mul.in_port(0).get_connection().get_source().node.soft_get('op') == 'Tanh') + + # Check that the same tensor is provided as input to Mul and SoftPlus. + if mul.in_port(input_port_idx).get_source() != softplus.in_port(0).get_source(): + return + + mish = Mish(graph, {}).create_node() # Create Mish operation. + mish.in_port(0).connect(mul.in_port(input_port_idx).get_source()) # Connect input to the Mish. + mul.out_port(0).get_connection().set_source(mish.out_port(0)) # Reconnect outgoing edge from "mul" to Mish. + + # Rename the created Mish operation to have the name of the "mul" node, which produced the value equal to the + # Mish output. + rename_nodes([(mul, mul_name + '/TBR'), (mish, mul_name)]) + +.. _specific_operation_front_phase_transformations: + +Specific Operation Front Phase Transformations +############################################## + +This type of transformation is implemented using ``mo.front.common.replacement.FrontReplacementOp`` as base class and +works as follows: + +1. Define an operation type to trigger the transformation. +2. Model Optimizer searches for all nodes in the graph with the attribute ``op`` equal to the specified value. +3. Model Optimizer executes the defined function performing graph transformation for each instance of a matched node. You can override different functions in the base transformation class and Model Optimizer works differently: + + 1. The ``replace_sub_graph(self, graph, match)`` override method. 
In this case, Model Optimizer only executes the overridden function. Pass the ``graph`` object and a dictionary with a single key ``op`` with the matched node as value. You are required to write the transformation and connect the newly created nodes to the rest of the graph. + 2. The ``replace_op(self, graph, node)`` override method. In this case, Model Optimizer executes the overridden function. Pass the ``graph`` object and the matched node as ``node`` parameter. If the function returns an ``id`` of some node, then the ``Node`` with this ``id`` is connected to the consumers of the matched node. After applying the transformation, the matched node is removed from the graph. + +The ``FrontReplacementOp`` class provides a simpler mechanism to match a single operation with specific value of the ``op`` +(write the ``op`` attribute in the class instead of defining a ``pattern()`` function) attribute and perform the +transformation. + +Consider an example transformation from the ``extensions/front/Pack.py`` file, which replaces ``Pack`` operation from +the TensorFlow: + +.. code-block:: py + :force: + + from openvino.tools.mo.front.common.partial_infer.utils import int64_array + from openvino.tools.mo.front.common.replacement import FrontReplacementOp + from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs + from openvino.tools.mo.graph.graph import Node, Graph, rename_nodes + from openvino.tools.mo.ops.concat import Concat + from openvino.tools.mo.ops.unsqueeze import Unsqueeze + + + class Pack(FrontReplacementOp): + op = "Pack" # Trigger transformation for all nodes in the graph with the op = "Pack" attribute + enabled = True # Transformation is enabled. + + def replace_op(self, graph: Graph, node: Node): # Entry point for the transformation. + # Create a Concat operation with a number of inputs equal to a number of inputs to Pack. + out_node = Concat(graph, {'axis': node.axis, 'in_ports_count': len(node.in_ports())}).create_node() + pack_name = node.soft_get('name', node.id) + + for ind in node.in_ports(): + # Add dimension of size 1 to all inputs of the Pack operation and add them as Concat inputs. + unsqueeze_node = create_op_with_const_inputs(graph, Unsqueeze, {1: int64_array([node.axis])}, + {'name': node.soft_get('name', node.id) + '/Unsqueeze'}) + node.in_port(ind).get_connection().set_destination(unsqueeze_node.in_port(0)) + unsqueeze_node.out_port(0).connect(out_node.in_port(ind)) + + # Rename the created Concat operation to have the name of the "pack" node, which produced the value equal to the + # Concat output. + rename_nodes([(node, pack_name + '/TBR'), (out_node, pack_name)]) + return [out_node.id] # Reconnect the Pack operation consumers to get input from Concat instead. + + +.. _generic_front_phase_transformations: + +Generic Front Phase Transformations +################################### + +Model Optimizer provides a mechanism to implement generic front phase transformation. This type of transformation is +implemented using ``mo.front.common.replacement.FrontReplacementSubgraph`` or +``mo.front.common.replacement.FrontReplacementPattern`` as base classes. Make sure the transformation is enabled before trying to execute it. +Then, Model Optimizer executes the ``find_and_replace_pattern(self, graph)`` method and +provides a ``Graph`` object as an input. + +Consider the example of a generic front transformation from the ``extensions/front/SqueezeNormalize.py`` file performing +normalization of the :doc:`Squeeze ` operation. 
Older version of the operation had a list of +axes to squeeze as an attribute, but now it is a separate input. For backward compatibility, the Model Optimizer +operation supports both semantics. Before IR generation, however, the operation should be normalized according to the +specification. + +.. code-block:: py + :force: + + import logging as log + + from openvino.tools.mo.front.common.partial_infer.utils import int64_array + from openvino.tools.mo.front.common.replacement import FrontReplacementPattern + from openvino.tools.mo.graph.graph import Graph + from openvino.tools.mo.ops.const import Const + from openvino.tools.mo.utils.error import Error + + + class SqueezeNormalize(FrontReplacementPattern): + """ + Normalizes inputs of the Squeeze layers. The layers should have two inputs: the input with data and input with the + dimensions to squeeze. If the second input is omitted then all dimensions of size 1 should be removed. + """ + enabled = True # The transformation is enabled. + + def find_and_replace_pattern(self, graph: Graph): # The function is called unconditionally. + for squeeze_node in graph.get_op_nodes(op='Squeeze'): # Iterate over all nodes with op='Squeeze'. + # If the operation has only 1 input node and no 'squeeze_dims' Node attribute, then convert the attribute to + # the operation input. + if len(squeeze_node.in_nodes()) == 1 and squeeze_node.has_valid('squeeze_dims'): + dims_node = Const(graph, {'name': squeeze_node.id + '/Dims', + 'value': int64_array(squeeze_node.squeeze_dims)}).create_node() + squeeze_node.in_port(1).connect(dims_node.out_port(0)) + del squeeze_node['squeeze_dims'] + # If two inputs already exist, that means the operation is already normalized. + elif len(squeeze_node.in_nodes()) == 2: + log.debug('The Squeeze node "{}" is already normalized'.format(squeeze_node.name)) + # In all other cases, raise an error. + else: + raise Error('The Squeeze layer "{}" should either have 2 inputs or one input and an "squeeze_dims" ' + 'attribute'.format(squeeze_node.soft_get('name'))) + +For the details on implementation and how these front phase transformations work, refer to the ``mo/front/common/replacement.py`` +file. + +.. _node_name_pattern_front_phase_transformations: + +Node Name Pattern Front Phase Transformations +############################################# + +TensorFlow uses a mechanism of scope to group related operation nodes. It is a good practice to put nodes performing +particular task into the same scope. This approach divides a graph into logical blocks that are easier to review in the +TensorBoard. The scope, in fact, just defines a common name prefix for the nodes belonging to it. + +For example, Inception topologies contain several types of so-called **Inception blocks**. Some of them are equal to each +other, but located in different places of the network. For example, Inception V4 from the +`TensorFlow-Slim image classification model library `__ has +``Mixed_5b``, ``Mixed_5c`` and ``Mixed_5d`` inception blocks with exactly the same nodes, with the same set of attributes. + +Consider a situation when these Inception blocks are implemented extremely efficiently using a single Inference +Engine operation called ``InceptionBlock`` and these blocks in the model need to be replaced with instances of this operation. +Model Optimizer provides mechanism to trigger the transformation for a sub-graph of operations defined by the node name +regular expressions (scope). 
In this particular case, some of the patterns are: ``.*InceptionV4/Mixed_5b``, +``.*InceptionV4/Mixed_5c`` and ``.*InceptionV4/Mixed_5d``. Each pattern starts with ``.*``, because the ``InceptionV4`` prefix +is added to all nodes names during a model freeze. + +This type of transformation is implemented using ``mo.front.tf.replacement.FrontReplacementFromConfigFileSubGraph`` as a +base class and works as follows: + +1. Prepare a JSON configuration file template defining node names patterns. +2. Run Model Optimizer with the ``--tensorflow_custom_operations_config_update`` command-line parameter, and Model Optimizer adds information about input and output nodes of the specified sub-graphs. +3. Model Optimizer executes the defined transformation **only** when you specify the path to the configuration file updated in step 2 using the ``--transformations_config`` command-line parameter. + +Consider the following possible configuration file template for the Inception Block transformation: + +.. code-block:: json + + [ + { + "custom_attributes": { + "attr1_key": "attr1_value", + "attr2_key": 123456 + }, + "id": "InceptionBlockTransformation", + "instances": [ + ".*InceptionV4/Mixed_5b", + ".*InceptionV4/Mixed_5c", + ".*InceptionV4/Mixed_5d" + ], + "match_kind": "scope" + } + ] + +The configuration file contains a list of dictionaries. Each dictionary defines one transformation. Each transformation +is defined with several parameters: + +* ``id`` - **(Mandatory)** — is a unique identifier of the transformation. It is used in the Python code that implements the transformation to link the class and the transformation description from the configuration file. +* ``match_kind`` - **(Mandatory)** — is a string that specifies the matching algorithm. For the node name pattern case, the value should be equal to ``scope``. Another possible values are described in the dedicated sections below. +* ``instances`` - **(Mandatory)** — specifies instances of the sub-graph to be matched. It contains a list of node names prefixes patterns for the match kind of the ``scope`` type. +* ``custom_attributes`` - **(Optional)** — is a dictionary with attributes that can be used in the transformation code. + +After running Model Optimizer with additional ``--tensorflow_custom_operations_config_update`` parameter pointing to +the template configuration file, the content of the file should be updated with two new sections ``inputs`` and ``outputs``. +The file content after the update is as follows: + +.. code-block:: json + + [ + { + "id": "InceptionBlockTransformation", + "custom_attributes": { + "attr1_key": "attr1_value", + "attr2_key": 123456 + }, + "instances": [ + ".*InceptionV4/Mixed_5b", + ".*InceptionV4/Mixed_5c", + ".*InceptionV4/Mixed_5d" + ], + "match_kind": "scope", + "inputs": [ + [ + { + "node": "Branch_2/Conv2d_0a_1x1/Conv2D$", + "port": 0 + }, + { + "node": "Branch_3/AvgPool_0a_3x3/AvgPool$", + "port": 0 + }, + { + "node": "Branch_1/Conv2d_0a_1x1/Conv2D$", + "port": 0 + }, + { + "node": "Branch_0/Conv2d_0a_1x1/Conv2D$", + "port": 0 + } + ] + ], + "outputs": [ + { + "node": "concat$", + "port": 0 + } + ] + } + ] + +The value for ``inputs`` key is a list of lists describing input tensors of the sub-graph. Each element of the top-level +list corresponds to one unique input tensor of the sub-graph. Each internal list describes a list of nodes consuming +this tensor and port numbers, where the tensor is consumed. 
Model Optimizer generates regular expressions for the input +nodes names to uniquely identify them in each instance of the sub-graph, defined by the ``instances``. Denote these nodes +as input nodes of the sub-graph. + +In the InceptionV4 topology, the ``InceptionV4/Mixed_5b`` block has four input tensors from outside of the sub-graph, +but all of them are produced by the ``InceptionV4/Mixed_5a/concat`` node. Therefore, the top-level list of the ``inputs`` +contains one list corresponding to this tensor. Four input nodes of the sub-graph consume the tensor produced by +``InceptionV4/Mixed_5a/concat`` node. In this case, all four input nodes consume input tensor into "port 0". + +The order of items in the internal list describing nodes does not matter, but the order of elements in the top-level +list is important. This order defines how Model Optimizer attaches input tensors to a new generated +node if the sub-graph is replaced with a single node. The ``i``-th input node of the sub-graph is obtained using +``match.single_input_node(i)`` call in the sub-graph transformation code. More information about API is given below. If it is +necessary to change the order of input tensors, the configuration file can be edited in the text editor. + +The value for the ``outputs`` key is a list describing nodes of the sub-graph producing tensor, that goes outside of the +sub-graph or does not have child nodes. Denote these nodes as output nodes of the sub-graph. The order of elements in +the list is important. The ``i``-th element of the list describes the ``i``-th output tensor of the sub-graph, which could be +obtained using ``match.output_node(i)`` call. The order of elements can be manually changed in the configuration file. +Model Optimizer uses this order to connect output edges if the sub-graph is replaced with a single node. + +For more examples of this type of transformation, refer to the :doc:`Converting TensorFlow Object Detection API Models ` guide. + +.. _start_end_points_front_phase_transformations: + +Front Phase Transformations Using Start and End Points +###################################################### + +This type of transformation is implemented using ``mo.front.tf.replacement.FrontReplacementFromConfigFileSubGraph`` as a +base class and works as follows: + +1. Prepare a JSON configuration file that defines the sub-graph to match, using two lists of node names: "start" and "end" nodes. +2. Model Optimizer executes the defined transformation **only** when you specify the path to the configuration file using the ``--transformations_config`` command-line parameter . Model Optimizer performs the following steps to match the sub-graph: + + 1. Starts a graph traversal from every start node following the direction of the graph edges. The search stops in an end node or in the case of a node without consumers. All visited nodes are added to the matched sub-graph. + 2. Starts another graph traversal from each non-start node of the sub-graph, i.e. every node except nodes from the "start" list. In this step, the edges are traversed in the opposite edge direction. All newly visited nodes are added to the matched sub-graph. This step is needed to add nodes required for calculation values of internal nodes of the matched sub-graph. + 3. Checks that all "end" nodes were reached from "start" nodes. If not, it exits with an error. + 4. Checks that there are no :doc:`Parameter ` operations among added nodes. If they exist, the sub-graph depends on the inputs of the model. 
Such configuration is considered incorrect so Model Optimizer exits with an error. + +This algorithm finds all nodes "between" start and end nodes and nodes needed for calculation of non-input nodes of the +matched sub-graph. + +The example of a JSON configuration file for a transformation with start and end points is +``extensions/front/tf/ssd_support_api_v1.15.json``: + +.. code-block:: json + + [ + { + "custom_attributes": { + "code_type": "caffe.PriorBoxParameter.CENTER_SIZE", + "pad_mode": "caffe.ResizeParameter.CONSTANT", + "resize_mode": "caffe.ResizeParameter.WARP", + "clip_before_nms": false, + "clip_after_nms": true + }, + "id": "ObjectDetectionAPISSDPostprocessorReplacement", + "include_inputs_to_sub_graph": true, + "include_outputs_to_sub_graph": true, + "instances": { + "end_points": [ + "detection_boxes", + "detection_scores", + "num_detections" + ], + "start_points": [ + "Postprocessor/Shape", + "Postprocessor/scale_logits", + "Postprocessor/Tile", + "Postprocessor/Reshape_1", + "Postprocessor/Cast_1" + ] + }, + "match_kind": "points" + } + ] + +The format of the file is similar to the one provided as an example in the +:ref:`Node Name Pattern Front Phase Transformations ` section. The difference is in +the value of the ``match_kind`` parameter, which should be equal to the ``points`` and the format of the ``instances`` parameter, +which should be a dictionary with two keys ``start_points`` and ``end_points``, defining start and end node names +respectively. + +.. note:: + The ``include_inputs_to_sub_graph`` and ``include_outputs_to_sub_graph`` parameters are redundant and should be always equal to ``true``. + +.. note:: + This sub-graph match algorithm has a limitation that each start node must have only one input. Therefore, it is not possible to specify, for example, the :doc:`Convolution ` node as input because it has two inputs: data tensor and tensor with weights. + +For other examples of transformations with points, refer to the +:doc:`Converting TensorFlow Object Detection API Models ` guide. + +.. _generic_transformations_config_front_phase_transformations: + +Generic Front Phase Transformations Enabled with Transformations Configuration File +################################################################################### + +This type of transformation works similarly to the :ref:`Generic Front Phase Transformations ` and +:ref:`Front Phase Transformations Using Start and End Points `. + +The base class for this type of transformation is +``mo.front.common.replacement.FrontReplacementFromConfigFileGeneral``. Model Optimizer executes the +``transform_graph(self, graph, replacement_descriptions)`` method and provides the ``Graph`` object and dictionary with values +parsed from the `custom_attributes` attribute of the provided JSON configuration file. + +The example of the configuration file for this type of transformation is ``extensions/front/tf/yolo_v1_tiny.json``: + +.. code-block:: json + + [ + { + "id": "TFYOLO", + "match_kind": "general", + "custom_attributes": { + "classes": 20, + "coords": 4, + "num": 2, + "do_softmax": 0 + } + } + ] + +and the corresponding transformation file is ``./extensions/front/YOLO.py``: + +.. 
code-block:: py
+   :force:
+
+   from openvino.tools.mo.front.no_op_eraser import NoOpEraser
+   from openvino.tools.mo.front.standalone_const_eraser import StandaloneConstEraser
+   from openvino.tools.mo.ops.regionyolo import RegionYoloOp
+   from openvino.tools.mo.front.tf.replacement import FrontReplacementFromConfigFileGeneral
+   from openvino.tools.mo.graph.graph import Node, Graph
+   from openvino.tools.mo.ops.result import Result
+   from openvino.tools.mo.utils.error import Error
+
+
+   class YoloRegionAddon(FrontReplacementFromConfigFileGeneral):
+       """
+       Replaces all Result nodes in graph with YoloRegion->Result nodes chain.
+       YoloRegion node attributes are taken from configuration file
+       """
+       replacement_id = 'TFYOLO'  # The identifier matching the "id" attribute in the JSON file.
+
+       def run_after(self):
+           return [NoOpEraser, StandaloneConstEraser]
+
+       def transform_graph(self, graph: Graph, replacement_descriptions):
+           op_outputs = [n for n, d in graph.nodes(data=True) if 'op' in d and d['op'] == 'Result']
+           for op_output in op_outputs:
+               last_node = Node(graph, op_output).in_node(0)
+               op_params = dict(name=last_node.id + '/YoloRegion', axis=1, end_axis=-1)
+               op_params.update(replacement_descriptions)
+               region_layer = RegionYoloOp(graph, op_params)
+               region_layer_node = region_layer.create_node([last_node])
+               # In here, 'axis' from 'dim_attrs' can be removed to avoid permutation from axis = 1 to axis = 2.
+               region_layer_node.dim_attrs.remove('axis')
+               Result(graph).create_node([region_layer_node])
+               graph.remove_node(op_output)
+
+The configuration file has only 3 parameters: the ``id`` identifier of the transformation, the ``match_kind`` (which should be equal
+to ``general``), and the ``custom_attributes`` dictionary with custom attributes accessible in the transformation.
+
+.. _mo_middle_phase_transformations:
+
+============================
+Middle Phase Transformations
+============================
+
+There are two types of middle phase transformations:
+
+1. :ref:`Pattern-Defined Middle Phase Transformations ` triggered for each sub-graph of the original graph, isomorphic to the specified pattern.
+2. :ref:`Generic Middle Phase Transformations `.
+
+.. _pattern_defined_middle_phase_transformations:
+
+Pattern-Defined Middle Phase Transformations
+############################################
+
+This type of transformation is implemented using ``mo.middle.replacement.MiddleReplacementPattern`` as a base class and
+works similarly to the :ref:`Pattern-Defined Front Phase Transformations `.
+There are two differences:
+
+1. The transformation entry function name is ``replace_pattern(self, graph, match)``.
+2. The pattern defining the graph should contain data nodes because the structure of the graph is different between front and middle phases. For more information about the graph structure changes, refer to the :ref:`Partial Inference `.
+
+For the example of a pattern-defined middle transformation, refer to the ``extensions/middle/L2NormToNorm.py`` file.
+
+.. _generic_middle_phase_transformations:
+
+Generic Middle Phase Transformations
+####################################
+
+Model Optimizer provides a mechanism to implement generic middle phase transformations. This type of transformation is
+implemented using ``mo.middle.replacement.MiddleReplacementPattern`` as a base class and works similarly to the
+:ref:`Generic Front Phase Transformations `. The only difference is that the
+transformation entry function name is ``find_and_replace_pattern(self, graph: Graph)``. 
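+
+For illustration only, a minimal skeleton of such a generic middle phase transformation could look as follows. The ``MarkSomeOpNodes`` class and the matched ``SomeOp`` operation are hypothetical names used for the sketch, not existing Model Optimizer extensions:
+
+.. code-block:: py
+   :force:
+
+   from openvino.tools.mo.graph.graph import Graph
+   from openvino.tools.mo.middle.replacement import MiddleReplacementPattern
+
+
+   class MarkSomeOpNodes(MiddleReplacementPattern):
+       enabled = True  # The transformation is registered and executed during the middle phase.
+
+       def run_after(self):  # Ordering constraints with respect to other transformations, if any.
+           return []
+
+       def run_before(self):
+           return []
+
+       def find_and_replace_pattern(self, graph: Graph):  # Entry point for a generic middle transformation.
+           # Iterate over the operation nodes of interest and update their attributes.
+           for node in graph.get_op_nodes(op='SomeOp'):
+               node['need_shape_inference'] = True  # For example, request shape re-inference for the node.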
+
+For the example of this transformation, refer to the ``extensions/middle/CheckForCycle.py`` file.
+
+.. _mo_back_phase_transformations:
+
+==========================
+Back Phase Transformations
+==========================
+
+There are two types of back phase transformations:
+
+1. :ref:`Pattern-Defined Back Phase Transformations ` triggered for each sub-graph of the original graph, isomorphic to the specified pattern.
+2. :ref:`Generic Back Phase Transformations `.
+
+.. note::
+   The graph layout during the back phase is always NCHW. However, during the front and middle phases it could be NHWC if the original model was using it. For more details, refer to :ref:`Model Conversion Pipeline `.
+
+.. _pattern_defined_back_phase_transformations:
+
+Pattern-Defined Back Phase Transformations
+##########################################
+
+This type of transformation is implemented using ``mo.back.replacement.BackReplacementPattern`` as a base class and
+works the same way as :ref:`Pattern-Defined Middle Phase Transformations `.
+
+For the example of a pattern-defined back transformation, refer to the ``extensions/back/ShufflenetReLUReorder.py`` file.
+
+.. _generic_back_phase_transformations:
+
+Generic Back Phase Transformations
+##################################
+
+Model Optimizer provides a mechanism to implement generic back phase transformations. This type of transformation is
+implemented using ``mo.back.replacement.BackReplacementPattern`` as a base class and works the same way as
+:ref:`Generic Middle Phase Transformations `.
+
+For the example of this transformation, refer to the ``extensions/back/GatherNormalizer.py`` file.
+
+====================
+Additional Resources
+====================
+
+* :doc:`Model Optimizer Extensibility `
+* :doc:`Graph Traversal and Modification Using Ports and Connections `
+* :doc:`Model Optimizer Extensions `
+* :doc:`Extending Model Optimizer with Caffe Python Layers `
+
+@endsphinxdirective
diff --git a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/customize_model_optimizer/model_optimizer_ports_connections.md b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/customize_model_optimizer/model_optimizer_ports_connections.md
new file mode 100644
index 00000000000000..65f4d76d28dbd5
--- /dev/null
+++ b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/customize_model_optimizer/model_optimizer_ports_connections.md
@@ -0,0 +1,188 @@
+# [LEGACY] Graph Traversal and Modification {#openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Customize_Model_Optimizer_Model_Optimizer_Ports_Connections}
+
+@sphinxdirective
+
+.. meta::
+   :description: Learn about deprecated APIs and the Port and Connection classes
+                 in Model Optimizer used for graph traversal and transformation.
+
+.. danger::
+
+   The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications.
+
+   This guide describes a deprecated TensorFlow conversion method. The guide on the new and recommended method, using a new frontend, can be found in the :doc:`Frontend Extensions ` article.
+
+There are three APIs for graph traversal and transformation used in the Model Optimizer:
+
+1. The API provided with the ``networkx`` Python library for the ``networkx.MultiDiGraph`` class, which is the base class for
+the ``mo.graph.graph.Graph`` object. 
For example, the following methods belong to this API level:
+
+* ``graph.add_edges_from([list])``,
+* ``graph.add_node(x, attrs)``,
+* ``graph.out_edges(node_id)``,
+* other methods where ``graph`` is an instance of the ``networkx.MultiDiGraph`` class.
+
+**This is the lowest-level API. Avoid using it in the Model Optimizer transformations**. For more details, refer to the :ref:`Model Representation in Memory ` section.
+
+2. The API built around the ``mo.graph.graph.Node`` class. The ``Node`` class is the primary class to work with graph nodes
+and their attributes. Examples of such methods and functions are:
+
+* ``node.in_node(y)``,
+* ``node.out_node(x)``,
+* ``node.get_outputs()``,
+* ``node.insert_node_after(n1, y)``,
+* ``create_edge(n1, n2)``
+
+**There are some "Node" class methods not recommended for use and some functions defined in the mo.graph.graph module have been deprecated**. For more details, refer to the ``mo/graph/graph.py`` file.
+
+3. The high-level API called Model Optimizer Graph API, which uses ``mo.graph.graph.Graph``, ``mo.graph.port.Port`` and
+``mo.graph.connection.Connection`` classes. For example, the following methods belong to this API level:
+
+* ``node.in_port(x)``,
+* ``node.out_port(y)``,
+* ``port.get_connection()``,
+* ``connection.get_source()``,
+* ``connection.set_destination(dest_port)``
+
+**This is the recommended API for the Model Optimizer transformations and operations implementation**.
+
+The main benefit of using the Model Optimizer Graph API is that it hides some internal implementation details (the fact that
+the graph contains data nodes), provides an API to perform safe and predictable graph manipulations, and adds operation
+semantics to the graph. This is achieved with the introduction of the concepts of ports and connections.
+
+.. note::
+   This article is dedicated to the Model Optimizer Graph API only and does not cover the other two non-recommended APIs.
+
+.. _mo_intro_ports:
+
+=====
+Ports
+=====
+
+The semantics of an operation describe how many inputs and outputs the operation has. For example,
+:doc:`Parameter ` and :doc:`Const ` operations have no
+inputs and have one output, the :doc:`ReLU ` operation has one input and one output, and the
+:doc:`Split ` operation has 2 inputs and a variable number of outputs depending on the value of the
+attribute ``num_splits``.
+
+Each operation node in the graph (an instance of the ``Node`` class) has 0 or more input and output ports (instances of
+the ``mo.graph.port.Port`` class). The ``Port`` object has several attributes:
+
+* ``node`` - the instance of the ``Node`` object the port belongs to.
+* ``idx`` - the port number. Input and output ports are numbered independently, starting from ``0``. Thus,
+  the :doc:`ReLU ` operation has one input port (with index ``0``) and one output port (with index ``0``).
+* ``type`` - the type of the port. Could be equal to either ``"in"`` or ``"out"``.
+* ``data`` - the object that should be used to get attributes of the corresponding data node. This object has methods ``get_shape()`` / ``set_shape()`` and ``get_value()`` / ``set_value()`` to get/set shape/value of the corresponding data node. For example, ``in_port.data.get_shape()`` returns an input shape of a tensor connected to input port ``in_port`` (``in_port.type == 'in'``), ``out_port.data.get_value()`` returns a value of a tensor produced from output port ``out_port`` (``out_port.type == 'out'``).
+
+.. note::
+   Functions ``get_shape()`` and ``get_value()`` return ``None`` until the partial inference phase. 
For more information about model conversion phases, refer to the :ref:`Model Conversion Pipeline `. For information about partial inference phase, see the :ref:`Partial Inference `. + +There are several methods of the ``Node`` class to get the instance of a corresponding port: + +* ``in_port(x)`` and ``out_port(x)`` to get the input/output port with number ``x``. +* ``in_ports()`` and ``out_ports()`` to get a dictionary, where key is a port number and the value is the corresponding input/output port. + +Attributes ``in_ports_count`` and ``out_ports_count`` of the ``Op`` class instance define default number of input and output +ports to be created for the ``Node``. However, additional input/output ports can be added using methods +``add_input_port()`` and ``add_output_port()``. Port also can be removed, using the ``delete_input_port()`` and +``delete_output_port()`` methods. + +The ``Port`` class is just an abstraction that works with edges incoming/outgoing to/from a specific ``Node`` instance. For +example, output port with ``idx = 1`` corresponds to the outgoing edge of a node with an attribute ``out = 1``, the input +port with ``idx = 2`` corresponds to the incoming edge of a node with an attribute ``in = 2``. + +Consider the example of a graph part with 4 operation nodes "Op1", "Op2", "Op3", and "Op4" and a number of data nodes +depicted with light green boxes. + +.. image:: _static/images/MO_ports_example_1.svg + :scale: 80 % + :align: center + +Operation nodes have input ports (yellow squares) and output ports (light purple squares). Input port may not be +connected. For example, the input **port 2** of node **Op1** does not have incoming edge, while output port always has an +associated data node (after the partial inference when the data nodes are added to the graph), which may have no +consumers. + +Ports can be used to traverse a graph. The method ``get_source()`` of an input port returns an output port producing the +tensor consumed by the input port. It is important that the method works the same during front, middle and back phases of a +model conversion even though the graph structure changes (there are no data nodes in the graph during the front phase). + +Let's assume that there are 4 instances of ``Node`` object ``op1, op2, op3``, and ``op4`` corresponding to nodes **Op1**, **Op2**, +**Op3**, and **Op4**, respectively. The result of ``op2.in_port(0).get_source()`` and ``op4.in_port(1).get_source()`` is the +same object ``op1.out_port(1)`` of type ``Port``. + +The method ``get_destination()`` of an output port returns the input port of the node consuming this tensor. If there are +multiple consumers of this tensor, the error is raised. The method ``get_destinations()`` of an output port returns a +list of input ports consuming the tensor. + +The method ``disconnect()`` removes a node incoming edge corresponding to the specific input port. The method removes +several edges if it is applied during the front phase for a node output port connected with multiple nodes. + +The method ``port.connect(another_port)`` connects output port ``port`` and input port ``another_port``. The method handles +situations when the graph contains data nodes (middle and back phases) and does not create an edge between two nodes +but also automatically creates data node or reuses existing data node. If the method is used during the front phase and +data nodes do not exist, the method creates edge and properly sets ``in`` and ``out`` edge attributes. 
+ +For example, applying the following two methods to the graph above will result in the graph depicted below: + +.. code-block:: py + :force: + + op4.in_port(1).disconnect() + op3.out_port(0).connect(op4.in_port(1)) + +.. image:: _static/images/MO_ports_example_2.svg + :scale: 80 % + :align: center + +.. note:: + For a full list of available methods, refer to the ``Node`` class implementation in the ``mo/graph/graph.py`` and ``Port`` class implementation in the ``mo/graph/port.py`` files. + +=========== +Connections +=========== + +Connection is a concept introduced to easily and reliably perform graph modifications. Connection corresponds to a +link between a source output port with one or more destination input ports or a link between a destination input port +and source output port producing data. So each port is connected with one or more ports with help of a connection. +Model Optimizer uses the ``mo.graph.connection.Connection`` class to represent a connection. + +There is only one ``get_connection()`` method of the ``Port`` class to get the instance of the corresponding ``Connection`` +object. If the port is not connected, the returned value is ``None``. + +For example, the ``op3.out_port(0).get_connection()`` method returns a ``Connection`` object encapsulating edges from node +**Op3** to data node **data_3_0** and two edges from data node **data_3_0** to two ports of the node **Op4**. + +The ``Connection`` class provides methods to get source and destination(s) ports the connection corresponds to: + +* ``connection.get_source()`` - returns an output ``Port`` object producing the tensor. +* ``connection.get_destinations()`` - returns a list of input ``Port`` consuming the data. +* ``connection.get_destination()`` - returns a single input ``Port`` consuming the data. If there are multiple consumers, the exception is raised. + +The ``Connection`` class provides methods to modify a graph by changing a source or destination(s) of a connection. For +example, the function call ``op3.out_port(0).get_connection().set_source(op1.out_port(0))`` changes source port of edges +consuming data from port ``op3.out_port(0)`` to ``op1.out_port(0)``. The transformed graph from the sample above is depicted +below: + +.. image:: _static/images/MO_connection_example_1.svg + :scale: 80 % + :align: center + +Another example is the ``connection.set_destination(dest_port)`` method. It disconnects ``dest_port`` and all input ports to which +the connection is currently connected and connects the connection source port to ``dest_port``. + +Note that connection works seamlessly during front, middle, and back phases and hides the fact that the graph structure is +different. + +.. note:: + For a full list of available methods, refer to the ``Connection`` class implementation in the ``mo/graph/connection.py`` file. 
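+
+To tie the ``Port`` and ``Connection`` methods together, here is a short, illustrative sketch (not an existing Model Optimizer transformation) that inserts a new operation between a producer node and all of its consumers. ``Mish`` is used only because it is a known unary operation with a single input and output:
+
+.. code-block:: py
+   :force:
+
+   from openvino.tools.mo.graph.graph import Graph, Node
+   from openvino.tools.mo.ops.activation_ops import Mish
+
+
+   def insert_mish_after(graph: Graph, node: Node):
+       # Create the new operation node.
+       mish = Mish(graph, {'name': node.soft_get('name', node.id) + '/Mish'}).create_node()
+       # Re-route every consumer of the "node" output port 0 so that it consumes the Mish output instead.
+       node.out_port(0).get_connection().set_source(mish.out_port(0))
+       # Feed the original tensor into the newly created node.
+       node.out_port(0).connect(mish.in_port(0))
+
+The order of the two calls matters: the connection source is changed first, while ``node.out_port(0)`` still feeds the original consumers, and only then is the now-free output port connected to the new node's input, which avoids creating a self-loop through the inserted operation.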
+ +==================== +Additional Resources +==================== + +* :doc:`Model Optimizer Extensibility ` +* :doc:`Model Optimizer Extensions ` +* :doc:`Extending Model Optimizer with Caffe Python Layers ` + +@endsphinxdirective diff --git a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/Legacy_Conversion_API.md b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api.md similarity index 100% rename from docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/Legacy_Conversion_API.md rename to docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api.md diff --git a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/Convert_Python_Model_Objects.md b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/convert_python_model_objects.md similarity index 100% rename from docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/Convert_Python_Model_Objects.md rename to docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/convert_python_model_objects.md diff --git a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/Cutting_Model.md b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/cutting_model.md similarity index 100% rename from docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/Cutting_Model.md rename to docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/cutting_model.md diff --git a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/Embedding_Preprocessing_Computation.md b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/embedding_preprocessing_computation.md similarity index 100% rename from docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/Embedding_Preprocessing_Computation.md rename to docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/embedding_preprocessing_computation.md diff --git a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/FP16_Compression.md b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/fp16_compression.md similarity index 100% rename from docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/FP16_Compression.md rename to docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/fp16_compression.md diff --git a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/Model_Optimizer_FAQ.md b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/model_optimizer_faq.md similarity index 100% rename from docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/Model_Optimizer_FAQ.md rename to docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/model_optimizer_faq.md diff --git 
a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/Setting_Input_Shapes.md b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/setting_input_shapes.md similarity index 100% rename from docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/Setting_Input_Shapes.md rename to docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/setting_input_shapes.md From 1f1841c683655116394e21b2eccda06251a995b7 Mon Sep 17 00:00:00 2001 From: Sebastian Golebiewski Date: Tue, 3 Oct 2023 08:34:44 +0200 Subject: [PATCH 031/257] Changing file structure of OpenVINO API 2.0 transition section (#20199) --- .../api_2_0_transition_guide}/common_inference_pipeline.md | 0 .../api_2_0_transition_guide}/configure_devices.md | 0 .../api_2_0_transition_guide}/deployment_migration.md | 0 .../api_2_0_transition_guide}/graph_construction.md | 0 .../api_2_0_transition_guide}/preprocessing.md | 0 5 files changed, 0 insertions(+), 0 deletions(-) rename docs/{OV_Runtime_UG/migration_ov_2_0 => articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide}/common_inference_pipeline.md (100%) rename docs/{OV_Runtime_UG/migration_ov_2_0 => articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide}/configure_devices.md (100%) rename docs/{OV_Runtime_UG/migration_ov_2_0 => articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide}/deployment_migration.md (100%) rename docs/{OV_Runtime_UG/migration_ov_2_0 => articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide}/graph_construction.md (100%) rename docs/{OV_Runtime_UG/migration_ov_2_0 => articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide}/preprocessing.md (100%) diff --git a/docs/OV_Runtime_UG/migration_ov_2_0/common_inference_pipeline.md b/docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide/common_inference_pipeline.md similarity index 100% rename from docs/OV_Runtime_UG/migration_ov_2_0/common_inference_pipeline.md rename to docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide/common_inference_pipeline.md diff --git a/docs/OV_Runtime_UG/migration_ov_2_0/configure_devices.md b/docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide/configure_devices.md similarity index 100% rename from docs/OV_Runtime_UG/migration_ov_2_0/configure_devices.md rename to docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide/configure_devices.md diff --git a/docs/OV_Runtime_UG/migration_ov_2_0/deployment_migration.md b/docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide/deployment_migration.md similarity index 100% rename from docs/OV_Runtime_UG/migration_ov_2_0/deployment_migration.md rename to docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide/deployment_migration.md diff --git a/docs/OV_Runtime_UG/migration_ov_2_0/graph_construction.md b/docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide/graph_construction.md similarity index 100% rename from docs/OV_Runtime_UG/migration_ov_2_0/graph_construction.md rename to docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide/graph_construction.md diff --git a/docs/OV_Runtime_UG/migration_ov_2_0/preprocessing.md 
b/docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide/preprocessing.md similarity index 100% rename from docs/OV_Runtime_UG/migration_ov_2_0/preprocessing.md rename to docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide/preprocessing.md From c7fb00a46156af7ef477108ea8c8f9c6b0724572 Mon Sep 17 00:00:00 2001 From: Sebastian Golebiewski Date: Tue, 3 Oct 2023 08:45:59 +0200 Subject: [PATCH 032/257] [DOCS] Changing file structure of Apache MXNet Caffe and Kaldi section (#20200) * Changing file structure of Apache MXNet Caffe and Kaldi section * Update --- .../mxnet_caffe_kaldi/aspire_tdnn_model.md} | 0 .../mxnet_caffe_kaldi/convert_gluoncv_models.md} | 0 .../mxnet_caffe_kaldi/convert_model_from_caffe.md} | 0 .../mxnet_caffe_kaldi/convert_model_from_kaldi.md} | 0 .../mxnet_caffe_kaldi/convert_model_from_mxnet.md} | 0 .../mxnet_caffe_kaldi/convert_style_transfer_from_mxnet.md} | 0 6 files changed, 0 insertions(+), 0 deletions(-) rename docs/{Documentation/openvino_legacy_features/mxnet_caffe_kaldi/Aspire_Tdnn_Model.md => articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi/aspire_tdnn_model.md} (100%) rename docs/{Documentation/openvino_legacy_features/mxnet_caffe_kaldi/Convert_GluonCV_Models.md => articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi/convert_gluoncv_models.md} (100%) rename docs/{Documentation/openvino_legacy_features/mxnet_caffe_kaldi/Convert_Model_From_Caffe.md => articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi/convert_model_from_caffe.md} (100%) rename docs/{Documentation/openvino_legacy_features/mxnet_caffe_kaldi/Convert_Model_From_Kaldi.md => articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi/convert_model_from_kaldi.md} (100%) rename docs/{Documentation/openvino_legacy_features/mxnet_caffe_kaldi/Convert_Model_From_MxNet.md => articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi/convert_model_from_mxnet.md} (100%) rename docs/{Documentation/openvino_legacy_features/mxnet_caffe_kaldi/Convert_Style_Transfer_From_MXNet.md => articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi/convert_style_transfer_from_mxnet.md} (100%) diff --git a/docs/Documentation/openvino_legacy_features/mxnet_caffe_kaldi/Aspire_Tdnn_Model.md b/docs/articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi/aspire_tdnn_model.md similarity index 100% rename from docs/Documentation/openvino_legacy_features/mxnet_caffe_kaldi/Aspire_Tdnn_Model.md rename to docs/articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi/aspire_tdnn_model.md diff --git a/docs/Documentation/openvino_legacy_features/mxnet_caffe_kaldi/Convert_GluonCV_Models.md b/docs/articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi/convert_gluoncv_models.md similarity index 100% rename from docs/Documentation/openvino_legacy_features/mxnet_caffe_kaldi/Convert_GluonCV_Models.md rename to docs/articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi/convert_gluoncv_models.md diff --git a/docs/Documentation/openvino_legacy_features/mxnet_caffe_kaldi/Convert_Model_From_Caffe.md b/docs/articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi/convert_model_from_caffe.md similarity index 100% rename from docs/Documentation/openvino_legacy_features/mxnet_caffe_kaldi/Convert_Model_From_Caffe.md rename to docs/articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi/convert_model_from_caffe.md diff --git 
a/docs/Documentation/openvino_legacy_features/mxnet_caffe_kaldi/Convert_Model_From_Kaldi.md b/docs/articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi/convert_model_from_kaldi.md similarity index 100% rename from docs/Documentation/openvino_legacy_features/mxnet_caffe_kaldi/Convert_Model_From_Kaldi.md rename to docs/articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi/convert_model_from_kaldi.md diff --git a/docs/Documentation/openvino_legacy_features/mxnet_caffe_kaldi/Convert_Model_From_MxNet.md b/docs/articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi/convert_model_from_mxnet.md similarity index 100% rename from docs/Documentation/openvino_legacy_features/mxnet_caffe_kaldi/Convert_Model_From_MxNet.md rename to docs/articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi/convert_model_from_mxnet.md diff --git a/docs/Documentation/openvino_legacy_features/mxnet_caffe_kaldi/Convert_Style_Transfer_From_MXNet.md b/docs/articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi/convert_style_transfer_from_mxnet.md similarity index 100% rename from docs/Documentation/openvino_legacy_features/mxnet_caffe_kaldi/Convert_Style_Transfer_From_MXNet.md rename to docs/articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi/convert_style_transfer_from_mxnet.md From e7435af7b0aeaf361f9b5a9db380f8989e492eca Mon Sep 17 00:00:00 2001 From: Maciej Smyk Date: Tue, 3 Oct 2023 09:41:44 +0200 Subject: [PATCH 033/257] Running Inference Restructure (#20195) --- .../openvino_workflow/openvino_intro}/Device_Plugins.md | 0 .../openvino_workflow/openvino_intro}/ShapeInference.md | 0 .../openvino_intro}/dldt_deployment_optimization_guide.md | 0 .../openvino_workflow/openvino_intro}/inference_modes_overview.md | 0 .../openvino_intro}/integrate_with_your_application.md | 0 .../openvino_workflow/openvino_intro}/model_state_intro.md | 0 .../openvino_workflow/openvino_intro}/ov_dynamic_shapes.md | 0 7 files changed, 0 insertions(+), 0 deletions(-) rename docs/{OV_Runtime_UG/supported_plugins => articles_en/openvino_workflow/openvino_intro}/Device_Plugins.md (100%) rename docs/{OV_Runtime_UG => articles_en/openvino_workflow/openvino_intro}/ShapeInference.md (100%) rename docs/{optimization_guide => articles_en/openvino_workflow/openvino_intro}/dldt_deployment_optimization_guide.md (100%) rename docs/{Documentation => articles_en/openvino_workflow/openvino_intro}/inference_modes_overview.md (100%) rename docs/{OV_Runtime_UG => articles_en/openvino_workflow/openvino_intro}/integrate_with_your_application.md (100%) rename docs/{OV_Runtime_UG => articles_en/openvino_workflow/openvino_intro}/model_state_intro.md (100%) rename docs/{OV_Runtime_UG => articles_en/openvino_workflow/openvino_intro}/ov_dynamic_shapes.md (100%) diff --git a/docs/OV_Runtime_UG/supported_plugins/Device_Plugins.md b/docs/articles_en/openvino_workflow/openvino_intro/Device_Plugins.md similarity index 100% rename from docs/OV_Runtime_UG/supported_plugins/Device_Plugins.md rename to docs/articles_en/openvino_workflow/openvino_intro/Device_Plugins.md diff --git a/docs/OV_Runtime_UG/ShapeInference.md b/docs/articles_en/openvino_workflow/openvino_intro/ShapeInference.md similarity index 100% rename from docs/OV_Runtime_UG/ShapeInference.md rename to docs/articles_en/openvino_workflow/openvino_intro/ShapeInference.md diff --git a/docs/optimization_guide/dldt_deployment_optimization_guide.md 
b/docs/articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide.md similarity index 100% rename from docs/optimization_guide/dldt_deployment_optimization_guide.md rename to docs/articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide.md diff --git a/docs/Documentation/inference_modes_overview.md b/docs/articles_en/openvino_workflow/openvino_intro/inference_modes_overview.md similarity index 100% rename from docs/Documentation/inference_modes_overview.md rename to docs/articles_en/openvino_workflow/openvino_intro/inference_modes_overview.md diff --git a/docs/OV_Runtime_UG/integrate_with_your_application.md b/docs/articles_en/openvino_workflow/openvino_intro/integrate_with_your_application.md similarity index 100% rename from docs/OV_Runtime_UG/integrate_with_your_application.md rename to docs/articles_en/openvino_workflow/openvino_intro/integrate_with_your_application.md diff --git a/docs/OV_Runtime_UG/model_state_intro.md b/docs/articles_en/openvino_workflow/openvino_intro/model_state_intro.md similarity index 100% rename from docs/OV_Runtime_UG/model_state_intro.md rename to docs/articles_en/openvino_workflow/openvino_intro/model_state_intro.md diff --git a/docs/OV_Runtime_UG/ov_dynamic_shapes.md b/docs/articles_en/openvino_workflow/openvino_intro/ov_dynamic_shapes.md similarity index 100% rename from docs/OV_Runtime_UG/ov_dynamic_shapes.md rename to docs/articles_en/openvino_workflow/openvino_intro/ov_dynamic_shapes.md From 492d4757283dbbc150f347e89bc8348630b84368 Mon Sep 17 00:00:00 2001 From: Maciej Smyk Date: Tue, 3 Oct 2023 09:41:56 +0200 Subject: [PATCH 034/257] Model Optimization Guide Restructure (#20188) --- .../model_optimization_guide}/ptq_introduction.md | 0 .../ptq_introduction}/basic_quantization_flow.md | 0 .../ptq_introduction}/quantization_w_accuracy_control.md | 0 .../model_optimization_guide/tmo_introduction.md} | 0 .../model_optimization_guide/tmo_introduction}/filter_pruning.md | 0 .../model_optimization_guide/tmo_introduction}/qat.md | 0 .../model_optimization_guide}/weight_compression.md | 0 7 files changed, 0 insertions(+), 0 deletions(-) rename docs/{optimization_guide => articles_en/openvino_workflow/model_optimization_guide}/ptq_introduction.md (100%) rename docs/{optimization_guide/nncf/ptq => articles_en/openvino_workflow/model_optimization_guide/ptq_introduction}/basic_quantization_flow.md (100%) rename docs/{optimization_guide/nncf/ptq => articles_en/openvino_workflow/model_optimization_guide/ptq_introduction}/quantization_w_accuracy_control.md (100%) rename docs/{optimization_guide/nncf/introduction.md => articles_en/openvino_workflow/model_optimization_guide/tmo_introduction.md} (100%) rename docs/{optimization_guide/nncf => articles_en/openvino_workflow/model_optimization_guide/tmo_introduction}/filter_pruning.md (100%) rename docs/{optimization_guide/nncf => articles_en/openvino_workflow/model_optimization_guide/tmo_introduction}/qat.md (100%) rename docs/{optimization_guide/nncf => articles_en/openvino_workflow/model_optimization_guide}/weight_compression.md (100%) diff --git a/docs/optimization_guide/ptq_introduction.md b/docs/articles_en/openvino_workflow/model_optimization_guide/ptq_introduction.md similarity index 100% rename from docs/optimization_guide/ptq_introduction.md rename to docs/articles_en/openvino_workflow/model_optimization_guide/ptq_introduction.md diff --git a/docs/optimization_guide/nncf/ptq/basic_quantization_flow.md 
b/docs/articles_en/openvino_workflow/model_optimization_guide/ptq_introduction/basic_quantization_flow.md similarity index 100% rename from docs/optimization_guide/nncf/ptq/basic_quantization_flow.md rename to docs/articles_en/openvino_workflow/model_optimization_guide/ptq_introduction/basic_quantization_flow.md diff --git a/docs/optimization_guide/nncf/ptq/quantization_w_accuracy_control.md b/docs/articles_en/openvino_workflow/model_optimization_guide/ptq_introduction/quantization_w_accuracy_control.md similarity index 100% rename from docs/optimization_guide/nncf/ptq/quantization_w_accuracy_control.md rename to docs/articles_en/openvino_workflow/model_optimization_guide/ptq_introduction/quantization_w_accuracy_control.md diff --git a/docs/optimization_guide/nncf/introduction.md b/docs/articles_en/openvino_workflow/model_optimization_guide/tmo_introduction.md similarity index 100% rename from docs/optimization_guide/nncf/introduction.md rename to docs/articles_en/openvino_workflow/model_optimization_guide/tmo_introduction.md diff --git a/docs/optimization_guide/nncf/filter_pruning.md b/docs/articles_en/openvino_workflow/model_optimization_guide/tmo_introduction/filter_pruning.md similarity index 100% rename from docs/optimization_guide/nncf/filter_pruning.md rename to docs/articles_en/openvino_workflow/model_optimization_guide/tmo_introduction/filter_pruning.md diff --git a/docs/optimization_guide/nncf/qat.md b/docs/articles_en/openvino_workflow/model_optimization_guide/tmo_introduction/qat.md similarity index 100% rename from docs/optimization_guide/nncf/qat.md rename to docs/articles_en/openvino_workflow/model_optimization_guide/tmo_introduction/qat.md diff --git a/docs/optimization_guide/nncf/weight_compression.md b/docs/articles_en/openvino_workflow/model_optimization_guide/weight_compression.md similarity index 100% rename from docs/optimization_guide/nncf/weight_compression.md rename to docs/articles_en/openvino_workflow/model_optimization_guide/weight_compression.md From 1f6617e05d7c606384d6c5a70ffffe3e4edb7707 Mon Sep 17 00:00:00 2001 From: Maciej Smyk Date: Tue, 3 Oct 2023 09:42:09 +0200 Subject: [PATCH 035/257] model-preparation-restructured (#20186) --- .../openvino_workflow/model_introduction}/Converting_Model.md | 0 .../model_introduction}/Deep_Learning_Model_Optimizer_DevGuide.md | 0 .../model_introduction}/supported_model_formats.md | 0 .../supported_model_formats}/Convert_Model_From_ONNX.md | 0 .../supported_model_formats}/Convert_Model_From_Paddle.md | 0 .../supported_model_formats}/Convert_Model_From_PyTorch.md | 0 .../supported_model_formats}/Convert_Model_From_TensorFlow.md | 0 .../Convert_Model_From_TensorFlow_Lite.md | 0 8 files changed, 0 insertions(+), 0 deletions(-) rename docs/{OV_Converter_UG/prepare_model/convert_model => articles_en/openvino_workflow/model_introduction}/Converting_Model.md (100%) rename docs/{OV_Converter_UG => articles_en/openvino_workflow/model_introduction}/Deep_Learning_Model_Optimizer_DevGuide.md (100%) rename docs/{OV_Converter_UG/prepare_model/convert_model => articles_en/openvino_workflow/model_introduction}/supported_model_formats.md (100%) rename docs/{OV_Converter_UG/prepare_model/convert_model => articles_en/openvino_workflow/model_introduction/supported_model_formats}/Convert_Model_From_ONNX.md (100%) rename docs/{OV_Converter_UG/prepare_model/convert_model => articles_en/openvino_workflow/model_introduction/supported_model_formats}/Convert_Model_From_Paddle.md (100%) rename docs/{OV_Converter_UG/prepare_model/convert_model => 
articles_en/openvino_workflow/model_introduction/supported_model_formats}/Convert_Model_From_PyTorch.md (100%) rename docs/{OV_Converter_UG/prepare_model/convert_model => articles_en/openvino_workflow/model_introduction/supported_model_formats}/Convert_Model_From_TensorFlow.md (100%) rename docs/{OV_Converter_UG/prepare_model/convert_model => articles_en/openvino_workflow/model_introduction/supported_model_formats}/Convert_Model_From_TensorFlow_Lite.md (100%) diff --git a/docs/OV_Converter_UG/prepare_model/convert_model/Converting_Model.md b/docs/articles_en/openvino_workflow/model_introduction/Converting_Model.md similarity index 100% rename from docs/OV_Converter_UG/prepare_model/convert_model/Converting_Model.md rename to docs/articles_en/openvino_workflow/model_introduction/Converting_Model.md diff --git a/docs/OV_Converter_UG/Deep_Learning_Model_Optimizer_DevGuide.md b/docs/articles_en/openvino_workflow/model_introduction/Deep_Learning_Model_Optimizer_DevGuide.md similarity index 100% rename from docs/OV_Converter_UG/Deep_Learning_Model_Optimizer_DevGuide.md rename to docs/articles_en/openvino_workflow/model_introduction/Deep_Learning_Model_Optimizer_DevGuide.md diff --git a/docs/OV_Converter_UG/prepare_model/convert_model/supported_model_formats.md b/docs/articles_en/openvino_workflow/model_introduction/supported_model_formats.md similarity index 100% rename from docs/OV_Converter_UG/prepare_model/convert_model/supported_model_formats.md rename to docs/articles_en/openvino_workflow/model_introduction/supported_model_formats.md diff --git a/docs/OV_Converter_UG/prepare_model/convert_model/Convert_Model_From_ONNX.md b/docs/articles_en/openvino_workflow/model_introduction/supported_model_formats/Convert_Model_From_ONNX.md similarity index 100% rename from docs/OV_Converter_UG/prepare_model/convert_model/Convert_Model_From_ONNX.md rename to docs/articles_en/openvino_workflow/model_introduction/supported_model_formats/Convert_Model_From_ONNX.md diff --git a/docs/OV_Converter_UG/prepare_model/convert_model/Convert_Model_From_Paddle.md b/docs/articles_en/openvino_workflow/model_introduction/supported_model_formats/Convert_Model_From_Paddle.md similarity index 100% rename from docs/OV_Converter_UG/prepare_model/convert_model/Convert_Model_From_Paddle.md rename to docs/articles_en/openvino_workflow/model_introduction/supported_model_formats/Convert_Model_From_Paddle.md diff --git a/docs/OV_Converter_UG/prepare_model/convert_model/Convert_Model_From_PyTorch.md b/docs/articles_en/openvino_workflow/model_introduction/supported_model_formats/Convert_Model_From_PyTorch.md similarity index 100% rename from docs/OV_Converter_UG/prepare_model/convert_model/Convert_Model_From_PyTorch.md rename to docs/articles_en/openvino_workflow/model_introduction/supported_model_formats/Convert_Model_From_PyTorch.md diff --git a/docs/OV_Converter_UG/prepare_model/convert_model/Convert_Model_From_TensorFlow.md b/docs/articles_en/openvino_workflow/model_introduction/supported_model_formats/Convert_Model_From_TensorFlow.md similarity index 100% rename from docs/OV_Converter_UG/prepare_model/convert_model/Convert_Model_From_TensorFlow.md rename to docs/articles_en/openvino_workflow/model_introduction/supported_model_formats/Convert_Model_From_TensorFlow.md diff --git a/docs/OV_Converter_UG/prepare_model/convert_model/Convert_Model_From_TensorFlow_Lite.md b/docs/articles_en/openvino_workflow/model_introduction/supported_model_formats/Convert_Model_From_TensorFlow_Lite.md similarity index 100% rename from 
docs/OV_Converter_UG/prepare_model/convert_model/Convert_Model_From_TensorFlow_Lite.md rename to docs/articles_en/openvino_workflow/model_introduction/supported_model_formats/Convert_Model_From_TensorFlow_Lite.md From 9a9c74f6c6fbec2dc8be2b2a3463efdd6df21761 Mon Sep 17 00:00:00 2001 From: Maciej Smyk Date: Tue, 3 Oct 2023 09:42:18 +0200 Subject: [PATCH 036/257] Deployment guide restructure (#20201) --- .../deployment_intro}/conditional_compilation_deployment.md | 0 .../deployment_intro}/deployment-manager-tool.md | 0 .../openvino_workflow/deployment_intro}/local-distribution.md | 0 3 files changed, 0 insertions(+), 0 deletions(-) rename docs/{OV_Runtime_UG/deployment => articles_en/openvino_workflow/deployment_intro}/conditional_compilation_deployment.md (100%) rename docs/{OV_Runtime_UG/deployment => articles_en/openvino_workflow/deployment_intro}/deployment-manager-tool.md (100%) rename docs/{OV_Runtime_UG/deployment => articles_en/openvino_workflow/deployment_intro}/local-distribution.md (100%) diff --git a/docs/OV_Runtime_UG/deployment/conditional_compilation_deployment.md b/docs/articles_en/openvino_workflow/deployment_intro/conditional_compilation_deployment.md similarity index 100% rename from docs/OV_Runtime_UG/deployment/conditional_compilation_deployment.md rename to docs/articles_en/openvino_workflow/deployment_intro/conditional_compilation_deployment.md diff --git a/docs/OV_Runtime_UG/deployment/deployment-manager-tool.md b/docs/articles_en/openvino_workflow/deployment_intro/deployment-manager-tool.md similarity index 100% rename from docs/OV_Runtime_UG/deployment/deployment-manager-tool.md rename to docs/articles_en/openvino_workflow/deployment_intro/deployment-manager-tool.md diff --git a/docs/OV_Runtime_UG/deployment/local-distribution.md b/docs/articles_en/openvino_workflow/deployment_intro/local-distribution.md similarity index 100% rename from docs/OV_Runtime_UG/deployment/local-distribution.md rename to docs/articles_en/openvino_workflow/deployment_intro/local-distribution.md From f8881dd2a47712d08d71ee2341f154c1f4e897cf Mon Sep 17 00:00:00 2001 From: Pawel Raasz Date: Tue, 3 Oct 2023 10:55:20 +0200 Subject: [PATCH 037/257] [core]Api 2.0/migrate Subtract op to new API (#20108) * Migrate Subtract to new API * Sync has_evaluate precision with evaluate --- src/core/include/openvino/op/subtract.hpp | 4 +- .../include/openvino/reference/subtract.hpp | 27 +++-- src/core/src/op/subtract.cpp | 109 ++++++++---------- 3 files changed, 67 insertions(+), 73 deletions(-) diff --git a/src/core/include/openvino/op/subtract.hpp b/src/core/include/openvino/op/subtract.hpp index 5fd58da3bd6ff5..63da458f23a9e5 100644 --- a/src/core/include/openvino/op/subtract.hpp +++ b/src/core/include/openvino/op/subtract.hpp @@ -27,9 +27,7 @@ class OPENVINO_API Subtract : public util::BinaryElementwiseArithmetic { const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - OPENVINO_SUPPRESS_DEPRECATED_START - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - OPENVINO_SUPPRESS_DEPRECATED_END + bool evaluate(TensorVector& outputs, const TensorVector& inputs) const override; bool has_evaluate() const override; }; } // namespace v1 diff --git a/src/core/reference/include/openvino/reference/subtract.hpp b/src/core/reference/include/openvino/reference/subtract.hpp index 1e5454bb64ae3d..2051dd1874d510 100644 --- 
a/src/core/reference/include/openvino/reference/subtract.hpp +++ b/src/core/reference/include/openvino/reference/subtract.hpp @@ -4,31 +4,36 @@ #pragma once -#include +#include +#include -#include "ngraph/op/util/attr_types.hpp" -#include "ngraph/shape.hpp" #include "openvino/reference/autobroadcast_binop.hpp" namespace ov { namespace reference { -template +template void subtract(const T* arg0, const T* arg1, T* out, size_t count) { - for (size_t i = 0; i < count; i++) { - out[i] = arg0[i] - arg1[i]; - } + std::transform(arg0, std::next(arg0, count), arg1, out, std::minus()); } -template +/** + * @brief Reference implementation of binary elementwise Subtract operator. + * + * @param arg0 Pointer to input 0 data. + * @param arg1 Pointer to input 1 data. + * @param out Pointer to output data. + * @param arg_shape0 Input 0 shape. + * @param arg_shape1 Input 1 shape. + * @param broadcast_spec Broadcast specification mode. + */ +template void subtract(const T* arg0, const T* arg1, T* out, const Shape& arg0_shape, const Shape& arg1_shape, const op::AutoBroadcastSpec& broadcast_spec) { - autobroadcast_binop(arg0, arg1, out, arg0_shape, arg1_shape, broadcast_spec, [](T x, T y) -> T { - return x - y; - }); + autobroadcast_binop(arg0, arg1, out, arg0_shape, arg1_shape, broadcast_spec, std::minus()); } } // namespace reference } // namespace ov diff --git a/src/core/src/op/subtract.cpp b/src/core/src/op/subtract.cpp index 7986bb060d0e99..6538918f9f14e2 100644 --- a/src/core/src/op/subtract.cpp +++ b/src/core/src/op/subtract.cpp @@ -2,89 +2,80 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/subtract.hpp" +#include "openvino/op/subtract.hpp" +#include "element_visitor.hpp" #include "itt.hpp" -#include "ngraph/op/negative.hpp" -#include "ngraph/runtime/host_tensor.hpp" -#include "ngraph/validation_util.hpp" #include "openvino/reference/subtract.hpp" +#include "utils.hpp" -using namespace std; -using namespace ngraph; - -OPENVINO_SUPPRESS_DEPRECATED_START +namespace ov { +namespace op { namespace subtract { -namespace { -template -bool evaluate(const HostTensorPtr& arg0, - const HostTensorPtr& arg1, - const HostTensorPtr& out, - const op::AutoBroadcastSpec& broadcast_spec) { - ov::reference::subtract(arg0->get_data_ptr(), - arg1->get_data_ptr(), - out->get_data_ptr(), - arg0->get_shape(), - arg1->get_shape(), - broadcast_spec); - return true; -} +struct Evaluate : element::NoAction { + using element::NoAction::visit; -bool evaluate_subtract(const HostTensorPtr& arg0, - const HostTensorPtr& arg1, - const HostTensorPtr& out, - const op::AutoBroadcastSpec& broadcast_spec) { - bool rc = true; - out->set_broadcast(broadcast_spec, arg0, arg1); - switch (arg0->get_element_type()) { - NGRAPH_TYPE_CASE(evaluate_subtract, i8, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_subtract, i32, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_subtract, i64, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_subtract, u8, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_subtract, u32, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_subtract, u64, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_subtract, f16, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_subtract, f32, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_subtract, bf16, arg0, arg1, out, broadcast_spec); - default: - rc = false; - break; + template + static result_type visit(const Tensor& in0, + const Tensor& in1, + Tensor& out, + const 
AutoBroadcastSpec& broadcast_spec) { + using T = typename element_type_traits::value_type; + reference::subtract(in0.data(), + in1.data(), + out.data(), + in0.get_shape(), + in1.get_shape(), + broadcast_spec); + return true; } - return rc; -} -} // namespace +}; } // namespace subtract // ------------------------------- v1 ------------------------------------------ -op::v1::Subtract::Subtract(const Output& arg0, const Output& arg1, const AutoBroadcastSpec& auto_broadcast) +namespace v1 { +Subtract::Subtract(const Output& arg0, const Output& arg1, const AutoBroadcastSpec& auto_broadcast) : BinaryElementwiseArithmetic(arg0, arg1, auto_broadcast) { constructor_validate_and_infer_types(); } -shared_ptr op::v1::Subtract::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr Subtract::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v1_Subtract_clone_with_new_inputs); check_new_args_count(this, new_args); - return make_shared(new_args.at(0), new_args.at(1), this->get_autob()); + return std::make_shared(new_args.at(0), new_args.at(1), get_autob()); } -bool op::v1::Subtract::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { +bool Subtract::evaluate(TensorVector& outputs, const TensorVector& inputs) const { OV_OP_SCOPE(v1_Subtract_evaluate); - return subtract::evaluate_subtract(inputs[0], inputs[1], outputs[0], get_autob()); + OPENVINO_ASSERT(outputs.size() == 1); + OPENVINO_ASSERT(inputs.size() == 2); + + outputs[0].set_shape(infer_broadcast_shape(this, inputs[0].get_shape(), inputs[1].get_shape())); + using namespace ov::element; + return IfTypeOf::apply(inputs[0].get_element_type(), + inputs[0], + inputs[1], + outputs[0], + get_autob()); } -bool op::v1::Subtract::has_evaluate() const { +bool Subtract::has_evaluate() const { OV_OP_SCOPE(v1_Subtract_has_evaluate); switch (get_input_element_type(0)) { - case ngraph::element::i32: - case ngraph::element::i64: - case ngraph::element::u32: - case ngraph::element::u64: - case ngraph::element::f16: - case ngraph::element::f32: - case ngraph::element::bf16: + case element::bf16: + case element::f16: + case element::f32: + case element::i8: + case element::i32: + case element::i64: + case element::u8: + case element::u32: + case element::u64: return true; default: - break; + return false; } - return false; } +} // namespace v1 +} // namespace op +} // namespace ov From 893710d08f7e62bade835ece271c67a89de05ee7 Mon Sep 17 00:00:00 2001 From: Sebastian Golebiewski Date: Tue, 3 Oct 2023 11:42:53 +0200 Subject: [PATCH 038/257] Changing file structure of Post-Training Optimization Tool section (#20207) --- .../{pot_introduction.md => post_training_optimization_tool.md} | 0 .../post_training_optimization_tool/quantizing_models.md | 0 .../quantizing_models/default_quantization_algorithm.md | 0 3 files changed, 0 insertions(+), 0 deletions(-) rename docs/articles_en/documentation/openvino_legacy_features/{pot_introduction.md => post_training_optimization_tool.md} (100%) rename tools/pot/docs/DefaultQuantizationUsage.md => docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/quantizing_models.md (100%) rename tools/pot/openvino/tools/pot/algorithms/quantization/default/README.md => docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/quantizing_models/default_quantization_algorithm.md (100%) diff --git a/docs/articles_en/documentation/openvino_legacy_features/pot_introduction.md 
b/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool.md similarity index 100% rename from docs/articles_en/documentation/openvino_legacy_features/pot_introduction.md rename to docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool.md diff --git a/tools/pot/docs/DefaultQuantizationUsage.md b/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/quantizing_models.md similarity index 100% rename from tools/pot/docs/DefaultQuantizationUsage.md rename to docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/quantizing_models.md diff --git a/tools/pot/openvino/tools/pot/algorithms/quantization/default/README.md b/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/quantizing_models/default_quantization_algorithm.md similarity index 100% rename from tools/pot/openvino/tools/pot/algorithms/quantization/default/README.md rename to docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/quantizing_models/default_quantization_algorithm.md From 35e72251e9e1f5d1442ce6893039e4ceebeafd90 Mon Sep 17 00:00:00 2001 From: Maxim Vafin Date: Tue, 3 Oct 2023 11:52:29 +0200 Subject: [PATCH 039/257] [PT FE] Add support for aten::numpy_T and aten::feature_dropout (#20136) * Add support for aten::numpy_t and aten::feature_dropout * Update tests/layer_tests/pytorch_tests/test_transpose.py Co-authored-by: Ekaterina Aidova --------- Co-authored-by: Ekaterina Aidova --- src/frontends/pytorch/src/op_table.cpp | 2 ++ .../pytorch_tests/test_transpose.py | 19 ++++++++++++------- 2 files changed, 14 insertions(+), 7 deletions(-) diff --git a/src/frontends/pytorch/src/op_table.cpp b/src/frontends/pytorch/src/op_table.cpp index 5d85bf05dcab79..bbad312b74ed4e 100644 --- a/src/frontends/pytorch/src/op_table.cpp +++ b/src/frontends/pytorch/src/op_table.cpp @@ -301,6 +301,7 @@ const std::map get_supported_ops_ts() { {"aten::eye", op::translate_eye}, {"aten::fake_quantize_per_channel_affine", op::translate_fake_quantize_per_channel_affine}, {"aten::fake_quantize_per_tensor_affine", op::translate_fake_quantize_per_tensor_affine}, + {"aten::feature_dropout", op::skip_node}, {"aten::fill_", op::inplace_op}, {"aten::flatten", op::quantizable_op}, {"aten::flip", op::translate_flip}, @@ -384,6 +385,7 @@ const std::map get_supported_ops_ts() { {"aten::nonzero", op::translate_nonzero}, {"aten::norm", op::translate_norm}, {"aten::numel", op::translate_numel}, + {"aten::numpy_T", op::translate_t}, {"aten::one_hot", op::translate_one_hot}, {"aten::ones", op::translate_ones}, {"aten::ones_like", op::translate_ones_like}, diff --git a/tests/layer_tests/pytorch_tests/test_transpose.py b/tests/layer_tests/pytorch_tests/test_transpose.py index 9f9652053182cb..b3378761da8c74 100644 --- a/tests/layer_tests/pytorch_tests/test_transpose.py +++ b/tests/layer_tests/pytorch_tests/test_transpose.py @@ -57,12 +57,14 @@ def _prepare_input(self, num_dims=2, input_dtype="float32"): return (np.array(num_dims).astype(input_dtype),) return (np.random.randn(*shape[:num_dims]).astype(input_dtype),) - def create_model(self, num_dims=2, inplace=False): + def create_model(self, mode): class aten_transpose(torch.nn.Module): - def __init__(self, inplace): + def __init__(self, mode): super(aten_transpose, self).__init__() - if inplace: + if mode == "inplace": self.forward = self.forward_inplace + elif mode == "numpy": + self.forward = self.forward_numpy_t def 
forward(self, x): return x.t(), x @@ -70,18 +72,21 @@ def forward(self, x): def forward_inplace(self, x): return x.t_(), x + def forward_numpy_t(self, x): + return x.T, x + ref_net = None - return aten_transpose(inplace), ref_net, "aten::t" if not inplace else "aten::t_" + return aten_transpose(mode), ref_net, "aten::t_" if mode == "inplace" else ("aten::numpy_T" if mode == "numpy" else "aten::t") @pytest.mark.parametrize("num_dims", [0, 1, 2]) @pytest.mark.parametrize("input_dtype", ["float32", "int32"]) - @pytest.mark.parametrize("inplace", [True, False]) + @pytest.mark.parametrize("mode", [None, "inplace", "numpy"]) @pytest.mark.nightly @pytest.mark.precommit - def test_t_small(self, num_dims, input_dtype, inplace, ie_device, precision, ir_version): + def test_t_small(self, num_dims, input_dtype, mode, ie_device, precision, ir_version): self._test( - *self.create_model(num_dims, inplace), + *self.create_model(mode), ie_device, precision, ir_version, From 09e642a9e51f47f7bd928657588359c0494fc6d5 Mon Sep 17 00:00:00 2001 From: Oleg Pipikin Date: Tue, 3 Oct 2023 11:56:12 +0200 Subject: [PATCH 040/257] Refactor FakeQuantizeLayerTest, ExtractImagePatchesTest, GatherNDLayerTest, GatherTreeLayerTest (#20016) * Refactor FakeQuantizeLayerTest * Refactor ExtractImagePatchesTest * Refactor GatherNDLayerTest * Refactor GatherTreeLayerTest * Apply comments * Apply comments --------- Co-authored-by: Pavel Durandin --- .../extract_image_patches.cpp | 34 ++-- .../single_layer_tests/fake_quantize.cpp | 169 ++++++++---------- .../single_layer_tests/gather_nd.cpp | 102 +++++------ .../single_layer_tests/gather_tree.cpp | 31 ++-- .../single_op_tests/extract_image_patches.hpp | 15 ++ .../include/single_op_tests/fake_quantize.hpp | 16 ++ .../include/single_op_tests/gather_nd.hpp | 19 ++ .../include/single_op_tests/gather_tree.hpp | 15 ++ .../single_op/extract_image_patches.hpp | 34 ++++ .../single_op/fake_quantize.hpp | 37 ++++ .../single_op/gather_nd.hpp | 43 +++++ .../single_op/gather_tree.hpp | 31 ++++ .../src/base/utils/generate_inputs.cpp | 4 +- .../src/single_op/extract_image_patches.cpp | 59 ++++++ .../src/single_op/fake_quantize.cpp | 88 +++++++++ .../src/single_op/gather_nd.cpp | 77 ++++++++ .../src/single_op/gather_tree.cpp | 74 ++++++++ 17 files changed, 663 insertions(+), 185 deletions(-) create mode 100644 src/tests/functional/plugin/shared/include/single_op_tests/extract_image_patches.hpp create mode 100644 src/tests/functional/plugin/shared/include/single_op_tests/fake_quantize.hpp create mode 100644 src/tests/functional/plugin/shared/include/single_op_tests/gather_nd.hpp create mode 100644 src/tests/functional/plugin/shared/include/single_op_tests/gather_tree.hpp create mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/extract_image_patches.hpp create mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/fake_quantize.hpp create mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/gather_nd.hpp create mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/gather_tree.hpp create mode 100644 src/tests/functional/shared_test_classes/src/single_op/extract_image_patches.cpp create mode 100644 src/tests/functional/shared_test_classes/src/single_op/fake_quantize.cpp create mode 100644 src/tests/functional/shared_test_classes/src/single_op/gather_nd.cpp create mode 100644 src/tests/functional/shared_test_classes/src/single_op/gather_tree.cpp diff 
--git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/extract_image_patches.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/extract_image_patches.cpp index 917c8c27abb225..b03d22476e3122 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/extract_image_patches.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/extract_image_patches.cpp @@ -4,38 +4,34 @@ #include -#include "single_layer_tests/extract_image_patches.hpp" - -using namespace LayerTestsDefinitions; -using ngraph::op::PadType; +#include "single_op_tests/extract_image_patches.hpp" namespace { +using ov::test::ExtractImagePatchesTest; +using ov::op::PadType; -const std::vector> inDataShape = {{1, 1, 10, 10}, {1, 3, 10, 10}}; +const std::vector> shapes_static = {{{1, 1, 10, 10}}, {{1, 3, 10, 10}}}; const std::vector> kernels = {{2, 2}, {3, 3}, {4, 4}, {1, 3}, {4, 2}}; const std::vector> strides = {{3, 3}, {5, 5}, {9, 9}, {1, 3}, {6, 2}}; const std::vector> rates = {{1, 1}, {1, 2}, {2, 1}, {2, 2}}; -const std::vector autoPads = {PadType::VALID, PadType::SAME_UPPER, PadType::SAME_LOWER}; -const std::vector netPrecisions = { - InferenceEngine::Precision::I8, - InferenceEngine::Precision::U8, - InferenceEngine::Precision::BF16, - InferenceEngine::Precision::I32, - InferenceEngine::Precision::FP32, - InferenceEngine::Precision::I64 +const std::vector auto_pads = {PadType::VALID, PadType::SAME_UPPER, PadType::SAME_LOWER}; +const std::vector model_types = { + ov::element::i8, + ov::element::u8, + ov::element::bf16, + ov::element::i32, + ov::element::f32, + ov::element::i64 }; INSTANTIATE_TEST_SUITE_P(smoke_layers_CPU, ExtractImagePatchesTest, ::testing::Combine( - ::testing::ValuesIn(inDataShape), + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(shapes_static)), ::testing::ValuesIn(kernels), ::testing::ValuesIn(strides), ::testing::ValuesIn(rates), - ::testing::ValuesIn(autoPads), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Layout::ANY), + ::testing::ValuesIn(auto_pads), + ::testing::ValuesIn(model_types), ::testing::Values(ov::test::utils::DEVICE_CPU)), ExtractImagePatchesTest::getTestCaseName); diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/fake_quantize.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/fake_quantize.cpp index 1259872fd8320f..5063afdfda002c 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/fake_quantize.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/fake_quantize.cpp @@ -4,147 +4,126 @@ #include -#include "single_layer_tests/fake_quantize.hpp" +#include "single_op_tests/fake_quantize.hpp" #include "common_test_utils/test_constants.hpp" -using namespace LayerTestsDefinitions; - namespace { +using ov::test::FakeQuantizeLayerTest; -const ngraph::op::AutoBroadcastSpec numpyBroadcast = ngraph::op::AutoBroadcastType::NUMPY; +const ov::op::AutoBroadcastSpec numpy_broadcast = ov::op::AutoBroadcastType::NUMPY; -const ngraph::op::AutoBroadcastSpec noneBroadcast = ngraph::op::AutoBroadcastType::NONE; +const ov::op::AutoBroadcastSpec none_broadcast = ov::op::AutoBroadcastType::NONE; -const std::vector broadcasts = { - 
{ngraph::op::AutoBroadcastType::NUMPY}, - {ngraph::op::AutoBroadcastType::PDPD, -1}, +const std::vector broadcasts = { + {ov::op::AutoBroadcastType::NUMPY}, + {ov::op::AutoBroadcastType::PDPD, -1}, }; -const std::vector - netPrecisions = {InferenceEngine::Precision::FP32, - InferenceEngine::Precision::FP16}; - -const std::vector> inputShapes = {{1, 1}, {2, 6}, {1, 1, 1}, {2, 6, 13}, - {1, 1, 1, 1}, {3, 10, 5, 6}, {2, 8, 5, 18}, {2, 16, 3, 18}, {3, 49, 5, 6}, - {1, 1, 1, 1, 1}, {3, 10, 2, 5, 6}, {2, 8, 1, 5, 18}, {2, 16, 4, 3, 18}, {3, 49, 7, 5, 6}}; -const std::vector> constShapes = {{1}}; +const std::vector model_types = + {ov::element::f32, + ov::element::f16}; + +const std::vector> shapes_static = + {{{1, 1}}, + {{2, 6}}, + {{1, 1, 1}}, + {{2, 6, 13}}, + {{1, 1, 1, 1}}, + {{3, 10, 5, 6}}, + {{2, 8, 5, 18}}, + {{2, 16, 3, 18}}, + {{3, 49, 5, 6}}, + {{1, 1, 1, 1, 1}}, + {{3, 10, 2, 5, 6}}, + {{2, 8, 1, 5, 18}}, + {{2, 16, 4, 3, 18}}, + {{3, 49, 7, 5, 6}}}; +const std::vector> const_shapes = {{1}}; const std::vector levels = {16, 255, 256}; -const std::pair> config = {}; -const std::vector fqArgs = {}; -const std::vector inputParams = {}; +const std::vector fq_args = {}; -const auto fqParams = ::testing::Combine( +const auto fq_params = ::testing::Combine( ::testing::ValuesIn(levels), - ::testing::ValuesIn(constShapes), - ::testing::Values(fqArgs), - ::testing::Values(inputParams), + ::testing::ValuesIn(const_shapes), + ::testing::Values(fq_args), ::testing::ValuesIn(broadcasts) ); INSTANTIATE_TEST_SUITE_P(smoke_FakeQuantize, FakeQuantizeLayerTest, ::testing::Combine( - fqParams, - ::testing::ValuesIn(netPrecisions), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::ValuesIn(inputShapes), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::Values(config)), + fq_params, + ::testing::ValuesIn(model_types), + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(shapes_static)), + ::testing::Values(ov::test::utils::DEVICE_CPU)), FakeQuantizeLayerTest::getTestCaseName); -const std::vector singleShape = {3, 4, 2, 5}; -const auto noneBroadcastFqParams = ::testing::Combine( +const std::vector single_shape = {3, 4, 2, 5}; +const auto none_broadcast_fq_params = ::testing::Combine( ::testing::ValuesIn(levels), - ::testing::Values(singleShape), - ::testing::Values(fqArgs), - ::testing::Values(inputParams), - ::testing::Values(noneBroadcast) + ::testing::Values(single_shape), + ::testing::Values(fq_args), + ::testing::Values(none_broadcast) ); -INSTANTIATE_TEST_SUITE_P(smoke_FakeQuantizeNoneBroadcast, FakeQuantizeLayerTest, +INSTANTIATE_TEST_SUITE_P(smoke_FakeQuantizenone_broadcast, FakeQuantizeLayerTest, ::testing::Combine( - noneBroadcastFqParams, - ::testing::ValuesIn(netPrecisions), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(singleShape), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::Values(config)), + none_broadcast_fq_params, + ::testing::ValuesIn(model_types), + ::testing::Values(ov::test::static_shapes_to_test_representation({ov::Shape(single_shape)})), + ::testing::Values(ov::test::utils::DEVICE_CPU)), FakeQuantizeLayerTest::getTestCaseName); -const std::vector> 
inputShapesPerChannel = {{11, 10, 22, 19}, {11, 10, 5, 6}}; -const std::vector> constShapesPerChannelAxis0 = {{11, 1, 1, 1}}; -const std::vector> constShapesPerChannelAxis1 = {{1, 10, 1, 1}, {10, 1, 1}}; +const std::vector> shapes_static_per_channel = {{{11, 10, 22, 19}}, {{11, 10, 5, 6}}}; +const std::vector> const_shapes_per_channel_axis0 = {{11, 1, 1, 1}}; +const std::vector> const_shapes_per_channel_axis1 = {{1, 10, 1, 1}, {10, 1, 1}}; -const auto fqParamsPerChannelAxis0 = ::testing::Combine( +const auto fq_params_per_channel_axis0 = ::testing::Combine( ::testing::ValuesIn(levels), - ::testing::ValuesIn(constShapesPerChannelAxis0), - ::testing::Values(fqArgs), - ::testing::Values(inputParams), - ::testing::Values(numpyBroadcast) + ::testing::ValuesIn(const_shapes_per_channel_axis0), + ::testing::Values(fq_args), + ::testing::Values(numpy_broadcast) ); -const auto fqParamsPerChannelAxis1 = ::testing::Combine( +const auto fq_params_per_channel_axis1 = ::testing::Combine( ::testing::ValuesIn(levels), - ::testing::ValuesIn(constShapesPerChannelAxis1), - ::testing::Values(fqArgs), - ::testing::Values(inputParams), - ::testing::Values(numpyBroadcast) + ::testing::ValuesIn(const_shapes_per_channel_axis1), + ::testing::Values(fq_args), + ::testing::Values(numpy_broadcast) ); INSTANTIATE_TEST_SUITE_P(smoke_FakeQuantizePerChannelAxis0, FakeQuantizeLayerTest, ::testing::Combine( - fqParamsPerChannelAxis0, - ::testing::ValuesIn(netPrecisions), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::ValuesIn(inputShapesPerChannel), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::Values(config)), + fq_params_per_channel_axis0, + ::testing::ValuesIn(model_types), + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(shapes_static_per_channel)), + ::testing::Values(ov::test::utils::DEVICE_CPU)), FakeQuantizeLayerTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_FakeQuantizePerChannelAxis1, FakeQuantizeLayerTest, ::testing::Combine( - fqParamsPerChannelAxis1, - ::testing::ValuesIn(netPrecisions), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::ValuesIn(inputShapesPerChannel), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::Values(config)), + fq_params_per_channel_axis1, + ::testing::ValuesIn(model_types), + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(shapes_static_per_channel)), + ::testing::Values(ov::test::utils::DEVICE_CPU)), FakeQuantizeLayerTest::getTestCaseName); -const std::vector> inputShapesPerChannel2D = {{1, 10}}; -const std::vector> constShapesPerChannel2D = { {10}, {1, 10}, {1} }; -const auto fqParamsPerChannel2D = ::testing::Combine( +const std::vector shapes_static_per_channel_2d = {{1, 10}}; +const std::vector> const_shapes_per_channel_2d = { {10}, {1, 10}, {1} }; +const auto fq_params_const_shapes_per_channel_2d = ::testing::Combine( ::testing::ValuesIn(levels), - ::testing::ValuesIn(constShapesPerChannel2D), - ::testing::Values(fqArgs), - ::testing::Values(inputParams), - ::testing::Values(numpyBroadcast) + ::testing::ValuesIn(const_shapes_per_channel_2d), + ::testing::Values(fq_args), + ::testing::Values(numpy_broadcast) ); 
INSTANTIATE_TEST_SUITE_P(smoke_FakeQuantizePerChannel2D, FakeQuantizeLayerTest, ::testing::Combine( - fqParamsPerChannel2D, - ::testing::ValuesIn(netPrecisions), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::ValuesIn(inputShapesPerChannel2D), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::Values(config)), + fq_params_const_shapes_per_channel_2d, + ::testing::ValuesIn(model_types), + ::testing::Values(ov::test::static_shapes_to_test_representation(shapes_static_per_channel_2d)), + ::testing::Values(ov::test::utils::DEVICE_CPU)), FakeQuantizeLayerTest::getTestCaseName); } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/gather_nd.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/gather_nd.cpp index c17094dc195d67..c5206e2b709cf1 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/gather_nd.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/gather_nd.cpp @@ -4,76 +4,72 @@ #include -#include "single_layer_tests/gather_nd.hpp" +#include "single_op_tests/gather_nd.hpp" -using namespace LayerTestsDefinitions; namespace { +using ov::test::GatherNDLayerTest; +using ov::test::GatherND8LayerTest; -const std::vector dPrecisions = { - InferenceEngine::Precision::FP32, - InferenceEngine::Precision::FP16, - InferenceEngine::Precision::I32, - InferenceEngine::Precision::I64, - InferenceEngine::Precision::I16, - InferenceEngine::Precision::U8, - InferenceEngine::Precision::I8 +const std::vector model_types = { + ov::element::f32, + ov::element::f16, + ov::element::i32, + ov::element::i64, + ov::element::i16, + ov::element::u8, + ov::element::i8 }; -const std::vector iPrecisions = { - InferenceEngine::Precision::I32, - InferenceEngine::Precision::I64 +const std::vector indices_types = { + ov::element::i32, + ov::element::i64 }; +std::vector> shapes_subset1_static = {{{2, 2}}, {{2, 3, 4}}}; + const auto gatherNDArgsSubset1 = ::testing::Combine( - ::testing::ValuesIn(std::vector>( - {{2, 2}, {2, 3, 4}})), // Data shape - ::testing::ValuesIn(std::vector>( - {{2, 1}, {2, 1, 1}})), // Indices shape - ::testing::ValuesIn(std::vector({0, 1})) // Batch dims ); INSTANTIATE_TEST_SUITE_P(smoke_GatherND5_Set1, GatherNDLayerTest, - ::testing::Combine( - gatherNDArgsSubset1, - ::testing::ValuesIn(dPrecisions), - ::testing::ValuesIn(iPrecisions), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::Values({})), - GatherNDLayerTest::getTestCaseName); + ::testing::Combine( + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(shapes_subset1_static)), + ::testing::ValuesIn(std::vector({{2, 1}, {2, 1, 1}})), + ::testing::ValuesIn(std::vector({0, 1})), + ::testing::ValuesIn(model_types), + ::testing::ValuesIn(indices_types), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + GatherNDLayerTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_GatherND8_Set1, GatherND8LayerTest, - ::testing::Combine( - gatherNDArgsSubset1, - ::testing::ValuesIn(dPrecisions), - ::testing::ValuesIn(iPrecisions), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::Values({})), - GatherNDLayerTest::getTestCaseName); + ::testing::Combine( + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(shapes_subset1_static)), + 
::testing::ValuesIn(std::vector({{2, 1}, {2, 1, 1}})), + ::testing::ValuesIn(std::vector({0, 1})), + ::testing::ValuesIn(model_types), + ::testing::ValuesIn(indices_types), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + GatherNDLayerTest::getTestCaseName); -const auto gatherNDArgsSubset2 = ::testing::Combine( - ::testing::ValuesIn(std::vector>( - {{15, 12, 20, 15, 2}, {15, 12, 18, 7, 17}})), // Data shape - ::testing::ValuesIn(std::vector>( - {{15, 12, 2}, {15, 12, 5, 9, 1, 3}})), // Indices shape - ::testing::ValuesIn(std::vector({0, 1, 2})) // Batch dims -); +std::vector> shapes_subset2_static = {{{15, 12, 20, 15, 2}}, {{15, 12, 18, 7, 17}}}; INSTANTIATE_TEST_SUITE_P(smoke_GatherND5_Set2, GatherNDLayerTest, - ::testing::Combine( - gatherNDArgsSubset2, - ::testing::ValuesIn(dPrecisions), - ::testing::ValuesIn(iPrecisions), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::Values({})), - GatherNDLayerTest::getTestCaseName); + ::testing::Combine( + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(shapes_subset2_static)), + ::testing::ValuesIn(std::vector{{15, 12, 2}, {15, 12, 5, 9, 1, 3}}), + ::testing::ValuesIn(std::vector({0, 1, 2})), + ::testing::ValuesIn(model_types), + ::testing::ValuesIn(indices_types), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + GatherNDLayerTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_GatherND8_Set2, GatherND8LayerTest, - ::testing::Combine( - gatherNDArgsSubset2, - ::testing::ValuesIn(dPrecisions), - ::testing::ValuesIn(iPrecisions), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::Values({})), - GatherNDLayerTest::getTestCaseName); + ::testing::Combine( + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(shapes_subset2_static)), + ::testing::ValuesIn(std::vector{{15, 12, 2}, {15, 12, 5, 9, 1, 3}}), + ::testing::ValuesIn(std::vector({0, 1, 2})), + ::testing::ValuesIn(model_types), + ::testing::ValuesIn(indices_types), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + GatherNDLayerTest::getTestCaseName); } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/gather_tree.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/gather_tree.cpp index a90b2860926b42..63076e984a75ff 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/gather_tree.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/gather_tree.cpp @@ -4,34 +4,31 @@ #include -#include "single_layer_tests/gather_tree.hpp" +#include "single_op_tests/gather_tree.hpp" #include "common_test_utils/test_constants.hpp" - -using namespace LayerTestsDefinitions; +#include "common_test_utils/test_enums.hpp" namespace { +using ov::test::GatherTreeLayerTest; +using ov::test::utils::InputLayerType; -const std::vector netPrecisions = { - InferenceEngine::Precision::FP32, - InferenceEngine::Precision::I32 +const std::vector model_types = { + ov::element::f32, + ov::element::i32 }; -const std::vector> inputShapes = { {5, 1, 10}, {1, 1, 10}, {20, 1, 10}, {20, 20, 10} }; +const std::vector input_shapes = {{5, 1, 10}, {1, 1, 10}, {20, 1, 10}, {20, 20, 10}}; -const std::vector secondaryInputTypes = { - ngraph::helpers::InputLayerType::CONSTANT, - ngraph::helpers::InputLayerType::PARAMETER +const std::vector secondary_input_types = { + InputLayerType::CONSTANT, + InputLayerType::PARAMETER }; INSTANTIATE_TEST_SUITE_P(smoke_GatherTree, GatherTreeLayerTest, ::testing::Combine( - 
::testing::ValuesIn(inputShapes), - ::testing::ValuesIn(secondaryInputTypes), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(InferenceEngine::Layout::ANY), + ::testing::ValuesIn(input_shapes), + ::testing::ValuesIn(secondary_input_types), + ::testing::ValuesIn(model_types), ::testing::Values(ov::test::utils::DEVICE_CPU)), GatherTreeLayerTest::getTestCaseName); diff --git a/src/tests/functional/plugin/shared/include/single_op_tests/extract_image_patches.hpp b/src/tests/functional/plugin/shared/include/single_op_tests/extract_image_patches.hpp new file mode 100644 index 00000000000000..dd439894c23bd3 --- /dev/null +++ b/src/tests/functional/plugin/shared/include/single_op_tests/extract_image_patches.hpp @@ -0,0 +1,15 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "shared_test_classes/single_op/extract_image_patches.hpp" + +namespace ov { +namespace test { +TEST_P(ExtractImagePatchesTest, Inference) { + run(); +}; +} // namespace test +} // namespace ov diff --git a/src/tests/functional/plugin/shared/include/single_op_tests/fake_quantize.hpp b/src/tests/functional/plugin/shared/include/single_op_tests/fake_quantize.hpp new file mode 100644 index 00000000000000..3990528366bf65 --- /dev/null +++ b/src/tests/functional/plugin/shared/include/single_op_tests/fake_quantize.hpp @@ -0,0 +1,16 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "shared_test_classes/single_op/fake_quantize.hpp" + +namespace ov { +namespace test { + +TEST_P(FakeQuantizeLayerTest, Inference) { + run(); +} +} // namespace test +} // namespace ov diff --git a/src/tests/functional/plugin/shared/include/single_op_tests/gather_nd.hpp b/src/tests/functional/plugin/shared/include/single_op_tests/gather_nd.hpp new file mode 100644 index 00000000000000..4897f383ece390 --- /dev/null +++ b/src/tests/functional/plugin/shared/include/single_op_tests/gather_nd.hpp @@ -0,0 +1,19 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "shared_test_classes/single_op/gather_nd.hpp" + +namespace ov { +namespace test { +TEST_P(GatherNDLayerTest, Inference) { + run(); +} + +TEST_P(GatherND8LayerTest, Inference) { + run(); +} +} // namespace test +} // namespace ov diff --git a/src/tests/functional/plugin/shared/include/single_op_tests/gather_tree.hpp b/src/tests/functional/plugin/shared/include/single_op_tests/gather_tree.hpp new file mode 100644 index 00000000000000..d0cc718529517f --- /dev/null +++ b/src/tests/functional/plugin/shared/include/single_op_tests/gather_tree.hpp @@ -0,0 +1,15 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "shared_test_classes/single_op/gather_tree.hpp" + +namespace ov { +namespace test { +TEST_P(GatherTreeLayerTest, Inference) { + run(); +}; +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/extract_image_patches.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/extract_image_patches.hpp new file mode 100644 index 00000000000000..f42e05bbf37e0f --- /dev/null +++ 
b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/extract_image_patches.hpp @@ -0,0 +1,34 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include +#include + +#include "shared_test_classes/base/ov_subgraph.hpp" + +namespace ov { +namespace test { +using extractImagePatchesTuple = typename std::tuple< + std::vector, // input shape + std::vector, // kernel size + std::vector, // strides + std::vector, // rates + ov::op::PadType, // pad type + ov::element::Type, // model type + std::string>; // device name + +class ExtractImagePatchesTest : public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseTest { +public: + static std::string getTestCaseName(const testing::TestParamInfo &obj); + +protected: + void SetUp() override; +}; +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/fake_quantize.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/fake_quantize.hpp new file mode 100644 index 00000000000000..e5a93df9cf3d51 --- /dev/null +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/fake_quantize.hpp @@ -0,0 +1,37 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include +#include + +#include "shared_test_classes/base/ov_subgraph.hpp" + +namespace ov { +namespace test { +typedef std::tuple< + size_t, // fake quantize levels + std::vector, // fake quantize inputs shape + std::vector, // fake quantize (inputLow, inputHigh, outputLow, outputHigh) or empty for random + ov::op::AutoBroadcastSpec // fake quantize broadcast mode +> fqSpecificParams; +typedef std::tuple< + fqSpecificParams, + ov::element::Type, // Model type + std::vector, // Input shapes + std::string // Device name +> fqLayerTestParamsSet; + +class FakeQuantizeLayerTest : public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseTest { +public: + static std::string getTestCaseName(const testing::TestParamInfo& obj); +protected: + void SetUp() override; +}; +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/gather_nd.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/gather_nd.hpp new file mode 100644 index 00000000000000..fcdf09089bc23f --- /dev/null +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/gather_nd.hpp @@ -0,0 +1,43 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include + +#include "shared_test_classes/base/ov_subgraph.hpp" + +namespace ov { +namespace test { + +typedef std::tuple< + std::vector, // Input shapes + ov::Shape, // Indices shape + int, // Batch dim + ov::element::Type, // Model type + ov::element::Type, // Indices type + std::string // Device name +> GatherNDParams; + +class GatherNDLayerTest : public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseTest { +public: + static std::string getTestCaseName(const testing::TestParamInfo &obj); + +protected: + void SetUp() override; +}; + +class GatherND8LayerTest : public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseTest { +public: + static std::string getTestCaseName(const testing::TestParamInfo &obj); + +protected: + 
void SetUp() override; +}; +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/gather_tree.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/gather_tree.hpp new file mode 100644 index 00000000000000..0c674023107c23 --- /dev/null +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/gather_tree.hpp @@ -0,0 +1,31 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include + +#include "common_test_utils/test_enums.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" + +namespace ov { +namespace test { +using GatherTreeParamsTuple = typename std::tuple< + ov::Shape, // Input tensors shape + ov::test::utils::InputLayerType, // Secondary input type + ov::element::Type, // Model type + std::string>; // Device name + +class GatherTreeLayerTest : public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseTest { +public: + static std::string getTestCaseName(const testing::TestParamInfo &obj); + +protected: + void SetUp() override; +}; +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/src/base/utils/generate_inputs.cpp b/src/tests/functional/shared_test_classes/src/base/utils/generate_inputs.cpp index c97617aec0d86c..285dbebeaa8c49 100644 --- a/src/tests/functional/shared_test_classes/src/base/utils/generate_inputs.cpp +++ b/src/tests/functional/shared_test_classes/src/base/utils/generate_inputs.cpp @@ -455,7 +455,9 @@ ov::runtime::Tensor generate(const std::shared_ptr& return ov::test::utils::create_and_fill_tensor(elemType, targetShape, inGenData.range, inGenData.start_from, inGenData.resolution, inGenData.seed); } default: - return generate(std::dynamic_pointer_cast(node), port, elemType, targetShape); + InputGenerateData inGenData; + inGenData.range = maxBeamIndx; + return ov::test::utils::create_and_fill_tensor(elemType, targetShape, inGenData.range, inGenData.start_from, inGenData.resolution, inGenData.seed); } } diff --git a/src/tests/functional/shared_test_classes/src/single_op/extract_image_patches.cpp b/src/tests/functional/shared_test_classes/src/single_op/extract_image_patches.cpp new file mode 100644 index 00000000000000..c5aad78d475333 --- /dev/null +++ b/src/tests/functional/shared_test_classes/src/single_op/extract_image_patches.cpp @@ -0,0 +1,59 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "shared_test_classes/single_op/extract_image_patches.hpp" + +#include "openvino/op/parameter.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/extractimagepatches.hpp" + + +namespace ov { +namespace test { + +std::string ExtractImagePatchesTest::getTestCaseName(const testing::TestParamInfo &obj) { + std::vector shapes; + std::vector kernel, strides, rates; + ov::op::PadType pad_type; + ov::element::Type model_type; + std::string device_name; + std::tie(shapes, kernel, strides, rates, pad_type, model_type, device_name) = obj.param; + std::ostringstream result; + + result << "IS=("; + for (size_t i = 0lu; i < shapes.size(); i++) { + result << ov::test::utils::partialShape2str({shapes[i].first}) << (i < shapes.size() - 1lu ? 
"_" : ""); + } + result << ")_TS="; + for (size_t i = 0lu; i < shapes.front().second.size(); i++) { + result << "{"; + for (size_t j = 0lu; j < shapes.size(); j++) { + result << ov::test::utils::vec2str(shapes[j].second[i]) << (j < shapes.size() - 1lu ? "_" : ""); + } + result << "}_"; + } + result << "model_type=" << model_type.get_type_name() << "_"; + result << "K=" << ov::test::utils::vec2str(kernel) << "_"; + result << "S=" << ov::test::utils::vec2str(strides) << "_"; + result << "R=" << ov::test::utils::vec2str(rates) << "_"; + result << "P=" << pad_type << "_"; + result << "trgDev=" << device_name; + return result.str(); +} + +void ExtractImagePatchesTest::SetUp() { + std::vector shapes; + std::vector kernel, strides, rates; + ov::op::PadType pad_type; + ov::element::Type model_type; + std::tie(shapes, kernel, strides, rates, pad_type, model_type, targetDevice) = this->GetParam(); + init_input_shapes(shapes); + + auto param = std::make_shared(model_type, inputDynamicShapes.front()); + auto extImgPatches = std::make_shared(param, ov::Shape(kernel), ov::Strides(strides), ov::Shape(rates), pad_type); + auto result = std::make_shared(extImgPatches); + function = std::make_shared(result, ov::ParameterVector{param}, "ExtractImagePatches"); +} +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/src/single_op/fake_quantize.cpp b/src/tests/functional/shared_test_classes/src/single_op/fake_quantize.cpp new file mode 100644 index 00000000000000..a6ac9f69f629bf --- /dev/null +++ b/src/tests/functional/shared_test_classes/src/single_op/fake_quantize.cpp @@ -0,0 +1,88 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "shared_test_classes/single_op/fake_quantize.hpp" + +#include "ngraph_functions/builders.hpp" +#include "openvino/op/parameter.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/fake_quantize.hpp" + +namespace ov { +namespace test { +std::string FakeQuantizeLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { + fqSpecificParams fqParams; + ov::element::Type model_type; + std::vector shapes; + std::string target_device; + std::tie(fqParams, model_type, shapes, target_device) = obj.param; + size_t levels; + std::vector const_shape; + std::vector fq_direct_args; + ov::op::AutoBroadcastSpec broadcast; + std::tie(levels, const_shape, fq_direct_args, broadcast) = fqParams; + + std::ostringstream result; + result << "IS=("; + for (size_t i = 0lu; i < shapes.size(); i++) { + result << ov::test::utils::partialShape2str({shapes[i].first}) << (i < shapes.size() - 1lu ? "_" : ""); + } + result << ")_TS="; + for (size_t i = 0lu; i < shapes.front().second.size(); i++) { + result << "{"; + for (size_t j = 0lu; j < shapes.size(); j++) { + result << ov::test::utils::vec2str(shapes[j].second[i]) << (j < shapes.size() - 1lu ? 
"_" : ""); + } + result << "}_"; + } + result << "CS=" << ov::test::utils::vec2str(const_shape) << "_"; + result << "LEVELS=" << levels << "_"; + result << "netPRC=" << model_type.get_type_name() << "_"; + result << "trgDev=" << target_device; + if (!fq_direct_args.empty()) { + result << "_fqArgs=" << fq_direct_args[0] << "_" << fq_direct_args[1] << "_" << fq_direct_args[2] << "_" << fq_direct_args[3]; + } + result << "_" << broadcast.m_type; + return result.str(); +} + +void FakeQuantizeLayerTest::SetUp() { + fqSpecificParams fqParams; + ov::element::Type model_type; + std::vector shapes; + std::tie(fqParams, model_type, shapes, targetDevice) = this->GetParam(); + init_input_shapes(shapes); + + std::vector kernel, stride, dilation; + size_t levels; + std::vector const_shape; + std::vector fq_direct_arg; + ov::op::AutoBroadcastSpec broadcast; + std::tie(levels, const_shape, fq_direct_arg, broadcast) = fqParams; + if (fq_direct_arg.size() != 0) { + abs_threshold = (fq_direct_arg[3] - fq_direct_arg[2]) / levels; + } + auto param = std::make_shared(model_type, inputDynamicShapes.front()); + + std::shared_ptr fq; + if (fq_direct_arg.empty()) { + fq = ngraph::builder::makeFakeQuantize(param, model_type, levels, const_shape, 1); + } else { + fq = ngraph::builder::makeFakeQuantize( + param, + model_type, + levels, + const_shape, + {fq_direct_arg[0]}, + {fq_direct_arg[1]}, + {fq_direct_arg[2]}, + {fq_direct_arg[3]}); + } + + auto result = std::make_shared(fq); + function = std::make_shared(result, ov::ParameterVector{param}, "fakeQuantize"); +} +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/src/single_op/gather_nd.cpp b/src/tests/functional/shared_test_classes/src/single_op/gather_nd.cpp new file mode 100644 index 00000000000000..1c2dd1abbe045c --- /dev/null +++ b/src/tests/functional/shared_test_classes/src/single_op/gather_nd.cpp @@ -0,0 +1,77 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "shared_test_classes/single_op/gather_nd.hpp" + +#include "ngraph_functions/builders.hpp" + +namespace ov { +namespace test { +std::string GatherNDLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { + std::vector shapes; + ov::Shape indices_shape; + ov::element::Type model_type, indices_type; + int batch_dims; + std::string device; + std::tie(shapes, indices_shape, batch_dims, model_type, indices_type, device) = obj.param; + + std::ostringstream result; + result << "IS=("; + for (size_t i = 0lu; i < shapes.size(); i++) { + result << ov::test::utils::partialShape2str({shapes[i].first}) << (i < shapes.size() - 1lu ? "_" : ""); + } + result << ")_TS="; + for (size_t i = 0lu; i < shapes.front().second.size(); i++) { + result << "{"; + for (size_t j = 0lu; j < shapes.size(); j++) { + result << ov::test::utils::vec2str(shapes[j].second[i]) << (j < shapes.size() - 1lu ? 
"_" : ""); + } + result << "}_"; + } + result << "IS=" << ov::test::utils::vec2str(indices_shape) << "_"; + result << "BD=" << batch_dims << "_"; + result << "DP=" << model_type.get_type_name() << "_"; + result << "IP=" << indices_type.get_type_name() << "_"; + result << "device=" << device; + return result.str(); +} + +void GatherNDLayerTest::SetUp() { + std::vector shapes; + ov::Shape indices_shape; + ov::element::Type model_type, indices_type; + int batch_dims; + std::tie(shapes, indices_shape, batch_dims, model_type, indices_type, targetDevice) = this->GetParam(); + init_input_shapes(shapes); + + auto param = std::make_shared(model_type, inputDynamicShapes.front()); + + auto gather = ngraph::builder::makeGatherND(param, indices_shape, indices_type, batch_dims); + + auto result = std::make_shared(gather); + function = std::make_shared(result, ov::ParameterVector{param}, "gatherND"); +} + + +std::string GatherND8LayerTest::getTestCaseName(const testing::TestParamInfo& obj) { + return GatherNDLayerTest::getTestCaseName(obj); +} + +void GatherND8LayerTest::SetUp() { + std::vector shapes; + ov::Shape indices_shape; + ov::element::Type model_type, indices_type; + int batch_dims; + std::tie(shapes, indices_shape, batch_dims, model_type, indices_type, targetDevice) = this->GetParam(); + init_input_shapes(shapes); + + auto param = std::make_shared(model_type, inputDynamicShapes.front()); + + auto gather = ngraph::builder::makeGatherND8(param, indices_shape, indices_type, batch_dims); + + auto result = std::make_shared(gather); + function = std::make_shared(result, ov::ParameterVector{param}, "gatherND"); +} +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/src/single_op/gather_tree.cpp b/src/tests/functional/shared_test_classes/src/single_op/gather_tree.cpp new file mode 100644 index 00000000000000..3847b1f6bae3d3 --- /dev/null +++ b/src/tests/functional/shared_test_classes/src/single_op/gather_tree.cpp @@ -0,0 +1,74 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "shared_test_classes/single_op/gather_tree.hpp" + +#include "common_test_utils/ov_tensor_utils.hpp" +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/gather_tree.hpp" + +namespace ov { +namespace test { +using ov::test::utils::InputLayerType; + +std::string GatherTreeLayerTest::getTestCaseName(const testing::TestParamInfo &obj) { + ov::Shape input_shape; + ov::element::Type model_type; + InputLayerType secondary_input_type; + std::string device_name; + + std::tie(input_shape, secondary_input_type, model_type, device_name) = obj.param; + + std::ostringstream result; + result << "IS=" << ov::test::utils::vec2str(input_shape) << "_"; + result << "secondary_input_type=" << secondary_input_type << "_"; + result << "netPRC=" << model_type.get_type_name() << "_"; + result << "trgDev=" << device_name; + return result.str(); +} + +void GatherTreeLayerTest::SetUp() { + std::vector input_shape; + ov::element::Type model_type; + InputLayerType secondary_input_type; + + std::tie(input_shape, secondary_input_type, model_type, targetDevice) = GetParam(); + + std::vector input_shapes_static {input_shape}; + std::vector constant_shapes_static; + if (InputLayerType::PARAMETER == secondary_input_type) { + input_shapes_static.push_back(input_shape); + input_shapes_static.push_back(ov::Shape{input_shape.at(1)}); + input_shapes_static.push_back(ov::Shape()); + } else { + 
constant_shapes_static.push_back(input_shape); + constant_shapes_static.push_back(ov::Shape{input_shape.at(1)}); + constant_shapes_static.push_back(ov::Shape()); + } + init_input_shapes(ov::test::static_shapes_to_test_representation(input_shapes_static)); + + ov::ParameterVector params; + ov::NodeVector inputs; + for (const auto& shape : inputDynamicShapes) { + auto param = std::make_shared(model_type, shape); + params.push_back(param); + inputs.push_back(param); + } + + for (const auto& shape : constant_shapes_static) { + auto tensor = ov::test::utils::create_and_fill_tensor(model_type, shape, input_shape.at(2) - 2, 1); + auto constant = std::make_shared(tensor); + inputs.push_back(constant); + } + + auto gt = std::make_shared(inputs[0], inputs[1], inputs[2], inputs[3]); + + auto result = std::make_shared(gt); + + function = std::make_shared(result, params, "GatherTree"); +} +} // namespace test +} // namespace ov From 1e110c9ff521f9bf99574327c59d626f1676a7bd Mon Sep 17 00:00:00 2001 From: Sebastian Golebiewski Date: Tue, 3 Oct 2023 12:25:32 +0200 Subject: [PATCH 041/257] Changing file structure of POT tool section (#20208) --- .../post_training_optimization_tool/pot_api_reference.md | 0 .../post_training_optimization_tool/pot_cli.md | 0 .../pot_cli/configuration_file_description.md | 0 .../post_training_optimization_tool/pot_cli/simplified_mode.md | 0 .../post_training_optimization_tool/pot_examples.md | 0 .../post_training_optimization_tool/pot_faq.md | 0 .../post_training_optimization_tool/protecting_model.md | 0 .../quantization_best_practices.md | 0 .../quantization_best_practices/saturation_issue.md | 0 .../quantizing_models_with_accuracy.md | 0 .../quantizing_models_with_accuracy/accuracy_aware_algorithm.md | 0 11 files changed, 0 insertions(+), 0 deletions(-) rename tools/pot/openvino/tools/pot/api/README.md => docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_api_reference.md (100%) rename tools/pot/docs/CLI.md => docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_cli.md (100%) rename tools/pot/configs/README.md => docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_cli/configuration_file_description.md (100%) rename tools/pot/docs/SimplifiedMode.md => docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_cli/simplified_mode.md (100%) rename tools/pot/docs/Examples.md => docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples.md (100%) rename tools/pot/docs/FrequentlyAskedQuestions.md => docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_faq.md (100%) rename tools/pot/openvino/tools/pot/algorithms/quantization/range_supervision/README.md => docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/protecting_model.md (100%) rename tools/pot/docs/BestPractices.md => docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/quantization_best_practices.md (100%) rename tools/pot/docs/SaturationIssue.md => docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/quantization_best_practices/saturation_issue.md (100%) rename tools/pot/docs/AccuracyAwareQuantizationUsage.md => docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/quantizing_models_with_accuracy.md (100%) rename 
tools/pot/openvino/tools/pot/algorithms/quantization/accuracy_aware/README.md => docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/quantizing_models_with_accuracy/accuracy_aware_algorithm.md (100%) diff --git a/tools/pot/openvino/tools/pot/api/README.md b/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_api_reference.md similarity index 100% rename from tools/pot/openvino/tools/pot/api/README.md rename to docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_api_reference.md diff --git a/tools/pot/docs/CLI.md b/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_cli.md similarity index 100% rename from tools/pot/docs/CLI.md rename to docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_cli.md diff --git a/tools/pot/configs/README.md b/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_cli/configuration_file_description.md similarity index 100% rename from tools/pot/configs/README.md rename to docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_cli/configuration_file_description.md diff --git a/tools/pot/docs/SimplifiedMode.md b/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_cli/simplified_mode.md similarity index 100% rename from tools/pot/docs/SimplifiedMode.md rename to docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_cli/simplified_mode.md diff --git a/tools/pot/docs/Examples.md b/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples.md similarity index 100% rename from tools/pot/docs/Examples.md rename to docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples.md diff --git a/tools/pot/docs/FrequentlyAskedQuestions.md b/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_faq.md similarity index 100% rename from tools/pot/docs/FrequentlyAskedQuestions.md rename to docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_faq.md diff --git a/tools/pot/openvino/tools/pot/algorithms/quantization/range_supervision/README.md b/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/protecting_model.md similarity index 100% rename from tools/pot/openvino/tools/pot/algorithms/quantization/range_supervision/README.md rename to docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/protecting_model.md diff --git a/tools/pot/docs/BestPractices.md b/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/quantization_best_practices.md similarity index 100% rename from tools/pot/docs/BestPractices.md rename to docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/quantization_best_practices.md diff --git a/tools/pot/docs/SaturationIssue.md b/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/quantization_best_practices/saturation_issue.md similarity index 100% rename from tools/pot/docs/SaturationIssue.md rename to docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/quantization_best_practices/saturation_issue.md diff --git a/tools/pot/docs/AccuracyAwareQuantizationUsage.md 
b/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/quantizing_models_with_accuracy.md similarity index 100% rename from tools/pot/docs/AccuracyAwareQuantizationUsage.md rename to docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/quantizing_models_with_accuracy.md diff --git a/tools/pot/openvino/tools/pot/algorithms/quantization/accuracy_aware/README.md b/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/quantizing_models_with_accuracy/accuracy_aware_algorithm.md similarity index 100% rename from tools/pot/openvino/tools/pot/algorithms/quantization/accuracy_aware/README.md rename to docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/quantizing_models_with_accuracy/accuracy_aware_algorithm.md From 1178d983e650ed57205a78c814cdaa8b6cbbc10c Mon Sep 17 00:00:00 2001 From: Sebastian Golebiewski Date: Tue, 3 Oct 2023 14:41:37 +0200 Subject: [PATCH 042/257] Changing file structure of POT Tool Examples section (#20213) --- .../pot_examples/pot_api_examples.md | 0 .../pot_examples/pot_api_examples/pot_example_3d_segmentation.md | 0 .../pot_examples/pot_api_examples/pot_example_classification.md | 0 .../pot_examples/pot_api_examples/pot_example_face_detection.md | 0 .../pot_examples/pot_api_examples/pot_example_object_detection.md | 0 .../pot_examples/pot_api_examples/pot_example_segmentation.md | 0 .../pot_examples/pot_api_examples/pot_example_speech.md | 0 .../pot_examples/pot_cli_example.md | 0 8 files changed, 0 insertions(+), 0 deletions(-) rename tools/pot/openvino/tools/pot/api/samples/README.md => docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples/pot_api_examples.md (100%) rename tools/pot/openvino/tools/pot/api/samples/3d_segmentation/README.md => docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples/pot_api_examples/pot_example_3d_segmentation.md (100%) rename tools/pot/openvino/tools/pot/api/samples/classification/README.md => docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples/pot_api_examples/pot_example_classification.md (100%) rename tools/pot/openvino/tools/pot/api/samples/face_detection/README.md => docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples/pot_api_examples/pot_example_face_detection.md (100%) rename tools/pot/openvino/tools/pot/api/samples/object_detection/README.md => docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples/pot_api_examples/pot_example_object_detection.md (100%) rename tools/pot/openvino/tools/pot/api/samples/segmentation/README.md => docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples/pot_api_examples/pot_example_segmentation.md (100%) rename tools/pot/openvino/tools/pot/api/samples/speech/README.md => docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples/pot_api_examples/pot_example_speech.md (100%) rename tools/pot/docs/E2eExample.md => docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples/pot_cli_example.md (100%) diff --git a/tools/pot/openvino/tools/pot/api/samples/README.md b/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples/pot_api_examples.md similarity index 100% rename 
from tools/pot/openvino/tools/pot/api/samples/README.md rename to docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples/pot_api_examples.md diff --git a/tools/pot/openvino/tools/pot/api/samples/3d_segmentation/README.md b/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples/pot_api_examples/pot_example_3d_segmentation.md similarity index 100% rename from tools/pot/openvino/tools/pot/api/samples/3d_segmentation/README.md rename to docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples/pot_api_examples/pot_example_3d_segmentation.md diff --git a/tools/pot/openvino/tools/pot/api/samples/classification/README.md b/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples/pot_api_examples/pot_example_classification.md similarity index 100% rename from tools/pot/openvino/tools/pot/api/samples/classification/README.md rename to docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples/pot_api_examples/pot_example_classification.md diff --git a/tools/pot/openvino/tools/pot/api/samples/face_detection/README.md b/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples/pot_api_examples/pot_example_face_detection.md similarity index 100% rename from tools/pot/openvino/tools/pot/api/samples/face_detection/README.md rename to docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples/pot_api_examples/pot_example_face_detection.md diff --git a/tools/pot/openvino/tools/pot/api/samples/object_detection/README.md b/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples/pot_api_examples/pot_example_object_detection.md similarity index 100% rename from tools/pot/openvino/tools/pot/api/samples/object_detection/README.md rename to docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples/pot_api_examples/pot_example_object_detection.md diff --git a/tools/pot/openvino/tools/pot/api/samples/segmentation/README.md b/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples/pot_api_examples/pot_example_segmentation.md similarity index 100% rename from tools/pot/openvino/tools/pot/api/samples/segmentation/README.md rename to docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples/pot_api_examples/pot_example_segmentation.md diff --git a/tools/pot/openvino/tools/pot/api/samples/speech/README.md b/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples/pot_api_examples/pot_example_speech.md similarity index 100% rename from tools/pot/openvino/tools/pot/api/samples/speech/README.md rename to docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples/pot_api_examples/pot_example_speech.md diff --git a/tools/pot/docs/E2eExample.md b/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples/pot_cli_example.md similarity index 100% rename from tools/pot/docs/E2eExample.md rename to docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples/pot_cli_example.md From a2c524019a5ed89106d777b120a3d15b4de0cb8d Mon Sep 17 00:00:00 2001 From: Vladimir Paramuzov Date: Tue, 3 Oct 
2023 16:46:50 +0400 Subject: [PATCH 043/257] [GPU] Use ov element type and float16 inside plugin (#20110) --- .../primitives/generate_proposals.hpp | 2 +- .../intel_gpu/primitives/primitive.hpp | 2 +- .../include/intel_gpu/runtime/half.hpp | 57 - .../include/intel_gpu/runtime/layout.hpp | 110 +- .../include/intel_gpu/runtime/memory.hpp | 8 +- .../intel_gpu/runtime/tensor_accessor.hpp | 4 +- .../intel_gpu/src/graph/CMakeLists.txt | 2 +- src/plugins/intel_gpu/src/graph/condition.cpp | 4 +- .../intel_gpu/src/graph/error_handler.cpp | 8 +- .../graph_optimizer/add_required_reorders.cpp | 4 +- .../graph_optimizer/pre_replace_deconv.cpp | 8 +- .../prepare_primitive_fusing.cpp | 15 +- .../graph_optimizer/prepare_quantization.cpp | 16 +- .../graph/graph_optimizer/reorder_inputs.cpp | 4 +- src/plugins/intel_gpu/src/graph/half.cpp | 165 -- .../src/graph/impls/cpu/activation.cpp | 2 +- .../src/graph/impls/cpu/proposal.cpp | 4 +- .../intel_gpu/src/graph/impls/cpu/range.cpp | 2 +- .../intel_gpu/src/graph/impls/cpu/tile.cpp | 2 +- .../graph/impls/ocl/binary_convolution.cpp | 2 +- .../impls/ocl/kernel_selector_helper.cpp | 8 +- .../graph/impls/ocl/kernel_selector_helper.h | 15 +- .../graph/impls/ocl/non_max_suppression.cpp | 4 +- .../src/graph/impls/ocl/quantize.cpp | 2 +- .../intel_gpu/src/graph/include/loop_inst.h | 4 +- .../src/graph/include/memory_accessor.hpp | 2 +- .../src/graph/include/quantize_inst.h | 2 +- .../src/graph/include/to_string_utils.h | 2 +- src/plugins/intel_gpu/src/graph/network.cpp | 9 +- src/plugins/intel_gpu/src/graph/quantize.cpp | 4 +- src/plugins/intel_gpu/src/graph/range.cpp | 4 +- src/plugins/intel_gpu/src/graph/tile.cpp | 2 +- src/plugins/intel_gpu/src/graph/topology.cpp | 2 +- src/plugins/intel_gpu/src/plugin/graph.cpp | 6 +- .../src/plugin/sync_infer_request.cpp | 4 +- .../intel_gpu/src/plugin/variable_state.cpp | 3 +- src/plugins/intel_gpu/src/runtime/layout.cpp | 18 +- .../is_valid_fusion_test.cpp | 2 +- .../dynamic_execution/memory_realloc_test.cpp | 4 +- .../optimized_out_execution_test.cpp | 12 +- .../intel_gpu/tests/unit/float16.natvis | 4 +- .../binary_convolution_fusion_test.cpp | 10 +- .../tests/unit/fusions/fusion_test_common.hpp | 14 +- .../fusions/scatter_nd_update_fusion_test.cpp | 2 +- .../graph_manipulation_gpu_test.cpp | 4 +- .../unit/module_tests/optionals_test.cpp | 4 +- .../tests/unit/passes/handle_reshape.cpp | 8 +- .../passes/prepare_buffer_fusing_test.cpp | 88 +- .../unit/passes/prepare_padding_test.cpp | 2 +- .../passes/prepare_primitive_fusing_test.cpp | 14 +- .../unit/shape_infer/quantize_si_test.cpp | 2 +- .../shape_infer/random_uniform_si_test.cpp | 3 +- .../tests/unit/shape_infer/range_si_test.cpp | 5 +- .../test_cases/activation_simple_gpu_test.cpp | 44 +- .../adaptive_avg_pooling_gpu_test.cpp | 6 +- .../adaptive_max_pooling_gpu_test.cpp | 6 +- .../unit/test_cases/arg_max_gpu_test.cpp | 7 +- .../test_cases/batch_to_space_gpu_test.cpp | 76 +- .../binary_convolution_gpu_test.cpp | 12 +- .../tests/unit/test_cases/border_gpu_test.cpp | 10 +- .../unit/test_cases/broadcast_gpu_test.cpp | 146 +- .../unit/test_cases/bucketize_gpu_test.cpp | 35 +- .../test_cases/concatenation_gpu_test.cpp | 64 +- .../unit/test_cases/condition_gpu_test.cpp | 5 +- .../test_cases/convert_color_gpu_test.cpp | 6 +- .../unit/test_cases/convolution_gpu_test.cpp | 156 +- .../unit/test_cases/ctc_loss_gpu_test.cpp | 13 +- .../unit/test_cases/cum_sum_gpu_test.cpp | 8 +- .../test_cases/deconvolution_gpu_test.cpp | 66 +- .../test_cases/depth_concatenate_gpu_test.cpp | 18 +- 
.../test_cases/depth_to_space_gpu_test.cpp | 54 +- .../unit/test_cases/detection_output_test.cpp | 86 +- .../tests/unit/test_cases/dft_gpu_test.cpp | 15 +- .../unit/test_cases/eltwise_gpu_test.cpp | 88 +- .../test_cases/embedding_bag_gpu_test.cpp | 208 +- ...al_detectron_detection_output_gpu_test.cpp | 10 +- ...nerate_proposals_single_image_gpu_test.cpp | 12 +- ...etectron_prior_grid_generator_gpu_test.cpp | 8 +- ...erimental_detectron_topk_rois_gpu_test.cpp | 17 +- .../intel_gpu/tests/unit/test_cases/eye.cpp | 22 +- .../test_cases/fully_connected_gpu_test.cpp | 79 +- .../test_cases/gather_elements_gpu_test.cpp | 2010 ++++++++-------- .../tests/unit/test_cases/gather_gpu_test.cpp | 160 +- .../unit/test_cases/gather_nd_gpu_test.cpp | 592 ++--- .../unit/test_cases/gather_tree_gpu_test.cpp | 2 +- .../tests/unit/test_cases/gemm_gpu_test.cpp | 20 +- .../generate_proposals_gpu_test.cpp | 16 +- .../unit/test_cases/grid_sample_gpu_test.cpp | 12 +- .../unit/test_cases/hash_key_gpu_test.cpp | 20 +- .../tests/unit/test_cases/lrn_gpu_test.cpp | 4 +- .../unit/test_cases/lru_caches_gpu_test.cpp | 4 +- .../unit/test_cases/lstm_dynamic_gpu_test.cpp | 6 +- .../tests/unit/test_cases/lstm_gpu_test.cpp | 182 +- .../unit/test_cases/matrix_nms_gpu_test.cpp | 35 +- .../test_cases/multiclass_nms_gpu_test.cpp | 16 +- .../tests/unit/test_cases/mvn_gpu_test.cpp | 56 +- .../test_cases/non_max_suppression_test.cpp | 30 +- .../unit/test_cases/non_zero_gpu_test.cpp | 16 +- .../unit/test_cases/normalizel2_gpu_test.cpp | 32 +- .../unit/test_cases/one_hot_gpu_test.cpp | 2 +- .../unit/test_cases/permute_gpu_test.cpp | 13 +- .../unit/test_cases/pooling_gpu_test.cpp | 106 +- .../unit/test_cases/prior_box_gpu_test.cpp | 4 +- .../propagate_constants_gpu_test.cpp | 4 +- .../unit/test_cases/proposal_cpu_test.cpp | 18 +- .../test_cases/pyramid_roi_align_gpu_test.cpp | 4 +- .../unit/test_cases/quantize_gpu_test.cpp | 8 +- .../test_cases/random_uniform_gpu_test.cpp | 34 +- .../tests/unit/test_cases/range_gpu_test.cpp | 8 +- .../tests/unit/test_cases/reduce_gpu_test.cpp | 6 +- .../unit/test_cases/region_yolo_gpu_test.cpp | 16 +- .../unit/test_cases/reorder_gpu_test.cpp | 192 +- .../unit/test_cases/reorg_yolo_gpu_test.cpp | 8 +- .../unit/test_cases/resample_gpu_test.cpp | 16 +- .../unit/test_cases/reshape_gpu_test.cpp | 34 +- .../unit/test_cases/reverse_gpu_test.cpp | 120 +- .../test_cases/reverse_sequence_gpu_test.cpp | 36 +- .../unit/test_cases/roi_align_gpu_test.cpp | 44 +- .../unit/test_cases/roi_pooling_gpu_test.cpp | 8 +- .../tests/unit/test_cases/roll_gpu_test.cpp | 17 +- .../scatter_elements_update_gpu_test.cpp | 20 +- .../test_cases/scatter_nd_update_gpu_test.cpp | 2112 ++++++++--------- .../test_cases/scatter_update_gpu_test.cpp | 282 +-- .../unit/test_cases/softmax_gpu_test.cpp | 12 +- .../test_cases/space_to_batch_gpu_test.cpp | 56 +- .../test_cases/space_to_depth_gpu_test.cpp | 132 +- .../tests/unit/test_cases/split_gpu_test.cpp | 2 +- .../tests/unit/test_cases/tile_gpu_test.cpp | 8 +- .../tests/unit/test_cases/unique_gpu_test.cpp | 12 +- .../intel_gpu/tests/unit/test_utils/float16.h | 100 - .../tests/unit/test_utils/network_test.h | 12 +- .../tests/unit/test_utils/random_gen.h | 8 +- .../tests/unit/test_utils/test_utils.cpp | 17 +- .../tests/unit/test_utils/test_utils.h | 31 +- 134 files changed, 4134 insertions(+), 4548 deletions(-) delete mode 100644 src/plugins/intel_gpu/include/intel_gpu/runtime/half.hpp delete mode 100644 src/plugins/intel_gpu/src/graph/half.cpp delete mode 100644 
src/plugins/intel_gpu/tests/unit/test_utils/float16.h diff --git a/src/plugins/intel_gpu/include/intel_gpu/primitives/generate_proposals.hpp b/src/plugins/intel_gpu/include/intel_gpu/primitives/generate_proposals.hpp index 3524c104fd9660..908285f73e1298 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/primitives/generate_proposals.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/primitives/generate_proposals.hpp @@ -59,7 +59,7 @@ struct generate_proposals int64_t post_nms_count = 0; bool normalized = false; float nms_eta = 0.0f; - data_types roi_num_type = data_types::bin; + data_types roi_num_type = data_types::undefined; size_t hash() const override { size_t seed = primitive::hash(); diff --git a/src/plugins/intel_gpu/include/intel_gpu/primitives/primitive.hpp b/src/plugins/intel_gpu/include/intel_gpu/primitives/primitive.hpp index 0bbf589b0da22c..bc55ed80e4f362 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/primitives/primitive.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/primitives/primitive.hpp @@ -150,7 +150,7 @@ struct primitive { return false; for (size_t i = 0; i < output_data_types.size(); ++i) { - if (output_data_types[i].value_or(data_types::bin) != rhs.output_data_types[i].value_or(data_types::bin)) + if (output_data_types[i].value_or(data_types::undefined) != rhs.output_data_types[i].value_or(data_types::undefined)) return false; } diff --git a/src/plugins/intel_gpu/include/intel_gpu/runtime/half.hpp b/src/plugins/intel_gpu/include/intel_gpu/runtime/half.hpp deleted file mode 100644 index 0bd5f0248301d8..00000000000000 --- a/src/plugins/intel_gpu/include/intel_gpu/runtime/half.hpp +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include -#include -#include -#include - -namespace cldnn { - -/// @addtogroup cpp_api C++ API -/// @{ - -float half_to_float(uint16_t value); -uint16_t float_to_half(float value); - -// There is no portable half precision floating point support. -// Using wrapped integral type with the same size and alignment restrictions. -class half_impl { -public: - half_impl() = default; - - template ::value>::type> - explicit half_impl(T data, int /*direct_creation_tag*/) : _data(data) {} - - operator uint16_t() const { return _data; } - operator int32_t() const { return static_cast(half_to_float(_data)); } - operator int64_t() const { return static_cast(half_to_float(_data)); } - operator float() const { - return half_to_float(_data); - } - - explicit half_impl(float value) - : _data(float_to_half(value)) - {} - - template ::value>::type> - explicit half_impl(T value) - : half_impl(static_cast(value)) - {} - -private: - uint16_t _data; -}; - -// Use complete implementation if necessary. -#if defined HALF_HALF_HPP -using half_t = half; -#else -using half_t = half_impl; -#endif - -} // namespace cldnn diff --git a/src/plugins/intel_gpu/include/intel_gpu/runtime/layout.hpp b/src/plugins/intel_gpu/include/intel_gpu/runtime/layout.hpp index 5313c35bce1f56..3dad0cea4e008b 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/runtime/layout.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/runtime/layout.hpp @@ -5,7 +5,6 @@ #pragma once #include "tensor.hpp" -#include "half.hpp" #include #include @@ -34,41 +33,14 @@ constexpr size_t uint_type_mask = 0x40; constexpr size_t bin_type_mask = 0x20; /// @brief Possible data types could be stored in memory. 
-enum class data_types : size_t { - bin = sizeof(int32_t) | bin_type_mask, - u8 = sizeof(uint8_t) | uint_type_mask, - i8 = sizeof(int8_t), - f16 = sizeof(int16_t) | float_type_mask, - f32 = sizeof(float) | float_type_mask, - i32 = sizeof(int32_t), - i64 = sizeof(int64_t) -}; - - -/// Converts C++ type to @ref data_types . -template -struct type_to_data_type; -#ifndef DOXYGEN_SHOULD_SKIP_THIS -template <> -struct type_to_data_type { static constexpr data_types value = data_types::i8; }; -template <> -struct type_to_data_type { static constexpr data_types value = data_types::u8; }; -template <> -struct type_to_data_type { static constexpr data_types value = data_types::i32; }; -template <> -struct type_to_data_type { static constexpr data_types value = data_types::i64; }; -template <> -struct type_to_data_type { static constexpr data_types value = data_types::f16; }; -template <> -struct type_to_data_type { static constexpr data_types value = data_types::f32; }; -#endif +using data_types = ov::element::Type_t; /// Converts @ref data_types to C++ type. template struct data_type_to_type; #ifndef DOXYGEN_SHOULD_SKIP_THIS template <> -struct data_type_to_type { typedef uint32_t type; }; +struct data_type_to_type { typedef uint32_t type; }; template <> struct data_type_to_type { typedef uint8_t type; }; template <> @@ -78,7 +50,7 @@ struct data_type_to_type { typedef int32_t type; }; template <> struct data_type_to_type { typedef int64_t type; }; template <> -struct data_type_to_type { typedef half_t type; }; +struct data_type_to_type { typedef ov::float16 type; }; template <> struct data_type_to_type { typedef float type; }; #endif @@ -86,21 +58,24 @@ struct data_type_to_type { typedef float type; }; /// Helper class to identify key properties for data_types. 
struct data_type_traits { static size_t size_of(data_types data_type) { - return (static_cast(data_type) & ~(float_type_mask | uint_type_mask | bin_type_mask)); + auto et = ov::element::Type(data_type); + OPENVINO_ASSERT(et.bitwidth() >= 8, "[GPU] Unexpected data_type_traits::size_of call for type with bitwidth < 8 (", et.get_type_name(), ")"); + return et.size(); } static bool is_floating_point(data_types data_type) { - return (static_cast(data_type) & float_type_mask) != 0; + return ov::element::Type(data_type).is_real(); } static bool is_i8_u8(data_types data_type) { - return data_type == data_types::i8 || data_type == data_types::u8; + auto et = ov::element::Type(data_type); + return et.is_quantized() && et.bitwidth() == 8; } static size_t align_of(data_types data_type) { switch (data_type) { - case data_types::bin: - return alignof(data_type_to_type::type); + case data_types::u1: + return alignof(data_type_to_type::type); case data_types::i8: return alignof(data_type_to_type::type); case data_types::u8: @@ -119,32 +94,14 @@ struct data_type_traits { } static std::string name(data_types data_type) { - switch (data_type) { - case data_types::bin: - return "bin"; - case data_types::i8: - return "i8"; - case data_types::u8: - return "u8"; - case data_types::i32: - return "i32"; - case data_types::i64: - return "i64"; - case data_types::f16: - return "f16"; - case data_types::f32: - return "f32"; - default: - assert(0); - return "unknown (" + std::to_string(typename std::underlying_type::type(data_type)) + ")"; - } + return ov::element::Type(data_type).get_type_name(); } static data_types max_type(data_types dt1, data_types dt2) { - if (dt1 == data_types::bin) + if (dt1 == data_types::u1) return dt2; - if (dt2 == data_types::bin) + if (dt2 == data_types::u1) return dt1; if (size_of(dt1) < size_of(dt2)) @@ -160,7 +117,7 @@ struct data_type_traits { } static bool is_quantized(data_types dt) { - return dt == data_types::u8 || dt == data_types::i8; + return is_i8_u8(dt); } template @@ -206,13 +163,7 @@ struct data_type_traits { }; inline ::std::ostream& operator<<(::std::ostream& os, const data_types& dt) { - return os << data_type_traits::name(dt); -} - -/// Helper function to check if C++ type matches @p data_type. -template -bool data_type_match(data_types data_type) { - return data_type == type_to_data_type::value; + return os << ov::element::Type(dt); } inline data_types element_type_to_data_type(ov::element::Type t) { @@ -237,33 +188,12 @@ inline data_types element_type_to_data_type(ov::element::Type t) { case ov::element::Type_t::boolean: return cldnn::data_types::u8; case ov::element::Type_t::u1: - return cldnn::data_types::bin; + return cldnn::data_types::u1; default: throw std::runtime_error("Can't convert " + t.get_type_name() + " element type"); } } -inline ov::element::Type data_type_to_element_type(data_types t) { - switch (t) { - case cldnn::data_types::f32: - return ov::element::Type_t::f32; - case cldnn::data_types::f16: - return ov::element::Type_t::f16; - case cldnn::data_types::u8: - return ov::element::Type_t::u8; - case cldnn::data_types::i8: - return ov::element::Type_t::i8; - case cldnn::data_types::i32: - return ov::element::Type_t::i32; - case cldnn::data_types::i64: - return ov::element::Type_t::i64; - case cldnn::data_types::bin: - return ov::element::Type_t::u1; - default: - throw std::runtime_error("Can't convert " + data_type_traits::name(t) + " precision"); - } -} - /// Helper function to get both data_types and format::type in a single, unique value. 
Useable in 'case' statement. constexpr auto fuse(data_types dt, cldnn::format::type fmt) -> decltype(static_cast::type>(dt) | static_cast::type>(fmt)) { @@ -425,7 +355,7 @@ struct layout { layout(const layout& other) = default; layout() - : data_type(cldnn::data_types::bin) + : data_type(cldnn::data_types::undefined) , format(cldnn::format::any) , data_padding(padding()) , size(ov::PartialShape()) { } @@ -489,7 +419,7 @@ struct layout { layout with_padding(padding const& padd) const; /// Data type stored in @ref memory (see. @ref data_types) - data_types data_type; + ov::element::Type_t data_type; /// Format stored in @ref memory (see. @ref format) cldnn::format format; @@ -498,7 +428,7 @@ struct layout { padding data_padding; /// Number of bytes needed to store this layout - size_t bytes_count() const { return data_type_traits::size_of(data_type) * get_linear_size(); } + size_t bytes_count() const { return (ov::element::Type(data_type).bitwidth() * get_linear_size() + 7) >> 3; } size_t get_rank() const; diff --git a/src/plugins/intel_gpu/include/intel_gpu/runtime/memory.hpp b/src/plugins/intel_gpu/include/intel_gpu/runtime/memory.hpp index 62595de3beeb8b..0283cebdf36072 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/runtime/memory.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/runtime/memory.hpp @@ -61,7 +61,7 @@ struct memory { return true; } - if (_bytes_count == (l.data_type == data_types::bin ? ceil_div(l.count(), 32) : l.count()) * data_type_traits::size_of(l.data_type)) { + if (_bytes_count == l.bytes_count()) { return false; } @@ -186,7 +186,7 @@ template inline std::vector read_vector(cldnn::memory::ptr mem, const cldnn::stream& stream) { cldnn::data_types mem_dtype = mem->get_layout().data_type; if (mem_dtype == data_types::f16 || mem_dtype == data_types::f32) { - if (!std::is_floating_point::value && !std::is_same::value) { + if (!std::is_floating_point::value && !std::is_same::value) { OPENVINO_ASSERT(false, "[GPU] read_vector: attempt to convert floating point memory to non-floating point memory"); } } @@ -211,7 +211,7 @@ inline std::vector read_vector(cldnn::memory::ptr mem, const cldnn::stream& s case data_types::f16: { auto p_mem = reinterpret_cast(mem->buffer_ptr()); for (size_t i = 0; i < mem->count(); ++i) { - out_vecs.push_back(static_cast(half_to_float(p_mem[i]))); + out_vecs.push_back(static_cast(ov::float16::from_bits(p_mem[i]))); } break; } @@ -237,7 +237,7 @@ inline std::vector read_vector(cldnn::memory::ptr mem, const cldnn::stream& s break; } case data_types::f16: { - mem_lock lock{mem, stream}; + mem_lock lock{mem, stream}; out_vecs = std::move(std::vector(lock.begin(), lock.end())); break; } diff --git a/src/plugins/intel_gpu/include/intel_gpu/runtime/tensor_accessor.hpp b/src/plugins/intel_gpu/include/intel_gpu/runtime/tensor_accessor.hpp index 1072c1bdf7fe80..fa02095455f879 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/runtime/tensor_accessor.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/runtime/tensor_accessor.hpp @@ -13,9 +13,7 @@ namespace cldnn { inline ov::Tensor make_tensor(const layout& l, void* memory_pointer) { - ov::element::Type et = data_type_to_element_type(l.data_type); - - return ov::Tensor(et, l.get_shape(), memory_pointer); + return ov::Tensor(l.data_type, l.get_shape(), memory_pointer); } struct TensorsContainer final { diff --git a/src/plugins/intel_gpu/src/graph/CMakeLists.txt b/src/plugins/intel_gpu/src/graph/CMakeLists.txt index dd3ddb87d1016e..67a0a3282cda13 100644 --- a/src/plugins/intel_gpu/src/graph/CMakeLists.txt 
+++ b/src/plugins/intel_gpu/src/graph/CMakeLists.txt @@ -62,7 +62,7 @@ ov_install_static_lib(${TARGET_NAME} ${OV_CPACK_COMP_CORE}) if(ENABLE_SSE42) ie_sse42_optimization_flags(sse4_2_flags) - set_source_files_properties(impls/cpu/detection_output.cpp half.cpp PROPERTIES + set_source_files_properties(impls/cpu/detection_output.cpp PROPERTIES COMPILE_FLAGS "${sse4_2_flags}" COMPILE_DEFINITIONS "HAVE_SSE") endif() diff --git a/src/plugins/intel_gpu/src/graph/condition.cpp b/src/plugins/intel_gpu/src/graph/condition.cpp index 38ba63536fbe39..842495e0b24e0d 100644 --- a/src/plugins/intel_gpu/src/graph/condition.cpp +++ b/src/plugins/intel_gpu/src/graph/condition.cpp @@ -85,7 +85,7 @@ bool condition_inst::get_pred_from_memory(memory::ptr mem, stream& stream) { case cldnn::data_types::f32: return convert_data(mem, stream); case cldnn::data_types::f16: - return convert_data(mem, stream); + return convert_data(mem, stream); case cldnn::data_types::i64: return convert_data(mem, stream); case cldnn::data_types::i32: @@ -94,7 +94,7 @@ bool condition_inst::get_pred_from_memory(memory::ptr mem, stream& stream) { return convert_data(mem, stream); case cldnn::data_types::u8: return convert_data(mem, stream); - case cldnn::data_types::bin: + case cldnn::data_types::u1: default: return convert_data(mem, stream); } diff --git a/src/plugins/intel_gpu/src/graph/error_handler.cpp b/src/plugins/intel_gpu/src/graph/error_handler.cpp index 7403a22a1cd27b..7c305e848eb5e4 100644 --- a/src/plugins/intel_gpu/src/graph/error_handler.cpp +++ b/src/plugins/intel_gpu/src/graph/error_handler.cpp @@ -81,8 +81,8 @@ void error_on_mismatching_data_types(const std::string& file, (data_format_1 == data_types::u8 && data_format_2 == data_types::i8))) { std::stringstream error_msg; error_msg << "Data formats are incompatible." << std::endl; - error_msg << data_format_1_id << " format is: " << data_type_traits::name(data_format_1) << ", " - << data_format_2_id << " is: " << data_type_traits::name(data_format_2) << std::endl; + error_msg << data_format_1_id << " format is: " << ov::element::Type(data_format_1) << ", " + << data_format_2_id << " is: " << ov::element::Type(data_format_2) << std::endl; error_msg << "Data formats should be the same!" << std::endl; err_details::cldnn_print_error_message(file, line, instance_id, error_msg, additional_message); } @@ -217,8 +217,8 @@ void error_on_mismatch_layout(const std::string& file, } if (layout_1.data_type != layout_2.data_type) { error_msg << layout_1_id << " data type mismatch: " << layout_2_id << " data type." << std::endl; - error_msg << layout_1_id << " data type: " << data_type_traits::name(layout_1.data_type) << ", " - << layout_2_id << " data type: " << data_type_traits::name(layout_2.data_type) << std::endl; + error_msg << layout_1_id << " data type: " << ov::element::Type(layout_1.data_type) << ", " + << layout_2_id << " data type: " << ov::element::Type(layout_2.data_type) << std::endl; } if (layout_1.format != layout_2.format) { error_msg << layout_1_id << " format mismatch: " << layout_2_id << " format." 
<< std::endl; diff --git a/src/plugins/intel_gpu/src/graph/graph_optimizer/add_required_reorders.cpp b/src/plugins/intel_gpu/src/graph/graph_optimizer/add_required_reorders.cpp index 00e010a6cda424..a60e5a6402dd93 100644 --- a/src/plugins/intel_gpu/src/graph/graph_optimizer/add_required_reorders.cpp +++ b/src/plugins/intel_gpu/src/graph/graph_optimizer/add_required_reorders.cpp @@ -272,10 +272,10 @@ void add_required_reorders::run(program& p) { OPENVINO_ASSERT(correct_layout_selected, "[GPU] No layout format available for ", usr->id(), ", impl_type: ", usr->get_preferred_impl_type(), " (format: ", original_layout.format.to_string(), - ", data_type: ", data_type_traits::name(original_layout.data_type), ") ", + ", data_type: ", ov::element::Type(original_layout.data_type), ") ", "compatible with ", node.first->id(), " (format: ", node.first->get_output_layout().format.to_string(), - ", data_type: ", data_type_traits::name(node.first->get_output_layout().data_type), ")"); + ", data_type: ", ov::element::Type(node.first->get_output_layout().data_type), ")"); } } diff --git a/src/plugins/intel_gpu/src/graph/graph_optimizer/pre_replace_deconv.cpp b/src/plugins/intel_gpu/src/graph/graph_optimizer/pre_replace_deconv.cpp index 32c1c4ba12f0c9..9e096629a22192 100644 --- a/src/plugins/intel_gpu/src/graph/graph_optimizer/pre_replace_deconv.cpp +++ b/src/plugins/intel_gpu/src/graph/graph_optimizer/pre_replace_deconv.cpp @@ -217,7 +217,7 @@ void pre_replace_deconv::run(program& p) { std::vector weights_vec_float; if (weights_data_type == data_types::f16) { - mem_lock src{ weights_node_ptr->as().get_attached_memory_ptr(), stream }; + mem_lock src{ weights_node_ptr->as().get_attached_memory_ptr(), stream }; for (uint32_t i = 0; i < weights_layout.count(); i++) weights_vec_float.push_back(static_cast(src.data()[i])); } else { @@ -236,8 +236,8 @@ void pre_replace_deconv::run(program& p) { subpixel_weights); if (weights_data_type == data_types::f16) { - mem_lock dst{ data_to_allocate, stream}; - program_helpers::set_weights_values(dst.data(), subpixel_weights); + mem_lock dst{ data_to_allocate, stream}; + program_helpers::set_weights_values(dst.data(), subpixel_weights); } else if (weights_data_type == data_types::f32) { mem_lock dst{ data_to_allocate, stream }; program_helpers::set_weights_values(dst.data(), subpixel_weights); @@ -282,7 +282,7 @@ void pre_replace_deconv::run(program& p) { float bias = 0; if (bias_data_type == data_types::f16) { - mem_lock src{ bias_id_node_ptr->as().get_attached_memory_ptr(), stream }; + mem_lock src{ bias_id_node_ptr->as().get_attached_memory_ptr(), stream }; bias = static_cast(src.data()[0]); } else { mem_lock src{ bias_id_node_ptr->as().get_attached_memory_ptr(), stream }; diff --git a/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_primitive_fusing.cpp b/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_primitive_fusing.cpp index 4d89826ac54b04..d50d306700b902 100644 --- a/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_primitive_fusing.cpp +++ b/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_primitive_fusing.cpp @@ -354,14 +354,13 @@ void prepare_primitive_fusing::fuse_bias(program &p) { } case data_types::f16: { cldnn::memory_ptr new_mem = p.get_engine().allocate_memory(original_mem->get_layout()); - mem_lock new_bias_mem(new_mem, p.get_stream()); - mem_lock original_bias_mem(original_mem, p.get_stream()); - mem_lock second_bias_mem(second_mem, p.get_stream()); - uint16_t* original_data = original_bias_mem.data(); - uint16_t* new_data = 
second_bias_mem.data(); + mem_lock new_bias_mem(new_mem, p.get_stream()); + mem_lock original_bias_mem(original_mem, p.get_stream()); + mem_lock second_bias_mem(second_mem, p.get_stream()); + ov::float16* original_data = original_bias_mem.data(); + ov::float16* new_data = second_bias_mem.data(); for (size_t i = 0; i < original_bias_mem.size(); i++) { - float new_val = half_to_float(original_data[i]) + half_to_float(new_data[i]); - new_bias_mem[i] = float_to_half(new_val); + new_bias_mem[i] = original_data[i] + new_data[i]; } original_node.attach_memory(new_mem); @@ -853,7 +852,7 @@ void prepare_primitive_fusing::fuse_simple_primitives(program &p) { auto& input_lo = quantize_node.get_dependency(1); auto& input_hi = quantize_node.get_dependency(2); bool should_fuse = input_data.is_type() && - ((out_dt == data_types::bin && + ((out_dt == data_types::u1 && quantize_node.get_dependencies().size() == 5 && ((in_layout.feature() == input_lo.get_output_layout().feature() && in_layout.feature() == input_hi.get_output_layout().feature()) || diff --git a/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_quantization.cpp b/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_quantization.cpp index ba09b64676e5c5..d1b125aa8f1df5 100644 --- a/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_quantization.cpp +++ b/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_quantization.cpp @@ -96,7 +96,7 @@ void prepare_quantization::prepare_scale_shift_opt(program &p, quantize_node& qu auto lock_memory = [&stream] (memory::ptr memory, std::function& set_data, std::function& get_data) { using float_mem_lock = mem_lock; - using uint16_t_mem_lock = mem_lock; + using float16_mem_lock = mem_lock; switch (memory->get_layout().data_type) { case data_types::f32: { std::shared_ptr data_lock_ptr = std::make_shared(memory, stream); @@ -107,18 +107,18 @@ void prepare_quantization::prepare_scale_shift_opt(program &p, quantize_node& qu get_data = [data] (size_t idx) { return data[idx]; }; - return std::pair, std::shared_ptr>(data_lock_ptr, nullptr); + return std::pair, std::shared_ptr>(data_lock_ptr, nullptr); } case data_types::f16: { - std::shared_ptr data_lock_ptr = std::make_shared(memory, stream); - uint16_t* data = data_lock_ptr->data(); + std::shared_ptr data_lock_ptr = std::make_shared(memory, stream); + ov::float16* data = data_lock_ptr->data(); set_data = [data] (size_t idx, float value) { - data[idx] = float_to_half(value); + data[idx] = ov::float16(value); }; get_data = [data] (size_t idx) { - return half_to_float(data[idx]); + return static_cast(data[idx]); }; - return std::pair, std::shared_ptr>(nullptr, data_lock_ptr); + return std::pair, std::shared_ptr>(nullptr, data_lock_ptr); } default: throw std::runtime_error("prepare_quantization: Unsupported precision of quantize output values"); @@ -358,7 +358,7 @@ void prepare_quantization::prepare_packed_quantize(program& p, quantize_node& qu auto output_dt = quantize_node.get_output_layout().data_type; if (is_binarization) { - output_dt = data_types::bin; + output_dt = data_types::u1; } quantize_node.typed_desc()->output_data_types = {optional_data_type{output_dt}}; diff --git a/src/plugins/intel_gpu/src/graph/graph_optimizer/reorder_inputs.cpp b/src/plugins/intel_gpu/src/graph/graph_optimizer/reorder_inputs.cpp index ab33b111ebbebb..094e645a69e05f 100644 --- a/src/plugins/intel_gpu/src/graph/graph_optimizer/reorder_inputs.cpp +++ b/src/plugins/intel_gpu/src/graph/graph_optimizer/reorder_inputs.cpp @@ -755,7 +755,7 @@ void 
reorder_inputs::run(program& p, layout_optimizer& lo, reorder_factory& rf) auto& input = binary_conv_node.input(); auto input_layout = input.get_output_layout(); auto new_layout = input_layout; - new_layout.data_type = data_types::bin; + new_layout.data_type = data_types::u1; auto reorder = rf.get_reorder(input.id(), input_layout, new_layout); @@ -766,7 +766,7 @@ void reorder_inputs::run(program& p, layout_optimizer& lo, reorder_factory& rf) auto& weights = binary_conv_node.weights(); auto weights_layout = weights.get_output_layout(); if (!weights.is_type() && !weights.is_constant()) { - auto new_layout = layout{ weights_layout.get_partial_shape(), data_types::bin, format::b_fs_yx_32fp }; + auto new_layout = layout{ weights_layout.get_partial_shape(), data_types::u1, format::b_fs_yx_32fp }; auto reorder = rf.get_reorder(weights.id(), weights_layout, new_layout); if (reorder.first) { p.add_intermediate(reorder.first, binary_conv_node, 1, !reorder.second); diff --git a/src/plugins/intel_gpu/src/graph/half.cpp b/src/plugins/intel_gpu/src/graph/half.cpp deleted file mode 100644 index 2d0ae76e803ea0..00000000000000 --- a/src/plugins/intel_gpu/src/graph/half.cpp +++ /dev/null @@ -1,165 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include - -#ifdef HAVE_SSE -#include -#else -#include "openvino/core/type/float16.hpp" -#endif // HAVE_SSE - -#include "intel_gpu/runtime/half.hpp" - -namespace cldnn { - -#ifdef HAVE_SSE - -float half_to_float(uint16_t value) { - static const uint32_t FLOAT16_EXP_SHIFT = (23 - 10); - static const uint32_t FLOAT16_EXP_MASK = 0x7C00; - static const uint32_t FLOAT32_EXP_MASK = 0x7F800000; - static const uint32_t FLOAT16_MANTISSA_MASK = 0x03FF; - static const uint32_t FLOAT16_TO_32_BIAS_DIFF_DENORM = - ((127 - 15 - 10) - << 23); // The difference is (127-15) but we want to do the calculation in the exp place (bit 23:32) - static const uint32_t FLOAT16_TO_32_BIAS_DIFF = ((127 - 15) << 10); - static const uint32_t FLOAT16_IMPLICIT_1 = (1 << 10); - static const uint32_t FLOAT16_EXP_MIN = (1 << 10); - static const uint32_t FLOAT16_SIGN_MASK = 0x8000; - __m128i a = _mm_unpacklo_epi16(_mm_set1_epi16(value), _mm_setzero_si128()); - __m128i exps = _mm_and_si128(_mm_set1_epi32(FLOAT16_EXP_MASK), a); // Mask the exponents - __m128i mantissa = _mm_and_si128(_mm_set1_epi32(FLOAT16_MANTISSA_MASK), a); // Mask the mantissa - __m128i signs = _mm_and_si128(_mm_set1_epi32(FLOAT16_SIGN_MASK), a); - signs = _mm_slli_epi32(signs, 16); - - __m128i nans = _mm_cmpeq_epi32(exps, _mm_set1_epi32(FLOAT16_EXP_MASK)); - nans = _mm_and_si128(nans, _mm_set1_epi32(FLOAT32_EXP_MASK)); - nans = _mm_or_si128(nans, signs); - - __m128i subnormals = _mm_cmpeq_epi32(exps, _mm_setzero_si128()); - - int out32; - // e\m| 0 | 1 - // ------------ - // 0 | 0 | S - // ------------ - // 1 | N | N - // - // The expression: (~exp) & mantissa, will evaluate to 0 exactly when the number is non subnormal or it's zero (just - // like in the table) testz Tests for this condition - if (_mm_testz_si128(subnormals, mantissa)) { - __m128i tmp; - exps = _mm_add_epi32(exps, _mm_set1_epi32(FLOAT16_TO_32_BIAS_DIFF)); - tmp = _mm_or_si128(exps, mantissa); - tmp = _mm_slli_epi32(tmp, FLOAT16_EXP_SHIFT); - tmp = _mm_blendv_epi8( - tmp, - _mm_setzero_si128(), - subnormals); // The idea is of course to use blendv_ps, but epi8 will work the same and won't switch stack - tmp = _mm_or_si128(tmp, nans); - out32 = _mm_extract_epi32(tmp, 0); - } else { - __m128i normals = 
_mm_andnot_si128(subnormals, _mm_set1_epi32(FLOAT16_IMPLICIT_1)); // Mark all normal numbers - mantissa = _mm_or_si128(mantissa, normals); // Apply implicit bit - exps = _mm_max_epi16( - exps, - _mm_set1_epi32( - FLOAT16_EXP_MIN)); // All subnormals will have 1 in the exponent (needed for correct bias computation) - exps = _mm_slli_epi32(exps, FLOAT16_EXP_SHIFT); - exps = _mm_add_epi32(exps, _mm_set1_epi32(FLOAT16_TO_32_BIAS_DIFF_DENORM)); - __m128 tmp; - tmp = _mm_mul_ps(_mm_castsi128_ps(exps), _mm_cvtepi32_ps(mantissa)); - tmp = _mm_or_ps(tmp, _mm_castsi128_ps(nans)); - out32 = _mm_extract_ps(tmp, 0); - } - - float outf32 = *reinterpret_cast(&out32); - return outf32; -} - -uint16_t float_to_half(float value) { -#define TO_M128i(a) (*reinterpret_cast<__m128i*>(&(a))) -#define TO_M128(a) (*const_cast<__m128*>(reinterpret_cast(&(a)))) - - static const uint32_t DWORD_SIGNMASK = 0x80000000; - static const uint32_t DWORD_MINFP16 = 0x38800000; - static const uint32_t DWORD_MAXFP16 = 0x477fe000; - static const uint32_t DWORD_FP16_2_POW_10 = (1 << 10); - static const uint32_t DWORD_FP16_EXPBIAS_NO_HALF = 0xc8000000; - static const uint32_t WORD_MAXFP16 = 0x7BFF; - - static const __m128i IVec4SignMask = _mm_set1_epi32(DWORD_SIGNMASK); - static const __m128i IVec4MinNormalFp16 = _mm_set1_epi32(DWORD_MINFP16); - static const __m128i IVec4MaxNormalFp16 = _mm_set1_epi32(DWORD_MAXFP16); - static const __m128i IVec4OnePow10 = _mm_set1_epi32(DWORD_FP16_2_POW_10); - static const __m128i IVec4ExpBiasFp16 = _mm_set1_epi32(DWORD_FP16_EXPBIAS_NO_HALF); - static const __m128i IVec4MaxFp16InWords = _mm_set1_epi32(WORD_MAXFP16); - - static const __m128 FVec4MaxNormalFp16 = TO_M128(IVec4MaxNormalFp16); - static const __m128 FVec4MinNormalFp16 = TO_M128(IVec4MinNormalFp16); - static const __m128i IVec4InfF32 = _mm_set1_epi32(0x7f800000); // inf in in hex representation - static const __m128i IVec4InfF16 = _mm_set1_epi32(0x00007c00); - - static const __m128 FVec4MaxFp16InWords = TO_M128(IVec4MaxFp16InWords); - - __m128 Src = _mm_set1_ps(value); - - // Remove the sign bit from the source - __m128 AbsSrc = _mm_andnot_ps(TO_M128(IVec4SignMask), Src); - - // Create a mask to identify the DWORDs that are smaller than the minimum normalized fp16 number - __m128 CmpToMinFp16Mask = _mm_cmplt_ps(AbsSrc, FVec4MinNormalFp16); - - // Create a mask to identify the DWORDs that are larger than the maximum normalized fp16 number - __m128 CmpToMaxFp16Mask = _mm_cmpgt_ps(AbsSrc, FVec4MaxNormalFp16); - __m128i CmpToInfMask = _mm_cmpeq_epi32(TO_M128i(AbsSrc), IVec4InfF32); - // Create a mask with the minimum normalized fp16 number in the DWORDs that are smaller than it - __m128 MaskOfMinFp16 = _mm_and_ps(CmpToMinFp16Mask, FVec4MinNormalFp16); - - __m128i MaskOf2POW10 = _mm_and_si128(TO_M128i(CmpToMinFp16Mask), IVec4OnePow10); - __m128 ResultPS = _mm_add_ps(AbsSrc, MaskOfMinFp16); - __m128i Result = TO_M128i(ResultPS); - - // We need to move from a 127 biased domain to a 15 biased domain. This means subtracting 112 from the exponent. We - // will add '-112' to the exponent but since the exponent is shifted 23 bits to the left we need to shift '-112' 23 - // bits to the left as well. This gives us 0xC8000000. We are going to shift the mantissa 13 bits to the right - // (moving from 23 bits mantissa to 10). 
- Result = _mm_add_epi32(Result, IVec4ExpBiasFp16); - - // Shift the mantissa to go from 23 bits to 10 bits - Result = _mm_srli_epi32(Result, 13); - - Result = _mm_sub_epi16(Result, MaskOf2POW10); - - ResultPS = _mm_blendv_ps(TO_M128(Result), FVec4MaxFp16InWords, CmpToMaxFp16Mask); - Result = TO_M128i(ResultPS); - // infinity preserving blending - Result = _mm_blendv_epi8(Result, IVec4InfF16, CmpToInfMask); - - __m128i iPackedResult = _mm_packs_epi32(Result, Result); - - // iSignMask = mask of the sign bits of the source 4 dwords - __m128i iSignMask = _mm_and_si128(TO_M128i(Src), IVec4SignMask); - - // Pack the sign mask to 4 words - __m128i iSignInWords = _mm_packs_epi32(iSignMask, iSignMask); - - iPackedResult = _mm_or_si128(iPackedResult, iSignInWords); - return (uint16_t)_mm_extract_epi16(iPackedResult, 0); -} - -#else - -float half_to_float(uint16_t value) { - return ov::float16(value); -} - -uint16_t float_to_half(float value) { - return ov::float16(value); -} - -#endif // HAVE_SSE - -} // namespace cldnn diff --git a/src/plugins/intel_gpu/src/graph/impls/cpu/activation.cpp b/src/plugins/intel_gpu/src/graph/impls/cpu/activation.cpp index 0caa272a09a7d9..57c0f057455ba7 100644 --- a/src/plugins/intel_gpu/src/graph/impls/cpu/activation.cpp +++ b/src/plugins/intel_gpu/src/graph/impls/cpu/activation.cpp @@ -110,7 +110,7 @@ struct activation_impl : public typed_primitive_impl { // Most of the evaluate functions expect same data type for all inputs, so we need to convert params from float typename data_type_to_type
::type param_a = static_cast::type>(additional_params.a); - auto input_dt = data_type_to_element_type(instance.get_input_layout().data_type); + auto input_dt = instance.get_input_layout().data_type; if (activation_function == activation_func::pow) { input_host_tensors.push_back(ov::Tensor(input_dt, {}, ¶m_a)); diff --git a/src/plugins/intel_gpu/src/graph/impls/cpu/proposal.cpp b/src/plugins/intel_gpu/src/graph/impls/cpu/proposal.cpp index 154467aa4c1f93..461035c1defd75 100644 --- a/src/plugins/intel_gpu/src/graph/impls/cpu/proposal.cpp +++ b/src/plugins/intel_gpu/src/graph/impls/cpu/proposal.cpp @@ -49,11 +49,11 @@ struct proposal_t { inline float float_read_helper(const float* mem) { return *mem; } -inline float float_read_helper(const half_t* mem) { return static_cast(*mem); } +inline float float_read_helper(const ov::float16* mem) { return static_cast(*mem); } inline void float_write_helper(float* mem, float f) { *mem = f; } -inline void float_write_helper(half_t* mem, float f) { *mem = static_cast(f); } +inline void float_write_helper(ov::float16* mem, float f) { *mem = static_cast(f); } /**************************************************************************** * * diff --git a/src/plugins/intel_gpu/src/graph/impls/cpu/range.cpp b/src/plugins/intel_gpu/src/graph/impls/cpu/range.cpp index a959d1903cfdd0..99cdeebea2e00e 100644 --- a/src/plugins/intel_gpu/src/graph/impls/cpu/range.cpp +++ b/src/plugins/intel_gpu/src/graph/impls/cpu/range.cpp @@ -66,7 +66,7 @@ struct range_impl : public typed_primitive_impl { const auto output_dt = params->get_output_layout().data_type; op = std::make_shared(); - op->set_output_type(data_type_to_element_type(output_dt)); + op->set_output_type(output_dt); } OPENVINO_ASSERT(op->evaluate(output_host_tensors, input_host_tensors), diff --git a/src/plugins/intel_gpu/src/graph/impls/cpu/tile.cpp b/src/plugins/intel_gpu/src/graph/impls/cpu/tile.cpp index d045699057f6e2..bfc982aaa634d0 100644 --- a/src/plugins/intel_gpu/src/graph/impls/cpu/tile.cpp +++ b/src/plugins/intel_gpu/src/graph/impls/cpu/tile.cpp @@ -75,7 +75,7 @@ struct tile_impl : public typed_primitive_impl { if (repeats.empty()) OPENVINO_THROW("[GPU] Unexpected configuration of tile impl"); - auto repeats_tensor = ov::Tensor(data_type_to_element_type(data_types::i64), {repeats.size()}, repeats.data()); + auto repeats_tensor = ov::Tensor(ov::element::i64, {repeats.size()}, repeats.data()); input_host_tensors.push_back(repeats_tensor); } diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/binary_convolution.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/binary_convolution.cpp index 48bc709ce30a66..41ad3b6d92d3fc 100644 --- a/src/plugins/intel_gpu/src/graph/impls/ocl/binary_convolution.cpp +++ b/src/plugins/intel_gpu/src/graph/impls/ocl/binary_convolution.cpp @@ -77,7 +77,7 @@ namespace detail { attach_binary_convolution_impl::attach_binary_convolution_impl() { implementation_map::add(impl_types::ocl, typed_primitive_impl_ocl::create, { - std::make_tuple(data_types::bin, format::b_fs_yx_32fp), + std::make_tuple(data_types::u1, format::b_fs_yx_32fp), }); } diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/kernel_selector_helper.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/kernel_selector_helper.cpp index 99462d8c813e64..12e7d1b28b93ee 100644 --- a/src/plugins/intel_gpu/src/graph/impls/ocl/kernel_selector_helper.cpp +++ b/src/plugins/intel_gpu/src/graph/impls/ocl/kernel_selector_helper.cpp @@ -121,7 +121,7 @@ namespace cldnn { kernel_selector::data_type to_data_type(data_types dt) { switch 
(dt) { - case cldnn::data_types::bin: + case cldnn::data_types::u1: return kernel_selector::data_type::BINARY; case cldnn::data_types::i8: return kernel_selector::data_type::INT8; @@ -143,7 +143,7 @@ kernel_selector::data_type to_data_type(data_types dt) { data_types from_data_type(kernel_selector::data_type dt) { switch (dt) { case kernel_selector::data_type::BINARY: - return cldnn::data_types::bin; + return cldnn::data_types::u1; case kernel_selector::data_type::INT8: return cldnn::data_types::i8; case kernel_selector::data_type::UINT8: @@ -163,7 +163,7 @@ data_types from_data_type(kernel_selector::data_type dt) { kernel_selector::weights_type to_weights_type(data_types dt) { switch (dt) { - case cldnn::data_types::bin: + case cldnn::data_types::u1: return kernel_selector::weights_type::BINARY; case cldnn::data_types::i8: return kernel_selector::weights_type::INT8; @@ -183,7 +183,7 @@ kernel_selector::weights_type to_weights_type(data_types dt) { data_types from_weights_type(kernel_selector::weights_type dt) { switch (dt) { case kernel_selector::weights_type::BINARY: - return data_types::bin; + return data_types::u1; case kernel_selector::weights_type::INT8: return data_types::i8; case kernel_selector::weights_type::UINT8: diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/kernel_selector_helper.h b/src/plugins/intel_gpu/src/graph/impls/ocl/kernel_selector_helper.h index 4fa1181aaa7378..6d72f3c2c00c7a 100644 --- a/src/plugins/intel_gpu/src/graph/impls/ocl/kernel_selector_helper.h +++ b/src/plugins/intel_gpu/src/graph/impls/ocl/kernel_selector_helper.h @@ -82,18 +82,21 @@ using params = kernel_selector::Params; using weights_reorder_params = kernel_selector::WeightsReorderParams; } // namespace kernel_selector - +namespace ov { +namespace element { +enum class Type_t; +} // namespaec element +} // namespaec ov namespace cldnn { -enum class data_types : size_t; struct format; struct layout; struct program; struct fused_primitive_desc; -kernel_selector::data_type to_data_type(data_types dt); -data_types from_data_type(kernel_selector::data_type dt); -kernel_selector::weights_type to_weights_type(data_types dt); -data_types from_weights_type(kernel_selector::weights_type dt); +kernel_selector::data_type to_data_type(ov::element::Type_t dt); +ov::element::Type_t from_data_type(kernel_selector::data_type dt); +kernel_selector::weights_type to_weights_type(ov::element::Type_t dt); +ov::element::Type_t from_weights_type(kernel_selector::weights_type dt); kernel_selector::data_layout to_data_layout(format f); cldnn::format from_data_layout(kernel_selector::data_layout l); kernel_selector::weights_layout to_weights_layout(format f, bool is_grouped); diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/non_max_suppression.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/non_max_suppression.cpp index e444462918c448..7405729120bfbd 100644 --- a/src/plugins/intel_gpu/src/graph/impls/ocl/non_max_suppression.cpp +++ b/src/plugins/intel_gpu/src/graph/impls/ocl/non_max_suppression.cpp @@ -162,8 +162,8 @@ struct non_max_suppression_impl : typed_primitive_impl_ocl auto& stream = node.get_program().get_stream(); switch (mem->get_layout().data_type) { case data_types::f16: { - mem_lock lock(mem, stream); - auto mem_value = static_cast(lock.data()); + mem_lock lock(mem, stream); + auto mem_value = static_cast(lock.data()); retValue = static_cast(*mem_value); } break; case data_types::f32: { diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/quantize.cpp 
b/src/plugins/intel_gpu/src/graph/impls/ocl/quantize.cpp index 2d4ad3ac69a199..ef3ea9b4316dc1 100644 --- a/src/plugins/intel_gpu/src/graph/impls/ocl/quantize.cpp +++ b/src/plugins/intel_gpu/src/graph/impls/ocl/quantize.cpp @@ -91,7 +91,7 @@ struct quantize_impl : typed_primitive_impl_ocl { void update_dispatch_data(const kernel_impl_params& impl_param) override { auto quantize_params = get_default_params(impl_param); const auto& output_layout = impl_param.get_output_layout(); - quantize_params.packed_binary_output = output_layout.data_type == data_types::bin; + quantize_params.packed_binary_output = output_layout.data_type == data_types::u1; (_kernel_data.update_dispatch_data_func)(quantize_params, _kernel_data); } }; diff --git a/src/plugins/intel_gpu/src/graph/include/loop_inst.h b/src/plugins/intel_gpu/src/graph/include/loop_inst.h index 3054d6abcbde99..5d7dd710892181 100644 --- a/src/plugins/intel_gpu/src/graph/include/loop_inst.h +++ b/src/plugins/intel_gpu/src/graph/include/loop_inst.h @@ -89,7 +89,7 @@ struct typed_program_node : public typed_program_node_base { break; } default: - throw std::runtime_error("Invalid data type : " + data_type_traits::name(prim_layout.data_type)); + throw std::runtime_error("Invalid data type : " + ov::element::Type(prim_layout.data_type).get_type_name()); } return trip_count; } @@ -129,7 +129,7 @@ struct typed_program_node : public typed_program_node_base { break; } default: - throw std::runtime_error("Invalid data type : " + data_type_traits::name(prim_layout.data_type)); + throw std::runtime_error("Invalid data type : " + ov::element::Type(prim_layout.data_type).get_type_name()); } } diff --git a/src/plugins/intel_gpu/src/graph/include/memory_accessor.hpp b/src/plugins/intel_gpu/src/graph/include/memory_accessor.hpp index d18f0f82983c88..b7ecf905c6dd3f 100644 --- a/src/plugins/intel_gpu/src/graph/include/memory_accessor.hpp +++ b/src/plugins/intel_gpu/src/graph/include/memory_accessor.hpp @@ -59,7 +59,7 @@ struct MemoryAccessor : public ov::ITensorAccessor { const auto t_iter = m_ptrs->find(port); if (t_iter != m_ptrs->cend()) { m_accessed_data = t_iter->second; - return {data_type_to_element_type(m_accessed_data->get_layout().data_type), + return {m_accessed_data->get_layout().data_type, m_accessed_data->get_layout().get_shape(), m_accessed_data->lock(m_stream, mem_lock_type::read)}; } else if (m_clbk) { diff --git a/src/plugins/intel_gpu/src/graph/include/quantize_inst.h b/src/plugins/intel_gpu/src/graph/include/quantize_inst.h index ef3be9ba34cb6c..8d213f68bc3f29 100644 --- a/src/plugins/intel_gpu/src/graph/include/quantize_inst.h +++ b/src/plugins/intel_gpu/src/graph/include/quantize_inst.h @@ -142,7 +142,7 @@ struct typed_program_node : public typed_program_node_base { program_node& input(size_t index = 0) const { return get_dependency(index); } int get_levels() const { return get_primitive()->levels; } - bool get_packed_binary_output() const { return get_output_layout().data_type == data_types::bin; } + bool get_packed_binary_output() const { return get_output_layout().data_type == data_types::u1; } bool get_scale_shift_opt() const { return get_primitive()->scale_shift_opt; } bool get_need_pre_shift() const { return get_primitive()->need_pre_shift; } bool get_need_post_scale() const { return get_primitive()->need_post_scale; } diff --git a/src/plugins/intel_gpu/src/graph/include/to_string_utils.h b/src/plugins/intel_gpu/src/graph/include/to_string_utils.h index fb135b06d86e23..550869277c81dc 100644 --- 
a/src/plugins/intel_gpu/src/graph/include/to_string_utils.h +++ b/src/plugins/intel_gpu/src/graph/include/to_string_utils.h @@ -24,7 +24,7 @@ namespace cldnn { inline std::string bool_to_str(bool cond) { return cond ? "true" : "false"; } inline std::string dt_to_str(data_types dt) { - return data_type_traits::name(dt); + return ov::element::Type(dt).to_string(); } inline std::string fmt_to_str(format fmt) { diff --git a/src/plugins/intel_gpu/src/graph/network.cpp b/src/plugins/intel_gpu/src/graph/network.cpp index ea2225b48844d1..51b988076d18f3 100644 --- a/src/plugins/intel_gpu/src/graph/network.cpp +++ b/src/plugins/intel_gpu/src/graph/network.cpp @@ -14,7 +14,6 @@ #include "intel_gpu/runtime/event.hpp" #include "intel_gpu/runtime/stream.hpp" #include "intel_gpu/runtime/debug_configuration.hpp" -#include "intel_gpu/runtime/half.hpp" #include "intel_gpu/runtime/itt.hpp" #include "intel_gpu/graph/program.hpp" @@ -140,7 +139,7 @@ float convert_element(int32_t i) { return static_cast(i); } float convert_element(float f) { return f; } -float convert_element(half_t h) { return half_to_float(h); } +float convert_element(ov::float16 h) { return static_cast(h); } size_t get_x_pitch(const layout& layout) { try { @@ -266,8 +265,8 @@ void log_memory_to_file(memory::ptr mem, layout data_layout, stream& stream, std if (mem_dt == cldnn::data_types::f32) dump(actual_mem, stream, file_stream, dump_raw); else if (mem_dt == cldnn::data_types::f16) - dump(actual_mem, stream, file_stream, dump_raw); - else if (mem_dt == cldnn::data_types::bin) + dump(actual_mem, stream, file_stream, dump_raw); + else if (mem_dt == cldnn::data_types::u1) dump(actual_mem, stream, file_stream, dump_raw); else if (mem_dt == cldnn::data_types::i64) dump(actual_mem, stream, file_stream, dump_raw); @@ -311,7 +310,7 @@ static uint32_t get_unique_net_id() { static std::string get_file_path_for_binary_dump(cldnn::layout layout, std::string name) { std::string filename; - std::string data_type = data_type_traits::name(layout.data_type); + std::string data_type = ov::element::Type(layout.data_type).get_type_name(); std::string format = layout.format.to_string(); std::string tensor; auto dims = layout.get_dims(); diff --git a/src/plugins/intel_gpu/src/graph/quantize.cpp b/src/plugins/intel_gpu/src/graph/quantize.cpp index f0872e8bdc6ed0..dde4aa4b7ec8af 100644 --- a/src/plugins/intel_gpu/src/graph/quantize.cpp +++ b/src/plugins/intel_gpu/src/graph/quantize.cpp @@ -22,7 +22,7 @@ layout quantize_inst::calc_output_layout(quantize_node const& node, kernel_impl_ if (desc->output_data_types[0]) out_dt = *desc->output_data_types[0]; - if (out_dt == data_types::bin) { + if (out_dt == data_types::u1) { output_format = format::b_fs_yx_32fp; } @@ -37,7 +37,7 @@ std::vector quantize_inst::calc_output_layouts(quantize_node const&, ker auto output_format = input_layout.format; auto out_dt = desc->output_data_types[0].value_or(input_layout.data_type); - if (out_dt == data_types::bin) { + if (out_dt == data_types::u1) { output_format = format::b_fs_yx_32fp; } diff --git a/src/plugins/intel_gpu/src/graph/range.cpp b/src/plugins/intel_gpu/src/graph/range.cpp index 0b57793bb6650a..42544de1bcd17b 100644 --- a/src/plugins/intel_gpu/src/graph/range.cpp +++ b/src/plugins/intel_gpu/src/graph/range.cpp @@ -30,7 +30,7 @@ std::vector range_inst::calc_output_layouts(range_node const& /*node*/, auto output_data_type = desc->output_data_types[0].value_or(impl_param.get_input_layout().data_type); ov::op::v4::Range op; - 
op.set_output_type(data_type_to_element_type(output_data_type)); + op.set_output_type(output_data_type); std::vector output_shapes = {ShapeType::dynamic(1)}; std::vector input_shapes = {ov::Shape(), ov::Shape(), ov::Shape()}; @@ -63,7 +63,7 @@ std::string range_inst::to_string(range_node const& node) { auto node_info = node.desc_to_json(); json_composite op_info; - op_info.add("output_type", data_type_traits::name(desc->output_layout.data_type)); + op_info.add("output_type", ov::element::Type(desc->output_layout.data_type)); node_info->add("range info", std::move(op_info)); return lexical_cast(*node_info); diff --git a/src/plugins/intel_gpu/src/graph/tile.cpp b/src/plugins/intel_gpu/src/graph/tile.cpp index c10c06377eaa7f..13d6eba68d90e1 100644 --- a/src/plugins/intel_gpu/src/graph/tile.cpp +++ b/src/plugins/intel_gpu/src/graph/tile.cpp @@ -54,7 +54,7 @@ std::vector tile_inst::calc_output_layouts(tile_node const& /*node*/, co auto repeats = desc->repeats; const auto data_accessor = MemoryAccessor(&impl_param.memory_deps, impl_param.prog->get_stream(), [&repeats, &repeats_shape](size_t port) { - return (port == 1 && repeats.data()) ? ov::Tensor(data_type_to_element_type(data_types::i64), + return (port == 1 && repeats.data()) ? ov::Tensor(ov::element::i64, repeats_shape.to_shape(), repeats.data()) : ov::Tensor(); diff --git a/src/plugins/intel_gpu/src/graph/topology.cpp b/src/plugins/intel_gpu/src/graph/topology.cpp index 77d097f5ebc5b7..053cb78cbea15b 100644 --- a/src/plugins/intel_gpu/src/graph/topology.cpp +++ b/src/plugins/intel_gpu/src/graph/topology.cpp @@ -34,7 +34,7 @@ void topology::change_input_layout(const primitive_id& id, const layout& new_lay throw std::invalid_argument("Unknown format of layout."); if (new_layout.data_type != data_types::f16 && new_layout.data_type != data_types::f32 && - new_layout.data_type != data_types::i8 && new_layout.data_type != data_types::bin && + new_layout.data_type != data_types::i8 && new_layout.data_type != data_types::u1 && new_layout.data_type != data_types::u8 && new_layout.data_type != data_types::i32 && new_layout.data_type != data_types::i64) throw std::invalid_argument("Unknown data_type of layout."); diff --git a/src/plugins/intel_gpu/src/plugin/graph.cpp b/src/plugins/intel_gpu/src/plugin/graph.cpp index e4fe70d52b54b4..e152e0367fea8c 100644 --- a/src/plugins/intel_gpu/src/plugin/graph.cpp +++ b/src/plugins/intel_gpu/src/plugin/graph.cpp @@ -281,7 +281,7 @@ std::shared_ptr Graph::get_runtime_model(std::vector return_node; @@ -322,12 +322,12 @@ std::shared_ptr Graph::get_runtime_model(std::vectorset_friendly_name(layerName + "_result"); std::map info; - info[ov::exec_model_info::OUTPUT_PRECISIONS] = cldnn::data_type_to_element_type(prim_info.output_layout.data_type).get_type_name(); + info[ov::exec_model_info::OUTPUT_PRECISIONS] = ov::element::Type(prim_info.output_layout.data_type).get_type_name(); info[ov::exec_model_info::LAYER_TYPE] = to_IE_type_name(prim_info.type_id); info[ov::exec_model_info::OUTPUT_LAYOUTS] = prim_info.layout_str; info[ov::exec_model_info::EXECUTION_ORDER] = std::to_string(prim_info.exec_id); info[ov::exec_model_info::IMPL_TYPE] = prim_info.kernel_id; - info[ov::exec_model_info::RUNTIME_PRECISION] = cldnn::data_type_to_element_type(prim_info.runtime_precision).get_type_name(); + info[ov::exec_model_info::RUNTIME_PRECISION] = ov::element::Type(prim_info.runtime_precision).get_type_name(); std::vector originalNames{find_origin_layers(prim_info.original_id)}; for (auto& fused_id : prim_info.c_fused_ids) { diff 
--git a/src/plugins/intel_gpu/src/plugin/sync_infer_request.cpp b/src/plugins/intel_gpu/src/plugin/sync_infer_request.cpp index cb25b369b2e336..5e564f3b9a3ec5 100644 --- a/src/plugins/intel_gpu/src/plugin/sync_infer_request.cpp +++ b/src/plugins/intel_gpu/src/plugin/sync_infer_request.cpp @@ -107,7 +107,7 @@ bool is_convert_required(ov::element::Type src_et, ov::element::Type dst_et) { } void convert_and_copy(const cldnn::memory::ptr src, ov::ITensor const* dst, const cldnn::stream& stream) { - auto src_et = cldnn::data_type_to_element_type(src->get_layout().data_type); + auto src_et = src->get_layout().data_type; auto dst_et = dst->get_element_type(); size_t size = ov::shape_size(dst->get_shape()); @@ -558,7 +558,7 @@ cldnn::event::ptr SyncInferRequest::copy_output_data(cldnn::memory::ptr src, con const auto& layout = src->get_layout(); auto& stream = m_graph->get_network()->get_stream(); - if (is_convert_required(cldnn::data_type_to_element_type(layout.data_type), dst.get_element_type())) { + if (is_convert_required(layout.data_type, dst.get_element_type())) { convert_and_copy(src, &dst, stream); return nullptr; } else { diff --git a/src/plugins/intel_gpu/src/plugin/variable_state.cpp b/src/plugins/intel_gpu/src/plugin/variable_state.cpp index 316fb2253b987e..46a3fdae22b7ca 100644 --- a/src/plugins/intel_gpu/src/plugin/variable_state.cpp +++ b/src/plugins/intel_gpu/src/plugin/variable_state.cpp @@ -18,9 +18,8 @@ VariableState::VariableState(const std::string &name, cldnn::network::VariableSt , m_engine(engine) { auto internal_memory = m_variable_state->memory; auto internal_layout = internal_memory->get_layout(); - auto et = cldnn::data_type_to_element_type(internal_layout.data_type); auto shape = internal_layout.get_shape(); - m_state = ov::make_tensor(et, shape); + m_state = ov::make_tensor(internal_layout.data_type, shape); } void VariableState::reset() { diff --git a/src/plugins/intel_gpu/src/runtime/layout.cpp b/src/plugins/intel_gpu/src/runtime/layout.cpp index 0ca5e60de610ed..99fdadb24cc6bb 100644 --- a/src/plugins/intel_gpu/src/runtime/layout.cpp +++ b/src/plugins/intel_gpu/src/runtime/layout.cpp @@ -30,18 +30,6 @@ std::vector convert_dimensions(const std::vector& sizes, std:: } // namespace -// The definitions below are needed to follow ODR -// Otherwise statements like -// optional_value ov = type_to_data_type::value; -// optional_value ov(type_to_data_type::value); -// violate ODR and leads to undefined behavior -const data_types type_to_data_type::value; -const data_types type_to_data_type::value; -const data_types type_to_data_type::value; -const data_types type_to_data_type::value; -const data_types type_to_data_type::value; -const data_types type_to_data_type::value; - size_t layout::get_rank() const { return format.dimension(); } @@ -189,7 +177,7 @@ std::vector layout::get_dims_order() const { std::string layout::to_string() const { std::stringstream s; s << "\n{\n" - << "\tdata_type=" << data_type_traits::name(data_type) << ";\n" + << "\tdata_type=" << ov::element::Type(data_type) << ";\n" << "\tformat=" << format.to_string() << ";\n" << "\tshape=" << size << ";\n" << "\tpad_l=" << data_padding.lower_size().to_string() << ";\n" @@ -213,7 +201,7 @@ std::string layout::to_short_string() const { } }; - s << data_type_traits::name(data_type) << ":" << format.to_string() << ":"; + s << ov::element::Type(data_type) << ":" << format.to_string() << ":"; dump_shape(s, size); if (data_padding.get_dynamic_pad_dims() != tensor(0)) { @@ -421,7 +409,7 @@ size_t 
layout::get_linear_size() const { static_cast(1), std::multiplies()); - return (this->data_type == data_types::bin) ? ceil_div(total, 32) : total; + return total; } layout layout::with_padding(padding const& padd) const { diff --git a/src/plugins/intel_gpu/tests/unit/dynamic_execution/is_valid_fusion_test.cpp b/src/plugins/intel_gpu/tests/unit/dynamic_execution/is_valid_fusion_test.cpp index eb7ff40c391762..cec00fdfa2594d 100644 --- a/src/plugins/intel_gpu/tests/unit/dynamic_execution/is_valid_fusion_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/dynamic_execution/is_valid_fusion_test.cpp @@ -23,7 +23,7 @@ TEST(eltwise_activation_fusing_test, basic_dynamic_rank4) { layout weight_layout = layout{ov::PartialShape{1, 3, 3, 3}, data_types::f16, format::bfyx}; auto weights = engine.allocate_memory(weight_layout); - set_values(weights, { + set_values(weights, { 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, diff --git a/src/plugins/intel_gpu/tests/unit/dynamic_execution/memory_realloc_test.cpp b/src/plugins/intel_gpu/tests/unit/dynamic_execution/memory_realloc_test.cpp index b1e3e8f41f3717..b81a87650a436c 100644 --- a/src/plugins/intel_gpu/tests/unit/dynamic_execution/memory_realloc_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/dynamic_execution/memory_realloc_test.cpp @@ -27,7 +27,7 @@ TEST(memory_reuse_realloc_reset_test, basic_conv_with_padding) { layout weight_layout = layout{ov::PartialShape{1, 3, 3, 3}, data_types::f16, format::bfyx}; auto weights = engine.allocate_memory(weight_layout); - set_values(weights, { + set_values(weights, { 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, @@ -115,7 +115,7 @@ TEST(memory_reuse_realloc_reset_test, basic_conv_with_padding) { // 0, 0,"0","0","0","0", // !! check pad_after // 0, 0,"0","0","0","0", // !! 
check pad_after auto reorder_mem = network.get_primitive("reorder")->output_memory_ptr(); - cldnn::mem_lock reorder_mem_ptr(reorder_mem, get_test_stream()); + cldnn::mem_lock reorder_mem_ptr(reorder_mem, get_test_stream()); for (size_t i = 26; i < 29; ++i) { ASSERT_EQ((float)reorder_mem_ptr[i], 0.f); } diff --git a/src/plugins/intel_gpu/tests/unit/dynamic_execution/optimized_out_execution_test.cpp b/src/plugins/intel_gpu/tests/unit/dynamic_execution/optimized_out_execution_test.cpp index d5bcddb9f7da39..27df164b5af340 100644 --- a/src/plugins/intel_gpu/tests/unit/dynamic_execution/optimized_out_execution_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/dynamic_execution/optimized_out_execution_test.cpp @@ -38,25 +38,25 @@ TEST(optimized_out_execution_test, concat_blocked_format) { auto input3 = engine.allocate_memory(input3_layout); auto input4 = engine.allocate_memory(input4_layout); - set_values(input1, { + set_values(input1, { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 10.0f, 20.0f, 30.0f, 40.0f, 50.0f, 60.0f, 70.0f, 80.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 10.0f, 20.0f, 30.0f, 40.0f, 50.0f, 60.0f, 70.0f, 80.0f }); - set_values(input2, { + set_values(input2, { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 10.0f, 20.0f, 30.0f, 40.0f, 50.0f, 60.0f, 70.0f, 80.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 10.0f, 20.0f, 30.0f, 40.0f, 50.0f, 60.0f, 70.0f, 80.0f }); - set_values(input3, { + set_values(input3, { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 10.0f, 20.0f, 30.0f, 40.0f, 50.0f, 60.0f, 70.0f, 80.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 10.0f, 20.0f, 30.0f, 40.0f, 50.0f, 60.0f, 70.0f, 80.0f }); - set_values(input4, { + set_values(input4, { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 10.0f, 20.0f, 30.0f, 40.0f, 50.0f, 60.0f, 70.0f, 80.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, @@ -83,7 +83,7 @@ TEST(optimized_out_execution_test, concat_blocked_format) { network.set_input_data("input3", input3); network.set_input_data("input4", input4); - std::vector ref = { + std::vector ref = { 2.0f, 4.0f, 6.0f, 8.0f, 10.0f, 12.0f, 14.0f, 16.0f, 20.0f, 40.0f, 60.0f, 80.0f, 100.0f, 120.0f, 140.0f, 160.0f, 2.0f, 4.0f, 6.0f, 8.0f, 10.0f, 12.0f, 14.0f, 16.0f, @@ -104,7 +104,7 @@ TEST(optimized_out_execution_test, concat_blocked_format) { auto outputs = network.execute(); auto output_mem = outputs.begin()->second.get_memory(); - cldnn::mem_lock output_mem_ptr(output_mem, get_test_stream()); + cldnn::mem_lock output_mem_ptr(output_mem, get_test_stream()); for (size_t i = 0; i < output_mem->get_layout().get_buffer_size().count(); ++i) { ASSERT_EQ(output_mem_ptr[i], ref[i]); diff --git a/src/plugins/intel_gpu/tests/unit/float16.natvis b/src/plugins/intel_gpu/tests/unit/float16.natvis index 03924b67fbd546..de4a2d117066a6 100644 --- a/src/plugins/intel_gpu/tests/unit/float16.natvis +++ b/src/plugins/intel_gpu/tests/unit/float16.natvis @@ -1,7 +1,7 @@ - + +0 -0 +infinity @@ -12,4 +12,4 @@ { (1 << (format.exponent-15)) * (-2*format.sign+1.0f) * (1.0f + format.significand/1024.0f) } - \ No newline at end of file + diff --git a/src/plugins/intel_gpu/tests/unit/fusions/binary_convolution_fusion_test.cpp b/src/plugins/intel_gpu/tests/unit/fusions/binary_convolution_fusion_test.cpp index e852edbe7b894c..5079e06e0e8704 100644 --- a/src/plugins/intel_gpu/tests/unit/fusions/binary_convolution_fusion_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/fusions/binary_convolution_fusion_test.cpp @@ -71,9 +71,9 @@ class BinaryConvolutionFusingTest : public BaseFusingTest { 
cldnn::memory::ptr get_mem(cldnn::layout l) { auto prim = engine.allocate_memory(l); tensor s = l.get_tensor(); - if (l.data_type == data_types::bin) { + if (l.data_type == data_types::u1) { VF rnd_vec = rg.generate_random_1d(s.count() / 32, min_random, max_random); set_values(prim, rnd_vec); } else if (l.data_type == data_types::i8 || l.data_type == data_types::u8) { @@ -134,11 +134,11 @@ class BaseFusingTest : public ::testing::TestWithParam { cldnn::memory::ptr get_mem(cldnn::layout l, float fill_value) { auto prim = engine.allocate_memory(l); tensor s = l.get_tensor(); - if (l.data_type == data_types::bin) { + if (l.data_type == data_types::u1) { VF rnd_vec(s.count() / 32, static_cast(fill_value)); set_values(prim, rnd_vec); } else if (l.data_type == data_types::f16) { - VF rnd_vec(s.count(), float_to_half(fill_value)); + VF rnd_vec(s.count(), ov::float16(fill_value).to_bits()); set_values(prim, rnd_vec); } else if (l.data_type == data_types::f32) { VF rnd_vec(s.count(), fill_value); @@ -163,13 +163,13 @@ class BaseFusingTest : public ::testing::TestWithParam { VF rnd_vec = rg.generate_random_norepetitions(s.count(), min, max); set_values(prim, rnd_vec); } else if (l.data_type == data_types::f16) { - VF rnd_vec = rg.generate_random_norepetitions(s.count(), min, max); + VF rnd_vec = rg.generate_random_norepetitions(s.count(), min, max); set_values(prim, rnd_vec); } else if (l.data_type == data_types::i8) { VF rnd_vec = rg.generate_random_norepetitions(s.count(), min, max); set_values(prim, rnd_vec); } - else if (l.data_type == data_types::bin) { + else if (l.data_type == data_types::u1) { VF rnd_vec = rg.generate_random_norepetitions(s.count(), min, max); set_values(prim, rnd_vec); } @@ -184,7 +184,7 @@ class BaseFusingTest : public ::testing::TestWithParam { VF rnd_vec = rg.generate_random_1d(s.count(), min, max); set_values(prim, rnd_vec); } else if (l.data_type == data_types::f16) { - VF rnd_vec = rg.generate_random_1d(s.count(), min, max); + VF rnd_vec = rg.generate_random_1d(s.count(), min, max); set_values(prim, rnd_vec); } else if (l.data_type == data_types::i8) { VF rnd_vec = rg.generate_random_1d(s.count(), min, max); @@ -192,7 +192,7 @@ class BaseFusingTest : public ::testing::TestWithParam { } else if (l.data_type == data_types::u8) { VF rnd_vec = rg.generate_random_1d(s.count(), min, max); set_values(prim, rnd_vec); - } else if (l.data_type == data_types::bin) { + } else if (l.data_type == data_types::u1) { VF rnd_vec = rg.generate_random_1d(s.count() / 32, min, max); set_values(prim, rnd_vec); } diff --git a/src/plugins/intel_gpu/tests/unit/fusions/scatter_nd_update_fusion_test.cpp b/src/plugins/intel_gpu/tests/unit/fusions/scatter_nd_update_fusion_test.cpp index 2540b234ef105f..469d569d2692b7 100644 --- a/src/plugins/intel_gpu/tests/unit/fusions/scatter_nd_update_fusion_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/fusions/scatter_nd_update_fusion_test.cpp @@ -104,7 +104,7 @@ class ScatterNDUpdatePrimitiveFusingTest : public ::BaseFusingTest rnd_vec = generate_unique_indices(p); set_values(prim, rnd_vec); } else if (indices_layout.data_type == data_types::f16) { - VF rnd_vec = generate_unique_indices(p); + VF rnd_vec = generate_unique_indices(p); set_values(prim, rnd_vec); } else if (indices_layout.data_type == data_types::i8) { VF rnd_vec = generate_unique_indices(p); diff --git a/src/plugins/intel_gpu/tests/unit/module_tests/graph_manipulation_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/module_tests/graph_manipulation_gpu_test.cpp index 
c3ee67779a9e9d..670b1bbc41a6eb 100644 --- a/src/plugins/intel_gpu/tests/unit/module_tests/graph_manipulation_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/module_tests/graph_manipulation_gpu_test.cpp @@ -36,8 +36,8 @@ TEST(basic, test1) { auto weights1 = engine.allocate_memory({ data_types::f16, format::yxfb,{ 1, 1, 2, 1 } }); auto weights2 = engine.allocate_memory({ data_types::f32, format::byxf,{ 1, 1, 1, 2 } }); - set_values(input, { FLOAT16(1.1f), FLOAT16(1.2f), FLOAT16(1.3f), FLOAT16(1.4f) }); - set_values(weights1, { FLOAT16(2.1f), FLOAT16(3.1f) }); + set_values(input, { ov::float16(1.1f), ov::float16(1.2f), ov::float16(1.3f), ov::float16(1.4f) }); + set_values(weights1, { ov::float16(2.1f), ov::float16(3.1f) }); set_values(weights2, { 1.1f, 0.1f }); topology topology; diff --git a/src/plugins/intel_gpu/tests/unit/module_tests/optionals_test.cpp b/src/plugins/intel_gpu/tests/unit/module_tests/optionals_test.cpp index e4d58fd4e895fb..fea85618b8a1bb 100644 --- a/src/plugins/intel_gpu/tests/unit/module_tests/optionals_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/module_tests/optionals_test.cpp @@ -32,9 +32,9 @@ TEST(optional_data_types, basic) { ASSERT_EQ(o2.value(), cldnn::data_types::f32); } - optional_data_type o3(type_to_data_type::value); + optional_data_type o3(cldnn::data_types::f16); ASSERT_EQ(o3.value(), cldnn::data_types::f16); - optional_data_type o4(type_to_data_type::value); + optional_data_type o4(cldnn::data_types::f32); ASSERT_EQ(o4.value(), cldnn::data_types::f32); } diff --git a/src/plugins/intel_gpu/tests/unit/passes/handle_reshape.cpp b/src/plugins/intel_gpu/tests/unit/passes/handle_reshape.cpp index 31a411a63cc76f..11114402a76324 100644 --- a/src/plugins/intel_gpu/tests/unit/passes/handle_reshape.cpp +++ b/src/plugins/intel_gpu/tests/unit/passes/handle_reshape.cpp @@ -229,11 +229,11 @@ TEST(handle_reshape, reshape_input_reorder) { auto in1_layout = layout{ ov::PartialShape{-1, 16, 64, 64}, data_types::f16, format::bfyx }; auto in1_memory = engine.allocate_memory({ ov::PartialShape{2, 16, 64, 64}, data_types::f16, format::bfyx }); - auto in0 = rg.generate_random_1d(in0_memory->count(), -10, 10); - auto in1 = rg.generate_random_1d(in1_memory->count(), -10, 10); - set_values(in0_memory, in0); + auto in0 = rg.generate_random_1d(in0_memory->count(), -10, 10); + auto in1 = rg.generate_random_1d(in1_memory->count(), -10, 10); + set_values(in0_memory, in0); set_values(shape_memory, {1, 2, 16, 64, 64}); - set_values(in1_memory, in1); + set_values(in1_memory, in1); topology topology; topology.add(input_layout("input0", in0_layout)); diff --git a/src/plugins/intel_gpu/tests/unit/passes/prepare_buffer_fusing_test.cpp b/src/plugins/intel_gpu/tests/unit/passes/prepare_buffer_fusing_test.cpp index b5b30083ce792b..1866ddb6c19870 100644 --- a/src/plugins/intel_gpu/tests/unit/passes/prepare_buffer_fusing_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/passes/prepare_buffer_fusing_test.cpp @@ -383,37 +383,37 @@ TEST(prepare_buffer_fusing, in_place_concat_dynamic_onednn_batch1) { auto input_memory1 = engine.allocate_memory(in_layout1); auto input_memory2 = engine.allocate_memory(in_layout2); - set_values(input_memory1, - {FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), - FLOAT16(11.0f), FLOAT16(22.0f), FLOAT16(33.0f), FLOAT16(44.0f), FLOAT16(55.0f), FLOAT16(66.0f), FLOAT16(77.0f), FLOAT16(88.0f), - FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), 
FLOAT16(8.0f), - FLOAT16(11.0f), FLOAT16(22.0f), FLOAT16(33.0f), FLOAT16(44.0f), FLOAT16(55.0f), FLOAT16(66.0f), FLOAT16(77.0f), FLOAT16(88.0f)}); - set_values(input_memory2, - {FLOAT16(111.0f), FLOAT16(222.0f), FLOAT16(333.0f), FLOAT16(444.0f), FLOAT16(555.0f), FLOAT16(666.0f), FLOAT16(777.0f), FLOAT16(888.0f), - FLOAT16(1111.0f), FLOAT16(2222.0f), FLOAT16(3333.0f), FLOAT16(4444.0f), FLOAT16(5555.0f), FLOAT16(6666.0f), FLOAT16(7777.0f), FLOAT16(8888.0f), - FLOAT16(111.0f), FLOAT16(222.0f), FLOAT16(333.0f), FLOAT16(444.0f), FLOAT16(555.0f), FLOAT16(666.0f), FLOAT16(777.0f), FLOAT16(888.0f), - FLOAT16(1111.0f), FLOAT16(2222.0f), FLOAT16(3333.0f), FLOAT16(4444.0f), FLOAT16(5555.0f), FLOAT16(6666.0f), FLOAT16(7777.0f), FLOAT16(8888.0f)}); + set_values(input_memory1, + {ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), + ov::float16(11.0f), ov::float16(22.0f), ov::float16(33.0f), ov::float16(44.0f), ov::float16(55.0f), ov::float16(66.0f), ov::float16(77.0f), ov::float16(88.0f), + ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), + ov::float16(11.0f), ov::float16(22.0f), ov::float16(33.0f), ov::float16(44.0f), ov::float16(55.0f), ov::float16(66.0f), ov::float16(77.0f), ov::float16(88.0f)}); + set_values(input_memory2, + {ov::float16(111.0f), ov::float16(222.0f), ov::float16(333.0f), ov::float16(444.0f), ov::float16(555.0f), ov::float16(666.0f), ov::float16(777.0f), ov::float16(888.0f), + ov::float16(1111.0f), ov::float16(2222.0f), ov::float16(3333.0f), ov::float16(4444.0f), ov::float16(5555.0f), ov::float16(6666.0f), ov::float16(7777.0f), ov::float16(8888.0f), + ov::float16(111.0f), ov::float16(222.0f), ov::float16(333.0f), ov::float16(444.0f), ov::float16(555.0f), ov::float16(666.0f), ov::float16(777.0f), ov::float16(888.0f), + ov::float16(1111.0f), ov::float16(2222.0f), ov::float16(3333.0f), ov::float16(4444.0f), ov::float16(5555.0f), ov::float16(6666.0f), ov::float16(7777.0f), ov::float16(8888.0f)}); net.set_input_data("input1", input_memory1); net.set_input_data("input2", input_memory2); - std::vector ref_output = { - FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), - FLOAT16(11.0f), FLOAT16(22.0f), FLOAT16(33.0f), FLOAT16(44.0f), FLOAT16(55.0f), FLOAT16(66.0f), FLOAT16(77.0f), FLOAT16(88.0f), - FLOAT16(111.0f), FLOAT16(222.0f), FLOAT16(333.0f), FLOAT16(444.0f), FLOAT16(555.0f), FLOAT16(666.0f), FLOAT16(777.0f), FLOAT16(888.0f), - FLOAT16(1111.0f), FLOAT16(2222.0f), FLOAT16(3333.0f), FLOAT16(4444.0f), FLOAT16(5555.0f), FLOAT16(6666.0f), FLOAT16(7777.0f), FLOAT16(8888.0f), - FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), - FLOAT16(11.0f), FLOAT16(22.0f), FLOAT16(33.0f), FLOAT16(44.0f), FLOAT16(55.0f), FLOAT16(66.0f), FLOAT16(77.0f), FLOAT16(88.0f), - FLOAT16(111.0f), FLOAT16(222.0f), FLOAT16(333.0f), FLOAT16(444.0f), FLOAT16(555.0f), FLOAT16(666.0f), FLOAT16(777.0f), FLOAT16(888.0f), - FLOAT16(1111.0f), FLOAT16(2222.0f), FLOAT16(3333.0f), FLOAT16(4444.0f), FLOAT16(5555.0f), FLOAT16(6666.0f), FLOAT16(7777.0f), FLOAT16(8888.0f)}; + std::vector ref_output = { + ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), + ov::float16(11.0f), ov::float16(22.0f), 
ov::float16(33.0f), ov::float16(44.0f), ov::float16(55.0f), ov::float16(66.0f), ov::float16(77.0f), ov::float16(88.0f), + ov::float16(111.0f), ov::float16(222.0f), ov::float16(333.0f), ov::float16(444.0f), ov::float16(555.0f), ov::float16(666.0f), ov::float16(777.0f), ov::float16(888.0f), + ov::float16(1111.0f), ov::float16(2222.0f), ov::float16(3333.0f), ov::float16(4444.0f), ov::float16(5555.0f), ov::float16(6666.0f), ov::float16(7777.0f), ov::float16(8888.0f), + ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), + ov::float16(11.0f), ov::float16(22.0f), ov::float16(33.0f), ov::float16(44.0f), ov::float16(55.0f), ov::float16(66.0f), ov::float16(77.0f), ov::float16(88.0f), + ov::float16(111.0f), ov::float16(222.0f), ov::float16(333.0f), ov::float16(444.0f), ov::float16(555.0f), ov::float16(666.0f), ov::float16(777.0f), ov::float16(888.0f), + ov::float16(1111.0f), ov::float16(2222.0f), ov::float16(3333.0f), ov::float16(4444.0f), ov::float16(5555.0f), ov::float16(6666.0f), ov::float16(7777.0f), ov::float16(8888.0f)}; std::map output; EXPECT_NO_THROW(output = net.execute()); auto out_l = net.get_output_layout("output"); auto out_mem = output.at("output").get_memory(); - cldnn::mem_lock output_ptr(out_mem, get_test_stream()); + cldnn::mem_lock output_ptr(out_mem, get_test_stream()); - cldnn::mem_lock input1_ptr(input_memory1, get_test_stream()); - cldnn::mem_lock input2_ptr(input_memory2, get_test_stream()); + cldnn::mem_lock input1_ptr(input_memory1, get_test_stream()); + cldnn::mem_lock input2_ptr(input_memory2, get_test_stream()); const auto& concat_inst = net.get_primitive("concat"); const auto& concat_node_n = concat_inst->get_node(); @@ -461,37 +461,37 @@ TEST(prepare_buffer_fusing, in_place_concat_dynamic_onednn_batch2) { auto input_memory1 = engine.allocate_memory(in_layout1); auto input_memory2 = engine.allocate_memory(in_layout2); - set_values(input_memory1, - {FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), - FLOAT16(11.0f), FLOAT16(22.0f), FLOAT16(33.0f), FLOAT16(44.0f), FLOAT16(55.0f), FLOAT16(66.0f), FLOAT16(77.0f), FLOAT16(88.0f), - FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), - FLOAT16(11.0f), FLOAT16(22.0f), FLOAT16(33.0f), FLOAT16(44.0f), FLOAT16(55.0f), FLOAT16(66.0f), FLOAT16(77.0f), FLOAT16(88.0f)}); - set_values(input_memory2, - {FLOAT16(111.0f), FLOAT16(222.0f), FLOAT16(333.0f), FLOAT16(444.0f), FLOAT16(555.0f), FLOAT16(666.0f), FLOAT16(777.0f), FLOAT16(888.0f), - FLOAT16(1111.0f), FLOAT16(2222.0f), FLOAT16(3333.0f), FLOAT16(4444.0f), FLOAT16(5555.0f), FLOAT16(6666.0f), FLOAT16(7777.0f), FLOAT16(8888.0f), - FLOAT16(111.0f), FLOAT16(222.0f), FLOAT16(333.0f), FLOAT16(444.0f), FLOAT16(555.0f), FLOAT16(666.0f), FLOAT16(777.0f), FLOAT16(888.0f), - FLOAT16(1111.0f), FLOAT16(2222.0f), FLOAT16(3333.0f), FLOAT16(4444.0f), FLOAT16(5555.0f), FLOAT16(6666.0f), FLOAT16(7777.0f), FLOAT16(8888.0f)}); + set_values(input_memory1, + {ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), + ov::float16(11.0f), ov::float16(22.0f), ov::float16(33.0f), ov::float16(44.0f), ov::float16(55.0f), ov::float16(66.0f), ov::float16(77.0f), ov::float16(88.0f), + ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), ov::float16(5.0f), 
ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), + ov::float16(11.0f), ov::float16(22.0f), ov::float16(33.0f), ov::float16(44.0f), ov::float16(55.0f), ov::float16(66.0f), ov::float16(77.0f), ov::float16(88.0f)}); + set_values(input_memory2, + {ov::float16(111.0f), ov::float16(222.0f), ov::float16(333.0f), ov::float16(444.0f), ov::float16(555.0f), ov::float16(666.0f), ov::float16(777.0f), ov::float16(888.0f), + ov::float16(1111.0f), ov::float16(2222.0f), ov::float16(3333.0f), ov::float16(4444.0f), ov::float16(5555.0f), ov::float16(6666.0f), ov::float16(7777.0f), ov::float16(8888.0f), + ov::float16(111.0f), ov::float16(222.0f), ov::float16(333.0f), ov::float16(444.0f), ov::float16(555.0f), ov::float16(666.0f), ov::float16(777.0f), ov::float16(888.0f), + ov::float16(1111.0f), ov::float16(2222.0f), ov::float16(3333.0f), ov::float16(4444.0f), ov::float16(5555.0f), ov::float16(6666.0f), ov::float16(7777.0f), ov::float16(8888.0f)}); net.set_input_data("input1", input_memory1); net.set_input_data("input2", input_memory2); - std::vector ref_output = { - FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), - FLOAT16(11.0f), FLOAT16(22.0f), FLOAT16(33.0f), FLOAT16(44.0f), FLOAT16(55.0f), FLOAT16(66.0f), FLOAT16(77.0f), FLOAT16(88.0f), - FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), - FLOAT16(11.0f), FLOAT16(22.0f), FLOAT16(33.0f), FLOAT16(44.0f), FLOAT16(55.0f), FLOAT16(66.0f), FLOAT16(77.0f), FLOAT16(88.0f), - FLOAT16(111.0f), FLOAT16(222.0f), FLOAT16(333.0f), FLOAT16(444.0f), FLOAT16(555.0f), FLOAT16(666.0f), FLOAT16(777.0f), FLOAT16(888.0f), - FLOAT16(1111.0f), FLOAT16(2222.0f), FLOAT16(3333.0f), FLOAT16(4444.0f), FLOAT16(5555.0f), FLOAT16(6666.0f), FLOAT16(7777.0f), FLOAT16(8888.0f), - FLOAT16(111.0f), FLOAT16(222.0f), FLOAT16(333.0f), FLOAT16(444.0f), FLOAT16(555.0f), FLOAT16(666.0f), FLOAT16(777.0f), FLOAT16(888.0f), - FLOAT16(1111.0f), FLOAT16(2222.0f), FLOAT16(3333.0f), FLOAT16(4444.0f), FLOAT16(5555.0f), FLOAT16(6666.0f), FLOAT16(7777.0f), FLOAT16(8888.0f)}; + std::vector ref_output = { + ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), + ov::float16(11.0f), ov::float16(22.0f), ov::float16(33.0f), ov::float16(44.0f), ov::float16(55.0f), ov::float16(66.0f), ov::float16(77.0f), ov::float16(88.0f), + ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), + ov::float16(11.0f), ov::float16(22.0f), ov::float16(33.0f), ov::float16(44.0f), ov::float16(55.0f), ov::float16(66.0f), ov::float16(77.0f), ov::float16(88.0f), + ov::float16(111.0f), ov::float16(222.0f), ov::float16(333.0f), ov::float16(444.0f), ov::float16(555.0f), ov::float16(666.0f), ov::float16(777.0f), ov::float16(888.0f), + ov::float16(1111.0f), ov::float16(2222.0f), ov::float16(3333.0f), ov::float16(4444.0f), ov::float16(5555.0f), ov::float16(6666.0f), ov::float16(7777.0f), ov::float16(8888.0f), + ov::float16(111.0f), ov::float16(222.0f), ov::float16(333.0f), ov::float16(444.0f), ov::float16(555.0f), ov::float16(666.0f), ov::float16(777.0f), ov::float16(888.0f), + ov::float16(1111.0f), ov::float16(2222.0f), ov::float16(3333.0f), ov::float16(4444.0f), ov::float16(5555.0f), ov::float16(6666.0f), ov::float16(7777.0f), ov::float16(8888.0f)}; std::map output; EXPECT_NO_THROW(output = 
net.execute()); auto out_l = net.get_output_layout("output"); auto out_mem = output.at("output").get_memory(); - cldnn::mem_lock output_ptr(out_mem, get_test_stream()); + cldnn::mem_lock output_ptr(out_mem, get_test_stream()); - cldnn::mem_lock input1_ptr(input_memory1, get_test_stream()); - cldnn::mem_lock input2_ptr(input_memory2, get_test_stream()); + cldnn::mem_lock input1_ptr(input_memory1, get_test_stream()); + cldnn::mem_lock input2_ptr(input_memory2, get_test_stream()); const auto& concat_inst = net.get_primitive("concat"); const auto& concat_node_n = concat_inst->get_node(); diff --git a/src/plugins/intel_gpu/tests/unit/passes/prepare_padding_test.cpp b/src/plugins/intel_gpu/tests/unit/passes/prepare_padding_test.cpp index 48bbb733657f4e..c50d35ff1e2605 100644 --- a/src/plugins/intel_gpu/tests/unit/passes/prepare_padding_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/passes/prepare_padding_test.cpp @@ -21,7 +21,7 @@ TEST(prepare_padding, groupconv_with_output) { tests::random_generator rg(GET_SUITE_NAME); auto& engine = get_test_engine(); auto in_layout = layout{{1, 18, 76, 135}, data_types::f16, format::bfyx}; - auto weights_data = rg.generate_random_5d(1, 18, 1, 3, 3, -1, 1); + auto weights_data = rg.generate_random_5d(1, 18, 1, 3, 3, -1, 1); auto weights_mem = engine.allocate_memory({ {18, 1, 1, 3, 3}, data_types::f16, format::bfzyx}); set_values(weights_mem, weights_data); diff --git a/src/plugins/intel_gpu/tests/unit/passes/prepare_primitive_fusing_test.cpp b/src/plugins/intel_gpu/tests/unit/passes/prepare_primitive_fusing_test.cpp index 8f2c2cab50236f..f19140d24593ff 100644 --- a/src/plugins/intel_gpu/tests/unit/passes/prepare_primitive_fusing_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/passes/prepare_primitive_fusing_test.cpp @@ -466,7 +466,7 @@ TEST(prepare_primitive_fusing, eltwise_fusing_residual_connection) { auto conv_in_layout = layout{ ov::PartialShape{1, 3, -1, -1}, data_types::f16, format::bfyx}; auto weight_layout = layout{ ov::PartialShape{10, 3, 3, 3}, data_types::f16, format::bfyx}; auto weight_mem = engine.allocate_memory(weight_layout); - auto weight_data = rg.generate_random_4d(10, 3, 3, 3, -1, 1); + auto weight_data = rg.generate_random_4d(10, 3, 3, 3, -1, 1); set_values(weight_mem, weight_data); auto elt1_in1_layout = layout{ ov::PartialShape{1, 10, -1, -1}, data_types::f16, format::bfyx}; @@ -493,11 +493,11 @@ TEST(prepare_primitive_fusing, eltwise_fusing_residual_connection) { cldnn::network net(prog, 0); // Valid - auto conv_input_data = rg.generate_random_4d(1, 3, 7, 7, -1, 1); + auto conv_input_data = rg.generate_random_4d(1, 3, 7, 7, -1, 1); auto conv_input_mem = engine.allocate_memory(layout{ov::PartialShape{1, 3, 7, 7}, data_types::f16, format::bfyx}); set_values(conv_input_mem, conv_input_data); - auto elt_input_data = rg.generate_random_4d(1, 10, 5, 5, -10, 10); + auto elt_input_data = rg.generate_random_4d(1, 10, 5, 5, -10, 10); auto elt_input_mem = engine.allocate_memory(layout{ov::PartialShape{1, 10, 5, 5}, data_types::f16, format::bfyx}); set_values(elt_input_mem, elt_input_data); @@ -509,7 +509,7 @@ TEST(prepare_primitive_fusing, eltwise_fusing_residual_connection) { ASSERT_FALSE(conv_inst->has_unfused_subgraph()); // Invalid => unfusion - auto conv_input_data2 = rg.generate_random_4d(1, 3, 3, 3, -1, 1); + auto conv_input_data2 = rg.generate_random_4d(1, 3, 3, 3, -1, 1); auto conv_input_mem2 = engine.allocate_memory(layout{ov::PartialShape{1, 3, 3, 3}, data_types::f16, format::bfyx}); set_values(conv_input_mem2, conv_input_data2); 
net.set_input_data("conv_input", conv_input_mem2); @@ -552,7 +552,7 @@ TEST(prepare_primitive_fusing, fuse_constant_transposes_accuracy_test) { auto weights = engine.allocate_memory({{ 32, 2 }, data_types::f32, format::bfyx }); tests::random_generator rg(GET_SUITE_NAME); - auto input_data = rg.generate_random_2d(2, 32, -1, 1); + auto input_data = rg.generate_random_2d(2, 32, -1, 1); auto weights_data = rg.generate_random_2d(32, 2, -1, 1); set_values(input, flatten_2d(format::bfyx, input_data)); @@ -576,7 +576,7 @@ TEST(prepare_primitive_fusing, fuse_constant_transposes_accuracy_test) { auto outputs = network.execute(); auto output = outputs.at("fc").get_memory(); - cldnn::mem_lock output_ptr(output, get_test_stream()); + cldnn::mem_lock output_ptr(output, get_test_stream()); ExecutionConfig config_ref = get_test_default_config(engine); config_ref.set_property(ov::intel_gpu::optimize_data(false)); @@ -587,7 +587,7 @@ TEST(prepare_primitive_fusing, fuse_constant_transposes_accuracy_test) { auto outputs_ref = network_ref.execute(); auto output_ref = outputs_ref.at("fc").get_memory(); - cldnn::mem_lock output_ptr_ref(output_ref, get_test_stream()); + cldnn::mem_lock output_ptr_ref(output_ref, get_test_stream()); for (size_t i = 0; i < output_ptr_ref.size(); ++i) { ASSERT_EQ(output_ptr[i], output_ptr_ref[i]); diff --git a/src/plugins/intel_gpu/tests/unit/shape_infer/quantize_si_test.cpp b/src/plugins/intel_gpu/tests/unit/shape_infer/quantize_si_test.cpp index d41e13d14c9120..342386aae479c1 100644 --- a/src/plugins/intel_gpu/tests/unit/shape_infer/quantize_si_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/shape_infer/quantize_si_test.cpp @@ -74,7 +74,7 @@ INSTANTIATE_TEST_SUITE_P(smoke, quantize_test, }, { layout{ov::PartialShape{1, 2, 3, 4}, data_types::f32, format::bfyx}, - layout{ov::PartialShape{1, 2, 3, 4}, data_types::bin, format::b_fs_yx_32fp} + layout{ov::PartialShape{1, 2, 3, 4}, data_types::u1, format::b_fs_yx_32fp} }, { layout{ov::PartialShape{1, 2, 3, 4, 5}, data_types::f32, format::bfzyx}, diff --git a/src/plugins/intel_gpu/tests/unit/shape_infer/random_uniform_si_test.cpp b/src/plugins/intel_gpu/tests/unit/shape_infer/random_uniform_si_test.cpp index 36a793b7ba3365..6597351a0c728c 100644 --- a/src/plugins/intel_gpu/tests/unit/shape_infer/random_uniform_si_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/shape_infer/random_uniform_si_test.cpp @@ -64,7 +64,7 @@ TEST_P(random_uniform_si_test, shape_infer) { auto allocated_mem = engine.allocate_memory(in_layout); switch (p.out_data_type) { case data_types::f16: - set_values(allocated_mem, {float_to_half(val)}); + set_values(allocated_mem, {ov::float16(val).to_bits()}); break; case data_types::f32: set_values(allocated_mem, {static_cast::type>(val)}); @@ -81,7 +81,6 @@ TEST_P(random_uniform_si_test, shape_infer) { case data_types::u8: set_values(allocated_mem, {static_cast::type>(val)}); break; - case data_types::bin: default: break; } diff --git a/src/plugins/intel_gpu/tests/unit/shape_infer/range_si_test.cpp b/src/plugins/intel_gpu/tests/unit/shape_infer/range_si_test.cpp index e0d229d358c14a..2430d628aa2c42 100644 --- a/src/plugins/intel_gpu/tests/unit/shape_infer/range_si_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/shape_infer/range_si_test.cpp @@ -60,10 +60,10 @@ TEST_P(range_si_test, shape_infer) { auto in_layout = input_layouts[idx]; if (in_layout.is_static() && (idx < p.vals.size())) { auto prim_mem = engine.allocate_memory(in_layout); - ASSERT_NE(p.out_data_type, data_types::bin); + ASSERT_NE(p.out_data_type, 
data_types::undefined); switch (p.out_data_type) { case data_types::f16: - set_values(prim_mem, {float_to_half(p.vals[idx])}); + set_values(prim_mem, {ov::float16(p.vals[idx]).to_bits()}); break; case data_types::f32: set_values(prim_mem, {static_cast::type>(p.vals[idx])}); @@ -80,7 +80,6 @@ TEST_P(range_si_test, shape_infer) { case data_types::u8: set_values(prim_mem, {static_cast::type>(p.vals[idx])}); break; - case data_types::bin: default: break; } diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/activation_simple_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/activation_simple_gpu_test.cpp index e5f59be9b51a8d..0983dd1e0c7196 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/activation_simple_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/activation_simple_gpu_test.cpp @@ -493,8 +493,8 @@ TEST(activation_f16_fw_gpu, softsign_basic_yxfb) { auto& engine = get_test_engine(); auto input = engine.allocate_memory({data_types::f16, format::yxfb, {1, 1, 2, 2}}); - set_values(input, {FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.5f)}); - VF output_vec = {FLOAT16(0.5f), FLOAT16(0.66650391f), FLOAT16(0.75f), FLOAT16(0.81835938f)}; + set_values(input, {ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.5f)}); + VF output_vec = {ov::float16(0.5f), ov::float16(0.66650391f), ov::float16(0.75f), ov::float16(0.81835938f)}; topology topology(input_layout("input", input->get_layout()), activation("not", input_info("input"), activation_func::softsign)); @@ -506,8 +506,8 @@ TEST(activation_f16_fw_gpu, softsign_basic_yxfb) { auto output_memory = outputs.at("not").get_memory(); auto output_layout = output_memory->get_layout(); - cldnn::mem_lock output_ptr(output_memory, get_test_stream()); - cldnn::mem_lock input_ptr(input, get_test_stream()); + cldnn::mem_lock output_ptr(output_memory, get_test_stream()); + cldnn::mem_lock input_ptr(input, get_test_stream()); int y_size = output_layout.spatial(1); int x_size = output_layout.spatial(0); @@ -611,12 +611,12 @@ TEST(activation_f16_fw_gpu, pow_basic_yxfb) { auto input = engine.allocate_memory({ data_types::f16, format::yxfb, { 1, 1, 2, 2 } }); set_values(input, - { FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.5f) }); - VF output_vec = { FLOAT16(1.0f), FLOAT16(8.0f), FLOAT16(27.0f), FLOAT16(91.125f) }; + { ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.5f) }); + VF output_vec = { ov::float16(1.0f), ov::float16(8.0f), ov::float16(27.0f), ov::float16(91.125f) }; topology topology( input_layout("input", input->get_layout()), - activation("pow", input_info("input"), activation_func::pow, { FLOAT16(3.0f), FLOAT16(0.0f) })); + activation("pow", input_info("input"), activation_func::pow, { ov::float16(3.0f), ov::float16(0.0f) })); network network(engine, topology, get_test_default_config(engine)); network.set_input_data("input", input); auto outputs = network.execute(); @@ -625,7 +625,7 @@ TEST(activation_f16_fw_gpu, pow_basic_yxfb) { auto output_memory = outputs.at("pow").get_memory(); auto output_layout = output_memory->get_layout(); - cldnn::mem_lock output_ptr(output_memory, get_test_stream()); + cldnn::mem_lock output_ptr(output_memory, get_test_stream()); int y_size = output_layout.spatial(1); int x_size = output_layout.spatial(0); @@ -971,8 +971,8 @@ TEST(activation_f16_fw_gpu, basic_bfyx_all_functions) auto input = engine.allocate_memory({ data_types::f16, format::bfyx, { 1, 1, 2, 4 } }); auto input_params = engine.allocate_memory({ data_types::f16, 
format::bfyx, { 1, 2, 1, 1 } }); - set_values(input, { FLOAT16(-4.5f), FLOAT16(-2.5f), FLOAT16(-1.5f), FLOAT16(0.5f), - FLOAT16(0.9f), FLOAT16(1.5f), FLOAT16(2.0f), FLOAT16(2.5f) }); + set_values(input, { ov::float16(-4.5f), ov::float16(-2.5f), ov::float16(-1.5f), ov::float16(0.5f), + ov::float16(0.9f), ov::float16(1.5f), ov::float16(2.0f), ov::float16(2.5f) }); std::vector funcs = { activation_func::linear, @@ -984,7 +984,7 @@ TEST(activation_f16_fw_gpu, basic_bfyx_all_functions) }; activation_additional_params params = { 3.f, 2.f }; - set_values(input_params, { FLOAT16(params.a), FLOAT16(params.b) }); + set_values(input_params, { ov::float16(params.a), ov::float16(params.b) }); for (uint8_t i = 0 ; i < 2 ; i++) { for (auto func : funcs) { @@ -1005,8 +1005,8 @@ TEST(activation_f16_fw_gpu, basic_bfyx_all_functions) auto output_memory = outputs.at("activation").get_memory(); auto output_layout = output_memory->get_layout(); - cldnn::mem_lock output_ptr(output_memory, get_test_stream()); - cldnn::mem_lock input_ptr(input, get_test_stream()); + cldnn::mem_lock output_ptr(output_memory, get_test_stream()); + cldnn::mem_lock input_ptr(input, get_test_stream()); int y_size = output_layout.spatial(1); int x_size = output_layout.spatial(0); @@ -1021,28 +1021,28 @@ TEST(activation_f16_fw_gpu, basic_bfyx_all_functions) for (size_t i = 0; i < output_layout.get_linear_size(); ++i) { switch (func) { case activation_func::linear: { - VF output_vec = {FLOAT16(-11.5f), FLOAT16(-5.5f), FLOAT16(-2.5f), FLOAT16(3.5f), - FLOAT16(4.7f), FLOAT16(6.5f), FLOAT16(8.0f), FLOAT16(9.5f)}; + VF output_vec = {ov::float16(-11.5f), ov::float16(-5.5f), ov::float16(-2.5f), ov::float16(3.5f), + ov::float16(4.7f), ov::float16(6.5f), ov::float16(8.0f), ov::float16(9.5f)}; ASSERT_FLOAT_EQ(output_vec[i], output_ptr[i]); break; } case activation_func::mish: - ASSERT_NEAR((FLOAT16)((float)input_ptr[i] * std::tanh(std::log(1.f + std::exp((float)input_ptr[i])))), + ASSERT_NEAR((ov::float16)((float)input_ptr[i] * std::tanh(std::log(1.f + std::exp((float)input_ptr[i])))), output_ptr[i], 1e-2f); break; case activation_func::hswish: - ASSERT_NEAR((FLOAT16)((float)input_ptr[i] * std::fmin(std::fmax(0.f, (float)input_ptr[i] + 3.f), 6.f) / 6.f), + ASSERT_NEAR((ov::float16)((float)input_ptr[i] * std::fmin(std::fmax(0.f, (float)input_ptr[i] + 3.f), 6.f) / 6.f), output_ptr[i], 1e-3f); break; case activation_func::hard_sigmoid: - ASSERT_NEAR((FLOAT16)(std::fmin(std::fmax(0.f, (float)input_ptr[i] + 3.f), 6.f) / 6.f), + ASSERT_NEAR((ov::float16)(std::fmin(std::fmax(0.f, (float)input_ptr[i] + 3.f), 6.f) / 6.f), output_ptr[i], 1e-3f); break; case activation_func::round_half_to_even: - ASSERT_FLOAT_EQ((FLOAT16)std::rint((float)input_ptr[i]), output_ptr[i]); + ASSERT_FLOAT_EQ((ov::float16)std::rint((float)input_ptr[i]), output_ptr[i]); break; case activation_func::round_half_away_from_zero: - ASSERT_FLOAT_EQ((FLOAT16)std::round((float)input_ptr[i]), output_ptr[i]); + ASSERT_FLOAT_EQ((ov::float16)std::round((float)input_ptr[i]), output_ptr[i]); break; default: break; @@ -1804,7 +1804,7 @@ struct activation_random_test : testing::TestWithParam(mem, -127, 127, 2); break; case data_types::f16: - fill_random_typed(mem, -127, 127, 2); + fill_random_typed(mem, -127, 127, 2); break; case data_types::i8: fill_random_typed(mem, -127, 127, 1); @@ -1913,7 +1913,7 @@ struct activation_random_test : testing::TestWithParam(output, output_opt); } else if (input_type == data_types::f16) { - compare_outputs(output, output_opt); + compare_outputs(output, output_opt); 
} else if (input_type == data_types::i8) { compare_outputs(output, output_opt); } else if (input_type == data_types::u8) { diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/adaptive_avg_pooling_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/adaptive_avg_pooling_gpu_test.cpp index 70478709e124fa..a2cd49a704858a 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/adaptive_avg_pooling_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/adaptive_avg_pooling_gpu_test.cpp @@ -86,7 +86,7 @@ float getError() { } template<> -float getError() { +float getError() { return 0.5; } @@ -119,7 +119,7 @@ struct adaptive_avg_pooling_test public: void test() { - const auto data_type = type_to_data_type::value; + const auto data_type = ov::element::from(); AdaptiveAvgPoolingParams params; format::type plain_layout; format::type target_layout; @@ -162,7 +162,7 @@ struct adaptive_avg_pooling_test using adaptive_avg_pooling_test_f32 = adaptive_avg_pooling_test; -using adaptive_avg_pooling_test_f16 = adaptive_avg_pooling_test; +using adaptive_avg_pooling_test_f16 = adaptive_avg_pooling_test; TEST_P(adaptive_avg_pooling_test_f32, adaptive_avg_pooling_test_f32) { ASSERT_NO_FATAL_FAILURE(test()); diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/adaptive_max_pooling_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/adaptive_max_pooling_gpu_test.cpp index 649d933db701c9..247a26c466ae6a 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/adaptive_max_pooling_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/adaptive_max_pooling_gpu_test.cpp @@ -91,7 +91,7 @@ float getError() { } template<> -float getError() { +float getError() { return 0.5; } @@ -124,7 +124,7 @@ struct adaptive_max_pooling_test public: void test() { - const auto data_type = type_to_data_type::value; + const auto data_type = ov::element::from(); AdaptiveMaxPoolingParams params; format::type plain_layout; format::type target_layout; @@ -218,7 +218,7 @@ struct adaptive_max_pooling_test using adaptive_max_pooling_test_f32 = adaptive_max_pooling_test; -using adaptive_max_pooling_test_f16 = adaptive_max_pooling_test; +using adaptive_max_pooling_test_f16 = adaptive_max_pooling_test; TEST_P(adaptive_max_pooling_test_f32, adaptive_max_pooling_test_f32) { ASSERT_NO_FATAL_FAILURE(test()); diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/arg_max_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/arg_max_gpu_test.cpp index 60c3c04161194c..b356adb4495a86 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/arg_max_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/arg_max_gpu_test.cpp @@ -21,14 +21,13 @@ template struct arg_max_input_types { static const auto format = layoutFormat; using input_type = DataType; - static const data_types data_type = type_to_data_type::value; }; template struct argmax_gpu_test : public testing::Test { static const auto format = ArgMaxInput::format; using input_type = typename ArgMaxInput::input_type; - static const data_types data_type = ArgMaxInput::data_type; + const data_types data_type = ov::element::from(); std::vector getTypedVector(const std::vector& input) { return std::vector(input.begin(), input.end()); } @@ -54,8 +53,8 @@ using format_types = testing::Types, arg_max_input_types, arg_max_input_types, arg_max_input_types, - arg_max_input_types, - arg_max_input_types, + arg_max_input_types, + arg_max_input_types, arg_max_input_types, arg_max_input_types>; diff --git 
a/src/plugins/intel_gpu/tests/unit/test_cases/batch_to_space_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/batch_to_space_gpu_test.cpp index 36eb362a7034bb..ec216b581794c7 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/batch_to_space_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/batch_to_space_gpu_test.cpp @@ -26,10 +26,10 @@ TEST(batch_to_space_fp16_gpu, i8111_bs1222_cb0000_ce0000) { auto input = engine.allocate_memory({ data_types::f16, format::bfyx, input_shape }); set_values(input, { - FLOAT16(0.0f), FLOAT16(1.0f), - FLOAT16(2.0f), FLOAT16(3.0f), - FLOAT16(4.0f), FLOAT16(5.0f), - FLOAT16(6.0f), FLOAT16(7.0f) + ov::float16(0.0f), ov::float16(1.0f), + ov::float16(2.0f), ov::float16(3.0f), + ov::float16(4.0f), ov::float16(5.0f), + ov::float16(6.0f), ov::float16(7.0f) }); topology topology; @@ -71,12 +71,12 @@ TEST(batch_to_space_fp16_gpu, i4321_bs1212_cb0000_ce0000) { auto input = engine.allocate_memory({ data_types::f16, format::bfyx, input_shape }); set_values(input, { - FLOAT16(0.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), - FLOAT16(4.0f), FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), - FLOAT16(8.0f), FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), - FLOAT16(12.0f), FLOAT16(13.0f), FLOAT16(14.0f), FLOAT16(15.0f), - FLOAT16(16.0f), FLOAT16(17.0f), FLOAT16(18.0f), FLOAT16(19.0f), - FLOAT16(20.0f), FLOAT16(21.0f), FLOAT16(22.0f), FLOAT16(23.0f) + ov::float16(0.0f), ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), + ov::float16(4.0f), ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), + ov::float16(8.0f), ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), + ov::float16(12.0f), ov::float16(13.0f), ov::float16(14.0f), ov::float16(15.0f), + ov::float16(16.0f), ov::float16(17.0f), ov::float16(18.0f), ov::float16(19.0f), + ov::float16(20.0f), ov::float16(21.0f), ov::float16(22.0f), ov::float16(23.0f) }); topology topology; @@ -121,12 +121,12 @@ TEST(batch_to_space_fp16_gpu, i4321_bs1212_cb0010_ce0101) { auto input = engine.allocate_memory({ data_types::f16, format::bfyx, input_shape }); set_values(input, { - FLOAT16(0.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), - FLOAT16(4.0f), FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), - FLOAT16(8.0f), FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), - FLOAT16(12.0f), FLOAT16(13.0f), FLOAT16(14.0f), FLOAT16(15.0f), - FLOAT16(16.0f), FLOAT16(17.0f), FLOAT16(18.0f), FLOAT16(19.0f), - FLOAT16(20.0f), FLOAT16(21.0f), FLOAT16(22.0f), FLOAT16(23.0f) + ov::float16(0.0f), ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), + ov::float16(4.0f), ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), + ov::float16(8.0f), ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), + ov::float16(12.0f), ov::float16(13.0f), ov::float16(14.0f), ov::float16(15.0f), + ov::float16(16.0f), ov::float16(17.0f), ov::float16(18.0f), ov::float16(19.0f), + ov::float16(20.0f), ov::float16(21.0f), ov::float16(22.0f), ov::float16(23.0f) }); topology topology; @@ -168,12 +168,12 @@ TEST(batch_to_space_fp16_gpu, i62121_bs12311_cb02000_ce00110) { auto input = engine.allocate_memory({ data_types::f16, format::bfzyx, input_shape }); set_values(input, { - FLOAT16(0.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), - FLOAT16(4.0f), FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), - FLOAT16(8.0f), FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), - FLOAT16(12.0f), FLOAT16(13.0f), FLOAT16(14.0f), FLOAT16(15.0f), - FLOAT16(16.0f), FLOAT16(17.0f), FLOAT16(18.0f), FLOAT16(19.0f), - FLOAT16(20.0f), FLOAT16(21.0f), FLOAT16(22.0f), 
FLOAT16(23.0f) + ov::float16(0.0f), ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), + ov::float16(4.0f), ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), + ov::float16(8.0f), ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), + ov::float16(12.0f), ov::float16(13.0f), ov::float16(14.0f), ov::float16(15.0f), + ov::float16(16.0f), ov::float16(17.0f), ov::float16(18.0f), ov::float16(19.0f), + ov::float16(20.0f), ov::float16(21.0f), ov::float16(22.0f), ov::float16(23.0f) }); topology topology; @@ -215,14 +215,14 @@ TEST(batch_to_space_fp16_gpu, i1212112_bs112321_cb02000_ce00110) { auto input = engine.allocate_memory({ data_types::f16, format::bfwzyx, input_shape }); set_values(input, { - FLOAT16(0.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), - FLOAT16(4.0f), FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), - FLOAT16(8.0f), FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), - FLOAT16(12.0f), FLOAT16(13.0f), FLOAT16(14.0f), FLOAT16(15.0f), - FLOAT16(16.0f), FLOAT16(17.0f), FLOAT16(18.0f), FLOAT16(19.0f), - FLOAT16(20.0f), FLOAT16(21.0f), FLOAT16(22.0f), FLOAT16(23.0f), - FLOAT16(24.0f), FLOAT16(25.0f), FLOAT16(26.0f), FLOAT16(27.0f), - FLOAT16(28.0f), FLOAT16(29.0f), FLOAT16(30.0f), FLOAT16(31.0f) + ov::float16(0.0f), ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), + ov::float16(4.0f), ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), + ov::float16(8.0f), ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), + ov::float16(12.0f), ov::float16(13.0f), ov::float16(14.0f), ov::float16(15.0f), + ov::float16(16.0f), ov::float16(17.0f), ov::float16(18.0f), ov::float16(19.0f), + ov::float16(20.0f), ov::float16(21.0f), ov::float16(22.0f), ov::float16(23.0f), + ov::float16(24.0f), ov::float16(25.0f), ov::float16(26.0f), ov::float16(27.0f), + ov::float16(28.0f), ov::float16(29.0f), ov::float16(30.0f), ov::float16(31.0f) }); topology topology; @@ -266,10 +266,10 @@ TEST(batch_to_space_fp16_gpu, i21611_bs1112_cb0000_ce0000_b_fs_yx_fsv16) { auto input = engine.allocate_memory({ data_types::f16, format::bfyx, input_shape }); set_values(input, { - FLOAT16(0.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), - FLOAT16(8.0f), FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), FLOAT16(13.0f), FLOAT16(14.0f), FLOAT16(15.0f), - FLOAT16(16.0f), FLOAT16(17.0f), FLOAT16(18.0f), FLOAT16(19.0f), FLOAT16(20.0f), FLOAT16(21.0f), FLOAT16(22.0f), FLOAT16(23.0f), - FLOAT16(24.0f), FLOAT16(25.0f), FLOAT16(26.0f), FLOAT16(27.0f), FLOAT16(28.0f), FLOAT16(29.0f), FLOAT16(30.0f), FLOAT16(31.0f) + ov::float16(0.0f), ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), + ov::float16(8.0f), ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), ov::float16(13.0f), ov::float16(14.0f), ov::float16(15.0f), + ov::float16(16.0f), ov::float16(17.0f), ov::float16(18.0f), ov::float16(19.0f), ov::float16(20.0f), ov::float16(21.0f), ov::float16(22.0f), ov::float16(23.0f), + ov::float16(24.0f), ov::float16(25.0f), ov::float16(26.0f), ov::float16(27.0f), ov::float16(28.0f), ov::float16(29.0f), ov::float16(30.0f), ov::float16(31.0f) }); topology topology; @@ -317,10 +317,10 @@ TEST(batch_to_space_fp16_gpu, i2812_bs1112_cb0000_ce0000_b_fs_yx_fsv16) { auto input = engine.allocate_memory({ data_types::f16, format::bfyx, input_shape }); set_values(input, { - FLOAT16(0.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), 
FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), - FLOAT16(8.0f), FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), FLOAT16(13.0f), FLOAT16(14.0f), FLOAT16(15.0f), - FLOAT16(16.0f), FLOAT16(17.0f), FLOAT16(18.0f), FLOAT16(19.0f), FLOAT16(20.0f), FLOAT16(21.0f), FLOAT16(22.0f), FLOAT16(23.0f), - FLOAT16(24.0f), FLOAT16(25.0f), FLOAT16(26.0f), FLOAT16(27.0f), FLOAT16(28.0f), FLOAT16(29.0f), FLOAT16(30.0f), FLOAT16(31.0f) + ov::float16(0.0f), ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), + ov::float16(8.0f), ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), ov::float16(13.0f), ov::float16(14.0f), ov::float16(15.0f), + ov::float16(16.0f), ov::float16(17.0f), ov::float16(18.0f), ov::float16(19.0f), ov::float16(20.0f), ov::float16(21.0f), ov::float16(22.0f), ov::float16(23.0f), + ov::float16(24.0f), ov::float16(25.0f), ov::float16(26.0f), ov::float16(27.0f), ov::float16(28.0f), ov::float16(29.0f), ov::float16(30.0f), ov::float16(31.0f) }); topology topology; diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/binary_convolution_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/binary_convolution_gpu_test.cpp index e4e6549db79022..9561f7a94f4941 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/binary_convolution_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/binary_convolution_gpu_test.cpp @@ -210,8 +210,8 @@ TEST_P(binary_convolution_test, conv) { cldnn::feature(p.oc), cldnn::spatial(p.ow, p.oh)}; - auto input = engine.allocate_memory({ cldnn::data_types::bin, cldnn::format::b_fs_yx_32fp, is_size }); - auto weights = engine.allocate_memory({ cldnn::data_types::bin, cldnn::format::bfyx, wei_size }); + auto input = engine.allocate_memory({ cldnn::data_types::u1, cldnn::format::b_fs_yx_32fp, is_size }); + auto weights = engine.allocate_memory({ cldnn::data_types::u1, cldnn::format::bfyx, wei_size }); auto output_ref = engine.allocate_memory({ cldnn::data_types::f32, cldnn::format::bfyx, os_size }); fill(input); @@ -329,8 +329,8 @@ TEST(binary_convolution, basic_convolution_1x1_single_packed_channel) { if(engine.get_device_info().supports_immad) return; - auto input = engine.allocate_memory({ data_types::bin, format::b_fs_yx_32fp, { 1, 16, 2, 2 } }); - auto weights = engine.allocate_memory({ data_types::bin, format::bfyx, { 4, 16, 1, 1 } }); + auto input = engine.allocate_memory({ data_types::u1, format::b_fs_yx_32fp, { 1, 16, 2, 2 } }); + auto weights = engine.allocate_memory({ data_types::u1, format::bfyx, { 4, 16, 1, 1 } }); // 0 0 1 0 0 1 0 0 1 0 1 0 1 0 1 0 // 1 0 0 0 0 1 1 0 0 1 1 0 1 0 1 0 @@ -415,8 +415,8 @@ TEST(binary_convolution, basic_convolution_1x1_single_packed_channel_fp16) { if(engine.get_device_info().supports_immad) return; - auto input = engine.allocate_memory({ data_types::bin, format::b_fs_yx_32fp, { 1, 16, 2, 2 } }); - auto weights = engine.allocate_memory({ data_types::bin, format::bfyx, { 4, 16, 1, 1 } }); + auto input = engine.allocate_memory({ data_types::u1, format::b_fs_yx_32fp, { 1, 16, 2, 2 } }); + auto weights = engine.allocate_memory({ data_types::u1, format::bfyx, { 4, 16, 1, 1 } }); // 0 0 1 0 0 1 0 0 1 0 1 0 1 0 1 0 // 1 0 0 0 0 1 1 0 0 1 1 0 1 0 1 0 diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/border_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/border_gpu_test.cpp index aa43f2da007e7c..8183d128afb449 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/border_gpu_test.cpp 
+++ b/src/plugins/intel_gpu/tests/unit/test_cases/border_gpu_test.cpp @@ -165,12 +165,12 @@ INSTANTIATE_TEST_SUITE_P(negative_pads, testing::Values(true), testing::Values(false))); -using border_test_f16 = border_test; +using border_test_f16 = border_test; TEST_P(border_test_f16, border_test_f16) {} INSTANTIATE_TEST_SUITE_P(border_test_f16, border_test_f16, testing::Combine(testing::Values(ov::op::PadMode::REFLECT), - testing::Values(FLOAT16(123)), + testing::Values(ov::float16(123)), testing::Values(format::type::bs_fs_yx_bsv32_fsv16), testing::Values(std::array{2, 3, 4, 5}), testing::Values(std::array{1, 2, 3, 4}), @@ -180,7 +180,7 @@ INSTANTIATE_TEST_SUITE_P(border_test_f16, INSTANTIATE_TEST_SUITE_P(export_import, border_test_f16, testing::Combine(testing::Values(ov::op::PadMode::REFLECT), - testing::Values(FLOAT16(123)), + testing::Values(ov::float16(123)), testing::Values(format::type::bs_fs_yx_bsv32_fsv16), testing::Values(std::array{2, 3, 4, 5}), testing::Values(std::array{1, 2, 3, 4}), @@ -1830,7 +1830,7 @@ TEST(border_gpu, basic_zero_input_dynamic) { auto& engine = get_test_engine(); // WA to avoid crash due to attempt to allocate 0 bytes for USM memory - layout fake_input_layout = {{1}, data_types::bin, format::bfyx}; + layout fake_input_layout = {{1}, data_types::undefined, format::bfyx}; auto input = engine.allocate_memory(fake_input_layout); layout zero_input_layout = {{0, 1}, data_types::f32, format::bfyx}; @@ -1878,7 +1878,7 @@ TEST(border_gpu, basic_zero_input) { auto& engine = get_test_engine(); // WA to avoid crash due to attempt to allocate 0 bytes for USM memory - layout fake_input_layout = {{1}, data_types::bin, format::bfyx}; + layout fake_input_layout = {{1}, data_types::u8, format::bfyx}; auto input = engine.allocate_memory(fake_input_layout); layout zero_input_layout = {{0, 1}, data_types::f32, format::bfyx}; diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/broadcast_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/broadcast_gpu_test.cpp index cdc17779d9ad40..9c7aa0102c98f8 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/broadcast_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/broadcast_gpu_test.cpp @@ -1208,11 +1208,11 @@ TEST(broadcast_gpu_int8_t, bs_fs_yx_bsv32_fsv32_1x38x1x1_to_1x38x1x5_w_b_axes_0) } TEST(broadcast_gpu_fp16, b_fs_yx_fsv16_1x38x1x1_to_1x38x1x5_w_b_axes_0) { - start_broadcast_test(format::b_fs_yx_fsv16, data_types::f16, {1, 38, 1, 5}, {1, 38, 1, 1}, {0}); + start_broadcast_test(format::b_fs_yx_fsv16, data_types::f16, {1, 38, 1, 5}, {1, 38, 1, 1}, {0}); } TEST(broadcast_gpu_fp16, bs_fs_yx_bsv32_fsv16_1x38x1x1_to_1x38x1x5_w_b_axes_0) { - start_broadcast_test(format::bs_fs_yx_bsv32_fsv16, data_types::f16, {1, 38, 1, 5}, {1, 38, 1, 1}, {0}); + start_broadcast_test(format::bs_fs_yx_bsv32_fsv16, data_types::f16, {1, 38, 1, 5}, {1, 38, 1, 1}, {0}); } @@ -1237,11 +1237,11 @@ TEST(broadcast_gpu_int8_t, bs_fs_yx_bsv32_fsv32_1_to_4x5_w_b_axes_0x1) { } TEST(broadcast_gpu_fp16, b_fs_yx_fsv16_1_to_4x5_w_b_axes_0x1) { - start_broadcast_test(format::b_fs_yx_fsv16, data_types::f16, {4, 5}, {1}, {0, 1}); + start_broadcast_test(format::b_fs_yx_fsv16, data_types::f16, {4, 5}, {1}, {0, 1}); } TEST(broadcast_gpu_fp16, bs_fs_yx_bsv32_fsv16_1_to_4x5_w_b_axes_0x1) { - start_broadcast_test(format::bs_fs_yx_bsv32_fsv16, data_types::f16, {4, 5}, {1}, {0, 1}); + start_broadcast_test(format::bs_fs_yx_bsv32_fsv16, data_types::f16, {4, 5}, {1}, {0, 1}); } @@ -1266,11 +1266,11 @@ TEST(broadcast_gpu_int8_t, 
bs_fs_yx_bsv32_fsv32_1_to_3x4x5_w_b_axes_0x1x2) { } TEST(broadcast_gpu_fp16, b_fs_yx_fsv16_1_to_3x4x5_w_b_axes_0x1x2) { - start_broadcast_test(format::b_fs_yx_fsv16, data_types::f16, {3, 4, 5}, {1}, {0, 1, 2}); + start_broadcast_test(format::b_fs_yx_fsv16, data_types::f16, {3, 4, 5}, {1}, {0, 1, 2}); } TEST(broadcast_gpu_fp16, bs_fs_yx_bsv32_fsv16_1_to_3x4x5_w_b_axes_0x1x2) { - start_broadcast_test(format::bs_fs_yx_bsv32_fsv16, data_types::f16, {3, 4, 5}, {1}, {0, 1, 2}); + start_broadcast_test(format::bs_fs_yx_bsv32_fsv16, data_types::f16, {3, 4, 5}, {1}, {0, 1, 2}); } @@ -1295,11 +1295,11 @@ TEST(broadcast_gpu_int8_t, bs_fs_yx_bsv32_fsv16_1_to_2x3x4x5_w_b_axes_0x1x2x3) { } TEST(broadcast_gpu_fp16, b_fs_yx_fsv16_1_to_2x3x4x5_w_b_axes_0x1x2x3) { - start_broadcast_test(format::b_fs_yx_fsv16, data_types::f16, {2, 3, 4, 5}, {1}, {0, 1, 2, 3}); + start_broadcast_test(format::b_fs_yx_fsv16, data_types::f16, {2, 3, 4, 5}, {1}, {0, 1, 2, 3}); } TEST(broadcast_gpu_fp16, bs_fs_yx_bsv32_fsv16_1_to_2x3x4x5_w_b_axes_0x1x2x3) { - start_broadcast_test(format::bs_fs_yx_bsv32_fsv16, data_types::f16, {2, 3, 4, 5}, {1}, {0, 1, 2, 3}); + start_broadcast_test(format::bs_fs_yx_bsv32_fsv16, data_types::f16, {2, 3, 4, 5}, {1}, {0, 1, 2, 3}); } @@ -1324,11 +1324,11 @@ TEST(broadcast_gpu_int8_t, bs_fs_yx_bsv32_fsv32_42x36x1x1_to_42x36x1x5_w_o_b_axe } TEST(broadcast_gpu_fp16, b_fs_yx_fsv16_42x36x1x1_to_42x36x1x5_w_o_b_axes) { - start_broadcast_test(format::b_fs_yx_fsv16, data_types::f16, {42, 36, 1, 5}, {42, 36, 1, 1}, {}); + start_broadcast_test(format::b_fs_yx_fsv16, data_types::f16, {42, 36, 1, 5}, {42, 36, 1, 1}, {}); } TEST(broadcast_gpu_fp16, bs_fs_yx_bsv32_fsv16_42x36x1x1_to_42x36x1x5_w_o_b_axes) { - start_broadcast_test(format::bs_fs_yx_bsv32_fsv16, data_types::f16, {42, 36, 1, 5}, {42, 36, 1, 1}, {}); + start_broadcast_test(format::bs_fs_yx_bsv32_fsv16, data_types::f16, {42, 36, 1, 5}, {42, 36, 1, 1}, {}); } @@ -1353,11 +1353,11 @@ TEST(broadcast_gpu_int8_t, bs_fs_yx_bsv32_fsv32_35x32x1x3_to_140x128x1x12_w_o_b_ } TEST(broadcast_gpu_fp16, b_fs_yx_fsv16_35x32x1x3_to_140x128x1x12_w_o_b_axes) { - start_broadcast_test(format::b_fs_yx_fsv16, data_types::f16, {140, 128, 1, 12}, {35, 32, 1, 3}, {}); + start_broadcast_test(format::b_fs_yx_fsv16, data_types::f16, {140, 128, 1, 12}, {35, 32, 1, 3}, {}); } TEST(broadcast_gpu_fp16, bs_fs_yx_bsv32_fsv16_35x32x1x3_to_140x128x1x12_w_o_b_axes) { - start_broadcast_test(format::bs_fs_yx_bsv32_fsv16, data_types::f16, {140, 128, 1, 12}, {35, 32, 1, 3}, {}); + start_broadcast_test(format::bs_fs_yx_bsv32_fsv16, data_types::f16, {140, 128, 1, 12}, {35, 32, 1, 3}, {}); } @@ -1382,11 +1382,11 @@ TEST(broadcast_gpu_int8_t, bs_fs_yx_bsv32_fsv32_42x64x1x1_to_84x128x4x5_w_o_b_ax } TEST(broadcast_gpu_fp16, b_fs_yx_fsv16_42x64x1x1_to_84x128x4x5_w_o_b_axes) { - start_broadcast_test(format::b_fs_yx_fsv16, data_types::f16, {84, 128, 4, 5}, {42, 64, 1, 1}, {}); + start_broadcast_test(format::b_fs_yx_fsv16, data_types::f16, {84, 128, 4, 5}, {42, 64, 1, 1}, {}); } TEST(broadcast_gpu_fp16, bs_fs_yx_bsv32_fsv16_42x64x1x1_to_84x128x4x5_w_o_b_axes) { - start_broadcast_test(format::bs_fs_yx_bsv32_fsv16, data_types::f16, {84, 128, 4, 5}, {42, 64, 1, 1}, {}); + start_broadcast_test(format::bs_fs_yx_bsv32_fsv16, data_types::f16, {84, 128, 4, 5}, {42, 64, 1, 1}, {}); } @@ -1411,11 +1411,11 @@ TEST(broadcast_gpu_int8_t, bs_fs_yx_bsv32_fsv32_156x78x2x3_to_156x156x8x6_w_o_b_ } TEST(broadcast_gpu_fp16, b_fs_yx_fsv16_156x78x2x3_to_156x156x8x6_w_o_b_axes) { - start_broadcast_test(format::b_fs_yx_fsv16, 
data_types::f16, {156, 156, 8, 6}, {156, 78, 2, 3}, {}); + start_broadcast_test(format::b_fs_yx_fsv16, data_types::f16, {156, 156, 8, 6}, {156, 78, 2, 3}, {}); } TEST(broadcast_gpu_fp16, bs_fs_yx_bsv32_fsv16_156x78x2x3_to_156x156x8x6_w_o_b_axes) { - start_broadcast_test(format::bs_fs_yx_bsv32_fsv16, data_types::f16, {156, 156, 8, 6}, {156, 78, 2, 3}, {}); + start_broadcast_test(format::bs_fs_yx_bsv32_fsv16, data_types::f16, {156, 156, 8, 6}, {156, 78, 2, 3}, {}); } @@ -1440,11 +1440,11 @@ TEST(broadcast_gpu_int8_t, bs_fs_yx_bsv32_fsv32_42x2x3x4_to_126x6x6x4_w_o_b_axes } TEST(broadcast_gpu_fp16, b_fs_yx_fsv16_42x2x3x4_to_126x6x6x4_w_o_b_axes) { - start_broadcast_test(format::b_fs_yx_fsv16, data_types::f16, {126, 6, 6, 4}, {42, 2, 3, 4}, {}); + start_broadcast_test(format::b_fs_yx_fsv16, data_types::f16, {126, 6, 6, 4}, {42, 2, 3, 4}, {}); } TEST(broadcast_gpu_fp16, bs_fs_yx_bsv32_fsv16_42x2x3x4_to_126x6x6x4_w_o_b_axes) { - start_broadcast_test(format::bs_fs_yx_bsv32_fsv16, data_types::f16, {126, 6, 6, 4}, {42, 2, 3, 4}, {}); + start_broadcast_test(format::bs_fs_yx_bsv32_fsv16, data_types::f16, {126, 6, 6, 4}, {42, 2, 3, 4}, {}); } @@ -1469,11 +1469,11 @@ TEST(broadcast_gpu_int8_t, bs_fs_yx_bsv32_fsv32_256x91x4x5_to_256x273x8x5_w_o_b_ } TEST(broadcast_gpu_fp16, b_fs_yx_fsv16_256x91x4x5_to_256x273x8x5_w_o_b_axes) { - start_broadcast_test(format::b_fs_yx_fsv16, data_types::f16, {256, 273, 8, 5}, {256, 91, 4, 5}, {}); + start_broadcast_test(format::b_fs_yx_fsv16, data_types::f16, {256, 273, 8, 5}, {256, 91, 4, 5}, {}); } TEST(broadcast_gpu_fp16, bs_fs_yx_bsv32_fsv16_256x91x4x5_to_256x273x8x5_w_o_b_axes) { - start_broadcast_test(format::bs_fs_yx_bsv32_fsv16, data_types::f16, {256, 273, 8, 5}, {256, 91, 4, 5}, {}); + start_broadcast_test(format::bs_fs_yx_bsv32_fsv16, data_types::f16, {256, 273, 8, 5}, {256, 91, 4, 5}, {}); } @@ -1498,11 +1498,11 @@ TEST(broadcast_gpu_int8_t, bs_fs_yx_bsv32_fsv326_1x45x1x3_to_1x45x2x3_w_b_axes_0 } TEST(broadcast_gpu_fp16, b_fs_yx_fsv166_1x45x1x3_to_1x45x2x3_w_b_axes_0) { - start_broadcast_test(format::b_fs_yx_fsv16, data_types::f16, {1, 45, 2, 3}, {1, 45, 1, 3}, {0}); + start_broadcast_test(format::b_fs_yx_fsv16, data_types::f16, {1, 45, 2, 3}, {1, 45, 1, 3}, {0}); } TEST(broadcast_gpu_fp16, bs_fs_yx_bsv32_fsv166_1x45x1x3_to_1x45x2x3_w_b_axes_0) { - start_broadcast_test(format::bs_fs_yx_bsv32_fsv16, data_types::f16, {1, 45, 2, 3}, {1, 45, 1, 3}, {0}); + start_broadcast_test(format::bs_fs_yx_bsv32_fsv16, data_types::f16, {1, 45, 2, 3}, {1, 45, 1, 3}, {0}); } @@ -1527,11 +1527,11 @@ TEST(broadcast_gpu_int8_t, bs_fs_yx_bsv32_fsv32_1x62x1x3_to_1x62x2x6_w_b_axes_0) } TEST(broadcast_gpu_fp16, b_fs_yx_fsv16_1x62x1x3_to_1x62x2x6_w_b_axes_0) { - start_broadcast_test(format::b_fs_yx_fsv16, data_types::f16, {1, 62, 2, 6}, {1, 62, 1, 3}, {0}); + start_broadcast_test(format::b_fs_yx_fsv16, data_types::f16, {1, 62, 2, 6}, {1, 62, 1, 3}, {0}); } TEST(broadcast_gpu_fp16, bs_fs_yx_bsv32_fsv16_1x62x1x3_to_1x62x2x6_w_b_axes_0) { - start_broadcast_test(format::bs_fs_yx_bsv32_fsv16, data_types::f16, {1, 62, 2, 6}, {1, 62, 1, 3}, {0}); + start_broadcast_test(format::bs_fs_yx_bsv32_fsv16, data_types::f16, {1, 62, 2, 6}, {1, 62, 1, 3}, {0}); } @@ -1556,11 +1556,11 @@ TEST(broadcast_gpu_int8_t, bs_fs_yx_bsv32_fsv32_2_to_2x3_w_b_axes_1) { } TEST(broadcast_gpu_fp16, b_fs_yx_fsv16_2_to_2x3_w_b_axes_1) { - start_broadcast_test(format::b_fs_yx_fsv16, data_types::f16, {2, 3}, {2}, {1}); + start_broadcast_test(format::b_fs_yx_fsv16, data_types::f16, {2, 3}, {2}, {1}); } TEST(broadcast_gpu_fp16, 
bs_fs_yx_bsv32_fsv16_2_to_2x3_w_b_axes_1) { - start_broadcast_test(format::bs_fs_yx_bsv32_fsv16, data_types::f16, {2, 3}, {2}, {1}); + start_broadcast_test(format::bs_fs_yx_bsv32_fsv16, data_types::f16, {2, 3}, {2}, {1}); } @@ -1585,11 +1585,11 @@ TEST(broadcast_gpu_int8_t, bs_fs_yx_bsv32_fsv32_2_to_6x3_w_b_axes_1) { } TEST(broadcast_gpu_fp16, b_fs_yx_fsv16_2_to_6x3_w_b_axes_1) { - start_broadcast_test(format::b_fs_yx_fsv16, data_types::f16, {6, 3}, {2}, {1}); + start_broadcast_test(format::b_fs_yx_fsv16, data_types::f16, {6, 3}, {2}, {1}); } TEST(broadcast_gpu_fp16, bs_fs_yx_bsv32_fsv16_2_to_6x3_w_b_axes_1) { - start_broadcast_test(format::bs_fs_yx_bsv32_fsv16, data_types::f16, {6, 3}, {2}, {1}); + start_broadcast_test(format::bs_fs_yx_bsv32_fsv16, data_types::f16, {6, 3}, {2}, {1}); } @@ -1614,11 +1614,11 @@ TEST(broadcast_gpu_int8_t, bs_fs_yx_bsv32_fsv32_1x76x3x4_to_1x152x3x4_w_b_axes_0 } TEST(broadcast_gpu_fp16, b_fs_yx_fsv16_1x76x3x4_to_1x152x3x4_w_b_axes_0) { - start_broadcast_test(format::b_fs_yx_fsv16, data_types::f16, {1, 152, 3, 4}, {1, 76, 3, 4}, {0}); + start_broadcast_test(format::b_fs_yx_fsv16, data_types::f16, {1, 152, 3, 4}, {1, 76, 3, 4}, {0}); } TEST(broadcast_gpu_fp16, bs_fs_yx_bsv32_fsv16_1x76x3x4_to_1x152x3x4_w_b_axes_0) { - start_broadcast_test(format::bs_fs_yx_bsv32_fsv16, data_types::f16, {1, 152, 3, 4}, {1, 76, 3, 4}, {0}); + start_broadcast_test(format::bs_fs_yx_bsv32_fsv16, data_types::f16, {1, 152, 3, 4}, {1, 76, 3, 4}, {0}); } @@ -1643,11 +1643,11 @@ TEST(broadcast_gpu_int8_t, bs_fs_yx_bsv32_fsv32_2x4_to_2x3x4_w_b_axes_1) { } TEST(broadcast_gpu_fp16, b_fs_yx_fsv16_2x4_to_2x3x4_w_b_axes_1) { - start_broadcast_test(format::b_fs_yx_fsv16, data_types::f16, {2, 3, 4}, {2, 4}, {1}); + start_broadcast_test(format::b_fs_yx_fsv16, data_types::f16, {2, 3, 4}, {2, 4}, {1}); } TEST(broadcast_gpu_fp16, bs_fs_yx_bsv32_fsv16_2x4_to_2x3x4_w_b_axes_1) { - start_broadcast_test(format::bs_fs_yx_bsv32_fsv16, data_types::f16, {2, 3, 4}, {2, 4}, {1}); + start_broadcast_test(format::bs_fs_yx_bsv32_fsv16, data_types::f16, {2, 3, 4}, {2, 4}, {1}); } @@ -1672,11 +1672,11 @@ TEST(broadcast_gpu_int8_t, bs_fs_yx_bsv32_fsv32_2x3_to_2x3x4_w_b_axes_2) { } TEST(broadcast_gpu_fp16, b_fs_yx_fsv16_2x3_to_2x3x4_w_b_axes_2) { - start_broadcast_test(format::b_fs_yx_fsv16, data_types::f16, {2, 3, 4}, {2, 3}, {2}); + start_broadcast_test(format::b_fs_yx_fsv16, data_types::f16, {2, 3, 4}, {2, 3}, {2}); } TEST(broadcast_gpu_fp16, bs_fs_yx_bsv32_fsv16_2x3_to_2x3x4_w_b_axes_2) { - start_broadcast_test(format::bs_fs_yx_bsv32_fsv16, data_types::f16, {2, 3, 4}, {2, 3}, {2}); + start_broadcast_test(format::bs_fs_yx_bsv32_fsv16, data_types::f16, {2, 3, 4}, {2, 3}, {2}); } @@ -1701,11 +1701,11 @@ TEST(broadcast_gpu_int8_t, bs_fs_yx_bsv32_fsv32_4_to_2x3x4_w_b_axes_0_1) { } TEST(broadcast_gpu_fp16, b_fs_yx_fsv16_4_to_2x3x4_w_b_axes_0_1) { - start_broadcast_test(format::b_fs_yx_fsv16, data_types::f16, {2, 3, 4}, {4}, {0, 1}); + start_broadcast_test(format::b_fs_yx_fsv16, data_types::f16, {2, 3, 4}, {4}, {0, 1}); } TEST(broadcast_gpu_fp16, bs_fs_yx_bsv32_fsv16_4_to_2x3x4_w_b_axes_0_1) { - start_broadcast_test(format::bs_fs_yx_bsv32_fsv16, data_types::f16, {2, 3, 4}, {4}, {0, 1}); + start_broadcast_test(format::bs_fs_yx_bsv32_fsv16, data_types::f16, {2, 3, 4}, {4}, {0, 1}); } @@ -1730,11 +1730,11 @@ TEST(broadcast_gpu_int8_t, bs_fs_yx_bsv32_fsv32_3_to_2x3x4_w_b_axes_0_2) { } TEST(broadcast_gpu_fp16, b_fs_yx_fsv16_3_to_2x3x4_w_b_axes_0_2) { - start_broadcast_test(format::b_fs_yx_fsv16, data_types::f16, {2, 3, 4}, 
{3}, {0, 2}); + start_broadcast_test(format::b_fs_yx_fsv16, data_types::f16, {2, 3, 4}, {3}, {0, 2}); } TEST(broadcast_gpu_fp16, bs_fs_yx_bsv32_fsv16_3_to_2x3x4_w_b_axes_0_2) { - start_broadcast_test(format::bs_fs_yx_bsv32_fsv16, data_types::f16, {2, 3, 4}, {3}, {0, 2}); + start_broadcast_test(format::bs_fs_yx_bsv32_fsv16, data_types::f16, {2, 3, 4}, {3}, {0, 2}); } @@ -1759,11 +1759,11 @@ TEST(broadcast_gpu_int8_t, bs_fs_yx_bsv32_fsv16_2_to_2x3x4_w_b_axes_1_2) { } TEST(broadcast_gpu_fp16, b_fs_yx_fsv16_2_to_2x3x4_w_b_axes_1_2) { - start_broadcast_test(format::b_fs_yx_fsv16, data_types::f16, {2, 3, 4}, {2}, {1, 2}); + start_broadcast_test(format::b_fs_yx_fsv16, data_types::f16, {2, 3, 4}, {2}, {1, 2}); } TEST(broadcast_gpu_fp16, bs_fs_yx_bsv32_fsv16_2_to_2x3x4_w_b_axes_1_2) { - start_broadcast_test(format::bs_fs_yx_bsv32_fsv16, data_types::f16, {2, 3, 4}, {2}, {1, 2}); + start_broadcast_test(format::bs_fs_yx_bsv32_fsv16, data_types::f16, {2, 3, 4}, {2}, {1, 2}); } @@ -1788,11 +1788,11 @@ TEST(broadcast_gpu_int8_t, bs_fs_yx_bsv32_fsv32_1x128x4x5_to_2x256x4x5_w_b_axes_ } TEST(broadcast_gpu_fp16, b_fs_yx_fsv16_1x128x4x5_to_2x256x4x5_w_b_axes_0) { - start_broadcast_test(format::b_fs_yx_fsv16, data_types::f16, {2, 256, 4, 5}, {1, 128, 4, 5}, {0}); + start_broadcast_test(format::b_fs_yx_fsv16, data_types::f16, {2, 256, 4, 5}, {1, 128, 4, 5}, {0}); } TEST(broadcast_gpu_fp16, bs_fs_yx_bsv32_fsv16_1x128x4x5_to_2x256x4x5_w_b_axes_0) { - start_broadcast_test(format::bs_fs_yx_bsv32_fsv16, data_types::f16, {2, 256, 4, 5}, {1, 128, 4, 5}, {0}); + start_broadcast_test(format::bs_fs_yx_bsv32_fsv16, data_types::f16, {2, 256, 4, 5}, {1, 128, 4, 5}, {0}); } @@ -1817,11 +1817,11 @@ TEST(broadcast_gpu_int8_t, bs_fs_yx_bsv32_fsv32_2x4x5_to_2x3x4x5_w_b_axes_1) { } TEST(broadcast_gpu_fp16, b_fs_yx_fsv16_2x4x5_to_2x3x4x5_w_b_axes_1) { - start_broadcast_test(format::b_fs_yx_fsv16, data_types::f16, {2, 3, 4, 5}, {2, 4, 5}, {1}); + start_broadcast_test(format::b_fs_yx_fsv16, data_types::f16, {2, 3, 4, 5}, {2, 4, 5}, {1}); } TEST(broadcast_gpu_fp16, bs_fs_yx_bsv32_fsv16_2x4x5_to_2x3x4x5_w_b_axes_1) { - start_broadcast_test(format::bs_fs_yx_bsv32_fsv16, data_types::f16, {2, 3, 4, 5}, {2, 4, 5}, {1}); + start_broadcast_test(format::bs_fs_yx_bsv32_fsv16, data_types::f16, {2, 3, 4, 5}, {2, 4, 5}, {1}); } @@ -1846,11 +1846,11 @@ TEST(broadcast_gpu_int8_t, bs_fs_yx_bsv32_fsv32_2x3x5_to_2x3x4x5_w_b_axes_2) { } TEST(broadcast_gpu_fp16, b_fs_yx_fsv16_2x3x5_to_2x3x4x5_w_b_axes_2) { - start_broadcast_test(format::b_fs_yx_fsv16, data_types::f16, {2, 3, 4, 5}, {2, 3, 5}, {2}); + start_broadcast_test(format::b_fs_yx_fsv16, data_types::f16, {2, 3, 4, 5}, {2, 3, 5}, {2}); } TEST(broadcast_gpu_fp16, bs_fs_yx_bsv32_fsv16_2x3x5_to_2x3x4x5_w_b_axes_2) { - start_broadcast_test(format::bs_fs_yx_bsv32_fsv16, data_types::f16, {2, 3, 4, 5}, {2, 3, 5}, {2}); + start_broadcast_test(format::bs_fs_yx_bsv32_fsv16, data_types::f16, {2, 3, 4, 5}, {2, 3, 5}, {2}); } @@ -1875,11 +1875,11 @@ TEST(broadcast_gpu_int8_t, bs_fs_yx_bsv32_fsv32_2x3x4_to_2x3x4x5_w_b_axes_3) { } TEST(broadcast_gpu_fp16, b_fs_yx_fsv16_2x3x4_to_2x3x4x5_w_b_axes_3) { - start_broadcast_test(format::b_fs_yx_fsv16, data_types::f16, {2, 3, 4, 5}, {2, 3, 4}, {3}); + start_broadcast_test(format::b_fs_yx_fsv16, data_types::f16, {2, 3, 4, 5}, {2, 3, 4}, {3}); } TEST(broadcast_gpu_fp16, bs_fs_yx_bsv32_fsv16_2x3x4_to_2x3x4x5_w_b_axes_3) { - start_broadcast_test(format::bs_fs_yx_bsv32_fsv16, data_types::f16, {2, 3, 4, 5}, {2, 3, 4}, {3}); + start_broadcast_test(format::bs_fs_yx_bsv32_fsv16, 
data_types::f16, {2, 3, 4, 5}, {2, 3, 4}, {3}); } @@ -1904,11 +1904,11 @@ TEST(broadcast_gpu_int8_t, bs_fs_yx_bsv32_fsv16_4x5_to_2x3x4x5_w_b_axes_0_1) { } TEST(broadcast_gpu_fp16, b_fs_yx_fsv16_4x5_to_2x3x4x5_w_b_axes_0_1) { - start_broadcast_test(format::b_fs_yx_fsv16, data_types::f16, {2, 3, 4, 5}, {4, 5}, {0, 1}); + start_broadcast_test(format::b_fs_yx_fsv16, data_types::f16, {2, 3, 4, 5}, {4, 5}, {0, 1}); } TEST(broadcast_gpu_fp16, bs_fs_yx_bsv32_fsv16_4x5_to_2x3x4x5_w_b_axes_0_1) { - start_broadcast_test(format::bs_fs_yx_bsv32_fsv16, data_types::f16, {2, 3, 4, 5}, {4, 5}, {0, 1}); + start_broadcast_test(format::bs_fs_yx_bsv32_fsv16, data_types::f16, {2, 3, 4, 5}, {4, 5}, {0, 1}); } @@ -1933,11 +1933,11 @@ TEST(broadcast_gpu_int8_t, bs_fs_yx_bsv32_fsv32_3x5_to_2x3x4x5_w_b_axes_0_2) { } TEST(broadcast_gpu_fp16, b_fs_yx_fsv16_3x5_to_2x3x4x5_w_b_axes_0_2) { - start_broadcast_test(format::b_fs_yx_fsv16, data_types::f16, {2, 3, 4, 5}, {3, 5}, {0, 2}); + start_broadcast_test(format::b_fs_yx_fsv16, data_types::f16, {2, 3, 4, 5}, {3, 5}, {0, 2}); } TEST(broadcast_gpu_fp16, bs_fs_yx_bsv32_fsv16_3x5_to_2x3x4x5_w_b_axes_0_2) { - start_broadcast_test(format::bs_fs_yx_bsv32_fsv16, data_types::f16, {2, 3, 4, 5}, {3, 5}, {0, 2}); + start_broadcast_test(format::bs_fs_yx_bsv32_fsv16, data_types::f16, {2, 3, 4, 5}, {3, 5}, {0, 2}); } @@ -1962,11 +1962,11 @@ TEST(broadcast_gpu_int8_t, bs_fs_yx_bsv32_fsv32_3x4_to_2x3x4x5_w_b_axes_0_3) { } TEST(broadcast_gpu_fp16, b_fs_yx_fsv16_3x4_to_2x3x4x5_w_b_axes_0_3) { - start_broadcast_test(format::b_fs_yx_fsv16, data_types::f16, {2, 3, 4, 5}, {3, 4}, {0, 3}); + start_broadcast_test(format::b_fs_yx_fsv16, data_types::f16, {2, 3, 4, 5}, {3, 4}, {0, 3}); } TEST(broadcast_gpu_fp16, bs_fs_yx_bsv32_fsv16_3x4_to_2x3x4x5_w_b_axes_0_3) { - start_broadcast_test(format::bs_fs_yx_bsv32_fsv16, data_types::f16, {2, 3, 4, 5}, {3, 4}, {0, 3}); + start_broadcast_test(format::bs_fs_yx_bsv32_fsv16, data_types::f16, {2, 3, 4, 5}, {3, 4}, {0, 3}); } @@ -1991,11 +1991,11 @@ TEST(broadcast_gpu_int8_t, bs_fs_yx_bsv32_fsv32_2x5_to_2x3x4x5_w_b_axes_1_2) { } TEST(broadcast_gpu_fp16, b_fs_yx_fsv16_2x5_to_2x3x4x5_w_b_axes_1_2) { - start_broadcast_test(format::b_fs_yx_fsv16, data_types::f16, {2, 3, 4, 5}, {2, 5}, {1, 2}); + start_broadcast_test(format::b_fs_yx_fsv16, data_types::f16, {2, 3, 4, 5}, {2, 5}, {1, 2}); } TEST(broadcast_gpu_fp16, bs_fs_yx_bsv32_fsv16_2x5_to_2x3x4x5_w_b_axes_1_2) { - start_broadcast_test(format::bs_fs_yx_bsv32_fsv16, data_types::f16, {2, 3, 4, 5}, {2, 5}, {1, 2}); + start_broadcast_test(format::bs_fs_yx_bsv32_fsv16, data_types::f16, {2, 3, 4, 5}, {2, 5}, {1, 2}); } @@ -2020,11 +2020,11 @@ TEST(broadcast_gpu_int8_t, bs_fs_yx_bsv32_fsv32_2x4_to_2x3x4x5_w_b_axes_1_3) { } TEST(broadcast_gpu_fp16, b_fs_yx_fsv16_2x4_to_2x3x4x5_w_b_axes_1_3) { - start_broadcast_test(format::b_fs_yx_fsv16, data_types::f16, {2, 3, 4, 5}, {2, 4}, {1, 3}); + start_broadcast_test(format::b_fs_yx_fsv16, data_types::f16, {2, 3, 4, 5}, {2, 4}, {1, 3}); } TEST(broadcast_gpu_fp16, bs_fs_yx_bsv32_fsv16_2x4_to_2x3x4x5_w_b_axes_1_3) { - start_broadcast_test(format::bs_fs_yx_bsv32_fsv16, data_types::f16, {2, 3, 4, 5}, {2, 4}, {1, 3}); + start_broadcast_test(format::bs_fs_yx_bsv32_fsv16, data_types::f16, {2, 3, 4, 5}, {2, 4}, {1, 3}); } @@ -2049,11 +2049,11 @@ TEST(broadcast_gpu_int8_t, bs_fs_yx_bsv32_fsv32_2x3_to_2x3x4x5_w_b_axes_2_3) { } TEST(broadcast_gpu_fp16, b_fs_yx_fsv16_2x3_to_2x3x4x5_w_b_axes_2_3) { - start_broadcast_test(format::b_fs_yx_fsv16, data_types::f16, {2, 3, 4, 5}, {2, 3}, {2, 3}); + 
start_broadcast_test(format::b_fs_yx_fsv16, data_types::f16, {2, 3, 4, 5}, {2, 3}, {2, 3}); } TEST(broadcast_gpu_fp16, bs_fs_yx_bsv32_fsv16_2x3_to_2x3x4x5_w_b_axes_2_3) { - start_broadcast_test(format::bs_fs_yx_bsv32_fsv16, data_types::f16, {2, 3, 4, 5}, {2, 3}, {2, 3}); + start_broadcast_test(format::bs_fs_yx_bsv32_fsv16, data_types::f16, {2, 3, 4, 5}, {2, 3}, {2, 3}); } @@ -2078,11 +2078,11 @@ TEST(broadcast_gpu_int8_t, bs_fs_yx_bsv32_fsv32_5_to_2x3x4x5_w_b_axes_0_1_2) { } TEST(broadcast_gpu_fp16, b_fs_yx_fsv16_5_to_2x3x4x5_w_b_axes_0_1_2) { - start_broadcast_test(format::b_fs_yx_fsv16, data_types::f16, {2, 3, 4, 5}, {5}, {0, 1, 2}); + start_broadcast_test(format::b_fs_yx_fsv16, data_types::f16, {2, 3, 4, 5}, {5}, {0, 1, 2}); } TEST(broadcast_gpu_fp16, bs_fs_yx_bsv32_fsv16_5_to_2x3x4x5_w_b_axes_0_1_2) { - start_broadcast_test(format::bs_fs_yx_bsv32_fsv16, data_types::f16, {2, 3, 4, 5}, {5}, {0, 1, 2}); + start_broadcast_test(format::bs_fs_yx_bsv32_fsv16, data_types::f16, {2, 3, 4, 5}, {5}, {0, 1, 2}); } @@ -2107,11 +2107,11 @@ TEST(broadcast_gpu_int8_t, bs_fs_yx_bsv32_fsv32_4_to_2x3x4x5_w_b_axes_0_1_3) { } TEST(broadcast_gpu_fp16, b_fs_yx_fsv16_4_to_2x3x4x5_w_b_axes_0_1_3) { - start_broadcast_test(format::b_fs_yx_fsv16, data_types::f16, {2, 3, 4, 5}, {4}, {0, 1, 3}); + start_broadcast_test(format::b_fs_yx_fsv16, data_types::f16, {2, 3, 4, 5}, {4}, {0, 1, 3}); } TEST(broadcast_gpu_fp16, bs_fs_yx_bsv32_fsv16_4_to_2x3x4x5_w_b_axes_0_1_3) { - start_broadcast_test(format::bs_fs_yx_bsv32_fsv16, data_types::f16, {2, 3, 4, 5}, {4}, {0, 1, 3}); + start_broadcast_test(format::bs_fs_yx_bsv32_fsv16, data_types::f16, {2, 3, 4, 5}, {4}, {0, 1, 3}); } @@ -2136,11 +2136,11 @@ TEST(broadcast_gpu_int8_t, bs_fs_yx_bsv32_fsv32_3_to_2x3x4x5_w_b_axes_0_2_3) { } TEST(broadcast_gpu_fp16, b_fs_yx_fsv16_3_to_2x3x4x5_w_b_axes_0_2_3) { - start_broadcast_test(format::b_fs_yx_fsv16, data_types::f16, {2, 3, 4, 5}, {3}, {0, 2, 3}); + start_broadcast_test(format::b_fs_yx_fsv16, data_types::f16, {2, 3, 4, 5}, {3}, {0, 2, 3}); } TEST(broadcast_gpu_fp16, bs_fs_yx_bsv32_fsv16_3_to_2x3x4x5_w_b_axes_0_2_3) { - start_broadcast_test(format::bs_fs_yx_bsv32_fsv16, data_types::f16, {2, 3, 4, 5}, {3}, {0, 2, 3}); + start_broadcast_test(format::bs_fs_yx_bsv32_fsv16, data_types::f16, {2, 3, 4, 5}, {3}, {0, 2, 3}); } @@ -2165,11 +2165,11 @@ TEST(broadcast_gpu_int8_t, bs_fs_yx_bsv32_fsv32_2_to_2x3x4x5_w_b_axes_1_2_3) { } TEST(broadcast_gpu_fp16, b_fs_yx_fsv16_2_to_2x3x4x5_w_b_axes_1_2_3) { - start_broadcast_test(format::b_fs_yx_fsv16, data_types::f16, {2, 3, 4, 5}, {2}, {1, 2, 3}); + start_broadcast_test(format::b_fs_yx_fsv16, data_types::f16, {2, 3, 4, 5}, {2}, {1, 2, 3}); } TEST(broadcast_gpu_fp16, bs_fs_yx_bsv32_fsv16_2_to_2x3x4x5_w_b_axes_1_2_3) { - start_broadcast_test(format::bs_fs_yx_bsv32_fsv16, data_types::f16, {2, 3, 4, 5}, {2}, {1, 2, 3}); + start_broadcast_test(format::bs_fs_yx_bsv32_fsv16, data_types::f16, {2, 3, 4, 5}, {2}, {1, 2, 3}); } @@ -2186,7 +2186,7 @@ TEST(broadcast_gpu_int8_t, b_fs_zyx_fsv32_1x48x1x1_to_1x48x1x5_w_b_axes_0) { } TEST(broadcast_gpu_fp16, b_fs_zyx_fsv16_1x48x1x1_to_1x48x1x5_w_b_axes_0) { - start_broadcast_test_5d(format::b_fs_zyx_fsv16, data_types::f16, { 1, 48, 1, 5 }, { 1, 48, 1, 1 }, { 0 }); + start_broadcast_test_5d(format::b_fs_zyx_fsv16, data_types::f16, { 1, 48, 1, 5 }, { 1, 48, 1, 1 }, { 0 }); } @@ -2203,7 +2203,7 @@ TEST(broadcast_gpu_int8_t, b_fs_zyx_fsv32_64x256x2x1_to_128x256x4x5_w_b_axes_0x1 } TEST(broadcast_gpu_fp16, b_fs_zyx_fsv16_64x256x2x1_to_128x256x4x5_w_b_axes_0x1) { - 
start_broadcast_test_5d(format::b_fs_zyx_fsv16, data_types::f16, { 128, 256, 4, 5 }, { 64, 256, 2, 1}, {}); + start_broadcast_test_5d(format::b_fs_zyx_fsv16, data_types::f16, { 128, 256, 4, 5 }, { 64, 256, 2, 1}, {}); } @@ -2220,7 +2220,7 @@ TEST(broadcast_gpu_int8_t, b_fs_zyx_fsv32_1_to_4x5_w_b_axes_0x1) { } TEST(broadcast_gpu_fp16, b_fs_zyx_fsv16_1_to_4x5_w_b_axes_0x1) { - start_broadcast_test_5d(format::b_fs_zyx_fsv16, data_types::f16, { 4, 5 }, { 1 }, { 0, 1 }); + start_broadcast_test_5d(format::b_fs_zyx_fsv16, data_types::f16, { 4, 5 }, { 1 }, { 0, 1 }); } @@ -2237,9 +2237,9 @@ TEST(broadcast_gpu_int8_t, b_fs_zyx_fsv32_1_to_2x3x4x5x2_w_b_axes_0x1x2x3x4) { } TEST(broadcast_gpu_fp16, b_fs_zyx_fsv16_1_to_2x3x4x5x2_w_b_axes_0x1x2x3x4) { - start_broadcast_test_5d(format::b_fs_zyx_fsv16, data_types::f16, { 2, 3, 4, 5, 2 }, { 1 }, { 0, 1, 2, 3, 4 }); + start_broadcast_test_5d(format::b_fs_zyx_fsv16, data_types::f16, { 2, 3, 4, 5, 2 }, { 1 }, { 0, 1, 2, 3, 4 }); } TEST(export_import_broadcast_gpu_fp16, b_fs_zyx_fsv16_1_to_2x3x4x5x2_w_b_axes_0x1x2x3x4) { - start_broadcast_test_5d(format::b_fs_zyx_fsv16, data_types::f16, { 2, 3, 4, 5, 2 }, { 1 }, { 0, 1, 2, 3, 4 }, true); + start_broadcast_test_5d(format::b_fs_zyx_fsv16, data_types::f16, { 2, 3, 4, 5, 2 }, { 1 }, { 0, 1, 2, 3, 4 }, true); } diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/bucketize_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/bucketize_gpu_test.cpp index 34ab112ab3da5c..6f16ee74010a6f 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/bucketize_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/bucketize_gpu_test.cpp @@ -32,13 +32,13 @@ struct bucketize_test : testing::TestWithParam> { std::tie(p, fmt, is_caching_test) = testing::TestWithParam>::GetParam(); auto& engine = get_test_engine(); - const layout in_layout(type_to_data_type::value, + const layout in_layout(ov::element::from(), format::bfyx, tensor(format::bfyx, {1, 1, 1, static_cast(p.input_values.size())})); auto input = engine.allocate_memory(in_layout); set_values(input, p.input_values); - const layout buckets_layout(type_to_data_type::value, + const layout buckets_layout(ov::element::from(), format::bfyx, tensor(format::bfyx, {static_cast(p.buckets_values.size()), 1, 1, 1})); auto buckets = engine.allocate_memory(buckets_layout); @@ -47,17 +47,17 @@ struct bucketize_test : testing::TestWithParam> { topology topology; topology.add(input_layout("input", input->get_layout())); topology.add(input_layout("buckets", buckets->get_layout())); - topology.add(reorder("reordered_input", input_info("input"), fmt, type_to_data_type::value)); - topology.add(reorder("reordered_buckets", input_info("buckets"), fmt, type_to_data_type::value)); + topology.add(reorder("reordered_input", input_info("input"), fmt, ov::element::from())); + topology.add(reorder("reordered_buckets", input_info("buckets"), fmt, ov::element::from())); topology.add( - bucketize("bucketize_right_bound", { input_info("reordered_input"), input_info("buckets") }, type_to_data_type::value, true)); + bucketize("bucketize_right_bound", { input_info("reordered_input"), input_info("buckets") }, ov::element::from(), true)); topology.add( - bucketize("bucketize_left_bound", { input_info("reordered_input"), input_info("buckets") }, type_to_data_type::value, false)); + bucketize("bucketize_left_bound", { input_info("reordered_input"), input_info("buckets") }, ov::element::from(), false)); topology.add( - reorder("plane_bucketize_right_bound", input_info("bucketize_right_bound"), 
format::bfyx, type_to_data_type::value)); + reorder("plane_bucketize_right_bound", input_info("bucketize_right_bound"), format::bfyx, ov::element::from())); topology.add( - reorder("plane_bucketize_left_bound", input_info("bucketize_left_bound"), format::bfyx, type_to_data_type::value)); + reorder("plane_bucketize_left_bound", input_info("bucketize_left_bound"), format::bfyx, ov::element::from())); cldnn::network::ptr network = get_network(engine, topology, get_test_default_config(engine), get_test_stream_ptr(), is_caching_test); @@ -86,9 +86,9 @@ struct bucketize_test : testing::TestWithParam> { static std::string PrintToStringParamName(const testing::TestParamInfo>& info) { std::ostringstream result; - result << "inType=" << data_type_traits::name(type_to_data_type::value) << "_"; - result << "bucketsType=" << data_type_traits::name(type_to_data_type::value) << "_"; - result << "outType=" << data_type_traits::name(type_to_data_type::value) << "_"; + result << "inType=" << ov::element::Type(ov::element::from()) << "_"; + result << "bucketsType=" << ov::element::Type(ov::element::from()) << "_"; + result << "outType=" << ov::element::Type(ov::element::from()) << "_"; result << "format=" << std::get<1>(info.param); result << "is_caching_test=" << std::get<2>(info.param); return result.str(); @@ -140,16 +140,17 @@ INSTANTIATE_BUCKETIZE_TEST_SUITE(int32_t, uint8_t, int32_t, getBucketizeParams) INSTANTIATE_BUCKETIZE_TEST_SUITE(int64_t, int8_t, int64_t, getBucketizeParams) INSTANTIATE_BUCKETIZE_TEST_SUITE(int64_t, int32_t, int32_t, getBucketizeParams) -INSTANTIATE_BUCKETIZE_TEST_SUITE(float, FLOAT16, int64_t, getBucketizeFloatingPointParams) -INSTANTIATE_BUCKETIZE_TEST_SUITE(FLOAT16, float, int32_t, getBucketizeFloatingPointParams) +using ov::float16; +INSTANTIATE_BUCKETIZE_TEST_SUITE(float, float16, int64_t, getBucketizeFloatingPointParams) +INSTANTIATE_BUCKETIZE_TEST_SUITE(float16, float, int32_t, getBucketizeFloatingPointParams) INSTANTIATE_BUCKETIZE_TEST_SUITE(float, float, int64_t, getBucketizeFloatingPointParams) -INSTANTIATE_BUCKETIZE_TEST_SUITE(FLOAT16, FLOAT16, int32_t, getBucketizeFloatingPointParams) +INSTANTIATE_BUCKETIZE_TEST_SUITE(float16, float16, int32_t, getBucketizeFloatingPointParams) INSTANTIATE_TEST_SUITE_P(export_import, - bucketize_test_FLOAT16FLOAT16int32_t, - testing::Combine(testing::ValuesIn(getBucketizeFloatingPointParams()), + bucketize_test_float16float16int32_t, + testing::Combine(testing::ValuesIn(getBucketizeFloatingPointParams()), testing::Values(layout_formats[0]), testing::Values(true)), - bucketize_test_FLOAT16FLOAT16int32_t::PrintToStringParamName); + bucketize_test_float16float16int32_t::PrintToStringParamName); #undef INSTANTIATE_BUCKETIZE_TEST_SUITE diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/concatenation_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/concatenation_gpu_test.cpp index cff4ac27ed753a..531147005171fa 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/concatenation_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/concatenation_gpu_test.cpp @@ -35,10 +35,10 @@ TEST(concat_gpu, mixed_input_types) { set_values(input0, { 1.0f, 2.0f, 3.0f, 4.0f, 2.0f, 2.0f, 3.0f, 4.0f, 3.0f, 3.0f, 3.0f, 5.0f }); set_values(input1, { 11, 12, 13, 14, 12, 12, 13, 14, 13, 13, 13, 15 }); set_values(input2, { 21, 22, 23, 24, 22, 22, 23, 24, 23, 23, 23, 25 }); - set_values(input3, { half_t(31.f), half_t(32.f), half_t(33.f), - half_t(34.f), half_t(32.f), half_t(32.f), - half_t(33.f), half_t(34.f), half_t(33.f), - 
half_t(33.f), half_t(33.f), half_t(35.f) }); + set_values(input3, { ov::float16(31.f), ov::float16(32.f), ov::float16(33.f), + ov::float16(34.f), ov::float16(32.f), ov::float16(32.f), + ov::float16(33.f), ov::float16(34.f), ov::float16(33.f), + ov::float16(33.f), ov::float16(33.f), ov::float16(35.f) }); set_values(input4, { 41, 42, 43, 44, 42, 42, 43, 44, 43, 43, 43, 45 }); VF output_vec = { @@ -315,22 +315,22 @@ TEST(concat_gpu, mixed_input_types_5d) { auto input2 = engine.allocate_memory({ data_types::f16, format::bfzyx, { 1, 1, 1, 4, 3 } }); auto input3 = engine.allocate_memory({ data_types::f16, format::bfzyx, { 1, 1, 1, 4, 3 } }); - set_values(input0, { half_t(1.0f), half_t(2.0f), half_t(3.0f), - half_t(4.0f), half_t(2.0f), half_t(2.0f), - half_t(3.0f), half_t(4.0f), half_t(3.0f), - half_t(3.0f), half_t(3.0f), half_t(5.0f) }); - set_values(input1, { half_t(11), half_t(12), half_t(13), - half_t(14), half_t(12), half_t(12), - half_t(13), half_t(14), half_t(13), - half_t(13), half_t(13), half_t(15) }); - set_values(input2, { half_t(21), half_t(22), half_t(23), - half_t(24), half_t(22), half_t(22), - half_t(23), half_t(24), half_t(23), - half_t(23), half_t(23), half_t(25) }); - set_values(input3, { half_t(31.f), half_t(32.f), half_t(33.f), - half_t(34.f), half_t(32.f), half_t(32.f), - half_t(33.f), half_t(34.f), half_t(33.f), - half_t(33.f), half_t(33.f), half_t(35.f) }); + set_values(input0, { ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), + ov::float16(4.0f), ov::float16(2.0f), ov::float16(2.0f), + ov::float16(3.0f), ov::float16(4.0f), ov::float16(3.0f), + ov::float16(3.0f), ov::float16(3.0f), ov::float16(5.0f) }); + set_values(input1, { ov::float16(11), ov::float16(12), ov::float16(13), + ov::float16(14), ov::float16(12), ov::float16(12), + ov::float16(13), ov::float16(14), ov::float16(13), + ov::float16(13), ov::float16(13), ov::float16(15) }); + set_values(input2, { ov::float16(21), ov::float16(22), ov::float16(23), + ov::float16(24), ov::float16(22), ov::float16(22), + ov::float16(23), ov::float16(24), ov::float16(23), + ov::float16(23), ov::float16(23), ov::float16(25) }); + set_values(input3, { ov::float16(31.f), ov::float16(32.f), ov::float16(33.f), + ov::float16(34.f), ov::float16(32.f), ov::float16(32.f), + ov::float16(33.f), ov::float16(34.f), ov::float16(33.f), + ov::float16(33.f), ov::float16(33.f), ov::float16(35.f) }); VF output_vec = { 1.0f, 2.0f, 3.0f, 4.0f, 2.0f, 2.0f, 3.0f, 4.0f, 3.0f, 3.0f, 3.0f, 5.0f, @@ -825,7 +825,7 @@ struct concat_gpu_4d : public concat_gpu { public: void test(format::type fmt) { - auto data_type = type_to_data_type::value; + auto data_type = ov::element::from(); auto& engine = get_test_engine(); const size_t batch_num = testing::get<0>(GetParam()); @@ -911,7 +911,7 @@ struct concat_gpu_4d_axis3 : public concat_axis3_gpu { public: void test(format::type fmt) { - auto data_type = type_to_data_type::value; + auto data_type = ov::element::from(); auto& engine = get_test_engine(); const size_t batch_num = testing::get<0>(GetParam()); @@ -992,7 +992,7 @@ struct concat_gpu_4d_axis3 : public concat_axis3_gpu { }; -using concat_gpu_4d_f16 = concat_gpu_4d; +using concat_gpu_4d_f16 = concat_gpu_4d; using concat_gpu_4d_i8 = concat_gpu_4d; using concat_gpu_4d_u8 = concat_gpu_4d; @@ -1027,7 +1027,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_low_precision, concat_gpu_all_params, concat_gpu::PrintToStringParamName); -using concat_gpu_4d_axis3_f16 = concat_gpu_4d_axis3; +using concat_gpu_4d_axis3_f16 = concat_gpu_4d_axis3; TEST_P(concat_gpu_4d_axis3_f16, 
fs_b_yx_fsv32) { ASSERT_NO_FATAL_FAILURE(test(format::fs_b_yx_fsv32)); @@ -1056,7 +1056,7 @@ struct concat_id_conv_gpu_4d : public concat_gpu { public: void test(format::type fmt) { - auto data_type = type_to_data_type::value; + auto data_type = ov::element::from(); auto& engine = get_test_engine(); const size_t batch_num = testing::get<0>(GetParam()); @@ -1158,7 +1158,7 @@ struct concat_id_conv_gpu_4d : public concat_gpu { } }; -using concat_id_conv_gpu_4d_f16 = concat_id_conv_gpu_4d; +using concat_id_conv_gpu_4d_f16 = concat_id_conv_gpu_4d; using concat_id_conv_gpu_4d_i8 = concat_id_conv_gpu_4d; TEST_P(concat_id_conv_gpu_4d_f16, input_order_opt_b_fs_yx_fsv16) { @@ -1195,7 +1195,7 @@ template struct concat_gpu_4d_implicit : public concat_gpu { public: cldnn::memory::ptr run_concat_network(std::vector>>>> input, format::type fmt, ExecutionConfig config) { - auto data_type = type_to_data_type::value; + auto data_type = ov::element::from(); auto& engine = get_test_engine(); const size_t batch_num = testing::get<0>(GetParam()); const std::vector in_features = testing::get<1>(GetParam()); @@ -1313,7 +1313,7 @@ struct concat_gpu_4d_implicit : public concat_gpu { } }; -using concat_implicit_gpu_4d_f16 = concat_gpu_4d_implicit; +using concat_implicit_gpu_4d_f16 = concat_gpu_4d_implicit; using concat_implicit_gpu_4d_i8 = concat_gpu_4d_implicit; TEST_P(concat_implicit_gpu_4d_f16, input_order_opt_b_fs_yx_fsv16) { @@ -1417,7 +1417,7 @@ template struct concat_gpu_4d_implicit_onednn : public concat_gpu { public: cldnn::memory::ptr run_concat_network(std::vector>>>> input, format::type fmt, ExecutionConfig config) { - auto data_type = type_to_data_type::value; + auto data_type = ov::element::from(); auto& engine = get_test_engine(); const size_t batch_num = testing::get<0>(GetParam()); const std::vector in_features = testing::get<1>(GetParam()); @@ -1544,7 +1544,7 @@ struct concat_gpu_4d_implicit_onednn : public concat_gpu { }; -using concat_implicit_gpu_onednn_4d_f16 = concat_gpu_4d_implicit_onednn; +using concat_implicit_gpu_onednn_4d_f16 = concat_gpu_4d_implicit_onednn; using concat_implicit_gpu_onednn_4d_i8 = concat_gpu_4d_implicit_onednn; TEST_P(concat_implicit_gpu_onednn_4d_f16, input_order_opt_b_fs_yx_fsv16) { @@ -1577,7 +1577,7 @@ template struct concat_gpu_4d_explict : public concat_gpu { public: cldnn::memory::ptr run_concat_network(std::vector>>>> input, format::type fmt, ExecutionConfig config) { - auto data_type = type_to_data_type::value; + auto data_type = ov::element::from(); auto& engine = get_test_engine(); const size_t batch_num = testing::get<0>(GetParam()); const std::vector in_features = testing::get<1>(GetParam()); // only use first element. 
@@ -1711,7 +1711,7 @@ struct concat_gpu_4d_explict : public concat_gpu { }; -using concat_no_implicit_gpu_onednn_4d_f16 = concat_gpu_4d_explict; +using concat_no_implicit_gpu_onednn_4d_f16 = concat_gpu_4d_explict; TEST_P(concat_no_implicit_gpu_onednn_4d_f16, input_order_opt_b_fs_yx_fsv16) { ASSERT_NO_FATAL_FAILURE(test(format::b_fs_yx_fsv16)); diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/condition_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/condition_gpu_test.cpp index 0ccb5045b4c7d2..b42241b23f1e99 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/condition_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/condition_gpu_test.cpp @@ -49,7 +49,6 @@ topology generate_simple_branch (bool branch_true_false, const primitive_id& id, template < typename DataType> struct condition_data_types { using type = DataType; - static const data_types data_type = type_to_data_type::value; }; template @@ -69,7 +68,7 @@ class condition_gpu_basic_test : public ::testing::Test { void run_test() { auto& engine = get_test_engine(); - auto dat_dt = ConditionDataType::data_type; + auto dat_dt = static_cast(ov::element::from()); ExecutionConfig config = get_test_default_config(engine); config.set_property(ov::intel_gpu::optimize_data(true)); @@ -140,7 +139,7 @@ class condition_gpu_basic_test : public ::testing::Test { } }; -using test_data_types = testing::Types, +using test_data_types = testing::Types, condition_data_types>; TYPED_TEST_SUITE(condition_gpu_basic_test, test_data_types); diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/convert_color_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/convert_color_gpu_test.cpp index 75f4ad71d99f9f..209cf08ab261e3 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/convert_color_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/convert_color_gpu_test.cpp @@ -194,8 +194,8 @@ TEST(convert_color, nv12_to_rgb_two_planes_buffer_fp16) { auto input_y = engine.allocate_memory({ { 1, height, width, 1 }, data_types::f16, format::bfyx }); auto input_uv = engine.allocate_memory({ { 1, height / 2 , width / 2, 2 }, data_types::f16, format::bfyx}); - std::vector input_y_data = rg.generate_random_1d(width * height, 0, 255); - std::vector input_uv_data = rg.generate_random_1d(width * height / 2, 0, 255); + std::vector input_y_data = rg.generate_random_1d(width * height, 0, 255); + std::vector input_uv_data = rg.generate_random_1d(width * height / 2, 0, 255); set_values(input_y, input_y_data); set_values(input_uv, input_uv_data); @@ -216,7 +216,7 @@ TEST(convert_color, nv12_to_rgb_two_planes_buffer_fp16) { auto outputs = network.execute(); std::vector ref_res(width * height * 3); - createReferenceDataNV12(input_y_data.data(), input_uv_data.data(), ref_res.data(), + createReferenceDataNV12(input_y_data.data(), input_uv_data.data(), ref_res.data(), 1, height, width, height * width, height * width / 2, true); auto output = outputs.at("convert_color").get_memory(); diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/convolution_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/convolution_gpu_test.cpp index 7cad29ca94ee01..414ab37f11295e 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/convolution_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/convolution_gpu_test.cpp @@ -5067,19 +5067,19 @@ TEST_P(convolution_gpu_fs_byx_fsv32, fs_byx_fsv32) const int output_xy = 1 + (input_xy + 2 * pad - filter_xy) / stride + 2 * output_padding; auto input_size = tensor(batch_num, input_f, 
input_xy, input_xy); - auto input_data = rg.generate_random_4d(batch_num, input_f, input_xy, input_xy, -1, 1); + auto input_data = rg.generate_random_4d(batch_num, input_f, input_xy, input_xy, -1, 1); auto input_data_bfyx = flatten_4d(format::bfyx, input_data); auto input_mem = engine.allocate_memory({ data_types::f16, format::bfyx, input_size }); set_values(input_mem, input_data_bfyx); auto weights_size = tensor(output_f, input_f, filter_xy, filter_xy); - auto weights_data = rg.generate_random_4d(output_f, input_f, filter_xy, filter_xy, -1, 1); + auto weights_data = rg.generate_random_4d(output_f, input_f, filter_xy, filter_xy, -1, 1); auto weights_data_bfyx = flatten_4d(format::bfyx, weights_data); auto weights_mem = engine.allocate_memory({ data_types::f16, format::bfyx, weights_size }); set_values(weights_mem, weights_data_bfyx); // Will be used to store reference values calculated in branches depending on bias - auto reference_result = VVVVF(batch_num, VVVF(output_f)); + auto reference_result = VVVVF(batch_num, VVVF(output_f)); topology topology( input_layout("input", input_mem->get_layout()), @@ -5092,7 +5092,7 @@ TEST_P(convolution_gpu_fs_byx_fsv32, fs_byx_fsv32) { // Generate bias data auto biases_size = tensor(1, output_f, 1, 1); - auto biases_data = rg.generate_random_1d(output_f, -1, 1); + auto biases_data = rg.generate_random_1d(output_f, -1, 1); auto biases_mem = engine.allocate_memory({ data_types::f16, format::bfyx, biases_size }); set_values(biases_mem, biases_data); @@ -5171,7 +5171,7 @@ TEST_P(convolution_gpu_fs_byx_fsv32, fs_byx_fsv32) network.execute(); auto out_mem = network.get_output("conv_fsv").get_memory(); - cldnn::mem_lock out_ptr(out_mem, get_test_stream()); + cldnn::mem_lock out_ptr(out_mem, get_test_stream()); ASSERT_EQ(out_mem->get_layout().format, format::fs_b_yx_fsv32); @@ -5214,19 +5214,19 @@ TEST(convolution_f16_fsv_gpu, convolution_f16_fsv_gpu_padding) { const int output_xy = 1 + (input_xy - filter_xy) / stride; auto input_size = tensor(batch_num, input_f, input_xy, input_xy); - auto input_data = rg.generate_random_4d(batch_num, input_f, input_xy, input_xy, -1, 1); + auto input_data = rg.generate_random_4d(batch_num, input_f, input_xy, input_xy, -1, 1); auto input_data_bfyx = flatten_4d(format::bfyx, input_data); auto input_mem = engine.allocate_memory({ data_types::f16, format::bfyx, input_size }); set_values(input_mem, input_data_bfyx); auto weights_size = tensor(output_f, input_f, filter_xy, filter_xy); - auto weights_data = rg.generate_random_4d(output_f, input_f, filter_xy, filter_xy, -1, 1); + auto weights_data = rg.generate_random_4d(output_f, input_f, filter_xy, filter_xy, -1, 1); auto weights_data_bfyx = flatten_4d(format::bfyx, weights_data); auto weights_mem = engine.allocate_memory({ data_types::f16, format::bfyx, weights_size }); set_values(weights_mem, weights_data_bfyx); // Will be used to store reference values calculated in branches depending on bias - auto reference_result = VVVVF(batch_num, VVVF(output_f)); + auto reference_result = VVVVF(batch_num, VVVF(output_f)); topology topology( input_layout("input", input_mem->get_layout()), @@ -5238,7 +5238,7 @@ TEST(convolution_f16_fsv_gpu, convolution_f16_fsv_gpu_padding) { // Generate bias data auto biases_size = tensor(1, output_f, 1, 1); - auto biases_data = rg.generate_random_1d(output_f, -1, 1); + auto biases_data = rg.generate_random_1d(output_f, -1, 1); auto biases_mem = engine.allocate_memory({ data_types::f16, format::bfyx, biases_size }); set_values(biases_mem, biases_data); @@ 
-5281,7 +5281,7 @@ TEST(convolution_f16_fsv_gpu, convolution_f16_fsv_gpu_padding) { network.execute(); auto out_mem = network.get_output("conv_fsv").get_memory(); - cldnn::mem_lock out_ptr(out_mem, get_test_stream()); + cldnn::mem_lock out_ptr(out_mem, get_test_stream()); ASSERT_EQ(out_mem->get_layout().format, format::fs_b_yx_fsv32); @@ -5358,14 +5358,14 @@ TEST_P(convolution_gpu_fs_byx_fsv32_crop, fs_byx_fsv32_crop) const int output_xy = 1 + (input_xy + 2 * pad - filter_xy) / stride + 2 * output_padding; auto weights_size = tensor(output_f, input_f, filter_xy, filter_xy); - auto weights_data = rg.generate_random_4d(output_f, input_f, filter_xy, filter_xy, -1, 1); + auto weights_data = rg.generate_random_4d(output_f, input_f, filter_xy, filter_xy, -1, 1); auto weights_data_bfyx = flatten_4d(format::bfyx, weights_data); auto weights_mem = engine.allocate_memory({ data_types::f16, format::bfyx, weights_size }); set_values(weights_mem, weights_data_bfyx); // ref input - auto half_input_data = rg.generate_random_4d(batch_num, input_f, input_xy, input_xy, -1, 1); - auto input_data = VVVVF(batch_num); + auto half_input_data = rg.generate_random_4d(batch_num, input_f, input_xy, input_xy, -1, 1); + auto input_data = VVVVF(batch_num); // concatenated cldnn input tensor for (auto bi = 0; bi < batch_num; ++bi) @@ -5402,14 +5402,14 @@ TEST_P(convolution_gpu_fs_byx_fsv32_crop, fs_byx_fsv32_crop) topology.add(right_crop); // Will be used to store reference values calculated in branches depending on bias - auto half_ref_result = VVVVF(batch_num, VVVF(output_f)); + auto half_ref_result = VVVVF(batch_num, VVVF(output_f)); // reference convolution and concat if (with_bias) { // Generate bias data auto biases_size = tensor(1, output_f, 1, 1); - auto biases_data = rg.generate_random_1d(output_f, -1, 1); + auto biases_data = rg.generate_random_1d(output_f, -1, 1); auto biases_mem = engine.allocate_memory({ data_types::f16, format::bfyx, biases_size }); set_values(biases_mem, biases_data); @@ -5478,7 +5478,7 @@ TEST_P(convolution_gpu_fs_byx_fsv32_crop, fs_byx_fsv32_crop) topology.add(reorder("reorder", input_info("conv_fsv"), { data_types::f16, format::bfyx, input_size })); topology.add(concatenation("concat", { input_info("left_crop"), input_info("reorder") }, 1)); - auto ref_result = VVVVF(batch_num); + auto ref_result = VVVVF(batch_num); // concatenate half ref input and ref conv output, by features for (auto bi = 0; bi < batch_num; ++bi) { @@ -5505,7 +5505,7 @@ TEST_P(convolution_gpu_fs_byx_fsv32_crop, fs_byx_fsv32_crop) network.execute(); auto out_mem = network.get_output("concat").get_memory(); - cldnn::mem_lock out_ptr(out_mem, get_test_stream()); + cldnn::mem_lock out_ptr(out_mem, get_test_stream()); ASSERT_EQ(out_mem->get_layout().format, format::bfyx); @@ -5660,21 +5660,21 @@ TEST(convolution_gpu, bfyx_iyxo_5x5_fp16) const int output_y = 1 + (input_size_y + 2 * pad - filter_xy) / stride + 2 * output_padding; auto input_size = tensor(batch_num, input_f, input_size_x, input_size_y); - auto input_data = rg.generate_random_4d(batch_num, input_f, input_size_y, input_size_x, -1, 1); + auto input_data = rg.generate_random_4d(batch_num, input_f, input_size_y, input_size_x, -1, 1); auto input_data_bfyx = flatten_4d(format::bfyx, input_data); auto input_mem = engine.allocate_memory({ data_types::f16, format::bfyx, input_size }); set_values(input_mem, input_data_bfyx); auto weights_size = tensor(output_f, input_f, filter_xy, filter_xy); - auto weights_data = rg.generate_random_4d(output_f, input_f, filter_xy, 
filter_xy, -1, 1); + auto weights_data = rg.generate_random_4d(output_f, input_f, filter_xy, filter_xy, -1, 1); auto weights_data_bfyx = flatten_4d(format::bfyx, weights_data); auto weights_mem = engine.allocate_memory({ data_types::f16, format::bfyx, weights_size }); set_values(weights_mem, weights_data_bfyx); // Will be used to store reference values calculated in branches depending on bias - auto reference_result = VVVVF(batch_num, VVVF(output_f)); + auto reference_result = VVVVF(batch_num, VVVF(output_f)); topology topology( input_layout("input", input_mem->get_layout()), @@ -5685,7 +5685,7 @@ TEST(convolution_gpu, bfyx_iyxo_5x5_fp16) { // Generate bias data auto biases_size = tensor(1, output_f, 1, 1); - auto biases_data = rg.generate_random_1d(output_f, -1, 1); + auto biases_data = rg.generate_random_1d(output_f, -1, 1); auto biases_mem = engine.allocate_memory({ data_types::f16, format::bfyx, biases_size }); set_values(biases_mem, biases_data); @@ -5749,7 +5749,7 @@ TEST(convolution_gpu, bfyx_iyxo_5x5_fp16) network.execute(); auto out_mem = network.get_output("out").get_memory(); - cldnn::mem_lock out_ptr(out_mem, get_test_stream()); + cldnn::mem_lock out_ptr(out_mem, get_test_stream()); auto output_layout = out_mem->get_layout(); ASSERT_EQ(output_layout.format, format::bfyx); @@ -6028,14 +6028,14 @@ TEST_P(convolution_gpu_block_layout3D, bfzyx_bsv16_fsv16_fp16) input_format = format::bs_fs_zyx_bsv16_fsv16; auto input_size = tensor(batch_num, input_f, input_xy, input_xy, 1); - auto input_data = rg.generate_random_4d(batch_num, input_f, input_xy, input_xy, 0, 1); + auto input_data = rg.generate_random_4d(batch_num, input_f, input_xy, input_xy, 0, 1); auto input_data_bfyx = flatten_4d(format::bfyx, input_data); auto input_mem = engine.allocate_memory({ data_types::f16, format::bfzyx, input_size }); set_values(input_mem, input_data_bfyx); auto weights_size = tensor(output_f, input_f, filter_xy, filter_xy, 1); - auto weights_data = rg.generate_random_4d(output_f, input_f, filter_xy, filter_xy, 0, 1); + auto weights_data = rg.generate_random_4d(output_f, input_f, filter_xy, filter_xy, 0, 1); auto weights_data_bfyx = flatten_4d(format::bfyx, weights_data); @@ -6043,7 +6043,7 @@ TEST_P(convolution_gpu_block_layout3D, bfzyx_bsv16_fsv16_fp16) set_values(weights_mem, weights_data_bfyx); // Will be used to store reference values calculated in branches depending on bias - auto reference_result = VVVVF(batch_num, VVVF(output_f)); + auto reference_result = VVVVF(batch_num, VVVF(output_f)); topology topology( input_layout("input", input_mem->get_layout()), @@ -6056,7 +6056,7 @@ TEST_P(convolution_gpu_block_layout3D, bfzyx_bsv16_fsv16_fp16) { // Generate bias data auto biases_size = tensor(1, output_f, 1, 1, 1); - auto biases_data = rg.generate_random_1d(output_f, -1, 1); + auto biases_data = rg.generate_random_1d(output_f, -1, 1); auto biases_mem = engine.allocate_memory({ data_types::f16, format::bfzyx, biases_size }); set_values(biases_mem, biases_data); @@ -6120,12 +6120,12 @@ TEST_P(convolution_gpu_block_layout3D, bfzyx_bsv16_fsv16_fp16) network.execute(); auto out_mem = network.get_output("conv_bsv16_fsv16").get_memory(); - cldnn::mem_lock out_ptr(out_mem, get_test_stream()); + cldnn::mem_lock out_ptr(out_mem, get_test_stream()); auto out_mem_bfyx = network.get_output("reorder_bfzyx").get_memory(); - cldnn::mem_lock out_ptr_bfyx(out_mem_bfyx, get_test_stream()); + cldnn::mem_lock out_ptr_bfyx(out_mem_bfyx, get_test_stream()); - blockedFormatZeroCheck(out_mem); + 
blockedFormatZeroCheck(out_mem); ASSERT_EQ(out_mem->get_layout().format, input_format); @@ -6470,14 +6470,14 @@ TEST_P(convolution_gpu_block_layout, bfyx_bsv16_fsv16_fp16) tests::random_generator rg(GET_SUITE_NAME); auto input_size = tensor(batch_num, input_f, input_xy, input_xy); - auto input_data = rg.generate_random_4d(batch_num, input_f, input_xy, input_xy, 0, 1); + auto input_data = rg.generate_random_4d(batch_num, input_f, input_xy, input_xy, 0, 1); auto input_data_bfyx = flatten_4d(format::bfyx, input_data); auto input_mem = engine.allocate_memory({ data_types::f16, format::bfyx, input_size }); set_values(input_mem, input_data_bfyx); auto weights_size = tensor(output_f, input_f, filter_xy, filter_xy); - auto weights_data = rg.generate_random_4d(output_f, input_f, filter_xy, filter_xy, 0, 1); + auto weights_data = rg.generate_random_4d(output_f, input_f, filter_xy, filter_xy, 0, 1); auto weights_data_bfyx = flatten_4d(format::bfyx, weights_data); @@ -6485,7 +6485,7 @@ TEST_P(convolution_gpu_block_layout, bfyx_bsv16_fsv16_fp16) set_values(weights_mem, weights_data_bfyx); // Will be used to store reference values calculated in branches depending on bias - auto reference_result = VVVVF(batch_num, VVVF(output_f)); + auto reference_result = VVVVF(batch_num, VVVF(output_f)); topology topology( input_layout("input", input_mem->get_layout()), @@ -6498,7 +6498,7 @@ TEST_P(convolution_gpu_block_layout, bfyx_bsv16_fsv16_fp16) { // Generate bias data auto biases_size = tensor(1, output_f, 1, 1); - auto biases_data = rg.generate_random_1d(output_f, -1, 1); + auto biases_data = rg.generate_random_1d(output_f, -1, 1); auto biases_mem = engine.allocate_memory({ data_types::f16, format::bfyx, biases_size }); set_values(biases_mem, biases_data); @@ -6562,10 +6562,10 @@ TEST_P(convolution_gpu_block_layout, bfyx_bsv16_fsv16_fp16) network.execute(); auto out_mem = network.get_output("conv_bsv16_fsv16").get_memory(); - cldnn::mem_lock out_ptr(out_mem, get_test_stream()); + cldnn::mem_lock out_ptr(out_mem, get_test_stream()); auto out_mem_bfyx = network.get_output("reorder_bfyx").get_memory(); - cldnn::mem_lock out_ptr_bfyx(out_mem_bfyx, get_test_stream()); + cldnn::mem_lock out_ptr_bfyx(out_mem_bfyx, get_test_stream()); ASSERT_EQ(out_mem->get_layout().format, format::bs_fs_yx_bsv16_fsv16); @@ -6784,19 +6784,19 @@ TEST_P(convolution_depthwise_gpu, depthwise_conv_fs_b_yx_fsv32) const int output_x = 1 + (input_xy + 2 * pad_x - filter_x) / stride + 2 * output_padding; auto input_size = tensor(batch_num, input_f, input_xy, input_xy); - auto input_data = rg.generate_random_4d(batch_num, input_f, input_xy, input_xy, -1, 1); + auto input_data = rg.generate_random_4d(batch_num, input_f, input_xy, input_xy, -1, 1); auto input_data_bfyx = flatten_4d(format::bfyx, input_data); auto input_mem = engine.allocate_memory({ data_types::f16, format::bfyx, input_size }); set_values(input_mem, input_data_bfyx); auto weights_size = tensor(group(groups), batch(1), feature(1), spatial(filter_x, filter_y)); - auto weights_data = rg.generate_random_4d(output_f, 1, filter_y, filter_x, -1, 1); + auto weights_data = rg.generate_random_4d(output_f, 1, filter_y, filter_x, -1, 1); auto weights_data_bfyx = flatten_4d(format::bfyx, weights_data); auto weights_mem = engine.allocate_memory({ data_types::f16, format::goiyx, weights_size }); set_values(weights_mem, weights_data_bfyx); // Will be used to store reference values calculated in branches depending on bias - auto reference_result = VVVVF(batch_num, VVVF(output_f)); + auto 
reference_result = VVVVF(batch_num, VVVF(output_f)); topology topology( input_layout("input", input_mem->get_layout()), @@ -6839,7 +6839,7 @@ TEST_P(convolution_depthwise_gpu, depthwise_conv_fs_b_yx_fsv32) network.execute(); auto out_mem = network.get_output("conv_fsv").get_memory(); - cldnn::mem_lock out_ptr(out_mem, get_test_stream()); + cldnn::mem_lock out_ptr(out_mem, get_test_stream()); ASSERT_EQ(out_mem->get_layout().format, format::fs_b_yx_fsv32); @@ -6928,19 +6928,19 @@ TEST_P(convolution_depthwise_gpu_fsv16, depthwise_conv_b_fs_yx_fsv16) const int output_x = 1 + (input_xy + 2 * pad_x - filter_x) / stride + 2 * output_padding; auto input_size = tensor(batch_num, input_f, input_xy, input_xy); - auto input_data = rg.generate_random_4d(batch_num, input_f, input_xy, input_xy, -1, 1); + auto input_data = rg.generate_random_4d(batch_num, input_f, input_xy, input_xy, -1, 1); auto input_data_bfyx = flatten_4d(format::bfyx, input_data); auto input_mem = engine.allocate_memory({ data_types::f16, format::bfyx, input_size }); set_values(input_mem, input_data_bfyx); auto weights_size = tensor(group(output_f), batch(1), feature(1), spatial(filter_x, filter_y)); - auto weights_data = rg.generate_random_4d(output_f, 1, filter_y, filter_x, -1, 1); + auto weights_data = rg.generate_random_4d(output_f, 1, filter_y, filter_x, -1, 1); auto weights_data_bfyx = flatten_4d(format::bfyx, weights_data); auto weights_mem = engine.allocate_memory({ data_types::f16, format::goiyx, weights_size }); set_values(weights_mem, weights_data_bfyx); // Will be used to store reference values calculated in branches depending on bias - auto reference_result = VVVVF(batch_num, VVVF(output_f)); + auto reference_result = VVVVF(batch_num, VVVF(output_f)); topology topology( input_layout("input", input_mem->get_layout()), @@ -6983,7 +6983,7 @@ TEST_P(convolution_depthwise_gpu_fsv16, depthwise_conv_b_fs_yx_fsv16) network.execute(); auto out_mem = network.get_output("conv_fsv").get_memory(); - cldnn::mem_lock out_ptr(out_mem, get_test_stream()); + cldnn::mem_lock out_ptr(out_mem, get_test_stream()); ASSERT_EQ(out_mem->get_layout().format, format::b_fs_yx_fsv16); @@ -7060,19 +7060,19 @@ TEST_P(convolution_depthwise_gpu_fsv16_xy, depthwise_conv_b_fs_yx_fsv16) const int output_x = 1 + (input_x + 2 * pad_x - filter_x) / stride + 2 * output_padding; auto input_size = tensor(batch_num, input_f, input_x, input_y); - auto input_data = rg.generate_random_4d(batch_num, input_f, input_y, input_x, -1, 1); + auto input_data = rg.generate_random_4d(batch_num, input_f, input_y, input_x, -1, 1); auto input_data_bfyx = flatten_4d(format::bfyx, input_data); auto input_mem = engine.allocate_memory({ data_types::f16, format::bfyx, input_size }); set_values(input_mem, input_data_bfyx); auto weights_size = tensor(group(output_f), batch(1), feature(1), spatial(filter_x, filter_y)); - auto weights_data = rg.generate_random_4d(output_f, 1, filter_y, filter_x, -1, 1); + auto weights_data = rg.generate_random_4d(output_f, 1, filter_y, filter_x, -1, 1); auto weights_data_bfyx = flatten_4d(format::bfyx, weights_data); auto weights_mem = engine.allocate_memory({ data_types::f16, format::goiyx, weights_size }); set_values(weights_mem, weights_data_bfyx); // Will be used to store reference values calculated in branches depending on bias - auto reference_result = VVVVF(batch_num, VVVF(output_f)); + auto reference_result = VVVVF(batch_num, VVVF(output_f)); topology topology( input_layout("input", input_mem->get_layout()), @@ -7122,7 +7122,7 @@ 
TEST_P(convolution_depthwise_gpu_fsv16_xy, depthwise_conv_b_fs_yx_fsv16) auto out_mem = network.get_output("out").get_memory(); - cldnn::mem_lock out_ptr(out_mem, get_test_stream()); + cldnn::mem_lock out_ptr(out_mem, get_test_stream()); ASSERT_EQ(out_mem->get_layout().format, format::b_fs_yx_fsv16); for (int bi = 0; bi < batch_num; ++bi) { @@ -7276,19 +7276,19 @@ TEST_P(convolution_depthwise_gpu_bfyx, depthwise_conv_bfyx) const int output_x = 1 + (input_xy + 2 * pad_x - filter_x) / stride + 2 * output_padding; auto input_size = tensor(batch_num, input_f, input_xy, input_xy); - auto input_data = rg.generate_random_4d(batch_num, input_f, input_xy, input_xy, -1, 1); + auto input_data = rg.generate_random_4d(batch_num, input_f, input_xy, input_xy, -1, 1); auto input_data_bfyx = flatten_4d(format::bfyx, input_data); auto input_mem = engine.allocate_memory({ data_types::f16, format::bfyx, input_size }); set_values(input_mem, input_data_bfyx); auto weights_size = tensor(group(output_f), batch(1), feature(1), spatial(filter_x, filter_y)); - auto weights_data = rg.generate_random_4d(output_f, 1, filter_y, filter_x, -1, 1); + auto weights_data = rg.generate_random_4d(output_f, 1, filter_y, filter_x, -1, 1); auto weights_data_bfyx = flatten_4d(format::bfyx, weights_data); auto weights_mem = engine.allocate_memory({ data_types::f16, format::goiyx, weights_size }); set_values(weights_mem, weights_data_bfyx); // Will be used to store reference values calculated in branches depending on bias - auto reference_result = VVVVF(batch_num, VVVF(output_f)); + auto reference_result = VVVVF(batch_num, VVVF(output_f)); topology topology( input_layout("input", input_mem->get_layout()), @@ -7328,7 +7328,7 @@ TEST_P(convolution_depthwise_gpu_bfyx, depthwise_conv_bfyx) network.execute(); auto out_mem = network.get_output("conv").get_memory(); - cldnn::mem_lock out_ptr(out_mem, get_test_stream()); + cldnn::mem_lock out_ptr(out_mem, get_test_stream()); ASSERT_EQ(out_mem->get_layout().format, format::bfyx); @@ -7726,25 +7726,25 @@ TEST_P(convolution_general_gpu, conv_fp16_cases) { auto with_bias = testing::get<13>(GetParam()); auto input_size = tensor(batch_num, input_f, input_x, input_y); - auto input_data = rg.generate_random_4d(batch_num, input_f, input_y, input_x, -1, 1); + auto input_data = rg.generate_random_4d(batch_num, input_f, input_y, input_x, -1, 1); auto input_data_bfyx = flatten_4d(format::bfyx, input_data); auto input_mem = engine.allocate_memory({ data_types::f16, format::bfyx, input_size }); set_values(input_mem, input_data_bfyx); auto weights_size = tensor(output_f, input_f, filter_y, filter_x, 1); - auto weights_data = rg.generate_random_4d(output_f, input_f, filter_y, filter_x, -1, 1); + auto weights_data = rg.generate_random_4d(output_f, input_f, filter_y, filter_x, -1, 1); auto weights_data_bfyx = flatten_4d(format::bfyx, weights_data); auto weights_mem = engine.allocate_memory({ data_types::f16, format::bfyx, weights_size }); set_values(weights_mem, weights_data_bfyx); // Will be used to store reference values calculated in branches depending on bias - auto expected_result = VVVVF(batch_num, VVVF(output_f)); + auto expected_result = VVVVF(batch_num, VVVF(output_f)); topology topology; // Calculate reference values if (with_bias) { auto biases_size = tensor(1, output_f, 1, 1); - auto biases_data = rg.generate_random_1d(output_f, -1, 1); + auto biases_data = rg.generate_random_1d(output_f, -1, 1); auto biases_mem = engine.allocate_memory({ data_types::f16, format::bfyx, biases_size }); 
set_values(biases_mem, biases_data); @@ -7818,7 +7818,7 @@ TEST_P(convolution_general_gpu, conv_fp16_cases) { network.execute(); auto out_mem = network.get_output("conv_fsv").get_memory(); - cldnn::mem_lock out_ptr(out_mem, get_test_stream()); + cldnn::mem_lock out_ptr(out_mem, get_test_stream()); auto out_lay = out_mem->get_layout(); ASSERT_EQ(out_mem->get_layout().format, input_data_format); @@ -7886,13 +7886,13 @@ TEST_P(convolution_gpu_fsv16_to_bfyx, conv_b_fs_yx_fsv16_to_bfyx_padding) const std::ptrdiff_t pad_x = (filter_x - 1) / 2; auto input_size = tensor(input_b, input_f, input_x, input_y); - auto input_data = rg.generate_random_4d(input_b, input_f, input_y, input_x, -1, 1); + auto input_data = rg.generate_random_4d(input_b, input_f, input_y, input_x, -1, 1); auto input_data_bfyx = flatten_4d(format::bfyx, input_data); auto input_mem = engine.allocate_memory({ data_types::f16, format::bfyx, input_size }); set_values(input_mem, input_data_bfyx); auto weights_size = tensor(input_b, input_f, filter_x, filter_y, 1); - auto weights_data = rg.generate_random_4d(input_b, input_f, filter_x, filter_y, -1, 1); + auto weights_data = rg.generate_random_4d(input_b, input_f, filter_x, filter_y, -1, 1); auto weights_data_bfyx = flatten_4d(format::bfyx, weights_data); auto weights_mem = engine.allocate_memory({ data_types::f16, format::oiyx, weights_size }); set_values(weights_mem, weights_data_bfyx); @@ -7926,7 +7926,7 @@ TEST_P(convolution_gpu_fsv16_to_bfyx, conv_b_fs_yx_fsv16_to_bfyx_padding) auto ref_out = network_ref.execute(); auto ref_out_mem = ref_out.begin()->second.get_memory(); - cldnn::mem_lock ref_out_ptr(ref_out_mem, get_test_stream()); + cldnn::mem_lock ref_out_ptr(ref_out_mem, get_test_stream()); // Exec target network (fusing: conv+reorder) ExecutionConfig config_target = get_test_default_config(engine); @@ -7939,7 +7939,7 @@ TEST_P(convolution_gpu_fsv16_to_bfyx, conv_b_fs_yx_fsv16_to_bfyx_padding) auto target_out = network_target.execute(); auto target_out_mem = target_out.begin()->second.get_memory(); - cldnn::mem_lock target_out_ptr(target_out_mem, get_test_stream()); + cldnn::mem_lock target_out_ptr(target_out_mem, get_test_stream()); // Compare ref and target result for (size_t i = 0; i < ref_out_ptr.size(); i++) { @@ -7986,13 +7986,13 @@ TEST_P(convolution_gpu_fsv16_to_bfyx, conv_b_fs_yx_fsv16_to_bfyx_different_type) const std::ptrdiff_t pad_x = (filter_x - 1) / 2; auto input_size = tensor(input_b, input_f, input_x, input_y); - auto input_data = rg.generate_random_4d(input_b, input_f, input_y, input_x, -1, 1); + auto input_data = rg.generate_random_4d(input_b, input_f, input_y, input_x, -1, 1); auto input_data_bfyx = flatten_4d(format::bfyx, input_data); auto input_mem = engine.allocate_memory({ data_types::f16, format::bfyx, input_size }); set_values(input_mem, input_data_bfyx); auto weights_size = tensor(input_b, input_f, filter_x, filter_y, 1); - auto weights_data = rg.generate_random_4d(input_b, input_f, filter_x, filter_y, -1, 1); + auto weights_data = rg.generate_random_4d(input_b, input_f, filter_x, filter_y, -1, 1); auto weights_data_bfyx = flatten_4d(format::bfyx, weights_data); auto weights_mem = engine.allocate_memory({ data_types::f16, format::goiyx, weights_size }); set_values(weights_mem, weights_data_bfyx); @@ -8275,7 +8275,7 @@ class convolution_test_base { bool bigger_pad() { return _bigger_pad; } bool grouped_weights_shape() { return _grouped_weights_shape; } - data_types input_type() const { return type_to_data_type::value; } + data_types input_type() 
const { return ov::element::from(); } format input_format() const { return _input_fmt; } tensor input_size() const { return tensor(TensorValue(batch_num()), @@ -8284,7 +8284,7 @@ class convolution_test_base { TensorValue(input_y())); } - data_types weights_type() const { return type_to_data_type::value; } + data_types weights_type() const { return ov::element::from(); } tensor weights_size() const { return tensor(TensorValue(output_features()), TensorValue(weights_input_features()), @@ -8314,7 +8314,7 @@ class convolution_test_base { } } - data_types output_type() const { return type_to_data_type::value; } + data_types output_type() const { return ov::element::from(); } }; struct convolution_random_test_all_params { @@ -8995,7 +8995,7 @@ class convolution_test : public tests::generic_test { if (generic_params->data_type == data_types::f32) { prepare_input_for_test_typed(inputs); } else { - prepare_input_for_test_typed(inputs); + prepare_input_for_test_typed(inputs); } } @@ -9128,7 +9128,7 @@ class convolution_test : public tests::generic_test { if (generic_params->data_type == data_types::f32) { return generate_reference_typed(inputs); } else { - return generate_reference_typed(inputs); + return generate_reference_typed(inputs); } } @@ -9236,25 +9236,25 @@ TEST_P(convolution_gpu_onednn, conv_onednn_cases) { auto with_bias = testing::get<13>(GetParam()); auto input_size = tensor(batch_num, input_f, input_x, input_y); - auto input_data = rg.generate_random_4d(batch_num, input_f, input_y, input_x, -1, 1); + auto input_data = rg.generate_random_4d(batch_num, input_f, input_y, input_x, -1, 1); auto input_data_bfyx = flatten_4d(format::bfyx, input_data); auto input_mem = engine.allocate_memory({ data_types::f16, format::bfyx, input_size }); set_values(input_mem, input_data_bfyx); auto weights_size = tensor(output_f, input_f, filter_y, filter_x, 1); - auto weights_data = rg.generate_random_4d(output_f, input_f, filter_y, filter_x, -1, 1); + auto weights_data = rg.generate_random_4d(output_f, input_f, filter_y, filter_x, -1, 1); auto weights_data_bfyx = flatten_4d(format::bfyx, weights_data); auto weights_mem = engine.allocate_memory({ data_types::f16, format::bfyx, weights_size }); set_values(weights_mem, weights_data_bfyx); // Will be used to store reference values calculated in branches depending on bias - auto expected_result = VVVVF(batch_num, VVVF(output_f)); + auto expected_result = VVVVF(batch_num, VVVF(output_f)); topology topology; // Calculate reference values if (with_bias) { auto biases_size = tensor(1, output_f, 1, 1); - auto biases_data = rg.generate_random_1d(output_f, -1, 1); + auto biases_data = rg.generate_random_1d(output_f, -1, 1); auto biases_mem = engine.allocate_memory({ data_types::f16, format::bfyx, biases_size }); set_values(biases_mem, biases_data); @@ -9332,7 +9332,7 @@ TEST_P(convolution_gpu_onednn, conv_onednn_cases) { for (auto& p : network.get_primitives_info()) std::cerr << p.original_id << " " << p.kernel_id << std::endl; - auto out_ptr = get_output_values_to_float(network, outputs.find("conv_fsv")->second); + auto out_ptr = get_output_values_to_float(network, outputs.find("conv_fsv")->second); auto out_lay = network.get_node_output_layout("conv_fsv"); ASSERT_EQ(out_lay.batch(), expected_result.size()); ASSERT_EQ(out_lay.feature(), expected_result[0].size()); @@ -9368,13 +9368,13 @@ TEST(convolution_gpu_onednn, padding_for_cldnn_kernel_after_onednn) { int output_b = 1, output_f = 16, output_y = 6, output_x = 6; auto input_size = tensor(input_b, input_f, input_x, 
input_y); - auto input_data = rg.generate_random_4d(input_b, input_f, input_y, input_x, -1, 1); + auto input_data = rg.generate_random_4d(input_b, input_f, input_y, input_x, -1, 1); auto input_data_bfyx = flatten_4d(format::bfyx, input_data); auto input_mem = engine.allocate_memory({ data_types::f16, format::bfyx, input_size }); set_values(input_mem, input_data_bfyx); auto weights_size = tensor(16, 16, 1, 1, 1); - auto weights_data = rg.generate_random_4d(output_f, input_f, 1, 1, -1, 1); + auto weights_data = rg.generate_random_4d(output_f, input_f, 1, 1, -1, 1); auto weights_data_bfyx = flatten_4d(format::bfyx, weights_data); auto weights_mem = engine.allocate_memory({ data_types::f16, format::bfyx, weights_size }); set_values(weights_mem, weights_data_bfyx); @@ -9448,11 +9448,11 @@ TEST(convolution_gpu_onednn, spatial_1d) { ov::PartialShape weights_pshape = {16, 16, 3}; layout in_layout{ input_pshape, data_types::f16, format::bfyx }; layout weights_layout{ weights_pshape, data_types::f16, format::bfyx }; - auto input_data = rg.generate_random_1d(in_layout.count(), -1, 1); + auto input_data = rg.generate_random_1d(in_layout.count(), -1, 1); auto input_mem = engine.allocate_memory(in_layout); set_values(input_mem, input_data); - auto weights_data = rg.generate_random_1d(weights_layout.count(), -1, 1); + auto weights_data = rg.generate_random_1d(weights_layout.count(), -1, 1); auto weights_mem = engine.allocate_memory(weights_layout); set_values(weights_mem, weights_data); @@ -9865,11 +9865,11 @@ void test_convolution_f32_gpu_convolution_gpu_bfyx_f16_depthwise_x_block_size_1( } TEST(convolution_f32_gpu, convolution_gpu_bfyx_f16_depthwise_x_block_size_1) { - test_convolution_f32_gpu_convolution_gpu_bfyx_f16_depthwise_x_block_size_1(false); + test_convolution_f32_gpu_convolution_gpu_bfyx_f16_depthwise_x_block_size_1(false); } TEST(export_import_convolution_f32_gpu, convolution_gpu_bfyx_f16_depthwise_x_block_size_1) { - test_convolution_f32_gpu_convolution_gpu_bfyx_f16_depthwise_x_block_size_1(true); + test_convolution_f32_gpu_convolution_gpu_bfyx_f16_depthwise_x_block_size_1(true); } diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/ctc_loss_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/ctc_loss_gpu_test.cpp index cc1b6b5999ae6d..50119f429982a3 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/ctc_loss_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/ctc_loss_gpu_test.cpp @@ -51,8 +51,8 @@ struct ctc_loss_gpu_test : public testing::TestWithParam>::GetParam(); auto& engine = get_test_engine(); - const auto float_data_type = type_to_data_type::value; - const auto int_data_type = type_to_data_type::value; + const auto float_data_type = ov::element::from(); + const auto int_data_type = ov::element::from(); const auto plane_format = format::bfyx; std::vector> inputs; @@ -241,13 +241,14 @@ const std::vector layout_formats = { testing::Values(false)), \ ctc_loss_gpu_test_##float_type##int_type::PrintToStringParamName); +using ov::float16; INSTANTIATE_CTC_LOSS_TEST_SUITE(float, int64_t); -INSTANTIATE_CTC_LOSS_TEST_SUITE(FLOAT16, int32_t); +INSTANTIATE_CTC_LOSS_TEST_SUITE(float16, int32_t); INSTANTIATE_TEST_SUITE_P(export_import, - ctc_loss_gpu_test_FLOAT16int32_t, - testing::Combine(testing::Values(getCTCLossParams()[0]), + ctc_loss_gpu_test_float16int32_t, + testing::Combine(testing::Values(getCTCLossParams()[0]), testing::Values(layout_formats[0]), testing::Values(true)), - ctc_loss_gpu_test_FLOAT16int32_t::PrintToStringParamName); + 
ctc_loss_gpu_test_float16int32_t::PrintToStringParamName); } // namespace diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/cum_sum_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/cum_sum_gpu_test.cpp index 27ee14a74f1f62..db0ca85ac9d10d 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/cum_sum_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/cum_sum_gpu_test.cpp @@ -152,7 +152,7 @@ class cum_sum_gpu : public ::testing::TestWithParam { data_types get_alloc_data_type(void) { if (std::is_same::value) return data_types::f32; - else if (std::is_same::value) + else if (std::is_same::value) return data_types::f16; else if (std::is_same::value) return data_types::i32; @@ -181,7 +181,7 @@ class cum_sum_gpu : public ::testing::TestWithParam { auto input = engine.allocate_memory({ get_alloc_data_type(), in_out_format, shape }); const int inputSize = b * f * w * z * y * x; - VF inputVals = std::is_same::value ? + VF inputVals = std::is_same::value ? rg.generate_random_1d(inputSize, -1, 1, 1) : rg.generate_random_1d(inputSize, -100, 100, 8); @@ -211,7 +211,7 @@ class cum_sum_gpu : public ::testing::TestWithParam { } }; -class cum_sum_gpu_fp16 : public ::cum_sum_gpu {}; +class cum_sum_gpu_fp16 : public ::cum_sum_gpu {}; class cum_sum_gpu_fp32 : public ::cum_sum_gpu {}; class cum_sum_gpu_int32 : public ::cum_sum_gpu {}; class cum_sum_gpu_int64 : public ::cum_sum_gpu {}; @@ -282,7 +282,7 @@ TEST(cum_sum_gpu_f16, DISABLED_basic_1d) { }; auto input = engine.allocate_memory({ data_types::f16, format::bfyx, shape }); - set_values(input, vectorCast(inputVals)); + set_values(input, vectorCast(inputVals)); topology topology; topology.add(input_layout("Input0", input->get_layout())); diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/deconvolution_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/deconvolution_gpu_test.cpp index aa9efd90b61b38..384aa68f5ba185 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/deconvolution_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/deconvolution_gpu_test.cpp @@ -31,7 +31,7 @@ struct deconvolution_traits { }; template <> -struct deconvolution_traits { +struct deconvolution_traits { using accumulator_type = float; }; @@ -984,11 +984,11 @@ TYPED_TEST(deconvolution_basic, basic_f16_wsiz2x2_in2x2x1x2_bfyx_yxfb_stride2_pa ov::intel_gpu::ExecutionConfig config = get_test_default_config(engine); config.set_property(ov::intel_gpu::optimize_data(true)); - set_values(input, { FLOAT16(8.f), FLOAT16(0.5f), - FLOAT16(6.f), FLOAT16(9.f), + set_values(input, { ov::float16(8.f), ov::float16(0.5f), + ov::float16(6.f), ov::float16(9.f), - FLOAT16(1.f), FLOAT16(3.f), - FLOAT16(2.f), FLOAT16(4.f) }); + ov::float16(1.f), ov::float16(3.f), + ov::float16(2.f), ov::float16(4.f) }); set_values(weights, { -2.f, 2.f, 7.f, -0.5f}); set_values(biases, { 1.0f }); @@ -1758,11 +1758,11 @@ TYPED_TEST(deconvolution_basic, basic_f16_k9x9_s2x2_pad4x4) { tests::random_generator rg(GET_SUITE_NAME); auto& engine = get_test_engine(); - VVVVF input_rnd = rg.generate_random_4d(1, 32, 16, 16, -2, 2); - VF input_rnd_vec = flatten_4d(format::bfyx, input_rnd); - VVVVF filter_rnd = rg.generate_random_4d(1, 32, 9, 9, -1, 1); - VF filter_rnd_vec = flatten_4d(format::bfyx, filter_rnd); - VF bias_rnd = rg.generate_random_1d(1, -1, 1); + VVVVF input_rnd = rg.generate_random_4d(1, 32, 16, 16, -2, 2); + VF input_rnd_vec = flatten_4d(format::bfyx, input_rnd); + VVVVF filter_rnd = rg.generate_random_4d(1, 32, 9, 9, -1, 1); + VF filter_rnd_vec = 
flatten_4d(format::bfyx, filter_rnd); + VF bias_rnd = rg.generate_random_1d(1, -1, 1); VF filter_rnd_f32_vec, bias_f32_rnd; for (unsigned int i = 0; i < filter_rnd_vec.size(); i++) @@ -1797,9 +1797,9 @@ TYPED_TEST(deconvolution_basic, basic_f16_k9x9_s2x2_pad4x4) { auto outputs_ref = network_ref.execute(); auto output_ref_prim = outputs_ref.at("plane_output").get_memory(); - cldnn::mem_lock output_ref_ptr(output_ref_prim, get_test_stream()); + cldnn::mem_lock output_ref_ptr(output_ref_prim, get_test_stream()); - std::vector output_vec_ref; + std::vector output_vec_ref; for (unsigned int i = 0; i < output_ref_prim->get_layout().count(); i++) { output_vec_ref.push_back(output_ref_ptr[i]); } @@ -1821,7 +1821,7 @@ TYPED_TEST(deconvolution_basic, basic_f16_k9x9_s2x2_pad4x4) { ASSERT_EQ(outputs_act.size(), size_t(1)); ASSERT_EQ(outputs_act.begin()->first, "out"); auto output_act_prim = outputs_act.begin()->second.get_memory(); - cldnn::mem_lock output_act_ptr(output_act_prim, get_test_stream()); + cldnn::mem_lock output_act_ptr(output_act_prim, get_test_stream()); std::vector output_vec; for (unsigned int i = 0; i < output_act_prim->get_layout().count(); i++) { @@ -1924,14 +1924,14 @@ TEST(deconvolution_f16_fw_gpu, basic_wsiz2x2_in2x2x1x2_b_fs_yx_fsv16_stride2_pad auto weights = engine.allocate_memory({ data_types::f16, format::oiyx,{ 1, 1, 2, 2 } }); auto biases = engine.allocate_memory({ data_types::f16, format::bfyx,{ 1, 1, 1, 1 } }); - set_values(input, { FLOAT16(8.f), FLOAT16(0.5f), - FLOAT16(6.f), FLOAT16(9.f), + set_values(input, { ov::float16(8.f), ov::float16(0.5f), + ov::float16(6.f), ov::float16(9.f), - FLOAT16(1.f), FLOAT16(3.f), - FLOAT16(2.f), FLOAT16(4.f) }); - set_values(weights, { FLOAT16(-2.f), FLOAT16(2.f), - FLOAT16(7.f), FLOAT16(-0.5f)}); - set_values(biases, { FLOAT16(1.0f) }); + ov::float16(1.f), ov::float16(3.f), + ov::float16(2.f), ov::float16(4.f) }); + set_values(weights, { ov::float16(-2.f), ov::float16(2.f), + ov::float16(7.f), ov::float16(-0.5f)}); + set_values(biases, { ov::float16(1.0f) }); topology topology( input_layout("input", input->get_layout()), @@ -2412,14 +2412,14 @@ void test_deconvolution_f16_fw_gpu_basic_wsiz2x2_in1x2x2x2_fs_b_yx_fsv32_stride1 auto weights = engine.allocate_memory({ data_types::f16, format::bfyx,{ 2, 1, 2, 2 } }); auto biases = engine.allocate_memory({ data_types::f16, format::bfyx,{ 1, 2, 1, 1 } }); - set_values(input, { FLOAT16(8.f), FLOAT16(0.5f), FLOAT16(6.f), FLOAT16(9.f), - FLOAT16(1.f), FLOAT16(3.f), FLOAT16(2.f), FLOAT16(4.f) + set_values(input, { ov::float16(8.f), ov::float16(0.5f), ov::float16(6.f), ov::float16(9.f), + ov::float16(1.f), ov::float16(3.f), ov::float16(2.f), ov::float16(4.f) }); set_values(weights, { - FLOAT16(-2.f), FLOAT16(2.f), FLOAT16(7.f), FLOAT16(-0.5f), - FLOAT16(-4.f), FLOAT16(1.f), FLOAT16(-9.f), FLOAT16(-7.f) + ov::float16(-2.f), ov::float16(2.f), ov::float16(7.f), ov::float16(-0.5f), + ov::float16(-4.f), ov::float16(1.f), ov::float16(-9.f), ov::float16(-7.f) }); - set_values(biases, { FLOAT16(1.0f), FLOAT16(-1.0f) }); + set_values(biases, { ov::float16(1.0f), ov::float16(-1.0f) }); topology topology( input_layout("input", input->get_layout()), @@ -2544,8 +2544,8 @@ struct typed_comparator { }; template <> -struct typed_comparator { - static ::testing::AssertionResult compare(const char* lhs_expr, const char* rhs_expr, FLOAT16 ref, FLOAT16 val) { +struct typed_comparator { + static ::testing::AssertionResult compare(const char* lhs_expr, const char* rhs_expr, ov::float16 ref, ov::float16 val) { double 
abs_error = std::abs(0.05 * (double)ref); return ::testing::internal::DoubleNearPredFormat(lhs_expr, rhs_expr, "5 percent", (double)ref, (double)val, abs_error); } @@ -2652,8 +2652,8 @@ class deconvolution_random_test_base { type_test_ranges::min, type_test_ranges::max); - auto in_layout = cldnn::layout(cldnn::type_to_data_type::value, params.input_format, params.input_size); - auto wei_layout = cldnn::layout(cldnn::type_to_data_type::value, params.weights_format, params.weights_size); + auto in_layout = cldnn::layout(ov::element::from(), params.input_format, params.input_size); + auto wei_layout = cldnn::layout(ov::element::from(), params.weights_format, params.weights_size); auto wei_mem = eng.allocate_memory(wei_layout); auto in_mem = eng.allocate_memory(in_layout); @@ -2670,7 +2670,7 @@ class deconvolution_random_test_base { if (params.with_bias) { auto bias_size = cldnn::tensor(feature(params.weights_size.batch[0] * params.weights_size.group[0])); - auto bias_lay = cldnn::layout(cldnn::type_to_data_type::value, cldnn::format::bfyx, bias_size); + auto bias_lay = cldnn::layout(ov::element::from(), cldnn::format::bfyx, bias_size); auto bias_mem = eng.allocate_memory(bias_lay); bias_data = rg.generate_random_1d(bias_lay.feature(), -1, 1); set_values(bias_mem, bias_data); @@ -2763,7 +2763,7 @@ class deconvolution_random_test : public testing::TestWithParam(); break; case data_types::f16: - run_typed_in(); + run_typed_in(); break; case data_types::i8: run_typed_in(); @@ -2795,7 +2795,7 @@ class deconvolution_random_test : public testing::TestWithParam(); break; case data_types::f16: - run_typed(); + run_typed(); break; default: break; @@ -2810,7 +2810,7 @@ class deconvolution_random_test : public testing::TestWithParam(); break; case data_types::f16: - run_typed_in_wei(); + run_typed_in_wei(); break; case data_types::i8: run_typed_in_wei(); diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/depth_concatenate_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/depth_concatenate_gpu_test.cpp index 0e9031f7d7a503..4d6824ca129cef 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/depth_concatenate_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/depth_concatenate_gpu_test.cpp @@ -417,17 +417,17 @@ TEST(depth_concatenate_f32_gpu, test06_padded_input) { auto input1 = engine.allocate_memory({ data_types::f16, format::fs_b_yx_fsv32, {1, input_f, 1, 1} }); auto input2 = engine.allocate_memory({ data_types::f16, format::fs_b_yx_fsv32, {1, input_f, 1, 1} }); - auto input1_data = rg.generate_random_4d(1, input_f, 1, 1, -1, 1); - auto input2_data = rg.generate_random_4d(1, input_f, 1, 1, -1, 1); + auto input1_data = rg.generate_random_4d(1, input_f, 1, 1, -1, 1); + auto input2_data = rg.generate_random_4d(1, input_f, 1, 1, -1, 1); set_values(input1, flatten_4d(format::bfyx, input1_data)); set_values(input2, flatten_4d(format::bfyx, input2_data)); auto weights = engine.allocate_memory({ data_types::f16, format::oiyx, {input_f, input_f, 3, 3} }); // Construct weights for convolution that just double input values. 
- VVVVF weights_data; + VVVVF weights_data; weights_data.resize(input_f); for (size_t oi = 0; oi < input_f; ++oi) { - weights_data[oi].resize(input_f, VVF(3, VF(3, FLOAT16(0.f)))); + weights_data[oi].resize(input_f, VVF(3, VF(3, ov::float16(0.f)))); weights_data[oi][oi][1][1] = 2.f; } set_values(weights, flatten_4d(format::bfyx, weights_data)); @@ -500,17 +500,17 @@ TEST(depth_concatenate_f32_gpu, test07_padded_output) { auto input1 = engine.allocate_memory({ data_types::f16, format::fs_b_yx_fsv32, {1, input_f, 1, 1} }); auto input2 = engine.allocate_memory({ data_types::f16, format::fs_b_yx_fsv32, {1, input_f, 1, 1} }); - auto input1_data = rg.generate_random_4d(1, input_f, 1, 1, -1, 1); - auto input2_data = rg.generate_random_4d(1, input_f, 1, 1, -1, 1); + auto input1_data = rg.generate_random_4d(1, input_f, 1, 1, -1, 1); + auto input2_data = rg.generate_random_4d(1, input_f, 1, 1, -1, 1); set_values(input1, flatten_4d(format::bfyx, input1_data)); set_values(input2, flatten_4d(format::bfyx, input2_data)); auto weights = engine.allocate_memory({ data_types::f16, format::oiyx, {output_f, output_f, 3, 3} }); // Construct weights for convolution that just double input values. - VVVVF weights_data; + VVVVF weights_data; weights_data.resize(output_f); for (size_t oi = 0; oi < output_f; ++oi) { - weights_data[oi].resize(output_f, VVF(3, VF(3, FLOAT16(0.f)))); + weights_data[oi].resize(output_f, VVF(3, VF(3, ov::float16(0.f)))); weights_data[oi][oi][1][1] = 2.f; } set_values(weights, flatten_4d(format::bfyx, weights_data)); @@ -1248,7 +1248,7 @@ class depth_concatenate_test : public tests::generic_test { if (generic_params->data_type == data_types::f32) { return generate_reference_typed(inputs); } else { - return generate_reference_typed(inputs); + return generate_reference_typed(inputs); } } diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/depth_to_space_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/depth_to_space_gpu_test.cpp index c1b88246f94f7c..024f1fe6c75500 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/depth_to_space_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/depth_to_space_gpu_test.cpp @@ -28,8 +28,8 @@ TEST(depth_to_space_fp16_gpu, d1411_bs2) { size_t block_size = 2; set_values(input1, { - FLOAT16(0.0f), FLOAT16(1.0f), - FLOAT16(2.0f), FLOAT16(3.0f) + ov::float16(0.0f), ov::float16(1.0f), + ov::float16(2.0f), ov::float16(3.0f) }); topology topology; @@ -68,10 +68,10 @@ TEST(depth_to_space_fp16_gpu, d1421_bs2) { size_t block_size = 2; set_values(input1, { - FLOAT16(0.0f), FLOAT16(1.0f), - FLOAT16(2.0f), FLOAT16(3.0f), - FLOAT16(4.0f), FLOAT16(5.0f), - FLOAT16(6.0f), FLOAT16(7.0f) + ov::float16(0.0f), ov::float16(1.0f), + ov::float16(2.0f), ov::float16(3.0f), + ov::float16(4.0f), ov::float16(5.0f), + ov::float16(6.0f), ov::float16(7.0f) }); topology topology; @@ -110,23 +110,23 @@ TEST(depth_to_space_fp16_gpu, d1933_bs3) { size_t block_size = 3; set_values(input1, { - FLOAT16(0.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), - FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), FLOAT16(9.0f), - FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), FLOAT16(13.0f), FLOAT16(14.0f), - FLOAT16(15.0f), FLOAT16(16.0f), FLOAT16(17.0f), FLOAT16(18.0f), FLOAT16(19.0f), - FLOAT16(20.0f), FLOAT16(21.0f), FLOAT16(22.0f), FLOAT16(23.0f), FLOAT16(24.0f), - FLOAT16(25.0f), FLOAT16(26.0f), FLOAT16(27.0f), FLOAT16(28.0f), FLOAT16(29.0f), - FLOAT16(30.0f), FLOAT16(31.0f), FLOAT16(32.0f), FLOAT16(33.0f), FLOAT16(34.0f), - FLOAT16(35.0f), 
FLOAT16(36.0f), FLOAT16(37.0f), FLOAT16(38.0f), FLOAT16(39.0f), - FLOAT16(40.0f), FLOAT16(41.0f), FLOAT16(42.0f), FLOAT16(43.0f), FLOAT16(44.0f), - FLOAT16(45.0f), FLOAT16(46.0f), FLOAT16(47.0f), FLOAT16(48.0f), FLOAT16(49.0f), - FLOAT16(50.0f), FLOAT16(51.0f), FLOAT16(52.0f), FLOAT16(53.0f), FLOAT16(54.0f), - FLOAT16(55.0f), FLOAT16(56.0f), FLOAT16(57.0f), FLOAT16(58.0f), FLOAT16(59.0f), - FLOAT16(60.0f), FLOAT16(61.0f), FLOAT16(62.0f), FLOAT16(63.0f), FLOAT16(64.0f), - FLOAT16(65.0f), FLOAT16(66.0f), FLOAT16(67.0f), FLOAT16(68.0f), FLOAT16(69.0f), - FLOAT16(70.0f), FLOAT16(71.0f), FLOAT16(72.0f), FLOAT16(73.0f), FLOAT16(74.0f), - FLOAT16(75.0f), FLOAT16(76.0f), FLOAT16(77.0f), FLOAT16(78.0f), FLOAT16(79.0f), - FLOAT16(80.0f) + ov::float16(0.0f), ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), + ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), ov::float16(9.0f), + ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), ov::float16(13.0f), ov::float16(14.0f), + ov::float16(15.0f), ov::float16(16.0f), ov::float16(17.0f), ov::float16(18.0f), ov::float16(19.0f), + ov::float16(20.0f), ov::float16(21.0f), ov::float16(22.0f), ov::float16(23.0f), ov::float16(24.0f), + ov::float16(25.0f), ov::float16(26.0f), ov::float16(27.0f), ov::float16(28.0f), ov::float16(29.0f), + ov::float16(30.0f), ov::float16(31.0f), ov::float16(32.0f), ov::float16(33.0f), ov::float16(34.0f), + ov::float16(35.0f), ov::float16(36.0f), ov::float16(37.0f), ov::float16(38.0f), ov::float16(39.0f), + ov::float16(40.0f), ov::float16(41.0f), ov::float16(42.0f), ov::float16(43.0f), ov::float16(44.0f), + ov::float16(45.0f), ov::float16(46.0f), ov::float16(47.0f), ov::float16(48.0f), ov::float16(49.0f), + ov::float16(50.0f), ov::float16(51.0f), ov::float16(52.0f), ov::float16(53.0f), ov::float16(54.0f), + ov::float16(55.0f), ov::float16(56.0f), ov::float16(57.0f), ov::float16(58.0f), ov::float16(59.0f), + ov::float16(60.0f), ov::float16(61.0f), ov::float16(62.0f), ov::float16(63.0f), ov::float16(64.0f), + ov::float16(65.0f), ov::float16(66.0f), ov::float16(67.0f), ov::float16(68.0f), ov::float16(69.0f), + ov::float16(70.0f), ov::float16(71.0f), ov::float16(72.0f), ov::float16(73.0f), ov::float16(74.0f), + ov::float16(75.0f), ov::float16(76.0f), ov::float16(77.0f), ov::float16(78.0f), ov::float16(79.0f), + ov::float16(80.0f) }); topology topology; @@ -212,8 +212,8 @@ TEST(depth_to_space_fp32_gpu, d112960540_bs2) { auto input1 = engine.allocate_memory({ data_types::f16, format::bfyx, { 1, 12, 960, 540 } }); size_t block_size = 2; - auto random_input = rg.generate_random_4d(1, 12, 540, 960, -1, 1); - auto input_rnd_vec = flatten_4d(format::bfyx, random_input); + auto random_input = rg.generate_random_4d(1, 12, 540, 960, -1, 1); + auto input_rnd_vec = flatten_4d(format::bfyx, random_input); set_values(input1, input_rnd_vec); topology topology_act; @@ -229,7 +229,7 @@ TEST(depth_to_space_fp32_gpu, d112960540_bs2) { auto outputs = network_act.execute(); auto output = outputs.at("depth_to_space").get_memory(); - cldnn::mem_lock output_ptr (output, get_test_stream()); + cldnn::mem_lock output_ptr (output, get_test_stream()); std::vector perm = { 0,3,4,1,5,2 }; @@ -254,7 +254,7 @@ TEST(depth_to_space_fp32_gpu, d112960540_bs2) { auto outputs_ref = network_ref.execute(); auto output_ref = outputs_ref.at("reshape2").get_memory(); - cldnn::mem_lock output_ptr_ref(output_ref, get_test_stream()); + cldnn::mem_lock output_ptr_ref(output_ref, get_test_stream()); for (size_t i = 0; i < 
output->get_layout().count(); ++i) { ASSERT_EQ(output_ptr_ref[i], output_ptr[i]); diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/detection_output_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/detection_output_test.cpp index c00252dfbb19a7..a94b01e680f600 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/detection_output_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/detection_output_test.cpp @@ -137,9 +137,9 @@ class detection_output_test : public ::testing::Test { const int keep_top_k = 150; auto& engine = get_test_engine(); - cldnn::memory::ptr input_location = engine.allocate_memory({ type_to_data_type::value, format::bfyx,{ this->num_of_images, this->num_priors * num_loc_classes * 4, 1, 1 } }); - cldnn::memory::ptr input_confidence = engine.allocate_memory({ type_to_data_type::value, format::bfyx,{ this->num_of_images, this->num_priors * this->num_classes, 1, 1 } }); - cldnn::memory::ptr input_prior_box = engine.allocate_memory({ type_to_data_type::value, format::bfyx,{ 1, 2, 1, this->num_priors * 4 } }); + cldnn::memory::ptr input_location = engine.allocate_memory({ ov::element::from(), format::bfyx,{ this->num_of_images, this->num_priors * num_loc_classes * 4, 1, 1 } }); + cldnn::memory::ptr input_confidence = engine.allocate_memory({ ov::element::from(), format::bfyx,{ this->num_of_images, this->num_priors * this->num_classes, 1, 1 } }); + cldnn::memory::ptr input_prior_box = engine.allocate_memory({ ov::element::from(), format::bfyx,{ 1, 2, 1, this->num_priors * 4 } }); topology topology; topology.add(input_layout("input_location", input_location->get_layout())); @@ -171,9 +171,9 @@ class detection_output_test : public ::testing::Test { const int keep_top_k = 150; auto& engine = get_test_engine(); - cldnn::memory::ptr input_location = engine.allocate_memory({ type_to_data_type::value, format::bfyx,{ this->num_of_images, this->num_priors * num_loc_classes * 4, 1, 1 } }); - cldnn::memory::ptr input_confidence = engine.allocate_memory({ type_to_data_type::value, format::bfyx,{ this->num_of_images, this->num_priors * this->num_classes, 1, 1 } }); - cldnn::memory::ptr input_prior_box = engine.allocate_memory({ type_to_data_type::value, format::bfyx,{ 1, 2, 1, this->num_priors * 4 } }); + cldnn::memory::ptr input_location = engine.allocate_memory({ ov::element::from(), format::bfyx,{ this->num_of_images, this->num_priors * num_loc_classes * 4, 1, 1 } }); + cldnn::memory::ptr input_confidence = engine.allocate_memory({ ov::element::from(), format::bfyx,{ this->num_of_images, this->num_priors * this->num_classes, 1, 1 } }); + cldnn::memory::ptr input_prior_box = engine.allocate_memory({ ov::element::from(), format::bfyx,{ 1, 2, 1, this->num_priors * 4 } }); topology topology; topology.add(input_layout("input_location", input_location->get_layout())); @@ -212,9 +212,9 @@ class detection_output_test : public ::testing::Test { const int background_label_id = 0; auto& engine = get_test_engine(); - cldnn::memory::ptr input_location = engine.allocate_memory({ type_to_data_type::value, format::bfyx,{ this->num_of_images, this->num_priors * num_loc_classes * 4, 1, 1 } }); - cldnn::memory::ptr input_confidence = engine.allocate_memory({ type_to_data_type::value, format::bfyx,{ this->num_of_images, this->num_priors * this->num_classes, 1, 1 } }); - cldnn::memory::ptr input_prior_box = engine.allocate_memory({ type_to_data_type::value, format::bfyx,{ 1, 2, 1, this->num_priors * 4 } }); + cldnn::memory::ptr input_location = engine.allocate_memory({ 
ov::element::from(), format::bfyx,{ this->num_of_images, this->num_priors * num_loc_classes * 4, 1, 1 } }); + cldnn::memory::ptr input_confidence = engine.allocate_memory({ ov::element::from(), format::bfyx,{ this->num_of_images, this->num_priors * this->num_classes, 1, 1 } }); + cldnn::memory::ptr input_prior_box = engine.allocate_memory({ ov::element::from(), format::bfyx,{ 1, 2, 1, this->num_priors * 4 } }); this->init_buffers(input_prior_box, input_confidence, input_location, share_location); @@ -260,9 +260,9 @@ class detection_output_test : public ::testing::Test { const int background_label_id = 0; auto& engine = get_test_engine(); - cldnn::memory::ptr input_location = engine.allocate_memory({ type_to_data_type::value, format::bfyx,{ this->num_of_images, this->num_priors * num_loc_classes * 4, 1, 1 } }); - cldnn::memory::ptr input_confidence = engine.allocate_memory({ type_to_data_type::value, format::bfyx,{ this->num_of_images, this->num_priors * this->num_classes, 1, 1 } }); - cldnn::memory::ptr input_prior_box = engine.allocate_memory({ type_to_data_type::value, format::bfyx,{ 1, 2, 1, this->num_priors * 4 } }); + cldnn::memory::ptr input_location = engine.allocate_memory({ ov::element::from(), format::bfyx,{ this->num_of_images, this->num_priors * num_loc_classes * 4, 1, 1 } }); + cldnn::memory::ptr input_confidence = engine.allocate_memory({ ov::element::from(), format::bfyx,{ this->num_of_images, this->num_priors * this->num_classes, 1, 1 } }); + cldnn::memory::ptr input_prior_box = engine.allocate_memory({ ov::element::from(), format::bfyx,{ 1, 2, 1, this->num_priors * 4 } }); this->init_buffers(input_prior_box, input_confidence, input_location, share_location); @@ -302,9 +302,9 @@ class detection_output_test : public ::testing::Test { const int background_label_id = 0; auto& engine = get_test_engine(); - cldnn::memory::ptr input_location = engine.allocate_memory({ type_to_data_type::value, format::bfyx,{ this->num_of_images, this->num_priors * num_loc_classes * 4, 1, 1 } }); - cldnn::memory::ptr input_confidence = engine.allocate_memory({ type_to_data_type::value, format::bfyx,{ this->num_of_images, this->num_priors * this->num_classes, 1, 1 } }); - cldnn::memory::ptr input_prior_box = engine.allocate_memory({ type_to_data_type::value, format::bfyx,{ 1, 2, 1, this->num_priors * 4 } }); + cldnn::memory::ptr input_location = engine.allocate_memory({ ov::element::from(), format::bfyx,{ this->num_of_images, this->num_priors * num_loc_classes * 4, 1, 1 } }); + cldnn::memory::ptr input_confidence = engine.allocate_memory({ ov::element::from(), format::bfyx,{ this->num_of_images, this->num_priors * this->num_classes, 1, 1 } }); + cldnn::memory::ptr input_prior_box = engine.allocate_memory({ ov::element::from(), format::bfyx,{ 1, 2, 1, this->num_priors * 4 } }); this->init_buffers(input_prior_box, input_confidence, input_location, share_location); @@ -355,9 +355,9 @@ class detection_output_test : public ::testing::Test { const int background_label_id = 0; auto& engine = get_test_engine(); - cldnn::memory::ptr input_location = engine.allocate_memory({ type_to_data_type::value, format::bfyx,{ this->num_of_images, this->num_priors * num_loc_classes * 4, 1, 1 } }); - cldnn::memory::ptr input_confidence = engine.allocate_memory({ type_to_data_type::value, format::bfyx,{ this->num_of_images, this->num_priors * this->num_classes, 1, 1 } }); - cldnn::memory::ptr input_prior_box = engine.allocate_memory({ type_to_data_type::value, format::bfyx,{ 1, 2, 1, this->num_priors * 4 } }); + 
cldnn::memory::ptr input_location = engine.allocate_memory({ ov::element::from(), format::bfyx,{ this->num_of_images, this->num_priors * num_loc_classes * 4, 1, 1 } }); + cldnn::memory::ptr input_confidence = engine.allocate_memory({ ov::element::from(), format::bfyx,{ this->num_of_images, this->num_priors * this->num_classes, 1, 1 } }); + cldnn::memory::ptr input_prior_box = engine.allocate_memory({ ov::element::from(), format::bfyx,{ 1, 2, 1, this->num_priors * 4 } }); this->init_buffers(input_prior_box, input_confidence, input_location, share_location); @@ -411,11 +411,11 @@ class detection_output_test : public ::testing::Test { const bool decrease_label_id = true; auto& engine = get_test_engine(); - cldnn::memory::ptr input_location = engine.allocate_memory({ type_to_data_type::value, format::bfyx, + cldnn::memory::ptr input_location = engine.allocate_memory({ ov::element::from(), format::bfyx, { this->num_of_images, this->num_priors * num_loc_classes * 4, 1, 1 } }); - cldnn::memory::ptr input_confidence = engine.allocate_memory({ type_to_data_type::value, format::bfyx, + cldnn::memory::ptr input_confidence = engine.allocate_memory({ ov::element::from(), format::bfyx, { this->num_of_images, this->num_priors * this->num_classes, 1, 1 } }); - cldnn::memory::ptr input_prior_box = engine.allocate_memory({ type_to_data_type::value, format::bfyx, + cldnn::memory::ptr input_prior_box = engine.allocate_memory({ ov::element::from(), format::bfyx, { 1, 2, 1, this->num_priors * 4 } }); this->init_buffers(input_prior_box, input_confidence, input_location, share_location); @@ -430,7 +430,7 @@ class detection_output_test : public ::testing::Test { top_k, eta, code_type, variance_encoded_in_target, confidence_threshold, prior_info_size, prior_coordinates_offset, prior_is_normalized, input_width, input_height, decrease_label_id )); - topology.add(reorder("output_reorder", input_info("detection_output"), format::bfyx, type_to_data_type::value)); + topology.add(reorder("output_reorder", input_info("detection_output"), format::bfyx, ov::element::from())); auto config = get_test_default_config(engine); config.set_property(ov::intel_gpu::force_implementations(ov::intel_gpu::ImplForcingMap{{"detection_output", {format::bfyx, "", impl_types::cpu}}})); @@ -472,9 +472,9 @@ class detection_output_test : public ::testing::Test { const int background_label_id = -1; auto& engine = get_test_engine(); - cldnn::memory::ptr input_location = engine.allocate_memory({ type_to_data_type::value, format::bfyx,{ this->num_of_images, this->num_priors * num_loc_classes * 4, 1, 1 } }); - cldnn::memory::ptr input_confidence = engine.allocate_memory({ type_to_data_type::value, format::bfyx,{ this->num_of_images, this->num_priors * this->num_classes, 1, 1 } }); - cldnn::memory::ptr input_prior_box = engine.allocate_memory({ type_to_data_type::value, format::bfyx,{ 1, 2, 1, this->num_priors * 4 } }); + cldnn::memory::ptr input_location = engine.allocate_memory({ ov::element::from(), format::bfyx,{ this->num_of_images, this->num_priors * num_loc_classes * 4, 1, 1 } }); + cldnn::memory::ptr input_confidence = engine.allocate_memory({ ov::element::from(), format::bfyx,{ this->num_of_images, this->num_priors * this->num_classes, 1, 1 } }); + cldnn::memory::ptr input_prior_box = engine.allocate_memory({ ov::element::from(), format::bfyx,{ 1, 2, 1, this->num_priors * 4 } }); this->init_buffers(input_prior_box, input_confidence, input_location, share_location); @@ -533,9 +533,9 @@ class detection_output_test : public ::testing::Test { 
const int top_k = 2; auto& engine = get_test_engine(); - cldnn::memory::ptr input_location = engine.allocate_memory({ type_to_data_type::value, format::bfyx,{ this->num_of_images, this->num_priors * num_loc_classes * 4, 1, 1 } }); - cldnn::memory::ptr input_confidence = engine.allocate_memory({ type_to_data_type::value, format::bfyx,{ this->num_of_images, this->num_priors * this->num_classes, 1, 1 } }); - cldnn::memory::ptr input_prior_box = engine.allocate_memory({ type_to_data_type::value, format::bfyx,{ 1, 2, 1, this->num_priors * 4 } }); + cldnn::memory::ptr input_location = engine.allocate_memory({ ov::element::from(), format::bfyx,{ this->num_of_images, this->num_priors * num_loc_classes * 4, 1, 1 } }); + cldnn::memory::ptr input_confidence = engine.allocate_memory({ ov::element::from(), format::bfyx,{ this->num_of_images, this->num_priors * this->num_classes, 1, 1 } }); + cldnn::memory::ptr input_prior_box = engine.allocate_memory({ ov::element::from(), format::bfyx,{ 1, 2, 1, this->num_priors * 4 } }); this->init_buffers(input_prior_box, input_confidence, input_location, share_location); @@ -581,9 +581,9 @@ class detection_output_test : public ::testing::Test { const int background_label_id = 0; auto& engine = get_test_engine(); - cldnn::memory::ptr input_location = engine.allocate_memory({ type_to_data_type::value, format::bfyx,{ this->num_of_images, this->num_priors * num_loc_classes * 4, 1, 1 } }); - cldnn::memory::ptr input_confidence = engine.allocate_memory({ type_to_data_type::value, format::bfyx,{ this->num_of_images, this->num_priors * this->num_classes, 1, 1 } }); - cldnn::memory::ptr input_prior_box = engine.allocate_memory({ type_to_data_type::value, format::bfyx,{ 1, 2, 1, this->num_priors * 4 } }); + cldnn::memory::ptr input_location = engine.allocate_memory({ ov::element::from(), format::bfyx,{ this->num_of_images, this->num_priors * num_loc_classes * 4, 1, 1 } }); + cldnn::memory::ptr input_confidence = engine.allocate_memory({ ov::element::from(), format::bfyx,{ this->num_of_images, this->num_priors * this->num_classes, 1, 1 } }); + cldnn::memory::ptr input_prior_box = engine.allocate_memory({ ov::element::from(), format::bfyx,{ 1, 2, 1, this->num_priors * 4 } }); this->init_buffers(input_prior_box, input_confidence, input_location, share_location); @@ -632,9 +632,9 @@ class detection_output_test : public ::testing::Test { const int top_k = 2; auto& engine = get_test_engine(); - cldnn::memory::ptr input_location = engine.allocate_memory({ type_to_data_type::value, format::bfyx,{ this->num_of_images, this->num_priors * num_loc_classes * 4, 1, 1 } }); - cldnn::memory::ptr input_confidence = engine.allocate_memory({ type_to_data_type::value, format::bfyx,{ this->num_of_images, this->num_priors * this->num_classes, 1, 1 } }); - cldnn::memory::ptr input_prior_box = engine.allocate_memory({ type_to_data_type::value, format::bfyx,{ 1, 2, 1, this->num_priors * 4 } }); + cldnn::memory::ptr input_location = engine.allocate_memory({ ov::element::from(), format::bfyx,{ this->num_of_images, this->num_priors * num_loc_classes * 4, 1, 1 } }); + cldnn::memory::ptr input_confidence = engine.allocate_memory({ ov::element::from(), format::bfyx,{ this->num_of_images, this->num_priors * this->num_classes, 1, 1 } }); + cldnn::memory::ptr input_prior_box = engine.allocate_memory({ ov::element::from(), format::bfyx,{ 1, 2, 1, this->num_priors * 4 } }); this->init_buffers(input_prior_box, input_confidence, input_location, share_location); @@ -677,9 +677,9 @@ class detection_output_test : 
public ::testing::Test { const int top_k = 2; auto& engine = get_test_engine(); - cldnn::memory::ptr input_location = engine.allocate_memory({ type_to_data_type::value, format::bfyx,{ this->num_of_images, this->num_priors * num_loc_classes * 4, 1, 1 } }); - cldnn::memory::ptr input_confidence = engine.allocate_memory({ type_to_data_type::value, format::bfyx,{ this->num_of_images, this->num_priors * this->num_classes, 1, 1 } }); - cldnn::memory::ptr input_prior_box = engine.allocate_memory({ type_to_data_type::value, format::bfyx,{ 1, 2, 1, this->num_priors * 4 } }); + cldnn::memory::ptr input_location = engine.allocate_memory({ ov::element::from(), format::bfyx,{ this->num_of_images, this->num_priors * num_loc_classes * 4, 1, 1 } }); + cldnn::memory::ptr input_confidence = engine.allocate_memory({ ov::element::from(), format::bfyx,{ this->num_of_images, this->num_priors * this->num_classes, 1, 1 } }); + cldnn::memory::ptr input_prior_box = engine.allocate_memory({ ov::element::from(), format::bfyx,{ 1, 2, 1, this->num_priors * 4 } }); this->init_buffers(input_prior_box, input_confidence, input_location, share_location); topology topology; @@ -690,7 +690,7 @@ class detection_output_test : public ::testing::Test { topology.add(reorder("input_confidence_padded", input_info("input_confidence"), input_location->get_layout().with_padding(padding{ { 0, 0, 2, 7 },{ 0, 0, 13, 1 } }))); topology.add(detection_output("detection_output", { input_info("input_location_padded"), input_info("input_confidence_padded"), input_info("input_prior_box") }, this->num_classes, keep_top_k, share_location, background_label_id, this->nms_threshold, top_k)); - topology.add(reorder("output_reorder", input_info("detection_output"), format::bfyx, type_to_data_type::value)); + topology.add(reorder("output_reorder", input_info("detection_output"), format::bfyx, ov::element::from())); auto config = get_test_default_config(engine); config.set_property(ov::intel_gpu::force_implementations(ov::intel_gpu::ImplForcingMap{{"detection_output", {format::bfyx, "", impl_types::cpu}}})); @@ -738,9 +738,9 @@ class detection_output_test : public ::testing::Test { const bool prior_is_normalized = true; auto& engine = get_test_engine(); - cldnn::memory::ptr input_location = engine.allocate_memory({ type_to_data_type::value, format::bfyx,{ this->num_of_images, this->num_priors * num_loc_classes * 4, 1, 1 } }); - cldnn::memory::ptr input_confidence = engine.allocate_memory({ type_to_data_type::value, format::bfyx,{ this->num_of_images, this->num_priors * this->num_classes, 1, 1 } }); - cldnn::memory::ptr input_prior_box = engine.allocate_memory({ type_to_data_type::value, format::bfyx,{ 1, 1, 1, this->num_priors * prior_info_size } }); + cldnn::memory::ptr input_location = engine.allocate_memory({ ov::element::from(), format::bfyx,{ this->num_of_images, this->num_priors * num_loc_classes * 4, 1, 1 } }); + cldnn::memory::ptr input_confidence = engine.allocate_memory({ ov::element::from(), format::bfyx,{ this->num_of_images, this->num_priors * this->num_classes, 1, 1 } }); + cldnn::memory::ptr input_prior_box = engine.allocate_memory({ ov::element::from(), format::bfyx,{ 1, 1, 1, this->num_priors * prior_info_size } }); this->init_buffers(input_prior_box, input_confidence, input_location, share_location, variance_encoded_in_target, prior_info_size, prior_coordinates_offset, prior_is_normalized); @@ -751,7 +751,7 @@ class detection_output_test : public ::testing::Test { topology.add(input_layout("input_prior_box", 
input_prior_box->get_layout())); topology.add(reorder("input_location_padded", input_info("input_location"), input_location->get_layout().with_padding(padding{ { 0, 0, 12, 3 },{ 0, 0, 5, 11 } }))); topology.add(reorder("input_confidence_padded", input_info("input_confidence"), input_location->get_layout().with_padding(padding{ { 0, 0, 2, 7 },{ 0, 0, 13, 1 } }))); - topology.add(reorder("output_reorder", input_info("detection_output"), format::bfyx, type_to_data_type::value)); + topology.add(reorder("output_reorder", input_info("detection_output"), format::bfyx, ov::element::from())); topology.add(detection_output("detection_output", { input_info("input_location_padded"), input_info("input_confidence_padded"), input_info("input_prior_box") }, this->num_classes, keep_top_k, share_location, background_label_id, this->nms_threshold, top_k, @@ -797,7 +797,7 @@ class detection_output_test : public ::testing::Test { const float nms_threshold; }; -typedef ::testing::Types detection_output_test_types; +typedef ::testing::Types detection_output_test_types; TYPED_TEST_SUITE(detection_output_test, detection_output_test_types); TYPED_TEST(detection_output_test, test_setup_basic) { diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/dft_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/dft_gpu_test.cpp index e1a7f04658af8c..3099c8dad5d9d3 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/dft_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/dft_gpu_test.cpp @@ -61,7 +61,7 @@ float getThreshold(dft_type type) { } template <> -float getThreshold(dft_type type) { +float getThreshold(dft_type type) { if (type.direction == dft_direction::forward && type.mode == dft_mode::complex) { return 4e-2f; } @@ -106,7 +106,7 @@ struct dft_gpu_test : public testing::TestWithParam { auto& engine = get_test_engine(); - auto data_type = type_to_data_type::value; + auto data_type = ov::element::from(); const layout data_layout(data_type, plain_format, tensor(plain_format, p.input_shape)); auto input = engine.allocate_memory(data_layout); set_values(input, convert(p.input_values)); @@ -146,7 +146,7 @@ struct dft_gpu_test : public testing::TestWithParam { std::ostringstream result; result << "InputShape=" << vec2str(p.input_shape) << "_"; - result << "Precision=" << data_type_traits::name(type_to_data_type::value) << "_"; + result << "Precision=" << ov::element::Type(ov::element::from()) << "_"; result << "Axes=" << vec2str(p.axes) << "_"; result << "SignalSize=" << vec2str(p.signal_size) << "_"; result << "Inverse=" << (type.direction == dft_direction::inverse) << "_"; @@ -1990,13 +1990,13 @@ const std::vector blocked_format_5d = { }; using dft_gpu_test_float = dft_gpu_test; -using dft_gpu_test_half_t = dft_gpu_test; +using dft_gpu_test_float16 = dft_gpu_test; TEST_P(dft_gpu_test_float, test) { ASSERT_NO_FATAL_FAILURE(test()); } -TEST_P(dft_gpu_test_half_t, test) { +TEST_P(dft_gpu_test_float16, test) { ASSERT_NO_FATAL_FAILURE(test()); } @@ -2010,9 +2010,10 @@ TEST_P(dft_gpu_test_half_t, test) { testing::Values(false)), \ dft_gpu_test_##inputType::PrintToStringParamName); +using ov::float16; #define INSTANTIATE_DFT_TEST_SUITE_WITH_TYPES(dftType, dimension) \ INSTANTIATE_DFT_TEST_SUITE(dftType, dimension, float) \ - INSTANTIATE_DFT_TEST_SUITE(dftType, dimension, half_t) + INSTANTIATE_DFT_TEST_SUITE(dftType, dimension, float16) INSTANTIATE_DFT_TEST_SUITE_WITH_TYPES(DFT, 4d) INSTANTIATE_DFT_TEST_SUITE_WITH_TYPES(DFT, 5d) @@ -2042,7 +2043,7 @@ TEST(dft_gpu_test, irdft_output_shape) { dft_params p 
= IRDFT_params_5d.front(); auto& engine = get_test_engine(); - auto data_type = type_to_data_type::value; + auto data_type = ov::element::from(); const layout data_layout(data_type, plain_format_5d, tensor(plain_format_5d, p.input_shape)); auto input = engine.allocate_memory(data_layout); set_values(input, convert(p.input_values)); diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/eltwise_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/eltwise_gpu_test.cpp index 6eacb4ef2065b6..c1309b720daf16 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/eltwise_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/eltwise_gpu_test.cpp @@ -89,8 +89,8 @@ void generic_eltwise_test(cldnn::format test_input_fmt, int input_b, int input_f auto& engine = get_test_engine(); tensor input_tensor( input_b, input_f, input_x, input_y ); - auto input1 = engine.allocate_memory({ type_to_data_type::value, test_input_fmt, input_tensor }); - auto input2 = engine.allocate_memory({ type_to_data_type::value, test_input_fmt, input_tensor }); + auto input1 = engine.allocate_memory({ ov::element::from(), test_input_fmt, input_tensor }); + auto input2 = engine.allocate_memory({ ov::element::from(), test_input_fmt, input_tensor }); set_values(input1, input1_rnd_vec); set_values(input2, input2_rnd_vec); @@ -164,7 +164,7 @@ void run_eltwise_generic_test(cldnn::eltwise_mode mode) { generic_eltwise_test(test_inputs_fmt, 1, 1, input_size.first, input_size.second, mode, false, 0.f, 0, 0, 0, 0); if (f16_supported) - generic_eltwise_test(test_inputs_fmt, 1, 1, input_size.first, input_size.second, mode, false, (FLOAT16)0.f, 0, 0, 0, 0); + generic_eltwise_test(test_inputs_fmt, 1, 1, input_size.first, input_size.second, mode, false, (ov::float16)0.f, 0, 0, 0, 0); } @@ -233,8 +233,8 @@ void generic_eltwise_bool_test(cldnn::format test_input_fmt, int input_b, int in auto& engine = get_test_engine(); tensor input_tensor( input_b, input_f, input_x, input_y ); - auto input1 = engine.allocate_memory({ type_to_data_type::value, test_input_fmt, input_tensor }); - auto input2 = engine.allocate_memory({ type_to_data_type::value, test_input_fmt, input_tensor }); + auto input1 = engine.allocate_memory({ ov::element::from(), test_input_fmt, input_tensor }); + auto input2 = engine.allocate_memory({ ov::element::from(), test_input_fmt, input_tensor }); set_values(input1, input1_rnd_vec); set_values(input2, input2_rnd_vec); @@ -3194,10 +3194,10 @@ TEST(eltwise_gpu_f16, fs_b_yx_fsv32_basic) tensor input_tensor(2, 2, 2, 2); auto fp16_bfyx_2x2x2x2_input = { - FLOAT16(1111),FLOAT16(1112),FLOAT16(1121),FLOAT16(1122), - FLOAT16(1211),FLOAT16(1212),FLOAT16(1221),FLOAT16(1222), - FLOAT16(2111),FLOAT16(2112),FLOAT16(2121),FLOAT16(2122), - FLOAT16(2211),FLOAT16(2212),FLOAT16(2221),FLOAT16(2222) + ov::float16(1111),ov::float16(1112),ov::float16(1121),ov::float16(1122), + ov::float16(1211),ov::float16(1212),ov::float16(1221),ov::float16(1222), + ov::float16(2111),ov::float16(2112),ov::float16(2121),ov::float16(2122), + ov::float16(2211),ov::float16(2212),ov::float16(2221),ov::float16(2222) }; auto& engine = get_test_engine(); @@ -3225,7 +3225,7 @@ TEST(eltwise_gpu_f16, fs_b_yx_fsv32_basic) auto golden_outputs = golden_network.execute(); auto golden_output = golden_outputs.at("eltwise").get_memory(); - cldnn::mem_lock golden_ptr(golden_output, get_test_stream()); + cldnn::mem_lock golden_ptr(golden_output, get_test_stream()); // GOLDEN BFYX ELTWISE - END // FS_B_YX_FSV32 ELTWISE topology FSV32_topology; @@ -3242,7 +3242,7 @@ 
TEST(eltwise_gpu_f16, fs_b_yx_fsv32_basic) auto FSV32_outputs = FSV32_network.execute(); auto FSV32_output = FSV32_outputs.at("reorderOutput").get_memory(); - cldnn::mem_lock FSV32_ptr(FSV32_output, get_test_stream()); + cldnn::mem_lock FSV32_ptr(FSV32_output, get_test_stream()); // FS_B_YX_FSV32 ELTWISE - END ASSERT_EQ(golden_ptr.size(), FSV32_ptr.size()); @@ -3270,11 +3270,11 @@ TEST(eltwise_gpu_f16, fs_b_yx_fsv32_broadcast) tensor input1_tensor(input_b, input_f, input1_x, input1_y); tensor input2_tensor(input_b, input_f, input2_x, input2_y); - VVVVF input1_rnd = rg.generate_random_4d(input_b, input_f, input1_y, input1_x, 1, 3); - VVVVF input2_rnd = rg.generate_random_4d(input_b, input_f, input2_y, input2_x, 1, 3); + VVVVF input1_rnd = rg.generate_random_4d(input_b, input_f, input1_y, input1_x, 1, 3); + VVVVF input2_rnd = rg.generate_random_4d(input_b, input_f, input2_y, input2_x, 1, 3); - VF input1_flatten = flatten_4d(format::bfyx, input1_rnd); - VF input2_flatten = flatten_4d(format::bfyx, input2_rnd); + VF input1_flatten = flatten_4d(format::bfyx, input1_rnd); + VF input2_flatten = flatten_4d(format::bfyx, input2_rnd); auto input1 = engine.allocate_memory({ data_types::f16,format::bfyx, input1_tensor }); auto input2 = engine.allocate_memory({ data_types::f16,format::bfyx, input2_tensor }); @@ -3293,7 +3293,7 @@ TEST(eltwise_gpu_f16, fs_b_yx_fsv32_broadcast) auto ref_outputs = ref_network.execute(); auto ref_output = ref_outputs.at("eltwise").get_memory(); - cldnn::mem_lock ref_ptr(ref_output, get_test_stream()); + cldnn::mem_lock ref_ptr(ref_output, get_test_stream()); topology fsv32_topology; fsv32_topology.add(input_layout("input1", input1->get_layout())); @@ -3309,7 +3309,7 @@ TEST(eltwise_gpu_f16, fs_b_yx_fsv32_broadcast) auto fsv32_outputs = fsv32_network.execute(); auto fsv32_output = fsv32_outputs.at("reorder_bfyx").get_memory(); - cldnn::mem_lock fsv32_ptr(fsv32_output, get_test_stream()); + cldnn::mem_lock fsv32_ptr(fsv32_output, get_test_stream()); ASSERT_EQ(ref_ptr.size(), fsv32_ptr.size()); @@ -3335,11 +3335,11 @@ TEST(eltwise_gpu_f16, fs_b_yx_fsv32_broadcast_bfyx) tensor input1_tensor(input_b, input_f, input1_x, input1_y); tensor input2_tensor(1, input_f, 1, 1); - VVVVF input1_rnd = rg.generate_random_4d(input_b, input_f, input1_y, input1_x, 1, 3); - VVVVF input2_rnd = rg.generate_random_4d(1, input_f, 1, 1, 1, 3); + VVVVF input1_rnd = rg.generate_random_4d(input_b, input_f, input1_y, input1_x, 1, 3); + VVVVF input2_rnd = rg.generate_random_4d(1, input_f, 1, 1, 1, 3); - VF input1_flatten = flatten_4d(format::bfyx, input1_rnd); - VF input2_flatten = flatten_4d(format::bfyx, input2_rnd); + VF input1_flatten = flatten_4d(format::bfyx, input1_rnd); + VF input2_flatten = flatten_4d(format::bfyx, input2_rnd); auto input1 = engine.allocate_memory({ data_types::f16, format::bfyx, input1_tensor }); auto input2 = engine.allocate_memory({ data_types::f16, format::bfyx, input2_tensor }); @@ -3358,7 +3358,7 @@ TEST(eltwise_gpu_f16, fs_b_yx_fsv32_broadcast_bfyx) auto ref_outputs = ref_network.execute(); auto ref_output = ref_outputs.at("eltwise").get_memory(); - cldnn::mem_lock ref_ptr(ref_output, get_test_stream()); + cldnn::mem_lock ref_ptr(ref_output, get_test_stream()); topology fsv32_topology; fsv32_topology.add(input_layout("input1", input1->get_layout())); @@ -3373,7 +3373,7 @@ TEST(eltwise_gpu_f16, fs_b_yx_fsv32_broadcast_bfyx) auto fsv32_outputs = fsv32_network.execute(); auto fsv32_output = fsv32_outputs.at("reorder_bfyx").get_memory(); - cldnn::mem_lock 
fsv32_ptr(fsv32_output, get_test_stream()); + cldnn::mem_lock fsv32_ptr(fsv32_output, get_test_stream()); ASSERT_EQ(ref_ptr.size(), fsv32_ptr.size()); @@ -3441,8 +3441,8 @@ TEST(eltwise_gpu_f16, bfyx_and_fs_b_yx_fsv32_basic) // Inputs are 32x96x2x2 tests::random_generator rg(GET_SUITE_NAME); tensor input_tensor(32, 96, 20, 20); - VVVVF input_rnd = rg.generate_random_4d(32, 96, 20, 20, 1, 3); - VF fp16_bfyx_32x96x2x2_input = flatten_4d(format::bfyx, input_rnd); + VVVVF input_rnd = rg.generate_random_4d(32, 96, 20, 20, 1, 3); + VF fp16_bfyx_32x96x2x2_input = flatten_4d(format::bfyx, input_rnd); auto& engine = get_test_engine(); bool f16_supported = engine.get_device_info().supports_fp16; @@ -3469,7 +3469,7 @@ TEST(eltwise_gpu_f16, bfyx_and_fs_b_yx_fsv32_basic) auto golden_outputs = golden_network.execute(); auto golden_output = golden_outputs.at("eltwise").get_memory(); - cldnn::mem_lock golden_ptr(golden_output, get_test_stream()); + cldnn::mem_lock golden_ptr(golden_output, get_test_stream()); // GOLDEN BFYX ELTWISE - END // MIXED INPUT, FS_B_YX_FSV32 OUTPUT topology FS_B_YX_FSV32_OUTPUT_topology; @@ -3486,7 +3486,7 @@ TEST(eltwise_gpu_f16, bfyx_and_fs_b_yx_fsv32_basic) auto FS_B_YX_FSV32_OUTPUT_outputs = FS_B_YX_FSV32_OUTPUT_network.execute(); auto FS_B_YX_FSV32_OUTPUT_output = FS_B_YX_FSV32_OUTPUT_outputs.at("reorderOutput").get_memory(); - cldnn::mem_lock FS_B_YX_FSV32_OUTPUT_ptr(FS_B_YX_FSV32_OUTPUT_output, get_test_stream()); + cldnn::mem_lock FS_B_YX_FSV32_OUTPUT_ptr(FS_B_YX_FSV32_OUTPUT_output, get_test_stream()); // MIXED INPUT, FS_B_YX_FSV32 OUTPUT - END // MIXED INPUT, BYXF OUTPUT topology BYXF_OUTPUT_topology; @@ -3503,7 +3503,7 @@ TEST(eltwise_gpu_f16, bfyx_and_fs_b_yx_fsv32_basic) auto BYXF_OUTPUT_outputs = BYXF_OUTPUT_network.execute(); auto BYXF_OUTPUT_output = BYXF_OUTPUT_outputs.at("reorderOutput").get_memory(); - cldnn::mem_lock BYXF_OUTPUT_ptr(BYXF_OUTPUT_output, get_test_stream()); + cldnn::mem_lock BYXF_OUTPUT_ptr(BYXF_OUTPUT_output, get_test_stream()); // MIXED INPUT, BYXF OUTPUT - END ASSERT_EQ(golden_ptr.size(), FS_B_YX_FSV32_OUTPUT_ptr.size()); @@ -3521,8 +3521,8 @@ TEST(eltwise_gpu_f16, bfyx_and_fs_b_yx_fsv32_output_padding) { // Inputs are 32x96x2x2 tests::random_generator rg(GET_SUITE_NAME); tensor input_tensor(32, 96, 20, 20); - VVVVF input_rnd = rg.generate_random_4d(32, 96, 20, 20, 1, 3); - VF fp16_bfyx_32x96x2x2_input = flatten_4d(format::bfyx, input_rnd); + VVVVF input_rnd = rg.generate_random_4d(32, 96, 20, 20, 1, 3); + VF fp16_bfyx_32x96x2x2_input = flatten_4d(format::bfyx, input_rnd); auto& engine = get_test_engine(); bool f16_supported = engine.get_device_info().supports_fp16; @@ -3549,7 +3549,7 @@ TEST(eltwise_gpu_f16, bfyx_and_fs_b_yx_fsv32_output_padding) { auto golden_outputs = golden_network.execute(); auto golden_output = golden_outputs.at("eltwise").get_memory(); - cldnn::mem_lock golden_ptr(golden_output, get_test_stream()); + cldnn::mem_lock golden_ptr(golden_output, get_test_stream()); // GOLDEN BFYX ELTWISE - END // MIXED INPUT, FS_B_YX_FSV32 OUTPUT topology FS_B_YX_FSV32_OUTPUT_topology; @@ -3567,7 +3567,7 @@ TEST(eltwise_gpu_f16, bfyx_and_fs_b_yx_fsv32_output_padding) { auto FS_B_YX_FSV32_OUTPUT_outputs = FS_B_YX_FSV32_OUTPUT_network.execute(); auto FS_B_YX_FSV32_OUTPUT_output = FS_B_YX_FSV32_OUTPUT_outputs.at("reorderOutput").get_memory(); - cldnn::mem_lock FS_B_YX_FSV32_OUTPUT_ptr(FS_B_YX_FSV32_OUTPUT_output, get_test_stream()); + cldnn::mem_lock FS_B_YX_FSV32_OUTPUT_ptr(FS_B_YX_FSV32_OUTPUT_output, get_test_stream()); // MIXED INPUT, 
FS_B_YX_FSV32 OUTPUT - END // MIXED INPUT, BYXF OUTPUT topology BYXF_OUTPUT_topology; @@ -3585,7 +3585,7 @@ TEST(eltwise_gpu_f16, bfyx_and_fs_b_yx_fsv32_output_padding) { auto BYXF_OUTPUT_outputs = BYXF_OUTPUT_network.execute(); auto BYXF_OUTPUT_output = BYXF_OUTPUT_outputs.at("reorderOutput").get_memory(); - cldnn::mem_lock BYXF_OUTPUT_ptr(BYXF_OUTPUT_output, get_test_stream()); + cldnn::mem_lock BYXF_OUTPUT_ptr(BYXF_OUTPUT_output, get_test_stream()); // MIXED INPUT, BYXF OUTPUT - END ASSERT_EQ(golden_ptr.size(), FS_B_YX_FSV32_OUTPUT_ptr.size()); @@ -3604,8 +3604,8 @@ TEST(eltwise_gpu_f16, bfyx_and_fs_b_yx_fsv32_input_padding) // Inputs are 32x96x20x20 tests::random_generator rg(GET_SUITE_NAME); tensor input_tensor(32, 96, 20, 20); - VVVVF input_rnd = rg.generate_random_4d(32, 96, 20, 20, 1, 3); - VF fp16_bfyx_32x96x2x2_input = flatten_4d(format::bfyx, input_rnd); + VVVVF input_rnd = rg.generate_random_4d(32, 96, 20, 20, 1, 3); + VF fp16_bfyx_32x96x2x2_input = flatten_4d(format::bfyx, input_rnd); auto& engine = get_test_engine(); bool f16_supported = engine.get_device_info().supports_fp16; @@ -3634,7 +3634,7 @@ TEST(eltwise_gpu_f16, bfyx_and_fs_b_yx_fsv32_input_padding) auto golden_outputs = golden_network.execute(); auto golden_output = golden_outputs.at("eltwise").get_memory(); - cldnn::mem_lock golden_ptr(golden_output, get_test_stream()); + cldnn::mem_lock golden_ptr(golden_output, get_test_stream()); // GOLDEN BFYX ELTWISE - END // MIXED INPUT, FS_B_YX_FSV32 OUTPUT topology FS_B_YX_FSV32_OUTPUT_topology; @@ -3651,7 +3651,7 @@ TEST(eltwise_gpu_f16, bfyx_and_fs_b_yx_fsv32_input_padding) auto FS_B_YX_FSV32_OUTPUT_outputs = FS_B_YX_FSV32_OUTPUT_network.execute(); auto FS_B_YX_FSV32_OUTPUT_output = FS_B_YX_FSV32_OUTPUT_outputs.at("reorderOutput").get_memory(); - cldnn::mem_lock FS_B_YX_FSV32_OUTPUT_ptr(FS_B_YX_FSV32_OUTPUT_output, get_test_stream()); + cldnn::mem_lock FS_B_YX_FSV32_OUTPUT_ptr(FS_B_YX_FSV32_OUTPUT_output, get_test_stream()); // MIXED INPUT, FS_B_YX_FSV32 OUTPUT - END // MIXED INPUT, BYXF OUTPUT topology BYXF_OUTPUT_topology; @@ -3668,7 +3668,7 @@ TEST(eltwise_gpu_f16, bfyx_and_fs_b_yx_fsv32_input_padding) auto BYXF_OUTPUT_outputs = BYXF_OUTPUT_network.execute(); auto BYXF_OUTPUT_output = BYXF_OUTPUT_outputs.at("reorderOutput").get_memory(); - cldnn::mem_lock BYXF_OUTPUT_ptr(BYXF_OUTPUT_output, get_test_stream()); + cldnn::mem_lock BYXF_OUTPUT_ptr(BYXF_OUTPUT_output, get_test_stream()); // MIXED INPUT, BYXF OUTPUT - END ASSERT_EQ(golden_ptr.size(), FS_B_YX_FSV32_OUTPUT_ptr.size()); @@ -3899,7 +3899,7 @@ TEST(DISABLED_eltwise_gpu, generic_random) { for (int output_padding_x = 0; output_padding_x <= 1; ++output_padding_x) { generic_eltwise_test(test_input_fmt, input_b, input_f, input_yx.first, input_yx.second, mode, relu_activated, slope, input_padding_y, input_padding_x, output_padding_y, output_padding_x); if (!f16_supported) continue; - generic_eltwise_test(test_input_fmt, input_b, input_f, input_yx.first, input_yx.second, mode, relu_activated, (FLOAT16)slope, input_padding_y, input_padding_x, output_padding_y, output_padding_x); + generic_eltwise_test(test_input_fmt, input_b, input_f, input_yx.first, input_yx.second, mode, relu_activated, (ov::float16)slope, input_padding_y, input_padding_x, output_padding_y, output_padding_x); } } } @@ -3960,7 +3960,7 @@ struct eltwise_same_input_test : testing::TestWithParam(mem, -127, 127, 2); break; case data_types::f16: - fill_random_typed(mem, -127, 127, 2); + fill_random_typed(mem, -127, 127, 2); break; case data_types::i8: 
fill_random_typed(mem, -127, 127, 1); @@ -4031,7 +4031,7 @@ struct eltwise_same_input_test : testing::TestWithParam(output, input); } else if (params.input_type == data_types::f16) { - compare_outputs(output, input); + compare_outputs(output, input); } else if (params.input_type == data_types::i8) { compare_outputs(output, input); } else if (params.input_type == data_types::u8) { @@ -4536,7 +4536,7 @@ struct eltwise_random_test : testing::TestWithParam } static std::string PrintToString(const eltwise_random_test_params& params) { - std::string res = " data (" + cldnn::data_type_traits::name(params.input_type) + "), "; + std::string res = " data (" + ov::element::Type(params.input_type).get_type_name() + "), "; res += " format (" + format::traits(params.in_format).str + ") input1 : "; res += params.first_input_size.to_string() + " / input2 : "; res += params.second_input_size.to_string() + "\n"; @@ -4574,7 +4574,7 @@ struct eltwise_random_test : testing::TestWithParam fill_random_typed(mem, -127, 127, 2); break; case data_types::f16: - fill_random_typed(mem, -127, 127, 2); + fill_random_typed(mem, -127, 127, 2); break; case data_types::i8: fill_random_typed(mem, -127, 127, 1); @@ -4670,7 +4670,7 @@ struct eltwise_random_test : testing::TestWithParam if (params.input_type == data_types::f32) { compare_outputs(output, output_opt); } else if (params.input_type == data_types::f16) { - compare_outputs(output, output_opt); + compare_outputs(output, output_opt); } else if (params.input_type == data_types::i8) { compare_outputs(output, output_opt); } else if (params.input_type == data_types::u8) { diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/embedding_bag_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/embedding_bag_gpu_test.cpp index 9220efbdd1bc3b..05eba5f5ea90b2 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/embedding_bag_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/embedding_bag_gpu_test.cpp @@ -27,11 +27,11 @@ TEST(embedding_bag_fp16_gpu, packed_sum_basic) { tensor output_shape = {3, 2, 1, 1}; set_values(emb_table, { - FLOAT16(-0.2f), FLOAT16(-0.6f), - FLOAT16(-0.1f), FLOAT16(-0.4f), - FLOAT16(-1.9f), FLOAT16(-1.8f), - FLOAT16(-1.0f), FLOAT16(1.5f), - FLOAT16(0.8f), FLOAT16(-0.7f) + ov::float16(-0.2f), ov::float16(-0.6f), + ov::float16(-0.1f), ov::float16(-0.4f), + ov::float16(-1.9f), ov::float16(-1.8f), + ov::float16(-1.0f), ov::float16(1.5f), + ov::float16(0.8f), ov::float16(-0.7f) }); set_values(indices, { 0, 2, @@ -39,9 +39,9 @@ TEST(embedding_bag_fp16_gpu, packed_sum_basic) { 3, 4 }); set_values(per_sample_weights, { - FLOAT16(0.5f), FLOAT16(0.5f), - FLOAT16(0.5f), FLOAT16(0.5f), - FLOAT16(0.5f), FLOAT16(0.5f) + ov::float16(0.5f), ov::float16(0.5f), + ov::float16(0.5f), ov::float16(0.5f), + ov::float16(0.5f), ov::float16(0.5f) }); auto type = embedding_bag::packed_sum; @@ -86,11 +86,11 @@ TEST(embedding_bag_fp16_gpu, packed_sum_basic_without_weights) { tensor output_shape = {3, 2, 1, 1}; set_values(emb_table, { - FLOAT16(-0.2f), FLOAT16(-0.6f), - FLOAT16(-0.1f), FLOAT16(-0.4f), - FLOAT16(-1.9f), FLOAT16(-1.8f), - FLOAT16(-1.0f), FLOAT16(1.5f), - FLOAT16(0.8f), FLOAT16(-0.7f) + ov::float16(-0.2f), ov::float16(-0.6f), + ov::float16(-0.1f), ov::float16(-0.4f), + ov::float16(-1.9f), ov::float16(-1.8f), + ov::float16(-1.0f), ov::float16(1.5f), + ov::float16(0.8f), ov::float16(-0.7f) }); set_values(indices, { 0, 2, @@ -161,11 +161,11 @@ TEST(embedding_bag_fp16_gpu, packed_sum_dim2) { * ] */ set_values(emb_table, { - FLOAT16(-0.2f), FLOAT16( 
1.3f), FLOAT16( 0.5f), FLOAT16(-0.3f), - FLOAT16( 2.3f), FLOAT16( 1.3f), FLOAT16(-0.4f), FLOAT16(-0.7f), - FLOAT16( 3.3f), FLOAT16(-4.1f), FLOAT16( 2.1f), FLOAT16( 0.8f), - FLOAT16( 3.5f), FLOAT16(-5.7f), FLOAT16(-0.1f), FLOAT16( 0.3f), - FLOAT16( 0.3f), FLOAT16( 1.0f), FLOAT16( 2.3f), FLOAT16(-4.1f) + ov::float16(-0.2f), ov::float16( 1.3f), ov::float16( 0.5f), ov::float16(-0.3f), + ov::float16( 2.3f), ov::float16( 1.3f), ov::float16(-0.4f), ov::float16(-0.7f), + ov::float16( 3.3f), ov::float16(-4.1f), ov::float16( 2.1f), ov::float16( 0.8f), + ov::float16( 3.5f), ov::float16(-5.7f), ov::float16(-0.1f), ov::float16( 0.3f), + ov::float16( 0.3f), ov::float16( 1.0f), ov::float16( 2.3f), ov::float16(-4.1f) }); set_values(indices, { 0, 2, @@ -173,9 +173,9 @@ TEST(embedding_bag_fp16_gpu, packed_sum_dim2) { 3, 4 }); set_values(per_sample_weights, { - FLOAT16(0.5f), FLOAT16(0.5f), - FLOAT16(0.5f), FLOAT16(0.5f), - FLOAT16(0.5f), FLOAT16(0.5f) + ov::float16(0.5f), ov::float16(0.5f), + ov::float16(0.5f), ov::float16(0.5f), + ov::float16(0.5f), ov::float16(0.5f) }); auto type = embedding_bag::packed_sum; @@ -279,16 +279,16 @@ TEST(embedding_bag_fp16_gpu, packed_sum_dim3) { * ] */ set_values(emb_table, { - FLOAT16(-0.2f), FLOAT16( 1.3f), FLOAT16( 0.5f), FLOAT16(-0.3f), FLOAT16( 0.4f), FLOAT16(-0.4f), - FLOAT16(-0.1f), FLOAT16( 1.0f), FLOAT16( 2.1f), FLOAT16( 0.7f), FLOAT16(-0.2f), FLOAT16(-0.7f), - FLOAT16( 1.9f), FLOAT16(-2.4f), FLOAT16( 3.4f), FLOAT16(-0.7f), FLOAT16(-0.4f), FLOAT16( 0.5f), - FLOAT16( 2.3f), FLOAT16( 1.3f), FLOAT16(-0.4f), FLOAT16(-0.7f), FLOAT16( 1.8f), FLOAT16(-0.9f), - FLOAT16( 1.5f), FLOAT16(-2.4f), FLOAT16( 4.2f), FLOAT16( 3.2f), FLOAT16(-0.6f), FLOAT16( 0.9f), - FLOAT16( 3.3f), FLOAT16(-4.1f), FLOAT16( 2.1f), FLOAT16( 0.8f), FLOAT16( 5.2f), FLOAT16(-2.5f), - FLOAT16( 0.8f), FLOAT16(-1.9f), FLOAT16( 0.7f), FLOAT16( 3.4f), FLOAT16(-3.3f), FLOAT16( 0.1f), - FLOAT16( 3.5f), FLOAT16(-5.7f), FLOAT16(-0.1f), FLOAT16( 0.3f), FLOAT16( 0.4f), FLOAT16( 3.3f), - FLOAT16( 6.1f), FLOAT16( 8.3f), FLOAT16( 0.4f), FLOAT16(-4.4f), FLOAT16(-5.2f), FLOAT16( 0.9f), - FLOAT16( 0.3f), FLOAT16( 1.0f), FLOAT16( 2.3f), FLOAT16(-4.1f), FLOAT16( 2.0f), FLOAT16(-5.7f) + ov::float16(-0.2f), ov::float16( 1.3f), ov::float16( 0.5f), ov::float16(-0.3f), ov::float16( 0.4f), ov::float16(-0.4f), + ov::float16(-0.1f), ov::float16( 1.0f), ov::float16( 2.1f), ov::float16( 0.7f), ov::float16(-0.2f), ov::float16(-0.7f), + ov::float16( 1.9f), ov::float16(-2.4f), ov::float16( 3.4f), ov::float16(-0.7f), ov::float16(-0.4f), ov::float16( 0.5f), + ov::float16( 2.3f), ov::float16( 1.3f), ov::float16(-0.4f), ov::float16(-0.7f), ov::float16( 1.8f), ov::float16(-0.9f), + ov::float16( 1.5f), ov::float16(-2.4f), ov::float16( 4.2f), ov::float16( 3.2f), ov::float16(-0.6f), ov::float16( 0.9f), + ov::float16( 3.3f), ov::float16(-4.1f), ov::float16( 2.1f), ov::float16( 0.8f), ov::float16( 5.2f), ov::float16(-2.5f), + ov::float16( 0.8f), ov::float16(-1.9f), ov::float16( 0.7f), ov::float16( 3.4f), ov::float16(-3.3f), ov::float16( 0.1f), + ov::float16( 3.5f), ov::float16(-5.7f), ov::float16(-0.1f), ov::float16( 0.3f), ov::float16( 0.4f), ov::float16( 3.3f), + ov::float16( 6.1f), ov::float16( 8.3f), ov::float16( 0.4f), ov::float16(-4.4f), ov::float16(-5.2f), ov::float16( 0.9f), + ov::float16( 0.3f), ov::float16( 1.0f), ov::float16( 2.3f), ov::float16(-4.1f), ov::float16( 2.0f), ov::float16(-5.7f) }); set_values(indices, { 0, 2, @@ -296,9 +296,9 @@ TEST(embedding_bag_fp16_gpu, packed_sum_dim3) { 3, 4 }); set_values(per_sample_weights, { - 
FLOAT16(0.5f), FLOAT16(0.5f), - FLOAT16(0.5f), FLOAT16(0.5f), - FLOAT16(0.5f), FLOAT16(0.5f) + ov::float16(0.5f), ov::float16(0.5f), + ov::float16(0.5f), ov::float16(0.5f), + ov::float16(0.5f), ov::float16(0.5f) }); auto type = embedding_bag::packed_sum; @@ -379,11 +379,11 @@ TEST(embedding_bag_fp16_gpu, offsets_sum_basic) { tensor output_shape = {3, 2, 1, 1}; set_values(emb_table, { - FLOAT16(-0.2f), FLOAT16(-0.6f), - FLOAT16(-0.1f), FLOAT16(-0.4f), - FLOAT16(-1.9f), FLOAT16(-1.8f), - FLOAT16(-1.0f), FLOAT16(1.5f), - FLOAT16(0.8f), FLOAT16(-0.7f) + ov::float16(-0.2f), ov::float16(-0.6f), + ov::float16(-0.1f), ov::float16(-0.4f), + ov::float16(-1.9f), ov::float16(-1.8f), + ov::float16(-1.0f), ov::float16(1.5f), + ov::float16(0.8f), ov::float16(-0.7f) }); set_values(indices, { 0, 2, 3, 4 @@ -392,7 +392,7 @@ TEST(embedding_bag_fp16_gpu, offsets_sum_basic) { 0, 2, 2 }); set_values(per_sample_weights, { - FLOAT16(0.5f), FLOAT16(0.5f), FLOAT16(0.5f), FLOAT16(0.5f) + ov::float16(0.5f), ov::float16(0.5f), ov::float16(0.5f), ov::float16(0.5f) }); auto type = embedding_bag::offsets_sum; @@ -443,11 +443,11 @@ TEST(embedding_bag_fp16_gpu, offsets_sum_basic_first_empty) { tensor output_shape = {3, 2, 1, 1}; set_values(emb_table, { - FLOAT16(-0.2f), FLOAT16(-0.6f), - FLOAT16(-0.1f), FLOAT16(-0.4f), - FLOAT16(-1.9f), FLOAT16(-1.8f), - FLOAT16(-1.0f), FLOAT16(1.5f), - FLOAT16(0.8f), FLOAT16(-0.7f) + ov::float16(-0.2f), ov::float16(-0.6f), + ov::float16(-0.1f), ov::float16(-0.4f), + ov::float16(-1.9f), ov::float16(-1.8f), + ov::float16(-1.0f), ov::float16(1.5f), + ov::float16(0.8f), ov::float16(-0.7f) }); set_values(indices, { 0, 2, 3, 4 @@ -456,7 +456,7 @@ TEST(embedding_bag_fp16_gpu, offsets_sum_basic_first_empty) { 0, 0, 2 }); set_values(per_sample_weights, { - FLOAT16(0.5f), FLOAT16(0.5f), FLOAT16(0.5f), FLOAT16(0.5f) + ov::float16(0.5f), ov::float16(0.5f), ov::float16(0.5f), ov::float16(0.5f) }); auto type = embedding_bag::offsets_sum; @@ -508,11 +508,11 @@ TEST(embedding_bag_fp16_gpu, offsets_sum_basic_last_empty) { tensor output_shape = {3, 2, 1, 1}; set_values(emb_table, { - FLOAT16(-0.2f), FLOAT16(-0.6f), - FLOAT16(-0.1f), FLOAT16(-0.4f), - FLOAT16(-1.9f), FLOAT16(-1.8f), - FLOAT16(-1.0f), FLOAT16(1.5f), - FLOAT16(0.8f), FLOAT16(-0.7f) + ov::float16(-0.2f), ov::float16(-0.6f), + ov::float16(-0.1f), ov::float16(-0.4f), + ov::float16(-1.9f), ov::float16(-1.8f), + ov::float16(-1.0f), ov::float16(1.5f), + ov::float16(0.8f), ov::float16(-0.7f) }); set_values(indices, { 0, 2, 3, 4 @@ -521,7 +521,7 @@ TEST(embedding_bag_fp16_gpu, offsets_sum_basic_last_empty) { 0, 2, 4 }); set_values(per_sample_weights, { - FLOAT16(0.5f), FLOAT16(0.5f), FLOAT16(0.5f), FLOAT16(0.5f) + ov::float16(0.5f), ov::float16(0.5f), ov::float16(0.5f), ov::float16(0.5f) }); auto type = embedding_bag::offsets_sum; @@ -570,11 +570,11 @@ TEST(embedding_bag_fp16_gpu, offsets_sum_without_weights_and_def_index) { tensor output_shape = {3, 2, 1, 1}; set_values(emb_table, { - FLOAT16(-0.2f), FLOAT16(-0.6f), - FLOAT16(-0.1f), FLOAT16(-0.4f), - FLOAT16(-1.9f), FLOAT16(-1.8f), - FLOAT16(-1.0f), FLOAT16(1.5f), - FLOAT16(0.8f), FLOAT16(-0.7f) + ov::float16(-0.2f), ov::float16(-0.6f), + ov::float16(-0.1f), ov::float16(-0.4f), + ov::float16(-1.9f), ov::float16(-1.8f), + ov::float16(-1.0f), ov::float16(1.5f), + ov::float16(0.8f), ov::float16(-0.7f) }); set_values(indices, { 0, 2, 3, 4 @@ -675,16 +675,16 @@ TEST(embedding_bag_fp16_gpu, offsets_sum_dim3) { * ] */ set_values(emb_table, { - FLOAT16(-0.2f), FLOAT16( 1.3f), FLOAT16( 0.5f), 
FLOAT16(-0.3f), FLOAT16( 0.4f), FLOAT16(-0.4f), - FLOAT16(-0.1f), FLOAT16( 1.0f), FLOAT16( 2.1f), FLOAT16( 0.7f), FLOAT16(-0.2f), FLOAT16(-0.7f), - FLOAT16( 1.9f), FLOAT16(-2.4f), FLOAT16( 3.4f), FLOAT16(-0.7f), FLOAT16(-0.4f), FLOAT16( 0.5f), - FLOAT16( 2.3f), FLOAT16( 1.3f), FLOAT16(-0.4f), FLOAT16(-0.7f), FLOAT16( 1.8f), FLOAT16(-0.9f), - FLOAT16( 1.5f), FLOAT16(-2.4f), FLOAT16( 4.2f), FLOAT16( 3.2f), FLOAT16(-0.6f), FLOAT16( 0.9f), - FLOAT16( 3.3f), FLOAT16(-4.1f), FLOAT16( 2.1f), FLOAT16( 0.8f), FLOAT16( 5.2f), FLOAT16(-2.5f), - FLOAT16( 0.8f), FLOAT16(-1.9f), FLOAT16( 0.7f), FLOAT16( 3.4f), FLOAT16(-3.3f), FLOAT16( 0.1f), - FLOAT16( 3.5f), FLOAT16(-5.7f), FLOAT16(-0.1f), FLOAT16( 0.3f), FLOAT16( 0.4f), FLOAT16( 3.3f), - FLOAT16( 6.1f), FLOAT16( 8.3f), FLOAT16( 0.4f), FLOAT16(-4.4f), FLOAT16(-5.2f), FLOAT16( 0.9f), - FLOAT16( 0.3f), FLOAT16( 1.0f), FLOAT16( 2.3f), FLOAT16(-4.1f), FLOAT16( 2.0f), FLOAT16(-5.7f) + ov::float16(-0.2f), ov::float16( 1.3f), ov::float16( 0.5f), ov::float16(-0.3f), ov::float16( 0.4f), ov::float16(-0.4f), + ov::float16(-0.1f), ov::float16( 1.0f), ov::float16( 2.1f), ov::float16( 0.7f), ov::float16(-0.2f), ov::float16(-0.7f), + ov::float16( 1.9f), ov::float16(-2.4f), ov::float16( 3.4f), ov::float16(-0.7f), ov::float16(-0.4f), ov::float16( 0.5f), + ov::float16( 2.3f), ov::float16( 1.3f), ov::float16(-0.4f), ov::float16(-0.7f), ov::float16( 1.8f), ov::float16(-0.9f), + ov::float16( 1.5f), ov::float16(-2.4f), ov::float16( 4.2f), ov::float16( 3.2f), ov::float16(-0.6f), ov::float16( 0.9f), + ov::float16( 3.3f), ov::float16(-4.1f), ov::float16( 2.1f), ov::float16( 0.8f), ov::float16( 5.2f), ov::float16(-2.5f), + ov::float16( 0.8f), ov::float16(-1.9f), ov::float16( 0.7f), ov::float16( 3.4f), ov::float16(-3.3f), ov::float16( 0.1f), + ov::float16( 3.5f), ov::float16(-5.7f), ov::float16(-0.1f), ov::float16( 0.3f), ov::float16( 0.4f), ov::float16( 3.3f), + ov::float16( 6.1f), ov::float16( 8.3f), ov::float16( 0.4f), ov::float16(-4.4f), ov::float16(-5.2f), ov::float16( 0.9f), + ov::float16( 0.3f), ov::float16( 1.0f), ov::float16( 2.3f), ov::float16(-4.1f), ov::float16( 2.0f), ov::float16(-5.7f) }); set_values(indices, { 0, 2, 3, 4 @@ -693,8 +693,8 @@ TEST(embedding_bag_fp16_gpu, offsets_sum_dim3) { 0, 2, 2 }); set_values(per_sample_weights, { - FLOAT16(0.5f), FLOAT16(0.5f), - FLOAT16(0.5f), FLOAT16(0.5f) + ov::float16(0.5f), ov::float16(0.5f), + ov::float16(0.5f), ov::float16(0.5f) }); auto type = embedding_bag::offsets_sum; @@ -777,11 +777,11 @@ TEST(embedding_bag_fp16_gpu, segments_sum_basic) { tensor output_shape = {3, 2, 1, 1}; set_values(emb_table, { - FLOAT16(-0.2f), FLOAT16(-0.6f), - FLOAT16(-0.1f), FLOAT16(-0.4f), - FLOAT16(-1.9f), FLOAT16(-1.8f), - FLOAT16(-1.0f), FLOAT16(1.5f), - FLOAT16(0.8f), FLOAT16(-0.7f) + ov::float16(-0.2f), ov::float16(-0.6f), + ov::float16(-0.1f), ov::float16(-0.4f), + ov::float16(-1.9f), ov::float16(-1.8f), + ov::float16(-1.0f), ov::float16(1.5f), + ov::float16(0.8f), ov::float16(-0.7f) }); set_values(indices, { 0, 2, 3, 4 @@ -790,7 +790,7 @@ TEST(embedding_bag_fp16_gpu, segments_sum_basic) { 0, 0, 2, 2 }); set_values(per_sample_weights, { - FLOAT16(0.5f), FLOAT16(0.5f), FLOAT16(0.5f), FLOAT16(0.5f) + ov::float16(0.5f), ov::float16(0.5f), ov::float16(0.5f), ov::float16(0.5f) }); auto type = embedding_bag::segments_sum; @@ -842,11 +842,11 @@ TEST(embedding_bag_fp16_gpu, segments_sum_basic_first_empty) { tensor output_shape = {3, 2, 1, 1}; set_values(emb_table, { - FLOAT16(-0.2f), FLOAT16(-0.6f), - FLOAT16(-0.1f), FLOAT16(-0.4f), - 
FLOAT16(-1.9f), FLOAT16(-1.8f), - FLOAT16(-1.0f), FLOAT16(1.5f), - FLOAT16(0.8f), FLOAT16(-0.7f) + ov::float16(-0.2f), ov::float16(-0.6f), + ov::float16(-0.1f), ov::float16(-0.4f), + ov::float16(-1.9f), ov::float16(-1.8f), + ov::float16(-1.0f), ov::float16(1.5f), + ov::float16(0.8f), ov::float16(-0.7f) }); set_values(indices, { 0, 2, 3, 4 @@ -855,7 +855,7 @@ TEST(embedding_bag_fp16_gpu, segments_sum_basic_first_empty) { 1, 1, 2, 2 }); set_values(per_sample_weights, { - FLOAT16(0.5f), FLOAT16(0.5f), FLOAT16(0.5f), FLOAT16(0.5f) + ov::float16(0.5f), ov::float16(0.5f), ov::float16(0.5f), ov::float16(0.5f) }); auto type = embedding_bag::segments_sum; @@ -907,11 +907,11 @@ TEST(embedding_bag_fp16_gpu, segments_sum_basic_last_empty) { tensor output_shape = {3, 2, 1, 1}; set_values(emb_table, { - FLOAT16(-0.2f), FLOAT16(-0.6f), - FLOAT16(-0.1f), FLOAT16(-0.4f), - FLOAT16(-1.9f), FLOAT16(-1.8f), - FLOAT16(-1.0f), FLOAT16(1.5f), - FLOAT16(0.8f), FLOAT16(-0.7f) + ov::float16(-0.2f), ov::float16(-0.6f), + ov::float16(-0.1f), ov::float16(-0.4f), + ov::float16(-1.9f), ov::float16(-1.8f), + ov::float16(-1.0f), ov::float16(1.5f), + ov::float16(0.8f), ov::float16(-0.7f) }); set_values(indices, { 0, 2, 3, 4 @@ -920,7 +920,7 @@ TEST(embedding_bag_fp16_gpu, segments_sum_basic_last_empty) { 0, 0, 1, 1 }); set_values(per_sample_weights, { - FLOAT16(0.5f), FLOAT16(0.5f), FLOAT16(0.5f), FLOAT16(0.5f) + ov::float16(0.5f), ov::float16(0.5f), ov::float16(0.5f), ov::float16(0.5f) }); auto type = embedding_bag::segments_sum; @@ -969,11 +969,11 @@ TEST(embedding_bag_fp16_gpu, segments_sum_without_weights_and_def_index) { tensor output_shape = {3, 2, 1, 1}; set_values(emb_table, { - FLOAT16(-0.2f), FLOAT16(-0.6f), - FLOAT16(-0.1f), FLOAT16(-0.4f), - FLOAT16(-1.9f), FLOAT16(-1.8f), - FLOAT16(-1.0f), FLOAT16(1.5f), - FLOAT16(0.8f), FLOAT16(-0.7f) + ov::float16(-0.2f), ov::float16(-0.6f), + ov::float16(-0.1f), ov::float16(-0.4f), + ov::float16(-1.9f), ov::float16(-1.8f), + ov::float16(-1.0f), ov::float16(1.5f), + ov::float16(0.8f), ov::float16(-0.7f) }); set_values(indices, { 0, 2, 3, 4 @@ -1074,16 +1074,16 @@ TEST(embedding_bag_fp16_gpu, segments_sum_dim3) { * ] */ set_values(emb_table, { - FLOAT16(-0.2f), FLOAT16( 1.3f), FLOAT16( 0.5f), FLOAT16(-0.3f), FLOAT16( 0.4f), FLOAT16(-0.4f), - FLOAT16(-0.1f), FLOAT16( 1.0f), FLOAT16( 2.1f), FLOAT16( 0.7f), FLOAT16(-0.2f), FLOAT16(-0.7f), - FLOAT16( 1.9f), FLOAT16(-2.4f), FLOAT16( 3.4f), FLOAT16(-0.7f), FLOAT16(-0.4f), FLOAT16( 0.5f), - FLOAT16( 2.3f), FLOAT16( 1.3f), FLOAT16(-0.4f), FLOAT16(-0.7f), FLOAT16( 1.8f), FLOAT16(-0.9f), - FLOAT16( 1.5f), FLOAT16(-2.4f), FLOAT16( 4.2f), FLOAT16( 3.2f), FLOAT16(-0.6f), FLOAT16( 0.9f), - FLOAT16( 3.3f), FLOAT16(-4.1f), FLOAT16( 2.1f), FLOAT16( 0.8f), FLOAT16( 5.2f), FLOAT16(-2.5f), - FLOAT16( 0.8f), FLOAT16(-1.9f), FLOAT16( 0.7f), FLOAT16( 3.4f), FLOAT16(-3.3f), FLOAT16( 0.1f), - FLOAT16( 3.5f), FLOAT16(-5.7f), FLOAT16(-0.1f), FLOAT16( 0.3f), FLOAT16( 0.4f), FLOAT16( 3.3f), - FLOAT16( 6.1f), FLOAT16( 8.3f), FLOAT16( 0.4f), FLOAT16(-4.4f), FLOAT16(-5.2f), FLOAT16( 0.9f), - FLOAT16( 0.3f), FLOAT16( 1.0f), FLOAT16( 2.3f), FLOAT16(-4.1f), FLOAT16( 2.0f), FLOAT16(-5.7f) + ov::float16(-0.2f), ov::float16( 1.3f), ov::float16( 0.5f), ov::float16(-0.3f), ov::float16( 0.4f), ov::float16(-0.4f), + ov::float16(-0.1f), ov::float16( 1.0f), ov::float16( 2.1f), ov::float16( 0.7f), ov::float16(-0.2f), ov::float16(-0.7f), + ov::float16( 1.9f), ov::float16(-2.4f), ov::float16( 3.4f), ov::float16(-0.7f), ov::float16(-0.4f), ov::float16( 0.5f), + 
ov::float16( 2.3f), ov::float16( 1.3f), ov::float16(-0.4f), ov::float16(-0.7f), ov::float16( 1.8f), ov::float16(-0.9f), + ov::float16( 1.5f), ov::float16(-2.4f), ov::float16( 4.2f), ov::float16( 3.2f), ov::float16(-0.6f), ov::float16( 0.9f), + ov::float16( 3.3f), ov::float16(-4.1f), ov::float16( 2.1f), ov::float16( 0.8f), ov::float16( 5.2f), ov::float16(-2.5f), + ov::float16( 0.8f), ov::float16(-1.9f), ov::float16( 0.7f), ov::float16( 3.4f), ov::float16(-3.3f), ov::float16( 0.1f), + ov::float16( 3.5f), ov::float16(-5.7f), ov::float16(-0.1f), ov::float16( 0.3f), ov::float16( 0.4f), ov::float16( 3.3f), + ov::float16( 6.1f), ov::float16( 8.3f), ov::float16( 0.4f), ov::float16(-4.4f), ov::float16(-5.2f), ov::float16( 0.9f), + ov::float16( 0.3f), ov::float16( 1.0f), ov::float16( 2.3f), ov::float16(-4.1f), ov::float16( 2.0f), ov::float16(-5.7f) }); set_values(indices, { 0, 2, 3, 4 @@ -1092,8 +1092,8 @@ TEST(embedding_bag_fp16_gpu, segments_sum_dim3) { 0, 0, 2, 2 }); set_values(per_sample_weights, { - FLOAT16(0.5f), FLOAT16(0.5f), - FLOAT16(0.5f), FLOAT16(0.5f) + ov::float16(0.5f), ov::float16(0.5f), + ov::float16(0.5f), ov::float16(0.5f) }); auto type = embedding_bag::segments_sum; diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/experimental_detectron_detection_output_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/experimental_detectron_detection_output_gpu_test.cpp index 46c13f3ac9a8f9..877d0e621b09bc 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/experimental_detectron_detection_output_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/experimental_detectron_detection_output_gpu_test.cpp @@ -28,7 +28,7 @@ float getError() { } template <> -float getError() { +float getError() { return 0.2; } @@ -70,7 +70,7 @@ struct experimental_detectron_detection_output_test format::type fmt; bool is_caching_test; std::tie(param, fmt, is_caching_test) = this->GetParam(); - auto data_type = type_to_data_type::value; + auto data_type = ov::element::from(); auto& engine = get_test_engine(); @@ -197,7 +197,7 @@ struct experimental_detectron_detection_output_test }; using experimental_detectron_detection_output_test_f32 = experimental_detectron_detection_output_test; -using experimental_detectron_detection_output_test_f16 = experimental_detectron_detection_output_test; +using experimental_detectron_detection_output_test_f16 = experimental_detectron_detection_output_test; TEST_P(experimental_detectron_detection_output_test_f32, basic) { ASSERT_NO_FATAL_FAILURE(test()); @@ -442,7 +442,7 @@ INSTANTIATE_TEST_SUITE_P(experimental_detectron_detection_output_gpu_test, INSTANTIATE_TEST_SUITE_P(experimental_detectron_detection_output_gpu_test, experimental_detectron_detection_output_test_f16, testing::Combine( - ::testing::ValuesIn(getExperimentalDetectronDetectionOutputParams()), + ::testing::ValuesIn(getExperimentalDetectronDetectionOutputParams()), ::testing::ValuesIn(layouts), ::testing::Values(false) )); @@ -450,7 +450,7 @@ INSTANTIATE_TEST_SUITE_P(experimental_detectron_detection_output_gpu_test, INSTANTIATE_TEST_SUITE_P(export_import, experimental_detectron_detection_output_test_f16, testing::Combine( - ::testing::Values(getExperimentalDetectronDetectionOutputParams()[0]), + ::testing::Values(getExperimentalDetectronDetectionOutputParams()[0]), ::testing::Values(layouts[0]), ::testing::Values(true) )); diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/experimental_detectron_generate_proposals_single_image_gpu_test.cpp 
b/src/plugins/intel_gpu/tests/unit/test_cases/experimental_detectron_generate_proposals_single_image_gpu_test.cpp index 7a9470b3ff36e0..14a847c8e25064 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/experimental_detectron_generate_proposals_single_image_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/experimental_detectron_generate_proposals_single_image_gpu_test.cpp @@ -93,8 +93,8 @@ float getError() { } template<> -float getError() { - return 0.2; +float getError() { + return 0.25; } template @@ -179,7 +179,7 @@ struct experimental_detectron_generate_proposals_single_image_test format::type data_layout; bool is_caching_test; std::tie(param, data_layout, is_caching_test) = this->GetParam(); - const auto data_type = type_to_data_type::value; + const auto data_type = ov::element::from(); auto &engine = get_test_engine(); @@ -289,7 +289,7 @@ using experimental_detectron_generate_proposals_single_image_test_f32 = experime TEST_P(experimental_detectron_generate_proposals_single_image_test_f32, basic) { ASSERT_NO_FATAL_FAILURE(test()); } -using experimental_detectron_generate_proposals_single_image_test_f16 = experimental_detectron_generate_proposals_single_image_test; +using experimental_detectron_generate_proposals_single_image_test_f16 = experimental_detectron_generate_proposals_single_image_test; TEST_P(experimental_detectron_generate_proposals_single_image_test_f16, basic) { ASSERT_NO_FATAL_FAILURE(test()); } @@ -309,7 +309,7 @@ INSTANTIATE_TEST_SUITE_P( experimental_detectron_generate_proposals_single_image_gpu_test, experimental_detectron_generate_proposals_single_image_test_f16, ::testing::Combine( - ::testing::ValuesIn(getExperimentalDetectronGenerateProposalsSingleImageParams()), + ::testing::ValuesIn(getExperimentalDetectronGenerateProposalsSingleImageParams()), ::testing::Values(format::bfyx), ::testing::Values(false) ), @@ -320,7 +320,7 @@ INSTANTIATE_TEST_SUITE_P( export_import, experimental_detectron_generate_proposals_single_image_test_f16, ::testing::Combine( - ::testing::Values(getExperimentalDetectronGenerateProposalsSingleImageParams()[0]), + ::testing::Values(getExperimentalDetectronGenerateProposalsSingleImageParams()[0]), ::testing::Values(format::bfyx), ::testing::Values(true) ), diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/experimental_detectron_prior_grid_generator_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/experimental_detectron_prior_grid_generator_gpu_test.cpp index 3a52028e650139..87b806d67ff937 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/experimental_detectron_prior_grid_generator_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/experimental_detectron_prior_grid_generator_gpu_test.cpp @@ -34,7 +34,7 @@ struct experimental_detectron_prior_grid_generator_test : public ::testing::TestWithParam> { public: void test() { - auto data_type = type_to_data_type::value; + auto data_type = ov::element::from(); ExperimentalDetectronPriorGridGeneratorParams params = testing::TestWithParam>::GetParam(); auto& engine = get_test_engine(); @@ -213,7 +213,7 @@ struct PrintToStringParamName { }; using experimental_detectron_prior_grid_generator_test_f32 = experimental_detectron_prior_grid_generator_test; -using experimental_detectron_prior_grid_generator_test_f16 = experimental_detectron_prior_grid_generator_test; +using experimental_detectron_prior_grid_generator_test_f16 = experimental_detectron_prior_grid_generator_test; TEST_P(experimental_detectron_prior_grid_generator_test_f32, 
experimental_detectron_prior_grid_generator_test_f32) { ASSERT_NO_FATAL_FAILURE(test()); @@ -230,10 +230,10 @@ INSTANTIATE_TEST_SUITE_P(smoke_experimental_detectron_prior_grid_generator_test_ INSTANTIATE_TEST_SUITE_P(smoke_experimental_detectron_prior_grid_generator_test_f16, experimental_detectron_prior_grid_generator_test_f16, - ::testing::ValuesIn(generateExperimentalPGGParams()), + ::testing::ValuesIn(generateExperimentalPGGParams()), PrintToStringParamName()); INSTANTIATE_TEST_SUITE_P(export_import, experimental_detectron_prior_grid_generator_test_f16, - ::testing::Values(generateExperimentalPGGParams(true)[0]), + ::testing::Values(generateExperimentalPGGParams(true)[0]), PrintToStringParamName()); diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/experimental_detectron_topk_rois_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/experimental_detectron_topk_rois_gpu_test.cpp index 2dd3ce2b417bcb..435f7fd2add660 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/experimental_detectron_topk_rois_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/experimental_detectron_topk_rois_gpu_test.cpp @@ -17,14 +17,13 @@ template struct experimental_detectron_topk_rois_input_types { static const auto format = layoutFormat; using type = DataType; - static const data_types data_type = type_to_data_type::value; }; template struct experimental_detectron_topk_rois_gpu_test : public testing::Test { static const auto format = EdTopkRoisInput::format; - static const auto data_type = EdTopkRoisInput::data_type; using input_type = typename EdTopkRoisInput::type; + const ov::element::Type data_type = ov::element::from(); std::vector getTypedVector(const std::vector& input) { return std::vector(input.begin(), input.end()); @@ -45,12 +44,12 @@ using format_types = testing::Types, experimental_detectron_topk_rois_input_types, experimental_detectron_topk_rois_input_types, - experimental_detectron_topk_rois_input_types, - experimental_detectron_topk_rois_input_types, - experimental_detectron_topk_rois_input_types, - experimental_detectron_topk_rois_input_types, - experimental_detectron_topk_rois_input_types, - experimental_detectron_topk_rois_input_types>; + experimental_detectron_topk_rois_input_types, + experimental_detectron_topk_rois_input_types, + experimental_detectron_topk_rois_input_types, + experimental_detectron_topk_rois_input_types, + experimental_detectron_topk_rois_input_types, + experimental_detectron_topk_rois_input_types>; TYPED_TEST_SUITE(experimental_detectron_topk_rois_gpu_test, format_types); @@ -133,7 +132,7 @@ TYPED_TEST(experimental_detectron_topk_rois_gpu_test, check_set_indices_layer_mo TEST(experimental_detectron_topk_rois_gpu_test, export_import) { const auto test_format = format::bs_fs_yx_bsv32_fsv16; - const data_types test_data_type = type_to_data_type::value; + const data_types test_data_type = ov::element::from(); auto& engine = get_test_engine(); // topk is more than model size diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/eye.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/eye.cpp index 22ee147e12c747..d3545e89935739 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/eye.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/eye.cpp @@ -39,11 +39,11 @@ class EyeTest : public ::testing::TestWithParamGetParam(); - auto num_rows = engine_.allocate_memory({type_to_data_type::value, fmt, tensor{1}}); + auto num_rows = engine_.allocate_memory({ov::element::from(), fmt, tensor{1}}); set_values(num_rows, {rows}); - auto num_coloms = 
engine_.allocate_memory({type_to_data_type::value, fmt, tensor{1}}); + auto num_coloms = engine_.allocate_memory({ov::element::from(), fmt, tensor{1}}); set_values(num_coloms, {cols}); - auto diagonal_index = engine_.allocate_memory({type_to_data_type::value, fmt, tensor{1}}); + auto diagonal_index = engine_.allocate_memory({ov::element::from(), fmt, tensor{1}}); set_values(diagonal_index, {diag}); topology tp; @@ -54,7 +54,7 @@ class EyeTest : public ::testing::TestWithParam::value, fmt, tensor{batch_rank}}); + auto batch = engine_.allocate_memory({ov::element::from(), fmt, tensor{batch_rank}}); set_values(batch, batch_shape); tp.add(data("batch", batch)); } @@ -66,23 +66,23 @@ class EyeTest : public ::testing::TestWithParam{ input_info("num_rows"), input_info("num_columns"), input_info("diagonal_index"), input_info("batch") }; ouput_op_name = "eye"; auto eye_primitive = - eye("eye", inputs, tensor{output_shape}, diag, type_to_data_type::value); + eye("eye", inputs, tensor{output_shape}, diag, ov::element::from()); tp.add(std::move(eye_primitive)); } else { - tp.add(reorder("r_num_rows", input_info("num_rows"), fmt, type_to_data_type::value)); - tp.add(reorder("r_num_columns", input_info("num_columns"), fmt, type_to_data_type::value)); - tp.add(reorder("r_diagonal_index", input_info("diagonal_index"), fmt, type_to_data_type::value)); + tp.add(reorder("r_num_rows", input_info("num_rows"), fmt, ov::element::from())); + tp.add(reorder("r_num_columns", input_info("num_columns"), fmt, ov::element::from())); + tp.add(reorder("r_diagonal_index", input_info("diagonal_index"), fmt, ov::element::from())); if (!batch_shape.empty()) { - tp.add(reorder("r_batch", input_info("batch"), fmt, type_to_data_type::value)); + tp.add(reorder("r_batch", input_info("batch"), fmt, ov::element::from())); } auto inputs = batch_shape.empty() ? 
std::vector{ input_info("r_num_rows"), input_info("r_num_columns"), input_info("r_diagonal_index") } : std::vector{ input_info("r_num_rows"), input_info("r_num_columns"), input_info("r_diagonal_index"), input_info("r_batch") }; auto eye_primitive = - eye("eye", inputs, tensor{output_shape}, diag, type_to_data_type::value); + eye("eye", inputs, tensor{output_shape}, diag, ov::element::from()); tp.add(std::move(eye_primitive)); ouput_op_name = "output"; - tp.add(reorder("output", input_info("eye"), oupput_fmt, type_to_data_type::value)); + tp.add(reorder("output", input_info("eye"), oupput_fmt, ov::element::from())); } cldnn::network::ptr network = get_network(engine_, tp, get_test_default_config(engine_), get_test_stream_ptr(), is_caching_test); diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/fully_connected_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/fully_connected_gpu_test.cpp index 0b99d36ce3624f..71301447bb28b9 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/fully_connected_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/fully_connected_gpu_test.cpp @@ -6,7 +6,6 @@ #include "intel_gpu/runtime/layout.hpp" #include "openvino/core/partial_shape.hpp" #include "test_utils.h" -#include "float16.h" #include "random_generator.hpp" #include "network_test.h" #include @@ -76,9 +75,9 @@ void generic_fully_connected_test(cldnn::format test_input_fmt, cldnn::format te auto& engine = get_test_engine(); tensor input_tensor(input_b, f, x, y); tensor weights_tensor(output_f, f, x, y); - auto input = engine.allocate_memory({ type_to_data_type::value, test_input_fmt, input_tensor }); - auto weights = engine.allocate_memory({ type_to_data_type::value, test_weights_fmt, weights_tensor }); - auto bias = engine.allocate_memory({ type_to_data_type::value, format::bfyx, { 1, 1, output_f, 1 } }); + auto input = engine.allocate_memory({ ov::element::from(), test_input_fmt, input_tensor }); + auto weights = engine.allocate_memory({ ov::element::from(), test_weights_fmt, weights_tensor }); + auto bias = engine.allocate_memory({ ov::element::from(), format::bfyx, { 1, 1, output_f, 1 } }); set_values(input, input_rnd_vec); set_values(weights, weights_rnd_vec); set_values(bias, bias_rnd_vec); @@ -165,7 +164,7 @@ TEST(DISABLED_fully_connected_gpu, generic_random_short) { generic_fully_connected_test(test_input_fmt, test_weights_fmt, b, f, sizes.second, sizes.first, output_f, relu_activated); if (!f16_supported) continue; - generic_fully_connected_test(test_input_fmt, test_weights_fmt, + generic_fully_connected_test(test_input_fmt, test_weights_fmt, b, f, sizes.second, sizes.first, output_f, relu_activated); } } @@ -780,18 +779,18 @@ TEST(fully_connected_gpu, compressed_scale_fp16) { auto weights_mem = engine.allocate_memory({ {8, 4}, data_types::f16, format::bfyx }); auto scale_mem = engine.allocate_memory({ {1, 8}, data_types::f16, format::bfyx }); - set_values(input_mem, { FLOAT16(-0.5f), FLOAT16(2.0f), FLOAT16(0.5f), FLOAT16(1.0f), - FLOAT16(0.5f), FLOAT16(-2.0f), FLOAT16(-0.5f), FLOAT16(-1.0f) }); - set_values(weights_mem, {FLOAT16( 1.5f), FLOAT16( 1.0f), FLOAT16( 0.5f), FLOAT16(-1.0f), - FLOAT16( 0.0f), FLOAT16( 0.5f), FLOAT16( 0.5f), FLOAT16(-0.5f), - FLOAT16(-2.0f), FLOAT16(-0.5f), FLOAT16( 1.0f), FLOAT16( 1.5f), - FLOAT16(-2.0f), FLOAT16(-0.5f), FLOAT16( 1.0f), FLOAT16( 1.5f), - FLOAT16( 2.0f), FLOAT16( 0.5f), FLOAT16(-1.0f), FLOAT16(-1.5f), - FLOAT16( 2.0f), FLOAT16( 0.5f), FLOAT16(-1.0f), FLOAT16(-1.5f), - FLOAT16(-1.5f), FLOAT16(-1.0f), FLOAT16(-0.5f), FLOAT16( 
1.0f), - FLOAT16( 0.0f), FLOAT16(-0.5f), FLOAT16(0.5f), FLOAT16( 0.5f) }); + set_values(input_mem, { ov::float16(-0.5f), ov::float16(2.0f), ov::float16(0.5f), ov::float16(1.0f), + ov::float16(0.5f), ov::float16(-2.0f), ov::float16(-0.5f), ov::float16(-1.0f) }); + set_values(weights_mem, {ov::float16( 1.5f), ov::float16( 1.0f), ov::float16( 0.5f), ov::float16(-1.0f), + ov::float16( 0.0f), ov::float16( 0.5f), ov::float16( 0.5f), ov::float16(-0.5f), + ov::float16(-2.0f), ov::float16(-0.5f), ov::float16( 1.0f), ov::float16( 1.5f), + ov::float16(-2.0f), ov::float16(-0.5f), ov::float16( 1.0f), ov::float16( 1.5f), + ov::float16( 2.0f), ov::float16( 0.5f), ov::float16(-1.0f), ov::float16(-1.5f), + ov::float16( 2.0f), ov::float16( 0.5f), ov::float16(-1.0f), ov::float16(-1.5f), + ov::float16(-1.5f), ov::float16(-1.0f), ov::float16(-0.5f), ov::float16( 1.0f), + ov::float16( 0.0f), ov::float16(-0.5f), ov::float16(0.5f), ov::float16( 0.5f) }); - set_values(scale_mem, {FLOAT16(2.0f), FLOAT16(4.0f), FLOAT16(-2.0f), FLOAT16(-4.0f), FLOAT16(0.5f), FLOAT16(-0.5f), FLOAT16(2.0f), FLOAT16(2.0f)}); + set_values(scale_mem, {ov::float16(2.0f), ov::float16(4.0f), ov::float16(-2.0f), ov::float16(-4.0f), ov::float16(0.5f), ov::float16(-0.5f), ov::float16(2.0f), ov::float16(2.0f)}); topology topology( input_layout("input", input_mem->get_layout()), @@ -812,14 +811,14 @@ TEST(fully_connected_gpu, compressed_scale_fp16) { auto output_mem = outputs.begin()->second.get_memory(); - cldnn::mem_lock output_ptr (output_mem, get_test_stream()); + cldnn::mem_lock output_ptr (output_mem, get_test_stream()); ov::PartialShape expected_shape{2, 8}; ASSERT_EQ(expected_shape, output_mem->get_layout().get_partial_shape()); - std::vector expected_result = { - FLOAT16(1.0f), FLOAT16( 3.0f), FLOAT16(-4.0f), FLOAT16(-8.0f), FLOAT16(-1.0f), FLOAT16( 1.0f), FLOAT16(-1.0f), FLOAT16(-0.5f), - FLOAT16(-1.0f), FLOAT16(-3.0f), FLOAT16( 4.0f), FLOAT16( 8.0f), FLOAT16( 1.0f), FLOAT16(-1.0f), FLOAT16( 1.0f), FLOAT16( 0.5f)}; + std::vector expected_result = { + ov::float16(1.0f), ov::float16( 3.0f), ov::float16(-4.0f), ov::float16(-8.0f), ov::float16(-1.0f), ov::float16( 1.0f), ov::float16(-1.0f), ov::float16(-0.5f), + ov::float16(-1.0f), ov::float16(-3.0f), ov::float16( 4.0f), ov::float16( 8.0f), ov::float16( 1.0f), ov::float16(-1.0f), ov::float16( 1.0f), ov::float16( 0.5f)}; for (size_t i = 0; i < expected_result.size(); i++) { ASSERT_FLOAT_EQ(expected_result[i], output_ptr[i]) << "i = " << i; @@ -1020,9 +1019,9 @@ TEST(fully_connected_gpu, DISABLED_fs_byx_fsv32_b12) { // Generate random input data and set values tests::random_generator rg(GET_SUITE_NAME); - auto input_data = rg.generate_random_4d(batch_num, input_f, input_y, input_x, -1, 1); - auto weights_data = rg.generate_random_4d(output_f, input_f, input_y, input_x, -1, 1); - auto bias_data = rg.generate_random_1d(output_f, -1, 1); + auto input_data = rg.generate_random_4d(batch_num, input_f, input_y, input_x, -1, 1); + auto weights_data = rg.generate_random_4d(output_f, input_f, input_y, input_x, -1, 1); + auto bias_data = rg.generate_random_1d(output_f, -1, 1); auto input_data_bfyx = flatten_4d(format::bfyx, input_data); auto weights_data_bfyx = flatten_4d(format::bfyx, weights_data); @@ -1054,7 +1053,7 @@ TEST(fully_connected_gpu, DISABLED_fs_byx_fsv32_b12) { auto outputs = network.execute(); auto output_prim = outputs.at("out").get_memory(); - cldnn::mem_lock output_ptr(output_prim, get_test_stream()); + cldnn::mem_lock output_ptr(output_prim, get_test_stream()); for (size_t bi = 0; bi 
< batch_num; ++bi) {
@@ -1097,9 +1096,9 @@ TEST(fully_connected_gpu, DISABLED_fs_byx_fsv32_b34)
// Generate random input data and set values
tests::random_generator rg(GET_SUITE_NAME);
- auto input_data = rg.generate_random_4d(batch_num, input_f, input_y, input_x, -1, 1);
- auto weights_data = rg.generate_random_4d(output_f, input_f, input_y, input_x, -1, 1);
- auto bias_data = rg.generate_random_1d(output_f, -1, 1);
+ auto input_data = rg.generate_random_4d(batch_num, input_f, input_y, input_x, -1, 1);
+ auto weights_data = rg.generate_random_4d(output_f, input_f, input_y, input_x, -1, 1);
+ auto bias_data = rg.generate_random_1d(output_f, -1, 1);
auto input_data_bfyx = flatten_4d(format::bfyx, input_data);
auto weights_data_bfyx = flatten_4d(format::bfyx, weights_data);
@@ -1131,7 +1130,7 @@ TEST(fully_connected_gpu, DISABLED_fs_byx_fsv32_b34)
auto outputs = network.execute();
auto output_prim = outputs.at("out").get_memory();
- cldnn::mem_lock output_ptr(output_prim, get_test_stream());
+ cldnn::mem_lock output_ptr(output_prim, get_test_stream());
for (size_t bi = 0; bi < batch_num; ++bi) {
@@ -1195,7 +1194,7 @@ struct fully_connected_random_test : ::testing::TestWithParam;
-using fully_connected_random_test_f16 = fully_connected_random_test;
+using fully_connected_random_test_f16 = fully_connected_random_test;
TEST_P(fully_connected_random_test_f32, basic) {
run_test();
@@ -1341,7 +1340,7 @@ struct fully_connected_random_test_3d : ::testing::TestWithParam;
-using fully_connected_random_test_f16_3d = fully_connected_random_test_3d;
+using fully_connected_random_test_f16_3d = fully_connected_random_test_3d;
using fully_connected_random_test_i8_3d = fully_connected_random_test_3d;
@@ -1497,11 +1496,11 @@ class fully_connected_quantized_test : public ::testing::Test {
size_t output_f() { return _weights.size(); }
data_types input_data_type() {
- return type_to_data_type::value;
+ return ov::element::from();
}
data_types output_data_type() {
- return type_to_data_type::value;
+ return ov::element::from();
}
bool has_bias() { return _bias.size() > 0; }
@@ -1581,7 +1580,7 @@ class fully_connected_quantized_test : public ::testing::Test {
[](tensor::value_type x) { return x != 1l; });
size_t input_rank = std::distance(input_sizes.begin(), last_dim.base());
auto fc_prim = fully_connected("fc_prim", input_info("input"), "weights", "bias", cldnn::padding(), input_rank);
- fc_prim.output_data_types = {type_to_data_type::value};
+ fc_prim.output_data_types = {static_cast(ov::element::from())};
topo.add(fc_prim);
topo.add(data("quant_input_low", quantization_input_low));
@@ -2497,9 +2496,9 @@ struct dynamic_fully_connected_gpu : ::testing::TestWithParam::value;
- auto weights_dt = cldnn::type_to_data_type::value;
- auto output_dt = cldnn::type_to_data_type::value;
+ auto input_dt = ov::element::from();
+ auto weights_dt = ov::element::from();
+ auto output_dt = ov::element::from();
auto& engine = get_test_engine();
auto input_dyn_layout = layout{ ov::PartialShape{ ov::Dimension(), input_f }, input_dt, format::bfyx };
@@ -2580,7 +2579,7 @@ struct dynamic_fully_connected_gpu : ::testing::TestWithParam;
-using dynamic_fully_connected_gpu_f16_3d = dynamic_fully_connected_gpu;
+using dynamic_fully_connected_gpu_f16_3d = dynamic_fully_connected_gpu;
using dynamic_fully_connected_gpu_i8_3d = dynamic_fully_connected_gpu;
static const std::vector
@@ -2773,11 +2772,11 @@ class fully_connected_types_test : public ::testing::Test {
size_t output_f() { return _weights.size(); }
data_types input_data_type() {
-
return type_to_data_type::value; + return ov::element::from(); } data_types weights_data_type() { - return type_to_data_type::value; + return ov::element::from(); } bool has_bias() { return _bias.size() > 0; } @@ -2845,7 +2844,7 @@ class fully_connected_types_test : public ::testing::Test { [](tensor::value_type x) { return x != 1l; }); size_t input_rank = std::distance(input_sizes.begin(), last_dim.base()); auto fc_prim = fully_connected("output", input_info("input"), "weights", "bias", cldnn::padding(), input_rank); - fc_prim.output_data_types = {type_to_data_type::value}; + fc_prim.output_data_types = { static_cast(ov::element::from()) }; topo.add(fc_prim); ExecutionConfig config = get_test_default_config(engine); diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/gather_elements_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/gather_elements_gpu_test.cpp index 3cee30b34c30bc..a1fb490efb5cb4 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/gather_elements_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/gather_elements_gpu_test.cpp @@ -53,54 +53,54 @@ TEST(gather_elements_gpu_fp16, d3283_i2283_a0) { auto input1 = engine.allocate_memory({ data_types::f16, format::bfyx, { 2, 2, 8, 3 } }); // indices set_values(input0, { - FLOAT16(0), FLOAT16(1), FLOAT16(8), FLOAT16(5), FLOAT16(5), FLOAT16(2), FLOAT16(0), FLOAT16(7), - FLOAT16(7), FLOAT16(10), FLOAT16(4), FLOAT16(5), FLOAT16(9), FLOAT16(0), FLOAT16(0), FLOAT16(5), - FLOAT16(7), FLOAT16(0), FLOAT16(4), FLOAT16(0), FLOAT16(4), FLOAT16(7), FLOAT16(6), FLOAT16(10), - FLOAT16(9), FLOAT16(5), FLOAT16(1), FLOAT16(7), FLOAT16(4), FLOAT16(7), FLOAT16(10), FLOAT16(8), - FLOAT16(2), FLOAT16(0), FLOAT16(8), FLOAT16(3), FLOAT16(6), FLOAT16(8), FLOAT16(10), FLOAT16(4), - FLOAT16(2), FLOAT16(10), FLOAT16(7), FLOAT16(8), FLOAT16(7), FLOAT16(0), FLOAT16(6), FLOAT16(9), - FLOAT16(2), FLOAT16(4), FLOAT16(8), FLOAT16(5), FLOAT16(2), FLOAT16(3), FLOAT16(3), FLOAT16(1), - FLOAT16(5), FLOAT16(9), FLOAT16(10), FLOAT16(0), FLOAT16(9), FLOAT16(5), FLOAT16(5), FLOAT16(3), - FLOAT16(10), FLOAT16(5), FLOAT16(2), FLOAT16(0), FLOAT16(10), FLOAT16(0), FLOAT16(5), FLOAT16(4), - FLOAT16(3), FLOAT16(10), FLOAT16(5), FLOAT16(5), FLOAT16(10), FLOAT16(0), FLOAT16(8), FLOAT16(8), - FLOAT16(9), FLOAT16(1), FLOAT16(0), FLOAT16(7), FLOAT16(9), FLOAT16(6), FLOAT16(8), FLOAT16(7), - FLOAT16(10), FLOAT16(9), FLOAT16(2), FLOAT16(3), FLOAT16(3), FLOAT16(5), FLOAT16(6), FLOAT16(9), - FLOAT16(4), FLOAT16(9), FLOAT16(2), FLOAT16(4), FLOAT16(5), FLOAT16(5), FLOAT16(3), FLOAT16(1), - FLOAT16(1), FLOAT16(6), FLOAT16(8), FLOAT16(0), FLOAT16(5), FLOAT16(5), FLOAT16(10), FLOAT16(8), - FLOAT16(6), FLOAT16(9), FLOAT16(6), FLOAT16(9), FLOAT16(1), FLOAT16(2), FLOAT16(7), FLOAT16(1), - FLOAT16(1), FLOAT16(3), FLOAT16(0), FLOAT16(4), FLOAT16(0), FLOAT16(7), FLOAT16(10), FLOAT16(2), - FLOAT16(1), FLOAT16(3), FLOAT16(9), FLOAT16(7), FLOAT16(1), FLOAT16(7), FLOAT16(4), FLOAT16(4), - FLOAT16(5), FLOAT16(1), FLOAT16(6), FLOAT16(9), FLOAT16(6), FLOAT16(10), FLOAT16(6), FLOAT16(1), + ov::float16(0), ov::float16(1), ov::float16(8), ov::float16(5), ov::float16(5), ov::float16(2), ov::float16(0), ov::float16(7), + ov::float16(7), ov::float16(10), ov::float16(4), ov::float16(5), ov::float16(9), ov::float16(0), ov::float16(0), ov::float16(5), + ov::float16(7), ov::float16(0), ov::float16(4), ov::float16(0), ov::float16(4), ov::float16(7), ov::float16(6), ov::float16(10), + ov::float16(9), ov::float16(5), ov::float16(1), ov::float16(7), ov::float16(4), ov::float16(7), 
ov::float16(10), ov::float16(8), + ov::float16(2), ov::float16(0), ov::float16(8), ov::float16(3), ov::float16(6), ov::float16(8), ov::float16(10), ov::float16(4), + ov::float16(2), ov::float16(10), ov::float16(7), ov::float16(8), ov::float16(7), ov::float16(0), ov::float16(6), ov::float16(9), + ov::float16(2), ov::float16(4), ov::float16(8), ov::float16(5), ov::float16(2), ov::float16(3), ov::float16(3), ov::float16(1), + ov::float16(5), ov::float16(9), ov::float16(10), ov::float16(0), ov::float16(9), ov::float16(5), ov::float16(5), ov::float16(3), + ov::float16(10), ov::float16(5), ov::float16(2), ov::float16(0), ov::float16(10), ov::float16(0), ov::float16(5), ov::float16(4), + ov::float16(3), ov::float16(10), ov::float16(5), ov::float16(5), ov::float16(10), ov::float16(0), ov::float16(8), ov::float16(8), + ov::float16(9), ov::float16(1), ov::float16(0), ov::float16(7), ov::float16(9), ov::float16(6), ov::float16(8), ov::float16(7), + ov::float16(10), ov::float16(9), ov::float16(2), ov::float16(3), ov::float16(3), ov::float16(5), ov::float16(6), ov::float16(9), + ov::float16(4), ov::float16(9), ov::float16(2), ov::float16(4), ov::float16(5), ov::float16(5), ov::float16(3), ov::float16(1), + ov::float16(1), ov::float16(6), ov::float16(8), ov::float16(0), ov::float16(5), ov::float16(5), ov::float16(10), ov::float16(8), + ov::float16(6), ov::float16(9), ov::float16(6), ov::float16(9), ov::float16(1), ov::float16(2), ov::float16(7), ov::float16(1), + ov::float16(1), ov::float16(3), ov::float16(0), ov::float16(4), ov::float16(0), ov::float16(7), ov::float16(10), ov::float16(2), + ov::float16(1), ov::float16(3), ov::float16(9), ov::float16(7), ov::float16(1), ov::float16(7), ov::float16(4), ov::float16(4), + ov::float16(5), ov::float16(1), ov::float16(6), ov::float16(9), ov::float16(6), ov::float16(10), ov::float16(6), ov::float16(1), }); set_values(input1, { - FLOAT16(0), FLOAT16(1), FLOAT16(2), FLOAT16(2), FLOAT16(2), FLOAT16(0), FLOAT16(0), FLOAT16(0), - FLOAT16(2), FLOAT16(0), FLOAT16(0), FLOAT16(0), FLOAT16(1), FLOAT16(0), FLOAT16(1), FLOAT16(1), - FLOAT16(2), FLOAT16(1), FLOAT16(2), FLOAT16(1), FLOAT16(2), FLOAT16(1), FLOAT16(0), FLOAT16(2), - FLOAT16(1), FLOAT16(0), FLOAT16(1), FLOAT16(2), FLOAT16(0), FLOAT16(0), FLOAT16(1), FLOAT16(2), - FLOAT16(2), FLOAT16(1), FLOAT16(1), FLOAT16(1), FLOAT16(1), FLOAT16(0), FLOAT16(2), FLOAT16(0), - FLOAT16(2), FLOAT16(2), FLOAT16(2), FLOAT16(2), FLOAT16(2), FLOAT16(0), FLOAT16(0), FLOAT16(2), - FLOAT16(1), FLOAT16(2), FLOAT16(2), FLOAT16(2), FLOAT16(2), FLOAT16(0), FLOAT16(2), FLOAT16(0), - FLOAT16(0), FLOAT16(0), FLOAT16(0), FLOAT16(2), FLOAT16(2), FLOAT16(0), FLOAT16(1), FLOAT16(1), - FLOAT16(2), FLOAT16(2), FLOAT16(1), FLOAT16(1), FLOAT16(0), FLOAT16(2), FLOAT16(0), FLOAT16(0), - FLOAT16(0), FLOAT16(2), FLOAT16(2), FLOAT16(2), FLOAT16(1), FLOAT16(0), FLOAT16(0), FLOAT16(2), - FLOAT16(1), FLOAT16(2), FLOAT16(1), FLOAT16(2), FLOAT16(0), FLOAT16(0), FLOAT16(1), FLOAT16(2), - FLOAT16(0), FLOAT16(0), FLOAT16(0), FLOAT16(1), FLOAT16(1), FLOAT16(2), FLOAT16(0), FLOAT16(2), + ov::float16(0), ov::float16(1), ov::float16(2), ov::float16(2), ov::float16(2), ov::float16(0), ov::float16(0), ov::float16(0), + ov::float16(2), ov::float16(0), ov::float16(0), ov::float16(0), ov::float16(1), ov::float16(0), ov::float16(1), ov::float16(1), + ov::float16(2), ov::float16(1), ov::float16(2), ov::float16(1), ov::float16(2), ov::float16(1), ov::float16(0), ov::float16(2), + ov::float16(1), ov::float16(0), ov::float16(1), ov::float16(2), ov::float16(0), ov::float16(0), 
ov::float16(1), ov::float16(2), + ov::float16(2), ov::float16(1), ov::float16(1), ov::float16(1), ov::float16(1), ov::float16(0), ov::float16(2), ov::float16(0), + ov::float16(2), ov::float16(2), ov::float16(2), ov::float16(2), ov::float16(2), ov::float16(0), ov::float16(0), ov::float16(2), + ov::float16(1), ov::float16(2), ov::float16(2), ov::float16(2), ov::float16(2), ov::float16(0), ov::float16(2), ov::float16(0), + ov::float16(0), ov::float16(0), ov::float16(0), ov::float16(2), ov::float16(2), ov::float16(0), ov::float16(1), ov::float16(1), + ov::float16(2), ov::float16(2), ov::float16(1), ov::float16(1), ov::float16(0), ov::float16(2), ov::float16(0), ov::float16(0), + ov::float16(0), ov::float16(2), ov::float16(2), ov::float16(2), ov::float16(1), ov::float16(0), ov::float16(0), ov::float16(2), + ov::float16(1), ov::float16(2), ov::float16(1), ov::float16(2), ov::float16(0), ov::float16(0), ov::float16(1), ov::float16(2), + ov::float16(0), ov::float16(0), ov::float16(0), ov::float16(1), ov::float16(1), ov::float16(2), ov::float16(0), ov::float16(2), }); std::vector expected_results = { - FLOAT16(0), FLOAT16(4), FLOAT16(2), FLOAT16(4), FLOAT16(5), FLOAT16(2), FLOAT16(0), FLOAT16(7), - FLOAT16(1), FLOAT16(10), FLOAT16(4), FLOAT16(5), FLOAT16(9), FLOAT16(0), FLOAT16(5), FLOAT16(3), - FLOAT16(6), FLOAT16(5), FLOAT16(6), FLOAT16(0), FLOAT16(1), FLOAT16(0), FLOAT16(6), FLOAT16(1), - FLOAT16(3), FLOAT16(5), FLOAT16(5), FLOAT16(4), FLOAT16(4), FLOAT16(7), FLOAT16(8), FLOAT16(2), - FLOAT16(1), FLOAT16(1), FLOAT16(0), FLOAT16(7), FLOAT16(9), FLOAT16(8), FLOAT16(4), FLOAT16(4), - FLOAT16(5), FLOAT16(1), FLOAT16(6), FLOAT16(9), FLOAT16(6), FLOAT16(0), FLOAT16(6), FLOAT16(1), - FLOAT16(2), FLOAT16(9), FLOAT16(2), FLOAT16(4), FLOAT16(5), FLOAT16(2), FLOAT16(3), FLOAT16(7), - FLOAT16(7), FLOAT16(10), FLOAT16(4), FLOAT16(0), FLOAT16(5), FLOAT16(0), FLOAT16(5), FLOAT16(3), - FLOAT16(6), FLOAT16(9), FLOAT16(2), FLOAT16(0), FLOAT16(4), FLOAT16(2), FLOAT16(6), FLOAT16(10), - FLOAT16(9), FLOAT16(3), FLOAT16(0), FLOAT16(4), FLOAT16(10), FLOAT16(7), FLOAT16(10), FLOAT16(2), - FLOAT16(9), FLOAT16(3), FLOAT16(0), FLOAT16(7), FLOAT16(6), FLOAT16(8), FLOAT16(8), FLOAT16(4), - FLOAT16(2), FLOAT16(10), FLOAT16(7), FLOAT16(3), FLOAT16(3), FLOAT16(10), FLOAT16(6), FLOAT16(1), + ov::float16(0), ov::float16(4), ov::float16(2), ov::float16(4), ov::float16(5), ov::float16(2), ov::float16(0), ov::float16(7), + ov::float16(1), ov::float16(10), ov::float16(4), ov::float16(5), ov::float16(9), ov::float16(0), ov::float16(5), ov::float16(3), + ov::float16(6), ov::float16(5), ov::float16(6), ov::float16(0), ov::float16(1), ov::float16(0), ov::float16(6), ov::float16(1), + ov::float16(3), ov::float16(5), ov::float16(5), ov::float16(4), ov::float16(4), ov::float16(7), ov::float16(8), ov::float16(2), + ov::float16(1), ov::float16(1), ov::float16(0), ov::float16(7), ov::float16(9), ov::float16(8), ov::float16(4), ov::float16(4), + ov::float16(5), ov::float16(1), ov::float16(6), ov::float16(9), ov::float16(6), ov::float16(0), ov::float16(6), ov::float16(1), + ov::float16(2), ov::float16(9), ov::float16(2), ov::float16(4), ov::float16(5), ov::float16(2), ov::float16(3), ov::float16(7), + ov::float16(7), ov::float16(10), ov::float16(4), ov::float16(0), ov::float16(5), ov::float16(0), ov::float16(5), ov::float16(3), + ov::float16(6), ov::float16(9), ov::float16(2), ov::float16(0), ov::float16(4), ov::float16(2), ov::float16(6), ov::float16(10), + ov::float16(9), ov::float16(3), ov::float16(0), ov::float16(4), ov::float16(10), 
ov::float16(7), ov::float16(10), ov::float16(2), + ov::float16(9), ov::float16(3), ov::float16(0), ov::float16(7), ov::float16(6), ov::float16(8), ov::float16(8), ov::float16(4), + ov::float16(2), ov::float16(10), ov::float16(7), ov::float16(3), ov::float16(3), ov::float16(10), ov::float16(6), ov::float16(1), }; DoTest(engine, input0, input1, expected_results, tensor(2, 2, 8, 3), axis); @@ -113,72 +113,72 @@ TEST(gather_elements_gpu_fp16, d2235_i2235_a3) { auto input0 = engine.allocate_memory({ data_types::f16, format::bfyx, { 2, 2, 3, 5 } }); // data auto input1 = engine.allocate_memory({ data_types::f16, format::bfyx, { 2, 2, 3, 5 } }); // indices set_values(input0, { - FLOAT16(0), FLOAT16(1), FLOAT16(8), - FLOAT16(5), FLOAT16(5), FLOAT16(2), - FLOAT16(0), FLOAT16(7), FLOAT16(7), - FLOAT16(10), FLOAT16(4), FLOAT16(5), - FLOAT16(9), FLOAT16(0), FLOAT16(0), - FLOAT16(5), FLOAT16(7), FLOAT16(0), - FLOAT16(4), FLOAT16(0), FLOAT16(4), - FLOAT16(7), FLOAT16(6), FLOAT16(10), - FLOAT16(9), FLOAT16(5), FLOAT16(1), - FLOAT16(7), FLOAT16(4), FLOAT16(7), - FLOAT16(10), FLOAT16(8), FLOAT16(2), - FLOAT16(0), FLOAT16(8), FLOAT16(3), - FLOAT16(6), FLOAT16(8), FLOAT16(10), - FLOAT16(4), FLOAT16(2), FLOAT16(10), - FLOAT16(7), FLOAT16(8), FLOAT16(7), - FLOAT16(0), FLOAT16(6), FLOAT16(9), - FLOAT16(2), FLOAT16(4), FLOAT16(8), - FLOAT16(5), FLOAT16(2), FLOAT16(3), - FLOAT16(3), FLOAT16(1), FLOAT16(5), - FLOAT16(9), FLOAT16(10), FLOAT16(0), + ov::float16(0), ov::float16(1), ov::float16(8), + ov::float16(5), ov::float16(5), ov::float16(2), + ov::float16(0), ov::float16(7), ov::float16(7), + ov::float16(10), ov::float16(4), ov::float16(5), + ov::float16(9), ov::float16(0), ov::float16(0), + ov::float16(5), ov::float16(7), ov::float16(0), + ov::float16(4), ov::float16(0), ov::float16(4), + ov::float16(7), ov::float16(6), ov::float16(10), + ov::float16(9), ov::float16(5), ov::float16(1), + ov::float16(7), ov::float16(4), ov::float16(7), + ov::float16(10), ov::float16(8), ov::float16(2), + ov::float16(0), ov::float16(8), ov::float16(3), + ov::float16(6), ov::float16(8), ov::float16(10), + ov::float16(4), ov::float16(2), ov::float16(10), + ov::float16(7), ov::float16(8), ov::float16(7), + ov::float16(0), ov::float16(6), ov::float16(9), + ov::float16(2), ov::float16(4), ov::float16(8), + ov::float16(5), ov::float16(2), ov::float16(3), + ov::float16(3), ov::float16(1), ov::float16(5), + ov::float16(9), ov::float16(10), ov::float16(0), }); set_values(input1, { - FLOAT16(0), FLOAT16(1), FLOAT16(2), - FLOAT16(2), FLOAT16(2), FLOAT16(0), - FLOAT16(0), FLOAT16(0), FLOAT16(2), - FLOAT16(0), FLOAT16(0), FLOAT16(0), - FLOAT16(1), FLOAT16(0), FLOAT16(1), - FLOAT16(1), FLOAT16(2), FLOAT16(1), - FLOAT16(2), FLOAT16(1), FLOAT16(2), - FLOAT16(1), FLOAT16(0), FLOAT16(2), - FLOAT16(1), FLOAT16(0), FLOAT16(1), - FLOAT16(2), FLOAT16(0), FLOAT16(0), - FLOAT16(1), FLOAT16(2), FLOAT16(2), - FLOAT16(1), FLOAT16(1), FLOAT16(1), - FLOAT16(1), FLOAT16(0), FLOAT16(2), - FLOAT16(0), FLOAT16(2), FLOAT16(2), - FLOAT16(2), FLOAT16(2), FLOAT16(2), - FLOAT16(0), FLOAT16(0), FLOAT16(2), - FLOAT16(1), FLOAT16(2), FLOAT16(2), - FLOAT16(2), FLOAT16(2), FLOAT16(0), - FLOAT16(2), FLOAT16(0), FLOAT16(0), - FLOAT16(0), FLOAT16(0), FLOAT16(2), + ov::float16(0), ov::float16(1), ov::float16(2), + ov::float16(2), ov::float16(2), ov::float16(0), + ov::float16(0), ov::float16(0), ov::float16(2), + ov::float16(0), ov::float16(0), ov::float16(0), + ov::float16(1), ov::float16(0), ov::float16(1), + ov::float16(1), ov::float16(2), ov::float16(1), + ov::float16(2), 
ov::float16(1), ov::float16(2), + ov::float16(1), ov::float16(0), ov::float16(2), + ov::float16(1), ov::float16(0), ov::float16(1), + ov::float16(2), ov::float16(0), ov::float16(0), + ov::float16(1), ov::float16(2), ov::float16(2), + ov::float16(1), ov::float16(1), ov::float16(1), + ov::float16(1), ov::float16(0), ov::float16(2), + ov::float16(0), ov::float16(2), ov::float16(2), + ov::float16(2), ov::float16(2), ov::float16(2), + ov::float16(0), ov::float16(0), ov::float16(2), + ov::float16(1), ov::float16(2), ov::float16(2), + ov::float16(2), ov::float16(2), ov::float16(0), + ov::float16(2), ov::float16(0), ov::float16(0), + ov::float16(0), ov::float16(0), ov::float16(2), }); std::vector expected_results = { - FLOAT16(0), FLOAT16(1), FLOAT16(8), - FLOAT16(2), FLOAT16(2), FLOAT16(5), - FLOAT16(0), FLOAT16(0), FLOAT16(7), - FLOAT16(10), FLOAT16(10), FLOAT16(10), - FLOAT16(0), FLOAT16(9), FLOAT16(0), - FLOAT16(7), FLOAT16(0), FLOAT16(7), - FLOAT16(4), FLOAT16(0), FLOAT16(4), - FLOAT16(6), FLOAT16(7), FLOAT16(10), - FLOAT16(5), FLOAT16(9), FLOAT16(5), - FLOAT16(7), FLOAT16(7), FLOAT16(7), - FLOAT16(8), FLOAT16(2), FLOAT16(2), - FLOAT16(8), FLOAT16(8), FLOAT16(8), - FLOAT16(8), FLOAT16(6), FLOAT16(10), - FLOAT16(4), FLOAT16(10), FLOAT16(10), - FLOAT16(7), FLOAT16(7), FLOAT16(7), - FLOAT16(0), FLOAT16(0), FLOAT16(9), - FLOAT16(4), FLOAT16(8), FLOAT16(8), - FLOAT16(3), FLOAT16(3), FLOAT16(5), - FLOAT16(5), FLOAT16(3), FLOAT16(3), - FLOAT16(9), FLOAT16(9), FLOAT16(0), + ov::float16(0), ov::float16(1), ov::float16(8), + ov::float16(2), ov::float16(2), ov::float16(5), + ov::float16(0), ov::float16(0), ov::float16(7), + ov::float16(10), ov::float16(10), ov::float16(10), + ov::float16(0), ov::float16(9), ov::float16(0), + ov::float16(7), ov::float16(0), ov::float16(7), + ov::float16(4), ov::float16(0), ov::float16(4), + ov::float16(6), ov::float16(7), ov::float16(10), + ov::float16(5), ov::float16(9), ov::float16(5), + ov::float16(7), ov::float16(7), ov::float16(7), + ov::float16(8), ov::float16(2), ov::float16(2), + ov::float16(8), ov::float16(8), ov::float16(8), + ov::float16(8), ov::float16(6), ov::float16(10), + ov::float16(4), ov::float16(10), ov::float16(10), + ov::float16(7), ov::float16(7), ov::float16(7), + ov::float16(0), ov::float16(0), ov::float16(9), + ov::float16(4), ov::float16(8), ov::float16(8), + ov::float16(3), ov::float16(3), ov::float16(5), + ov::float16(5), ov::float16(3), ov::float16(3), + ov::float16(9), ov::float16(9), ov::float16(0), }; DoTest(engine, input0, input1, expected_results, tensor(2, 2, 3, 5), axis); @@ -191,93 +191,93 @@ TEST(gather_elements_gpu_fp16, d1329_i1359_an1) { auto input0 = engine.allocate_memory({ data_types::f16, format::bfyx, { 1, 3, 2, 9 } }); // data auto input1 = engine.allocate_memory({ data_types::f16, format::bfyx, { 1, 3, 5, 9 } }); // indices set_values(input0, { - FLOAT16(0), FLOAT16(1), - FLOAT16(8), FLOAT16(5), - FLOAT16(5), FLOAT16(2), - FLOAT16(0), FLOAT16(7), - FLOAT16(7), FLOAT16(10), - FLOAT16(4), FLOAT16(5), - FLOAT16(9), FLOAT16(0), - FLOAT16(0), FLOAT16(5), - FLOAT16(7), FLOAT16(0), - FLOAT16(4), FLOAT16(0), - FLOAT16(4), FLOAT16(7), - FLOAT16(6), FLOAT16(10), - FLOAT16(9), FLOAT16(5), - FLOAT16(1), FLOAT16(7), - FLOAT16(4), FLOAT16(7), - FLOAT16(10), FLOAT16(8), - FLOAT16(2), FLOAT16(0), - FLOAT16(8), FLOAT16(3), - FLOAT16(6), FLOAT16(8), - FLOAT16(10), FLOAT16(4), - FLOAT16(2), FLOAT16(10), - FLOAT16(7), FLOAT16(8), - FLOAT16(7), FLOAT16(0), - FLOAT16(6), FLOAT16(9), - FLOAT16(2), FLOAT16(4), - FLOAT16(8), FLOAT16(5), - 
FLOAT16(2), FLOAT16(3), + ov::float16(0), ov::float16(1), + ov::float16(8), ov::float16(5), + ov::float16(5), ov::float16(2), + ov::float16(0), ov::float16(7), + ov::float16(7), ov::float16(10), + ov::float16(4), ov::float16(5), + ov::float16(9), ov::float16(0), + ov::float16(0), ov::float16(5), + ov::float16(7), ov::float16(0), + ov::float16(4), ov::float16(0), + ov::float16(4), ov::float16(7), + ov::float16(6), ov::float16(10), + ov::float16(9), ov::float16(5), + ov::float16(1), ov::float16(7), + ov::float16(4), ov::float16(7), + ov::float16(10), ov::float16(8), + ov::float16(2), ov::float16(0), + ov::float16(8), ov::float16(3), + ov::float16(6), ov::float16(8), + ov::float16(10), ov::float16(4), + ov::float16(2), ov::float16(10), + ov::float16(7), ov::float16(8), + ov::float16(7), ov::float16(0), + ov::float16(6), ov::float16(9), + ov::float16(2), ov::float16(4), + ov::float16(8), ov::float16(5), + ov::float16(2), ov::float16(3), }); set_values(input1, { - FLOAT16(0), FLOAT16(0), FLOAT16(1), FLOAT16(1), FLOAT16(1), - FLOAT16(0), FLOAT16(0), FLOAT16(0), FLOAT16(1), FLOAT16(0), - FLOAT16(0), FLOAT16(0), FLOAT16(0), FLOAT16(0), FLOAT16(0), - FLOAT16(0), FLOAT16(1), FLOAT16(0), FLOAT16(1), FLOAT16(1), - FLOAT16(1), FLOAT16(0), FLOAT16(0), FLOAT16(1), FLOAT16(1), - FLOAT16(0), FLOAT16(0), FLOAT16(1), FLOAT16(0), FLOAT16(0), - FLOAT16(1), FLOAT16(1), FLOAT16(1), FLOAT16(0), FLOAT16(0), - FLOAT16(1), FLOAT16(0), FLOAT16(0), FLOAT16(1), FLOAT16(0), - FLOAT16(1), FLOAT16(1), FLOAT16(1), FLOAT16(1), FLOAT16(1), - FLOAT16(0), FLOAT16(0), FLOAT16(1), FLOAT16(1), FLOAT16(1), - FLOAT16(1), FLOAT16(1), FLOAT16(1), FLOAT16(0), FLOAT16(1), - FLOAT16(0), FLOAT16(0), FLOAT16(0), FLOAT16(0), FLOAT16(1), - FLOAT16(1), FLOAT16(0), FLOAT16(1), FLOAT16(0), FLOAT16(1), - FLOAT16(1), FLOAT16(0), FLOAT16(1), FLOAT16(0), FLOAT16(1), - FLOAT16(0), FLOAT16(0), FLOAT16(0), FLOAT16(1), FLOAT16(1), - FLOAT16(1), FLOAT16(0), FLOAT16(0), FLOAT16(0), FLOAT16(1), - FLOAT16(1), FLOAT16(1), FLOAT16(1), FLOAT16(1), FLOAT16(0), - FLOAT16(0), FLOAT16(0), FLOAT16(1), FLOAT16(0), FLOAT16(0), - FLOAT16(0), FLOAT16(0), FLOAT16(0), FLOAT16(1), FLOAT16(0), - FLOAT16(0), FLOAT16(1), FLOAT16(0), FLOAT16(0), FLOAT16(0), - FLOAT16(1), FLOAT16(0), FLOAT16(0), FLOAT16(1), FLOAT16(1), - FLOAT16(0), FLOAT16(1), FLOAT16(0), FLOAT16(0), FLOAT16(0), - FLOAT16(1), FLOAT16(0), FLOAT16(1), FLOAT16(1), FLOAT16(1), - FLOAT16(0), FLOAT16(1), FLOAT16(1), FLOAT16(0), FLOAT16(1), - FLOAT16(0), FLOAT16(0), FLOAT16(1), FLOAT16(0), FLOAT16(1), - FLOAT16(1), FLOAT16(0), FLOAT16(0), FLOAT16(1), FLOAT16(0), - FLOAT16(1), FLOAT16(1), FLOAT16(0), FLOAT16(1), FLOAT16(1), + ov::float16(0), ov::float16(0), ov::float16(1), ov::float16(1), ov::float16(1), + ov::float16(0), ov::float16(0), ov::float16(0), ov::float16(1), ov::float16(0), + ov::float16(0), ov::float16(0), ov::float16(0), ov::float16(0), ov::float16(0), + ov::float16(0), ov::float16(1), ov::float16(0), ov::float16(1), ov::float16(1), + ov::float16(1), ov::float16(0), ov::float16(0), ov::float16(1), ov::float16(1), + ov::float16(0), ov::float16(0), ov::float16(1), ov::float16(0), ov::float16(0), + ov::float16(1), ov::float16(1), ov::float16(1), ov::float16(0), ov::float16(0), + ov::float16(1), ov::float16(0), ov::float16(0), ov::float16(1), ov::float16(0), + ov::float16(1), ov::float16(1), ov::float16(1), ov::float16(1), ov::float16(1), + ov::float16(0), ov::float16(0), ov::float16(1), ov::float16(1), ov::float16(1), + ov::float16(1), ov::float16(1), ov::float16(1), ov::float16(0), ov::float16(1), + 
ov::float16(0), ov::float16(0), ov::float16(0), ov::float16(0), ov::float16(1), + ov::float16(1), ov::float16(0), ov::float16(1), ov::float16(0), ov::float16(1), + ov::float16(1), ov::float16(0), ov::float16(1), ov::float16(0), ov::float16(1), + ov::float16(0), ov::float16(0), ov::float16(0), ov::float16(1), ov::float16(1), + ov::float16(1), ov::float16(0), ov::float16(0), ov::float16(0), ov::float16(1), + ov::float16(1), ov::float16(1), ov::float16(1), ov::float16(1), ov::float16(0), + ov::float16(0), ov::float16(0), ov::float16(1), ov::float16(0), ov::float16(0), + ov::float16(0), ov::float16(0), ov::float16(0), ov::float16(1), ov::float16(0), + ov::float16(0), ov::float16(1), ov::float16(0), ov::float16(0), ov::float16(0), + ov::float16(1), ov::float16(0), ov::float16(0), ov::float16(1), ov::float16(1), + ov::float16(0), ov::float16(1), ov::float16(0), ov::float16(0), ov::float16(0), + ov::float16(1), ov::float16(0), ov::float16(1), ov::float16(1), ov::float16(1), + ov::float16(0), ov::float16(1), ov::float16(1), ov::float16(0), ov::float16(1), + ov::float16(0), ov::float16(0), ov::float16(1), ov::float16(0), ov::float16(1), + ov::float16(1), ov::float16(0), ov::float16(0), ov::float16(1), ov::float16(0), + ov::float16(1), ov::float16(1), ov::float16(0), ov::float16(1), ov::float16(1), }); std::vector expected_results = { - FLOAT16(0), FLOAT16(0), FLOAT16(1), FLOAT16(1), FLOAT16(1), - FLOAT16(8), FLOAT16(8), FLOAT16(8), FLOAT16(5), FLOAT16(8), - FLOAT16(5), FLOAT16(5), FLOAT16(5), FLOAT16(5), FLOAT16(5), - FLOAT16(0), FLOAT16(7), FLOAT16(0), FLOAT16(7), FLOAT16(7), - FLOAT16(10), FLOAT16(7), FLOAT16(7), FLOAT16(10), FLOAT16(10), - FLOAT16(4), FLOAT16(4), FLOAT16(5), FLOAT16(4), FLOAT16(4), - FLOAT16(0), FLOAT16(0), FLOAT16(0), FLOAT16(9), FLOAT16(9), - FLOAT16(5), FLOAT16(0), FLOAT16(0), FLOAT16(5), FLOAT16(0), - FLOAT16(0), FLOAT16(0), FLOAT16(0), FLOAT16(0), FLOAT16(0), - FLOAT16(4), FLOAT16(4), FLOAT16(0), FLOAT16(0), FLOAT16(0), - FLOAT16(7), FLOAT16(7), FLOAT16(7), FLOAT16(4), FLOAT16(7), - FLOAT16(6), FLOAT16(6), FLOAT16(6), FLOAT16(6), FLOAT16(10), - FLOAT16(5), FLOAT16(9), FLOAT16(5), FLOAT16(9), FLOAT16(5), - FLOAT16(7), FLOAT16(1), FLOAT16(7), FLOAT16(1), FLOAT16(7), - FLOAT16(4), FLOAT16(4), FLOAT16(4), FLOAT16(7), FLOAT16(7), - FLOAT16(8), FLOAT16(10), FLOAT16(10), FLOAT16(10), FLOAT16(8), - FLOAT16(0), FLOAT16(0), FLOAT16(0), FLOAT16(0), FLOAT16(2), - FLOAT16(8), FLOAT16(8), FLOAT16(3), FLOAT16(8), FLOAT16(8), - FLOAT16(6), FLOAT16(6), FLOAT16(6), FLOAT16(8), FLOAT16(6), - FLOAT16(10), FLOAT16(4), FLOAT16(10), FLOAT16(10), FLOAT16(10), - FLOAT16(10), FLOAT16(2), FLOAT16(2), FLOAT16(10), FLOAT16(10), - FLOAT16(7), FLOAT16(8), FLOAT16(7), FLOAT16(7), FLOAT16(7), - FLOAT16(0), FLOAT16(7), FLOAT16(0), FLOAT16(0), FLOAT16(0), - FLOAT16(6), FLOAT16(9), FLOAT16(9), FLOAT16(6), FLOAT16(9), - FLOAT16(2), FLOAT16(2), FLOAT16(4), FLOAT16(2), FLOAT16(4), - FLOAT16(5), FLOAT16(8), FLOAT16(8), FLOAT16(5), FLOAT16(8), - FLOAT16(3), FLOAT16(3), FLOAT16(2), FLOAT16(3), FLOAT16(3), + ov::float16(0), ov::float16(0), ov::float16(1), ov::float16(1), ov::float16(1), + ov::float16(8), ov::float16(8), ov::float16(8), ov::float16(5), ov::float16(8), + ov::float16(5), ov::float16(5), ov::float16(5), ov::float16(5), ov::float16(5), + ov::float16(0), ov::float16(7), ov::float16(0), ov::float16(7), ov::float16(7), + ov::float16(10), ov::float16(7), ov::float16(7), ov::float16(10), ov::float16(10), + ov::float16(4), ov::float16(4), ov::float16(5), ov::float16(4), ov::float16(4), + ov::float16(0), 
ov::float16(0), ov::float16(0), ov::float16(9), ov::float16(9), + ov::float16(5), ov::float16(0), ov::float16(0), ov::float16(5), ov::float16(0), + ov::float16(0), ov::float16(0), ov::float16(0), ov::float16(0), ov::float16(0), + ov::float16(4), ov::float16(4), ov::float16(0), ov::float16(0), ov::float16(0), + ov::float16(7), ov::float16(7), ov::float16(7), ov::float16(4), ov::float16(7), + ov::float16(6), ov::float16(6), ov::float16(6), ov::float16(6), ov::float16(10), + ov::float16(5), ov::float16(9), ov::float16(5), ov::float16(9), ov::float16(5), + ov::float16(7), ov::float16(1), ov::float16(7), ov::float16(1), ov::float16(7), + ov::float16(4), ov::float16(4), ov::float16(4), ov::float16(7), ov::float16(7), + ov::float16(8), ov::float16(10), ov::float16(10), ov::float16(10), ov::float16(8), + ov::float16(0), ov::float16(0), ov::float16(0), ov::float16(0), ov::float16(2), + ov::float16(8), ov::float16(8), ov::float16(3), ov::float16(8), ov::float16(8), + ov::float16(6), ov::float16(6), ov::float16(6), ov::float16(8), ov::float16(6), + ov::float16(10), ov::float16(4), ov::float16(10), ov::float16(10), ov::float16(10), + ov::float16(10), ov::float16(2), ov::float16(2), ov::float16(10), ov::float16(10), + ov::float16(7), ov::float16(8), ov::float16(7), ov::float16(7), ov::float16(7), + ov::float16(0), ov::float16(7), ov::float16(0), ov::float16(0), ov::float16(0), + ov::float16(6), ov::float16(9), ov::float16(9), ov::float16(6), ov::float16(9), + ov::float16(2), ov::float16(2), ov::float16(4), ov::float16(2), ov::float16(4), + ov::float16(5), ov::float16(8), ov::float16(8), ov::float16(5), ov::float16(8), + ov::float16(3), ov::float16(3), ov::float16(2), ov::float16(3), ov::float16(3), }; DoTest(engine, input0, input1, expected_results, tensor(1, 3, 5, 9), axis); @@ -291,66 +291,66 @@ TEST(gather_elements_gpu_fp16, d12853_i12923_a3) { auto input1 = engine.allocate_memory({ data_types::f16, format::bfzyx, { 1, 2, 8, 2, 3 } }); // indices set_values(input0, { - FLOAT16(0), FLOAT16(1), FLOAT16(8), FLOAT16(5), FLOAT16(5), FLOAT16(2), FLOAT16(0), FLOAT16(7), - FLOAT16(7), FLOAT16(10), FLOAT16(4), FLOAT16(5), FLOAT16(9), FLOAT16(0), FLOAT16(0), FLOAT16(5), - FLOAT16(7), FLOAT16(0), FLOAT16(4), FLOAT16(0), FLOAT16(4), FLOAT16(7), FLOAT16(6), FLOAT16(10), - FLOAT16(9), FLOAT16(5), FLOAT16(1), FLOAT16(7), FLOAT16(4), FLOAT16(7), FLOAT16(10), FLOAT16(8), - FLOAT16(2), FLOAT16(0), FLOAT16(8), FLOAT16(3), FLOAT16(6), FLOAT16(8), FLOAT16(10), FLOAT16(4), - FLOAT16(2), FLOAT16(10), FLOAT16(7), FLOAT16(8), FLOAT16(7), FLOAT16(0), FLOAT16(6), FLOAT16(9), - FLOAT16(2), FLOAT16(4), FLOAT16(8), FLOAT16(5), FLOAT16(2), FLOAT16(3), FLOAT16(3), FLOAT16(1), - FLOAT16(5), FLOAT16(9), FLOAT16(10), FLOAT16(0), FLOAT16(9), FLOAT16(5), FLOAT16(5), FLOAT16(3), - FLOAT16(10), FLOAT16(5), FLOAT16(2), FLOAT16(0), FLOAT16(10), FLOAT16(0), FLOAT16(5), FLOAT16(4), - FLOAT16(3), FLOAT16(10), FLOAT16(5), FLOAT16(5), FLOAT16(10), FLOAT16(0), FLOAT16(8), FLOAT16(8), - FLOAT16(9), FLOAT16(1), FLOAT16(0), FLOAT16(7), FLOAT16(9), FLOAT16(6), FLOAT16(8), FLOAT16(7), - FLOAT16(10), FLOAT16(9), FLOAT16(2), FLOAT16(3), FLOAT16(3), FLOAT16(5), FLOAT16(6), FLOAT16(9), - FLOAT16(4), FLOAT16(9), FLOAT16(2), FLOAT16(4), FLOAT16(5), FLOAT16(5), FLOAT16(3), FLOAT16(1), - FLOAT16(1), FLOAT16(6), FLOAT16(8), FLOAT16(0), FLOAT16(5), FLOAT16(5), FLOAT16(10), FLOAT16(8), - FLOAT16(6), FLOAT16(9), FLOAT16(6), FLOAT16(9), FLOAT16(1), FLOAT16(2), FLOAT16(7), FLOAT16(1), - FLOAT16(1), FLOAT16(3), FLOAT16(0), FLOAT16(4), FLOAT16(0), FLOAT16(7), 
FLOAT16(10), FLOAT16(2), - FLOAT16(1), FLOAT16(3), FLOAT16(9), FLOAT16(7), FLOAT16(1), FLOAT16(7), FLOAT16(4), FLOAT16(4), - FLOAT16(5), FLOAT16(1), FLOAT16(6), FLOAT16(9), FLOAT16(6), FLOAT16(10), FLOAT16(6), FLOAT16(1), - FLOAT16(10), FLOAT16(4), FLOAT16(1), FLOAT16(6), FLOAT16(2), FLOAT16(5), FLOAT16(5), FLOAT16(10), - FLOAT16(1), FLOAT16(2), FLOAT16(3), FLOAT16(6), FLOAT16(1), FLOAT16(7), FLOAT16(6), FLOAT16(8), - FLOAT16(2), FLOAT16(5), FLOAT16(4), FLOAT16(2), FLOAT16(0), FLOAT16(9), FLOAT16(4), FLOAT16(1), - FLOAT16(10), FLOAT16(4), FLOAT16(1), FLOAT16(9), FLOAT16(1), FLOAT16(1), FLOAT16(0), FLOAT16(4), - FLOAT16(2), FLOAT16(1), FLOAT16(8), FLOAT16(5), FLOAT16(3), FLOAT16(4), FLOAT16(8), FLOAT16(10), - FLOAT16(7), FLOAT16(2), FLOAT16(7), FLOAT16(9), FLOAT16(2), FLOAT16(9), FLOAT16(5), FLOAT16(5), - FLOAT16(6), FLOAT16(8), FLOAT16(8), FLOAT16(5), FLOAT16(10), FLOAT16(6), FLOAT16(4), FLOAT16(9), - FLOAT16(7), FLOAT16(7), FLOAT16(10), FLOAT16(10), FLOAT16(9), FLOAT16(3), FLOAT16(5), FLOAT16(5), - FLOAT16(1), FLOAT16(4), FLOAT16(6), FLOAT16(9), FLOAT16(4), FLOAT16(8), FLOAT16(9), FLOAT16(7), - FLOAT16(8), FLOAT16(7), FLOAT16(8), FLOAT16(0), FLOAT16(9), FLOAT16(5), FLOAT16(5), FLOAT16(0), - FLOAT16(7), FLOAT16(5), FLOAT16(7), FLOAT16(7), FLOAT16(2), FLOAT16(10), FLOAT16(9), FLOAT16(9), - FLOAT16(5), FLOAT16(1), FLOAT16(4), FLOAT16(10), FLOAT16(2), FLOAT16(4), FLOAT16(3), FLOAT16(5), + ov::float16(0), ov::float16(1), ov::float16(8), ov::float16(5), ov::float16(5), ov::float16(2), ov::float16(0), ov::float16(7), + ov::float16(7), ov::float16(10), ov::float16(4), ov::float16(5), ov::float16(9), ov::float16(0), ov::float16(0), ov::float16(5), + ov::float16(7), ov::float16(0), ov::float16(4), ov::float16(0), ov::float16(4), ov::float16(7), ov::float16(6), ov::float16(10), + ov::float16(9), ov::float16(5), ov::float16(1), ov::float16(7), ov::float16(4), ov::float16(7), ov::float16(10), ov::float16(8), + ov::float16(2), ov::float16(0), ov::float16(8), ov::float16(3), ov::float16(6), ov::float16(8), ov::float16(10), ov::float16(4), + ov::float16(2), ov::float16(10), ov::float16(7), ov::float16(8), ov::float16(7), ov::float16(0), ov::float16(6), ov::float16(9), + ov::float16(2), ov::float16(4), ov::float16(8), ov::float16(5), ov::float16(2), ov::float16(3), ov::float16(3), ov::float16(1), + ov::float16(5), ov::float16(9), ov::float16(10), ov::float16(0), ov::float16(9), ov::float16(5), ov::float16(5), ov::float16(3), + ov::float16(10), ov::float16(5), ov::float16(2), ov::float16(0), ov::float16(10), ov::float16(0), ov::float16(5), ov::float16(4), + ov::float16(3), ov::float16(10), ov::float16(5), ov::float16(5), ov::float16(10), ov::float16(0), ov::float16(8), ov::float16(8), + ov::float16(9), ov::float16(1), ov::float16(0), ov::float16(7), ov::float16(9), ov::float16(6), ov::float16(8), ov::float16(7), + ov::float16(10), ov::float16(9), ov::float16(2), ov::float16(3), ov::float16(3), ov::float16(5), ov::float16(6), ov::float16(9), + ov::float16(4), ov::float16(9), ov::float16(2), ov::float16(4), ov::float16(5), ov::float16(5), ov::float16(3), ov::float16(1), + ov::float16(1), ov::float16(6), ov::float16(8), ov::float16(0), ov::float16(5), ov::float16(5), ov::float16(10), ov::float16(8), + ov::float16(6), ov::float16(9), ov::float16(6), ov::float16(9), ov::float16(1), ov::float16(2), ov::float16(7), ov::float16(1), + ov::float16(1), ov::float16(3), ov::float16(0), ov::float16(4), ov::float16(0), ov::float16(7), ov::float16(10), ov::float16(2), + ov::float16(1), ov::float16(3), ov::float16(9), 
ov::float16(7), ov::float16(1), ov::float16(7), ov::float16(4), ov::float16(4), + ov::float16(5), ov::float16(1), ov::float16(6), ov::float16(9), ov::float16(6), ov::float16(10), ov::float16(6), ov::float16(1), + ov::float16(10), ov::float16(4), ov::float16(1), ov::float16(6), ov::float16(2), ov::float16(5), ov::float16(5), ov::float16(10), + ov::float16(1), ov::float16(2), ov::float16(3), ov::float16(6), ov::float16(1), ov::float16(7), ov::float16(6), ov::float16(8), + ov::float16(2), ov::float16(5), ov::float16(4), ov::float16(2), ov::float16(0), ov::float16(9), ov::float16(4), ov::float16(1), + ov::float16(10), ov::float16(4), ov::float16(1), ov::float16(9), ov::float16(1), ov::float16(1), ov::float16(0), ov::float16(4), + ov::float16(2), ov::float16(1), ov::float16(8), ov::float16(5), ov::float16(3), ov::float16(4), ov::float16(8), ov::float16(10), + ov::float16(7), ov::float16(2), ov::float16(7), ov::float16(9), ov::float16(2), ov::float16(9), ov::float16(5), ov::float16(5), + ov::float16(6), ov::float16(8), ov::float16(8), ov::float16(5), ov::float16(10), ov::float16(6), ov::float16(4), ov::float16(9), + ov::float16(7), ov::float16(7), ov::float16(10), ov::float16(10), ov::float16(9), ov::float16(3), ov::float16(5), ov::float16(5), + ov::float16(1), ov::float16(4), ov::float16(6), ov::float16(9), ov::float16(4), ov::float16(8), ov::float16(9), ov::float16(7), + ov::float16(8), ov::float16(7), ov::float16(8), ov::float16(0), ov::float16(9), ov::float16(5), ov::float16(5), ov::float16(0), + ov::float16(7), ov::float16(5), ov::float16(7), ov::float16(7), ov::float16(2), ov::float16(10), ov::float16(9), ov::float16(9), + ov::float16(5), ov::float16(1), ov::float16(4), ov::float16(10), ov::float16(2), ov::float16(4), ov::float16(3), ov::float16(5), }); set_values(input1, { - FLOAT16(0), FLOAT16(2), FLOAT16(4), FLOAT16(3), FLOAT16(4), FLOAT16(0), FLOAT16(0), FLOAT16(1), - FLOAT16(4), FLOAT16(0), FLOAT16(1), FLOAT16(0), FLOAT16(1), FLOAT16(0), FLOAT16(1), FLOAT16(1), - FLOAT16(3), FLOAT16(1), FLOAT16(4), FLOAT16(2), FLOAT16(4), FLOAT16(2), FLOAT16(1), FLOAT16(3), - FLOAT16(2), FLOAT16(1), FLOAT16(2), FLOAT16(4), FLOAT16(1), FLOAT16(0), FLOAT16(2), FLOAT16(3), - FLOAT16(4), FLOAT16(2), FLOAT16(2), FLOAT16(2), FLOAT16(2), FLOAT16(0), FLOAT16(4), FLOAT16(0), - FLOAT16(3), FLOAT16(4), FLOAT16(3), FLOAT16(4), FLOAT16(4), FLOAT16(1), FLOAT16(0), FLOAT16(3), - FLOAT16(2), FLOAT16(4), FLOAT16(4), FLOAT16(4), FLOAT16(4), FLOAT16(0), FLOAT16(4), FLOAT16(0), - FLOAT16(1), FLOAT16(0), FLOAT16(0), FLOAT16(4), FLOAT16(3), FLOAT16(0), FLOAT16(2), FLOAT16(2), - FLOAT16(3), FLOAT16(4), FLOAT16(2), FLOAT16(2), FLOAT16(0), FLOAT16(3), FLOAT16(1), FLOAT16(1), - FLOAT16(0), FLOAT16(3), FLOAT16(3), FLOAT16(4), FLOAT16(2), FLOAT16(0), FLOAT16(0), FLOAT16(3), - FLOAT16(3), FLOAT16(4), FLOAT16(3), FLOAT16(3), FLOAT16(1), FLOAT16(1), FLOAT16(2), FLOAT16(3), - FLOAT16(1), FLOAT16(0), FLOAT16(0), FLOAT16(2), FLOAT16(2), FLOAT16(4), FLOAT16(0), FLOAT16(4), + ov::float16(0), ov::float16(2), ov::float16(4), ov::float16(3), ov::float16(4), ov::float16(0), ov::float16(0), ov::float16(1), + ov::float16(4), ov::float16(0), ov::float16(1), ov::float16(0), ov::float16(1), ov::float16(0), ov::float16(1), ov::float16(1), + ov::float16(3), ov::float16(1), ov::float16(4), ov::float16(2), ov::float16(4), ov::float16(2), ov::float16(1), ov::float16(3), + ov::float16(2), ov::float16(1), ov::float16(2), ov::float16(4), ov::float16(1), ov::float16(0), ov::float16(2), ov::float16(3), + ov::float16(4), ov::float16(2), ov::float16(2), 
ov::float16(2), ov::float16(2), ov::float16(0), ov::float16(4), ov::float16(0), + ov::float16(3), ov::float16(4), ov::float16(3), ov::float16(4), ov::float16(4), ov::float16(1), ov::float16(0), ov::float16(3), + ov::float16(2), ov::float16(4), ov::float16(4), ov::float16(4), ov::float16(4), ov::float16(0), ov::float16(4), ov::float16(0), + ov::float16(1), ov::float16(0), ov::float16(0), ov::float16(4), ov::float16(3), ov::float16(0), ov::float16(2), ov::float16(2), + ov::float16(3), ov::float16(4), ov::float16(2), ov::float16(2), ov::float16(0), ov::float16(3), ov::float16(1), ov::float16(1), + ov::float16(0), ov::float16(3), ov::float16(3), ov::float16(4), ov::float16(2), ov::float16(0), ov::float16(0), ov::float16(3), + ov::float16(3), ov::float16(4), ov::float16(3), ov::float16(3), ov::float16(1), ov::float16(1), ov::float16(2), ov::float16(3), + ov::float16(1), ov::float16(0), ov::float16(0), ov::float16(2), ov::float16(2), ov::float16(4), ov::float16(0), ov::float16(4), }); std::vector expected_results = { - FLOAT16(0), FLOAT16(0), FLOAT16(8), FLOAT16(7), FLOAT16(6), FLOAT16(2), FLOAT16(0), FLOAT16(5), - FLOAT16(2), FLOAT16(1), FLOAT16(4), FLOAT16(5), FLOAT16(9), FLOAT16(2), FLOAT16(0), FLOAT16(5), - FLOAT16(10), FLOAT16(4), FLOAT16(5), FLOAT16(0), FLOAT16(10), FLOAT16(5), FLOAT16(3), FLOAT16(4), - FLOAT16(5), FLOAT16(4), FLOAT16(10), FLOAT16(5), FLOAT16(2), FLOAT16(0), FLOAT16(5), FLOAT16(4), - FLOAT16(6), FLOAT16(9), FLOAT16(2), FLOAT16(4), FLOAT16(5), FLOAT16(6), FLOAT16(7), FLOAT16(7), - FLOAT16(1), FLOAT16(9), FLOAT16(8), FLOAT16(9), FLOAT16(1), FLOAT16(5), FLOAT16(8), FLOAT16(8), - FLOAT16(5), FLOAT16(2), FLOAT16(3), FLOAT16(6), FLOAT16(1), FLOAT16(7), FLOAT16(6), FLOAT16(2), - FLOAT16(1), FLOAT16(3), FLOAT16(0), FLOAT16(6), FLOAT16(2), FLOAT16(7), FLOAT16(6), FLOAT16(1), - FLOAT16(7), FLOAT16(8), FLOAT16(8), FLOAT16(5), FLOAT16(0), FLOAT16(9), FLOAT16(0), FLOAT16(4), - FLOAT16(2), FLOAT16(2), FLOAT16(7), FLOAT16(5), FLOAT16(3), FLOAT16(9), FLOAT16(4), FLOAT16(5), - FLOAT16(7), FLOAT16(1), FLOAT16(7), FLOAT16(7), FLOAT16(4), FLOAT16(8), FLOAT16(5), FLOAT16(9), - FLOAT16(1), FLOAT16(7), FLOAT16(10), FLOAT16(0), FLOAT16(9), FLOAT16(4), FLOAT16(5), FLOAT16(5), + ov::float16(0), ov::float16(0), ov::float16(8), ov::float16(7), ov::float16(6), ov::float16(2), ov::float16(0), ov::float16(5), + ov::float16(2), ov::float16(1), ov::float16(4), ov::float16(5), ov::float16(9), ov::float16(2), ov::float16(0), ov::float16(5), + ov::float16(10), ov::float16(4), ov::float16(5), ov::float16(0), ov::float16(10), ov::float16(5), ov::float16(3), ov::float16(4), + ov::float16(5), ov::float16(4), ov::float16(10), ov::float16(5), ov::float16(2), ov::float16(0), ov::float16(5), ov::float16(4), + ov::float16(6), ov::float16(9), ov::float16(2), ov::float16(4), ov::float16(5), ov::float16(6), ov::float16(7), ov::float16(7), + ov::float16(1), ov::float16(9), ov::float16(8), ov::float16(9), ov::float16(1), ov::float16(5), ov::float16(8), ov::float16(8), + ov::float16(5), ov::float16(2), ov::float16(3), ov::float16(6), ov::float16(1), ov::float16(7), ov::float16(6), ov::float16(2), + ov::float16(1), ov::float16(3), ov::float16(0), ov::float16(6), ov::float16(2), ov::float16(7), ov::float16(6), ov::float16(1), + ov::float16(7), ov::float16(8), ov::float16(8), ov::float16(5), ov::float16(0), ov::float16(9), ov::float16(0), ov::float16(4), + ov::float16(2), ov::float16(2), ov::float16(7), ov::float16(5), ov::float16(3), ov::float16(9), ov::float16(4), ov::float16(5), + ov::float16(7), ov::float16(1), 
ov::float16(7), ov::float16(7), ov::float16(4), ov::float16(8), ov::float16(5), ov::float16(9), + ov::float16(1), ov::float16(7), ov::float16(10), ov::float16(0), ov::float16(9), ov::float16(4), ov::float16(5), ov::float16(5), }; DoTest(engine, input0, input1, expected_results, tensor(1, 2, 8, 2, 3), axis); @@ -364,85 +364,85 @@ TEST(gather_elements_gpu_fp16, d25441_i22441_an4) { auto input1 = engine.allocate_memory({ data_types::f16, format::bfzyx, { 2, 2, 4, 4, 1 } }); // indices set_values(input0, { - FLOAT16(0), FLOAT16(1), FLOAT16(8), FLOAT16(5), - FLOAT16(5), FLOAT16(2), FLOAT16(0), FLOAT16(7), - FLOAT16(7), FLOAT16(10), FLOAT16(4), FLOAT16(5), - FLOAT16(9), FLOAT16(0), FLOAT16(0), FLOAT16(5), - FLOAT16(7), FLOAT16(0), FLOAT16(4), FLOAT16(0), - FLOAT16(4), FLOAT16(7), FLOAT16(6), FLOAT16(10), - FLOAT16(9), FLOAT16(5), FLOAT16(1), FLOAT16(7), - FLOAT16(4), FLOAT16(7), FLOAT16(10), FLOAT16(8), - FLOAT16(2), FLOAT16(0), FLOAT16(8), FLOAT16(3), - FLOAT16(6), FLOAT16(8), FLOAT16(10), FLOAT16(4), - FLOAT16(2), FLOAT16(10), FLOAT16(7), FLOAT16(8), - FLOAT16(7), FLOAT16(0), FLOAT16(6), FLOAT16(9), - FLOAT16(2), FLOAT16(4), FLOAT16(8), FLOAT16(5), - FLOAT16(2), FLOAT16(3), FLOAT16(3), FLOAT16(1), - FLOAT16(5), FLOAT16(9), FLOAT16(10), FLOAT16(0), - FLOAT16(9), FLOAT16(5), FLOAT16(5), FLOAT16(3), - FLOAT16(10), FLOAT16(5), FLOAT16(2), FLOAT16(0), - FLOAT16(10), FLOAT16(0), FLOAT16(5), FLOAT16(4), - FLOAT16(3), FLOAT16(10), FLOAT16(5), FLOAT16(5), - FLOAT16(10), FLOAT16(0), FLOAT16(8), FLOAT16(8), - FLOAT16(9), FLOAT16(1), FLOAT16(0), FLOAT16(7), - FLOAT16(9), FLOAT16(6), FLOAT16(8), FLOAT16(7), - FLOAT16(10), FLOAT16(9), FLOAT16(2), FLOAT16(3), - FLOAT16(3), FLOAT16(5), FLOAT16(6), FLOAT16(9), - FLOAT16(4), FLOAT16(9), FLOAT16(2), FLOAT16(4), - FLOAT16(5), FLOAT16(5), FLOAT16(3), FLOAT16(1), - FLOAT16(1), FLOAT16(6), FLOAT16(8), FLOAT16(0), - FLOAT16(5), FLOAT16(5), FLOAT16(10), FLOAT16(8), - FLOAT16(6), FLOAT16(9), FLOAT16(6), FLOAT16(9), - FLOAT16(1), FLOAT16(2), FLOAT16(7), FLOAT16(1), - FLOAT16(1), FLOAT16(3), FLOAT16(0), FLOAT16(4), - FLOAT16(0), FLOAT16(7), FLOAT16(10), FLOAT16(2), - FLOAT16(1), FLOAT16(3), FLOAT16(9), FLOAT16(7), - FLOAT16(1), FLOAT16(7), FLOAT16(4), FLOAT16(4), - FLOAT16(5), FLOAT16(1), FLOAT16(6), FLOAT16(9), - FLOAT16(6), FLOAT16(10), FLOAT16(6), FLOAT16(1), - FLOAT16(10), FLOAT16(4), FLOAT16(1), FLOAT16(6), - FLOAT16(2), FLOAT16(5), FLOAT16(5), FLOAT16(10), - FLOAT16(1), FLOAT16(2), FLOAT16(3), FLOAT16(6), - FLOAT16(1), FLOAT16(7), FLOAT16(6), FLOAT16(8), + ov::float16(0), ov::float16(1), ov::float16(8), ov::float16(5), + ov::float16(5), ov::float16(2), ov::float16(0), ov::float16(7), + ov::float16(7), ov::float16(10), ov::float16(4), ov::float16(5), + ov::float16(9), ov::float16(0), ov::float16(0), ov::float16(5), + ov::float16(7), ov::float16(0), ov::float16(4), ov::float16(0), + ov::float16(4), ov::float16(7), ov::float16(6), ov::float16(10), + ov::float16(9), ov::float16(5), ov::float16(1), ov::float16(7), + ov::float16(4), ov::float16(7), ov::float16(10), ov::float16(8), + ov::float16(2), ov::float16(0), ov::float16(8), ov::float16(3), + ov::float16(6), ov::float16(8), ov::float16(10), ov::float16(4), + ov::float16(2), ov::float16(10), ov::float16(7), ov::float16(8), + ov::float16(7), ov::float16(0), ov::float16(6), ov::float16(9), + ov::float16(2), ov::float16(4), ov::float16(8), ov::float16(5), + ov::float16(2), ov::float16(3), ov::float16(3), ov::float16(1), + ov::float16(5), ov::float16(9), ov::float16(10), ov::float16(0), + ov::float16(9), ov::float16(5), 
ov::float16(5), ov::float16(3), + ov::float16(10), ov::float16(5), ov::float16(2), ov::float16(0), + ov::float16(10), ov::float16(0), ov::float16(5), ov::float16(4), + ov::float16(3), ov::float16(10), ov::float16(5), ov::float16(5), + ov::float16(10), ov::float16(0), ov::float16(8), ov::float16(8), + ov::float16(9), ov::float16(1), ov::float16(0), ov::float16(7), + ov::float16(9), ov::float16(6), ov::float16(8), ov::float16(7), + ov::float16(10), ov::float16(9), ov::float16(2), ov::float16(3), + ov::float16(3), ov::float16(5), ov::float16(6), ov::float16(9), + ov::float16(4), ov::float16(9), ov::float16(2), ov::float16(4), + ov::float16(5), ov::float16(5), ov::float16(3), ov::float16(1), + ov::float16(1), ov::float16(6), ov::float16(8), ov::float16(0), + ov::float16(5), ov::float16(5), ov::float16(10), ov::float16(8), + ov::float16(6), ov::float16(9), ov::float16(6), ov::float16(9), + ov::float16(1), ov::float16(2), ov::float16(7), ov::float16(1), + ov::float16(1), ov::float16(3), ov::float16(0), ov::float16(4), + ov::float16(0), ov::float16(7), ov::float16(10), ov::float16(2), + ov::float16(1), ov::float16(3), ov::float16(9), ov::float16(7), + ov::float16(1), ov::float16(7), ov::float16(4), ov::float16(4), + ov::float16(5), ov::float16(1), ov::float16(6), ov::float16(9), + ov::float16(6), ov::float16(10), ov::float16(6), ov::float16(1), + ov::float16(10), ov::float16(4), ov::float16(1), ov::float16(6), + ov::float16(2), ov::float16(5), ov::float16(5), ov::float16(10), + ov::float16(1), ov::float16(2), ov::float16(3), ov::float16(6), + ov::float16(1), ov::float16(7), ov::float16(6), ov::float16(8), }); set_values(input1, { - FLOAT16(0), FLOAT16(2), FLOAT16(4), FLOAT16(3), - FLOAT16(4), FLOAT16(0), FLOAT16(0), FLOAT16(1), - FLOAT16(4), FLOAT16(0), FLOAT16(1), FLOAT16(0), - FLOAT16(1), FLOAT16(0), FLOAT16(1), FLOAT16(1), - FLOAT16(3), FLOAT16(1), FLOAT16(4), FLOAT16(2), - FLOAT16(4), FLOAT16(2), FLOAT16(1), FLOAT16(3), - FLOAT16(2), FLOAT16(1), FLOAT16(2), FLOAT16(4), - FLOAT16(1), FLOAT16(0), FLOAT16(2), FLOAT16(3), - FLOAT16(4), FLOAT16(2), FLOAT16(2), FLOAT16(2), - FLOAT16(2), FLOAT16(0), FLOAT16(4), FLOAT16(0), - FLOAT16(3), FLOAT16(4), FLOAT16(3), FLOAT16(4), - FLOAT16(4), FLOAT16(1), FLOAT16(0), FLOAT16(3), - FLOAT16(2), FLOAT16(4), FLOAT16(4), FLOAT16(4), - FLOAT16(4), FLOAT16(0), FLOAT16(4), FLOAT16(0), - FLOAT16(1), FLOAT16(0), FLOAT16(0), FLOAT16(4), - FLOAT16(3), FLOAT16(0), FLOAT16(2), FLOAT16(4), + ov::float16(0), ov::float16(2), ov::float16(4), ov::float16(3), + ov::float16(4), ov::float16(0), ov::float16(0), ov::float16(1), + ov::float16(4), ov::float16(0), ov::float16(1), ov::float16(0), + ov::float16(1), ov::float16(0), ov::float16(1), ov::float16(1), + ov::float16(3), ov::float16(1), ov::float16(4), ov::float16(2), + ov::float16(4), ov::float16(2), ov::float16(1), ov::float16(3), + ov::float16(2), ov::float16(1), ov::float16(2), ov::float16(4), + ov::float16(1), ov::float16(0), ov::float16(2), ov::float16(3), + ov::float16(4), ov::float16(2), ov::float16(2), ov::float16(2), + ov::float16(2), ov::float16(0), ov::float16(4), ov::float16(0), + ov::float16(3), ov::float16(4), ov::float16(3), ov::float16(4), + ov::float16(4), ov::float16(1), ov::float16(0), ov::float16(3), + ov::float16(2), ov::float16(4), ov::float16(4), ov::float16(4), + ov::float16(4), ov::float16(0), ov::float16(4), ov::float16(0), + ov::float16(1), ov::float16(0), ov::float16(0), ov::float16(4), + ov::float16(3), ov::float16(0), ov::float16(2), ov::float16(4), }); std::vector expected_results = { - 
FLOAT16(0), FLOAT16(0), FLOAT16(2), FLOAT16(5), - FLOAT16(10), FLOAT16(2), FLOAT16(0), FLOAT16(10), - FLOAT16(3), FLOAT16(10), FLOAT16(1), FLOAT16(5), - FLOAT16(4), FLOAT16(0), FLOAT16(10), FLOAT16(8), - FLOAT16(2), FLOAT16(0), FLOAT16(2), FLOAT16(3), - FLOAT16(10), FLOAT16(8), FLOAT16(6), FLOAT16(1), - FLOAT16(2), FLOAT16(5), FLOAT16(7), FLOAT16(5), - FLOAT16(4), FLOAT16(0), FLOAT16(6), FLOAT16(3), - FLOAT16(10), FLOAT16(9), FLOAT16(6), FLOAT16(9), - FLOAT16(1), FLOAT16(6), FLOAT16(5), FLOAT16(7), - FLOAT16(5), FLOAT16(2), FLOAT16(6), FLOAT16(6), - FLOAT16(1), FLOAT16(5), FLOAT16(6), FLOAT16(1), - FLOAT16(6), FLOAT16(4), FLOAT16(1), FLOAT16(6), - FLOAT16(2), FLOAT16(6), FLOAT16(5), FLOAT16(7), - FLOAT16(1), FLOAT16(9), FLOAT16(2), FLOAT16(6), - FLOAT16(6), FLOAT16(5), FLOAT16(10), FLOAT16(8), + ov::float16(0), ov::float16(0), ov::float16(2), ov::float16(5), + ov::float16(10), ov::float16(2), ov::float16(0), ov::float16(10), + ov::float16(3), ov::float16(10), ov::float16(1), ov::float16(5), + ov::float16(4), ov::float16(0), ov::float16(10), ov::float16(8), + ov::float16(2), ov::float16(0), ov::float16(2), ov::float16(3), + ov::float16(10), ov::float16(8), ov::float16(6), ov::float16(1), + ov::float16(2), ov::float16(5), ov::float16(7), ov::float16(5), + ov::float16(4), ov::float16(0), ov::float16(6), ov::float16(3), + ov::float16(10), ov::float16(9), ov::float16(6), ov::float16(9), + ov::float16(1), ov::float16(6), ov::float16(5), ov::float16(7), + ov::float16(5), ov::float16(2), ov::float16(6), ov::float16(6), + ov::float16(1), ov::float16(5), ov::float16(6), ov::float16(1), + ov::float16(6), ov::float16(4), ov::float16(1), ov::float16(6), + ov::float16(2), ov::float16(6), ov::float16(5), ov::float16(7), + ov::float16(1), ov::float16(9), ov::float16(2), ov::float16(6), + ov::float16(6), ov::float16(5), ov::float16(10), ov::float16(8), }; DoTest(engine, input0, input1, expected_results, tensor(2, 2, 4, 4, 1), axis); @@ -456,133 +456,133 @@ TEST(gather_elements_gpu_fp16, d32843_i12843_a0) { auto input1 = engine.allocate_memory({ data_types::f16, format::bfzyx, { 1, 2, 8, 4, 3 } }); // indices set_values(input0, { - FLOAT16(0), FLOAT16(1), FLOAT16(8), FLOAT16(5), FLOAT16(5), FLOAT16(2), FLOAT16(0), FLOAT16(7), - FLOAT16(7), FLOAT16(10), FLOAT16(4), FLOAT16(5), FLOAT16(9), FLOAT16(0), FLOAT16(0), FLOAT16(5), - FLOAT16(7), FLOAT16(0), FLOAT16(4), FLOAT16(0), FLOAT16(4), FLOAT16(7), FLOAT16(6), FLOAT16(10), - FLOAT16(9), FLOAT16(5), FLOAT16(1), FLOAT16(7), FLOAT16(4), FLOAT16(7), FLOAT16(10), FLOAT16(8), - FLOAT16(2), FLOAT16(0), FLOAT16(8), FLOAT16(3), FLOAT16(6), FLOAT16(8), FLOAT16(10), FLOAT16(4), - FLOAT16(2), FLOAT16(10), FLOAT16(7), FLOAT16(8), FLOAT16(7), FLOAT16(0), FLOAT16(6), FLOAT16(9), - FLOAT16(2), FLOAT16(4), FLOAT16(8), FLOAT16(5), FLOAT16(2), FLOAT16(3), FLOAT16(3), FLOAT16(1), - FLOAT16(5), FLOAT16(9), FLOAT16(10), FLOAT16(0), FLOAT16(9), FLOAT16(5), FLOAT16(5), FLOAT16(3), - FLOAT16(10), FLOAT16(5), FLOAT16(2), FLOAT16(0), FLOAT16(10), FLOAT16(0), FLOAT16(5), FLOAT16(4), - FLOAT16(3), FLOAT16(10), FLOAT16(5), FLOAT16(5), FLOAT16(10), FLOAT16(0), FLOAT16(8), FLOAT16(8), - FLOAT16(9), FLOAT16(1), FLOAT16(0), FLOAT16(7), FLOAT16(9), FLOAT16(6), FLOAT16(8), FLOAT16(7), - FLOAT16(10), FLOAT16(9), FLOAT16(2), FLOAT16(3), FLOAT16(3), FLOAT16(5), FLOAT16(6), FLOAT16(9), - FLOAT16(4), FLOAT16(9), FLOAT16(2), FLOAT16(4), FLOAT16(5), FLOAT16(5), FLOAT16(3), FLOAT16(1), - FLOAT16(1), FLOAT16(6), FLOAT16(8), FLOAT16(0), FLOAT16(5), FLOAT16(5), FLOAT16(10), FLOAT16(8), - FLOAT16(6), 
FLOAT16(9), FLOAT16(6), FLOAT16(9), FLOAT16(1), FLOAT16(2), FLOAT16(7), FLOAT16(1), - FLOAT16(1), FLOAT16(3), FLOAT16(0), FLOAT16(4), FLOAT16(0), FLOAT16(7), FLOAT16(10), FLOAT16(2), - FLOAT16(1), FLOAT16(3), FLOAT16(9), FLOAT16(7), FLOAT16(1), FLOAT16(7), FLOAT16(4), FLOAT16(4), - FLOAT16(5), FLOAT16(1), FLOAT16(6), FLOAT16(9), FLOAT16(6), FLOAT16(10), FLOAT16(6), FLOAT16(1), - FLOAT16(10), FLOAT16(4), FLOAT16(1), FLOAT16(6), FLOAT16(2), FLOAT16(5), FLOAT16(5), FLOAT16(10), - FLOAT16(1), FLOAT16(2), FLOAT16(3), FLOAT16(6), FLOAT16(1), FLOAT16(7), FLOAT16(6), FLOAT16(8), - FLOAT16(2), FLOAT16(5), FLOAT16(4), FLOAT16(2), FLOAT16(0), FLOAT16(9), FLOAT16(4), FLOAT16(1), - FLOAT16(10), FLOAT16(4), FLOAT16(1), FLOAT16(9), FLOAT16(1), FLOAT16(1), FLOAT16(0), FLOAT16(4), - FLOAT16(2), FLOAT16(1), FLOAT16(8), FLOAT16(5), FLOAT16(3), FLOAT16(4), FLOAT16(8), FLOAT16(10), - FLOAT16(7), FLOAT16(2), FLOAT16(7), FLOAT16(9), FLOAT16(2), FLOAT16(9), FLOAT16(5), FLOAT16(5), - FLOAT16(6), FLOAT16(8), FLOAT16(8), FLOAT16(5), FLOAT16(10), FLOAT16(6), FLOAT16(4), FLOAT16(9), - FLOAT16(7), FLOAT16(7), FLOAT16(10), FLOAT16(10), FLOAT16(9), FLOAT16(3), FLOAT16(5), FLOAT16(5), - FLOAT16(1), FLOAT16(4), FLOAT16(6), FLOAT16(9), FLOAT16(4), FLOAT16(8), FLOAT16(9), FLOAT16(7), - FLOAT16(8), FLOAT16(7), FLOAT16(8), FLOAT16(0), FLOAT16(9), FLOAT16(5), FLOAT16(5), FLOAT16(0), - FLOAT16(7), FLOAT16(5), FLOAT16(7), FLOAT16(7), FLOAT16(2), FLOAT16(10), FLOAT16(9), FLOAT16(9), - FLOAT16(5), FLOAT16(1), FLOAT16(4), FLOAT16(10), FLOAT16(2), FLOAT16(4), FLOAT16(3), FLOAT16(5), - FLOAT16(9), FLOAT16(4), FLOAT16(5), FLOAT16(8), FLOAT16(4), FLOAT16(2), FLOAT16(10), FLOAT16(1), - FLOAT16(6), FLOAT16(6), FLOAT16(0), FLOAT16(0), FLOAT16(8), FLOAT16(8), FLOAT16(3), FLOAT16(4), - FLOAT16(7), FLOAT16(7), FLOAT16(2), FLOAT16(9), FLOAT16(7), FLOAT16(9), FLOAT16(1), FLOAT16(0), - FLOAT16(8), FLOAT16(6), FLOAT16(2), FLOAT16(2), FLOAT16(0), FLOAT16(4), FLOAT16(10), FLOAT16(10), - FLOAT16(4), FLOAT16(2), FLOAT16(7), FLOAT16(3), FLOAT16(8), FLOAT16(8), FLOAT16(4), FLOAT16(3), - FLOAT16(2), FLOAT16(0), FLOAT16(2), FLOAT16(10), FLOAT16(2), FLOAT16(9), FLOAT16(1), FLOAT16(4), - FLOAT16(6), FLOAT16(1), FLOAT16(9), FLOAT16(1), FLOAT16(10), FLOAT16(2), FLOAT16(2), FLOAT16(1), - FLOAT16(2), FLOAT16(6), FLOAT16(7), FLOAT16(8), FLOAT16(7), FLOAT16(8), FLOAT16(7), FLOAT16(6), - FLOAT16(0), FLOAT16(6), FLOAT16(2), FLOAT16(3), FLOAT16(7), FLOAT16(1), FLOAT16(8), FLOAT16(5), - FLOAT16(6), FLOAT16(6), FLOAT16(3), FLOAT16(7), FLOAT16(1), FLOAT16(1), FLOAT16(5), FLOAT16(9), - FLOAT16(8), FLOAT16(6), FLOAT16(8), FLOAT16(3), FLOAT16(1), FLOAT16(5), FLOAT16(3), FLOAT16(6), - FLOAT16(5), FLOAT16(4), FLOAT16(2), FLOAT16(4), FLOAT16(4), FLOAT16(4), FLOAT16(5), FLOAT16(4), - FLOAT16(3), FLOAT16(0), FLOAT16(4), FLOAT16(2), FLOAT16(7), FLOAT16(7), FLOAT16(5), FLOAT16(8), - FLOAT16(7), FLOAT16(10), FLOAT16(5), FLOAT16(10), FLOAT16(3), FLOAT16(5), FLOAT16(5), FLOAT16(7), - FLOAT16(4), FLOAT16(6), FLOAT16(10), FLOAT16(1), FLOAT16(7), FLOAT16(3), FLOAT16(5), FLOAT16(5), - FLOAT16(9), FLOAT16(0), FLOAT16(3), FLOAT16(7), FLOAT16(6), FLOAT16(10), FLOAT16(2), FLOAT16(10), - FLOAT16(2), FLOAT16(9), FLOAT16(7), FLOAT16(5), FLOAT16(8), FLOAT16(0), FLOAT16(1), FLOAT16(7), - FLOAT16(7), FLOAT16(4), FLOAT16(6), FLOAT16(8), FLOAT16(10), FLOAT16(7), FLOAT16(3), FLOAT16(8), - FLOAT16(1), FLOAT16(0), FLOAT16(5), FLOAT16(0), FLOAT16(1), FLOAT16(9), FLOAT16(8), FLOAT16(8), - FLOAT16(4), FLOAT16(0), FLOAT16(6), FLOAT16(5), FLOAT16(0), FLOAT16(5), FLOAT16(4), FLOAT16(2), - FLOAT16(4), 
FLOAT16(6), FLOAT16(7), FLOAT16(7), FLOAT16(5), FLOAT16(3), FLOAT16(8), FLOAT16(4), - FLOAT16(7), FLOAT16(3), FLOAT16(0), FLOAT16(1), FLOAT16(5), FLOAT16(8), FLOAT16(2), FLOAT16(0), - FLOAT16(0), FLOAT16(1), FLOAT16(7), FLOAT16(3), FLOAT16(0), FLOAT16(5), FLOAT16(5), FLOAT16(5), - FLOAT16(4), FLOAT16(1), FLOAT16(3), FLOAT16(9), FLOAT16(7), FLOAT16(6), FLOAT16(7), FLOAT16(3), - FLOAT16(0), FLOAT16(10), FLOAT16(5), FLOAT16(0), FLOAT16(9), FLOAT16(0), FLOAT16(4), FLOAT16(5), - FLOAT16(6), FLOAT16(8), FLOAT16(7), FLOAT16(5), FLOAT16(0), FLOAT16(1), FLOAT16(10), FLOAT16(2), - FLOAT16(3), FLOAT16(6), FLOAT16(6), FLOAT16(1), FLOAT16(6), FLOAT16(10), FLOAT16(3), FLOAT16(9), - FLOAT16(10), FLOAT16(2), FLOAT16(2), FLOAT16(4), FLOAT16(8), FLOAT16(9), FLOAT16(2), FLOAT16(8), - FLOAT16(7), FLOAT16(4), FLOAT16(2), FLOAT16(7), FLOAT16(1), FLOAT16(2), FLOAT16(1), FLOAT16(6), - FLOAT16(0), FLOAT16(1), FLOAT16(6), FLOAT16(4), FLOAT16(0), FLOAT16(7), FLOAT16(4), FLOAT16(9), - FLOAT16(1), FLOAT16(10), FLOAT16(0), FLOAT16(0), FLOAT16(5), FLOAT16(8), FLOAT16(10), FLOAT16(2), - FLOAT16(3), FLOAT16(8), FLOAT16(5), FLOAT16(8), FLOAT16(7), FLOAT16(7), FLOAT16(8), FLOAT16(0), - FLOAT16(2), FLOAT16(2), FLOAT16(6), FLOAT16(7), FLOAT16(6), FLOAT16(4), FLOAT16(2), FLOAT16(2), - FLOAT16(7), FLOAT16(1), FLOAT16(8), FLOAT16(1), FLOAT16(0), FLOAT16(7), FLOAT16(1), FLOAT16(10), - FLOAT16(5), FLOAT16(6), FLOAT16(10), FLOAT16(0), FLOAT16(6), FLOAT16(7), FLOAT16(5), FLOAT16(0), - FLOAT16(4), FLOAT16(5), FLOAT16(8), FLOAT16(0), FLOAT16(4), FLOAT16(10), FLOAT16(5), FLOAT16(3), - FLOAT16(4), FLOAT16(8), FLOAT16(2), FLOAT16(1), FLOAT16(4), FLOAT16(10), FLOAT16(10), FLOAT16(2), - FLOAT16(0), FLOAT16(1), FLOAT16(5), FLOAT16(1), FLOAT16(5), FLOAT16(1), FLOAT16(9), FLOAT16(4), - FLOAT16(4), FLOAT16(3), FLOAT16(7), FLOAT16(6), FLOAT16(9), FLOAT16(8), FLOAT16(9), FLOAT16(7), - FLOAT16(4), FLOAT16(10), FLOAT16(6), FLOAT16(3), FLOAT16(5), FLOAT16(5), FLOAT16(4), FLOAT16(2), - FLOAT16(0), FLOAT16(4), FLOAT16(5), FLOAT16(3), FLOAT16(1), FLOAT16(2), FLOAT16(8), FLOAT16(5), - FLOAT16(7), FLOAT16(9), FLOAT16(2), FLOAT16(7), FLOAT16(2), FLOAT16(4), FLOAT16(0), FLOAT16(5), + ov::float16(0), ov::float16(1), ov::float16(8), ov::float16(5), ov::float16(5), ov::float16(2), ov::float16(0), ov::float16(7), + ov::float16(7), ov::float16(10), ov::float16(4), ov::float16(5), ov::float16(9), ov::float16(0), ov::float16(0), ov::float16(5), + ov::float16(7), ov::float16(0), ov::float16(4), ov::float16(0), ov::float16(4), ov::float16(7), ov::float16(6), ov::float16(10), + ov::float16(9), ov::float16(5), ov::float16(1), ov::float16(7), ov::float16(4), ov::float16(7), ov::float16(10), ov::float16(8), + ov::float16(2), ov::float16(0), ov::float16(8), ov::float16(3), ov::float16(6), ov::float16(8), ov::float16(10), ov::float16(4), + ov::float16(2), ov::float16(10), ov::float16(7), ov::float16(8), ov::float16(7), ov::float16(0), ov::float16(6), ov::float16(9), + ov::float16(2), ov::float16(4), ov::float16(8), ov::float16(5), ov::float16(2), ov::float16(3), ov::float16(3), ov::float16(1), + ov::float16(5), ov::float16(9), ov::float16(10), ov::float16(0), ov::float16(9), ov::float16(5), ov::float16(5), ov::float16(3), + ov::float16(10), ov::float16(5), ov::float16(2), ov::float16(0), ov::float16(10), ov::float16(0), ov::float16(5), ov::float16(4), + ov::float16(3), ov::float16(10), ov::float16(5), ov::float16(5), ov::float16(10), ov::float16(0), ov::float16(8), ov::float16(8), + ov::float16(9), ov::float16(1), ov::float16(0), ov::float16(7), ov::float16(9), 
ov::float16(6), ov::float16(8), ov::float16(7), + ov::float16(10), ov::float16(9), ov::float16(2), ov::float16(3), ov::float16(3), ov::float16(5), ov::float16(6), ov::float16(9), + ov::float16(4), ov::float16(9), ov::float16(2), ov::float16(4), ov::float16(5), ov::float16(5), ov::float16(3), ov::float16(1), + ov::float16(1), ov::float16(6), ov::float16(8), ov::float16(0), ov::float16(5), ov::float16(5), ov::float16(10), ov::float16(8), + ov::float16(6), ov::float16(9), ov::float16(6), ov::float16(9), ov::float16(1), ov::float16(2), ov::float16(7), ov::float16(1), + ov::float16(1), ov::float16(3), ov::float16(0), ov::float16(4), ov::float16(0), ov::float16(7), ov::float16(10), ov::float16(2), + ov::float16(1), ov::float16(3), ov::float16(9), ov::float16(7), ov::float16(1), ov::float16(7), ov::float16(4), ov::float16(4), + ov::float16(5), ov::float16(1), ov::float16(6), ov::float16(9), ov::float16(6), ov::float16(10), ov::float16(6), ov::float16(1), + ov::float16(10), ov::float16(4), ov::float16(1), ov::float16(6), ov::float16(2), ov::float16(5), ov::float16(5), ov::float16(10), + ov::float16(1), ov::float16(2), ov::float16(3), ov::float16(6), ov::float16(1), ov::float16(7), ov::float16(6), ov::float16(8), + ov::float16(2), ov::float16(5), ov::float16(4), ov::float16(2), ov::float16(0), ov::float16(9), ov::float16(4), ov::float16(1), + ov::float16(10), ov::float16(4), ov::float16(1), ov::float16(9), ov::float16(1), ov::float16(1), ov::float16(0), ov::float16(4), + ov::float16(2), ov::float16(1), ov::float16(8), ov::float16(5), ov::float16(3), ov::float16(4), ov::float16(8), ov::float16(10), + ov::float16(7), ov::float16(2), ov::float16(7), ov::float16(9), ov::float16(2), ov::float16(9), ov::float16(5), ov::float16(5), + ov::float16(6), ov::float16(8), ov::float16(8), ov::float16(5), ov::float16(10), ov::float16(6), ov::float16(4), ov::float16(9), + ov::float16(7), ov::float16(7), ov::float16(10), ov::float16(10), ov::float16(9), ov::float16(3), ov::float16(5), ov::float16(5), + ov::float16(1), ov::float16(4), ov::float16(6), ov::float16(9), ov::float16(4), ov::float16(8), ov::float16(9), ov::float16(7), + ov::float16(8), ov::float16(7), ov::float16(8), ov::float16(0), ov::float16(9), ov::float16(5), ov::float16(5), ov::float16(0), + ov::float16(7), ov::float16(5), ov::float16(7), ov::float16(7), ov::float16(2), ov::float16(10), ov::float16(9), ov::float16(9), + ov::float16(5), ov::float16(1), ov::float16(4), ov::float16(10), ov::float16(2), ov::float16(4), ov::float16(3), ov::float16(5), + ov::float16(9), ov::float16(4), ov::float16(5), ov::float16(8), ov::float16(4), ov::float16(2), ov::float16(10), ov::float16(1), + ov::float16(6), ov::float16(6), ov::float16(0), ov::float16(0), ov::float16(8), ov::float16(8), ov::float16(3), ov::float16(4), + ov::float16(7), ov::float16(7), ov::float16(2), ov::float16(9), ov::float16(7), ov::float16(9), ov::float16(1), ov::float16(0), + ov::float16(8), ov::float16(6), ov::float16(2), ov::float16(2), ov::float16(0), ov::float16(4), ov::float16(10), ov::float16(10), + ov::float16(4), ov::float16(2), ov::float16(7), ov::float16(3), ov::float16(8), ov::float16(8), ov::float16(4), ov::float16(3), + ov::float16(2), ov::float16(0), ov::float16(2), ov::float16(10), ov::float16(2), ov::float16(9), ov::float16(1), ov::float16(4), + ov::float16(6), ov::float16(1), ov::float16(9), ov::float16(1), ov::float16(10), ov::float16(2), ov::float16(2), ov::float16(1), + ov::float16(2), ov::float16(6), ov::float16(7), ov::float16(8), ov::float16(7), ov::float16(8), 
ov::float16(7), ov::float16(6), + ov::float16(0), ov::float16(6), ov::float16(2), ov::float16(3), ov::float16(7), ov::float16(1), ov::float16(8), ov::float16(5), + ov::float16(6), ov::float16(6), ov::float16(3), ov::float16(7), ov::float16(1), ov::float16(1), ov::float16(5), ov::float16(9), + ov::float16(8), ov::float16(6), ov::float16(8), ov::float16(3), ov::float16(1), ov::float16(5), ov::float16(3), ov::float16(6), + ov::float16(5), ov::float16(4), ov::float16(2), ov::float16(4), ov::float16(4), ov::float16(4), ov::float16(5), ov::float16(4), + ov::float16(3), ov::float16(0), ov::float16(4), ov::float16(2), ov::float16(7), ov::float16(7), ov::float16(5), ov::float16(8), + ov::float16(7), ov::float16(10), ov::float16(5), ov::float16(10), ov::float16(3), ov::float16(5), ov::float16(5), ov::float16(7), + ov::float16(4), ov::float16(6), ov::float16(10), ov::float16(1), ov::float16(7), ov::float16(3), ov::float16(5), ov::float16(5), + ov::float16(9), ov::float16(0), ov::float16(3), ov::float16(7), ov::float16(6), ov::float16(10), ov::float16(2), ov::float16(10), + ov::float16(2), ov::float16(9), ov::float16(7), ov::float16(5), ov::float16(8), ov::float16(0), ov::float16(1), ov::float16(7), + ov::float16(7), ov::float16(4), ov::float16(6), ov::float16(8), ov::float16(10), ov::float16(7), ov::float16(3), ov::float16(8), + ov::float16(1), ov::float16(0), ov::float16(5), ov::float16(0), ov::float16(1), ov::float16(9), ov::float16(8), ov::float16(8), + ov::float16(4), ov::float16(0), ov::float16(6), ov::float16(5), ov::float16(0), ov::float16(5), ov::float16(4), ov::float16(2), + ov::float16(4), ov::float16(6), ov::float16(7), ov::float16(7), ov::float16(5), ov::float16(3), ov::float16(8), ov::float16(4), + ov::float16(7), ov::float16(3), ov::float16(0), ov::float16(1), ov::float16(5), ov::float16(8), ov::float16(2), ov::float16(0), + ov::float16(0), ov::float16(1), ov::float16(7), ov::float16(3), ov::float16(0), ov::float16(5), ov::float16(5), ov::float16(5), + ov::float16(4), ov::float16(1), ov::float16(3), ov::float16(9), ov::float16(7), ov::float16(6), ov::float16(7), ov::float16(3), + ov::float16(0), ov::float16(10), ov::float16(5), ov::float16(0), ov::float16(9), ov::float16(0), ov::float16(4), ov::float16(5), + ov::float16(6), ov::float16(8), ov::float16(7), ov::float16(5), ov::float16(0), ov::float16(1), ov::float16(10), ov::float16(2), + ov::float16(3), ov::float16(6), ov::float16(6), ov::float16(1), ov::float16(6), ov::float16(10), ov::float16(3), ov::float16(9), + ov::float16(10), ov::float16(2), ov::float16(2), ov::float16(4), ov::float16(8), ov::float16(9), ov::float16(2), ov::float16(8), + ov::float16(7), ov::float16(4), ov::float16(2), ov::float16(7), ov::float16(1), ov::float16(2), ov::float16(1), ov::float16(6), + ov::float16(0), ov::float16(1), ov::float16(6), ov::float16(4), ov::float16(0), ov::float16(7), ov::float16(4), ov::float16(9), + ov::float16(1), ov::float16(10), ov::float16(0), ov::float16(0), ov::float16(5), ov::float16(8), ov::float16(10), ov::float16(2), + ov::float16(3), ov::float16(8), ov::float16(5), ov::float16(8), ov::float16(7), ov::float16(7), ov::float16(8), ov::float16(0), + ov::float16(2), ov::float16(2), ov::float16(6), ov::float16(7), ov::float16(6), ov::float16(4), ov::float16(2), ov::float16(2), + ov::float16(7), ov::float16(1), ov::float16(8), ov::float16(1), ov::float16(0), ov::float16(7), ov::float16(1), ov::float16(10), + ov::float16(5), ov::float16(6), ov::float16(10), ov::float16(0), ov::float16(6), ov::float16(7), ov::float16(5), 
ov::float16(0), + ov::float16(4), ov::float16(5), ov::float16(8), ov::float16(0), ov::float16(4), ov::float16(10), ov::float16(5), ov::float16(3), + ov::float16(4), ov::float16(8), ov::float16(2), ov::float16(1), ov::float16(4), ov::float16(10), ov::float16(10), ov::float16(2), + ov::float16(0), ov::float16(1), ov::float16(5), ov::float16(1), ov::float16(5), ov::float16(1), ov::float16(9), ov::float16(4), + ov::float16(4), ov::float16(3), ov::float16(7), ov::float16(6), ov::float16(9), ov::float16(8), ov::float16(9), ov::float16(7), + ov::float16(4), ov::float16(10), ov::float16(6), ov::float16(3), ov::float16(5), ov::float16(5), ov::float16(4), ov::float16(2), + ov::float16(0), ov::float16(4), ov::float16(5), ov::float16(3), ov::float16(1), ov::float16(2), ov::float16(8), ov::float16(5), + ov::float16(7), ov::float16(9), ov::float16(2), ov::float16(7), ov::float16(2), ov::float16(4), ov::float16(0), ov::float16(5), }); set_values(input1, { - FLOAT16(0), FLOAT16(1), FLOAT16(2), FLOAT16(2), FLOAT16(2), FLOAT16(0), FLOAT16(0), FLOAT16(0), - FLOAT16(2), FLOAT16(0), FLOAT16(0), FLOAT16(0), FLOAT16(1), FLOAT16(0), FLOAT16(1), FLOAT16(1), - FLOAT16(2), FLOAT16(1), FLOAT16(2), FLOAT16(1), FLOAT16(2), FLOAT16(1), FLOAT16(0), FLOAT16(2), - FLOAT16(1), FLOAT16(0), FLOAT16(1), FLOAT16(2), FLOAT16(0), FLOAT16(0), FLOAT16(1), FLOAT16(2), - FLOAT16(2), FLOAT16(1), FLOAT16(1), FLOAT16(1), FLOAT16(1), FLOAT16(0), FLOAT16(2), FLOAT16(0), - FLOAT16(2), FLOAT16(2), FLOAT16(2), FLOAT16(2), FLOAT16(2), FLOAT16(0), FLOAT16(0), FLOAT16(2), - FLOAT16(1), FLOAT16(2), FLOAT16(2), FLOAT16(2), FLOAT16(2), FLOAT16(0), FLOAT16(2), FLOAT16(0), - FLOAT16(0), FLOAT16(0), FLOAT16(0), FLOAT16(2), FLOAT16(2), FLOAT16(0), FLOAT16(1), FLOAT16(1), - FLOAT16(2), FLOAT16(2), FLOAT16(1), FLOAT16(1), FLOAT16(0), FLOAT16(2), FLOAT16(0), FLOAT16(0), - FLOAT16(0), FLOAT16(2), FLOAT16(2), FLOAT16(2), FLOAT16(1), FLOAT16(0), FLOAT16(0), FLOAT16(2), - FLOAT16(1), FLOAT16(2), FLOAT16(1), FLOAT16(2), FLOAT16(0), FLOAT16(0), FLOAT16(1), FLOAT16(2), - FLOAT16(0), FLOAT16(0), FLOAT16(0), FLOAT16(1), FLOAT16(1), FLOAT16(2), FLOAT16(0), FLOAT16(0), - FLOAT16(2), FLOAT16(0), FLOAT16(0), FLOAT16(0), FLOAT16(1), FLOAT16(0), FLOAT16(0), FLOAT16(2), - FLOAT16(2), FLOAT16(0), FLOAT16(2), FLOAT16(0), FLOAT16(0), FLOAT16(1), FLOAT16(1), FLOAT16(0), - FLOAT16(1), FLOAT16(1), FLOAT16(1), FLOAT16(0), FLOAT16(2), FLOAT16(1), FLOAT16(0), FLOAT16(2), - FLOAT16(0), FLOAT16(0), FLOAT16(1), FLOAT16(1), FLOAT16(2), FLOAT16(2), FLOAT16(1), FLOAT16(1), - FLOAT16(1), FLOAT16(0), FLOAT16(2), FLOAT16(1), FLOAT16(0), FLOAT16(1), FLOAT16(0), FLOAT16(1), - FLOAT16(2), FLOAT16(2), FLOAT16(0), FLOAT16(1), FLOAT16(2), FLOAT16(2), FLOAT16(1), FLOAT16(0), - FLOAT16(1), FLOAT16(0), FLOAT16(2), FLOAT16(2), FLOAT16(1), FLOAT16(1), FLOAT16(1), FLOAT16(0), - FLOAT16(1), FLOAT16(2), FLOAT16(1), FLOAT16(1), FLOAT16(1), FLOAT16(2), FLOAT16(1), FLOAT16(2), - FLOAT16(0), FLOAT16(2), FLOAT16(2), FLOAT16(1), FLOAT16(1), FLOAT16(2), FLOAT16(1), FLOAT16(1), - FLOAT16(2), FLOAT16(0), FLOAT16(1), FLOAT16(2), FLOAT16(2), FLOAT16(1), FLOAT16(2), FLOAT16(2), - FLOAT16(0), FLOAT16(1), FLOAT16(1), FLOAT16(1), FLOAT16(0), FLOAT16(0), FLOAT16(2), FLOAT16(2), - FLOAT16(0), FLOAT16(1), FLOAT16(1), FLOAT16(1), FLOAT16(2), FLOAT16(1), FLOAT16(1), FLOAT16(2), + ov::float16(0), ov::float16(1), ov::float16(2), ov::float16(2), ov::float16(2), ov::float16(0), ov::float16(0), ov::float16(0), + ov::float16(2), ov::float16(0), ov::float16(0), ov::float16(0), ov::float16(1), ov::float16(0), ov::float16(1), 
ov::float16(1), + ov::float16(2), ov::float16(1), ov::float16(2), ov::float16(1), ov::float16(2), ov::float16(1), ov::float16(0), ov::float16(2), + ov::float16(1), ov::float16(0), ov::float16(1), ov::float16(2), ov::float16(0), ov::float16(0), ov::float16(1), ov::float16(2), + ov::float16(2), ov::float16(1), ov::float16(1), ov::float16(1), ov::float16(1), ov::float16(0), ov::float16(2), ov::float16(0), + ov::float16(2), ov::float16(2), ov::float16(2), ov::float16(2), ov::float16(2), ov::float16(0), ov::float16(0), ov::float16(2), + ov::float16(1), ov::float16(2), ov::float16(2), ov::float16(2), ov::float16(2), ov::float16(0), ov::float16(2), ov::float16(0), + ov::float16(0), ov::float16(0), ov::float16(0), ov::float16(2), ov::float16(2), ov::float16(0), ov::float16(1), ov::float16(1), + ov::float16(2), ov::float16(2), ov::float16(1), ov::float16(1), ov::float16(0), ov::float16(2), ov::float16(0), ov::float16(0), + ov::float16(0), ov::float16(2), ov::float16(2), ov::float16(2), ov::float16(1), ov::float16(0), ov::float16(0), ov::float16(2), + ov::float16(1), ov::float16(2), ov::float16(1), ov::float16(2), ov::float16(0), ov::float16(0), ov::float16(1), ov::float16(2), + ov::float16(0), ov::float16(0), ov::float16(0), ov::float16(1), ov::float16(1), ov::float16(2), ov::float16(0), ov::float16(0), + ov::float16(2), ov::float16(0), ov::float16(0), ov::float16(0), ov::float16(1), ov::float16(0), ov::float16(0), ov::float16(2), + ov::float16(2), ov::float16(0), ov::float16(2), ov::float16(0), ov::float16(0), ov::float16(1), ov::float16(1), ov::float16(0), + ov::float16(1), ov::float16(1), ov::float16(1), ov::float16(0), ov::float16(2), ov::float16(1), ov::float16(0), ov::float16(2), + ov::float16(0), ov::float16(0), ov::float16(1), ov::float16(1), ov::float16(2), ov::float16(2), ov::float16(1), ov::float16(1), + ov::float16(1), ov::float16(0), ov::float16(2), ov::float16(1), ov::float16(0), ov::float16(1), ov::float16(0), ov::float16(1), + ov::float16(2), ov::float16(2), ov::float16(0), ov::float16(1), ov::float16(2), ov::float16(2), ov::float16(1), ov::float16(0), + ov::float16(1), ov::float16(0), ov::float16(2), ov::float16(2), ov::float16(1), ov::float16(1), ov::float16(1), ov::float16(0), + ov::float16(1), ov::float16(2), ov::float16(1), ov::float16(1), ov::float16(1), ov::float16(2), ov::float16(1), ov::float16(2), + ov::float16(0), ov::float16(2), ov::float16(2), ov::float16(1), ov::float16(1), ov::float16(2), ov::float16(1), ov::float16(1), + ov::float16(2), ov::float16(0), ov::float16(1), ov::float16(2), ov::float16(2), ov::float16(1), ov::float16(2), ov::float16(2), + ov::float16(0), ov::float16(1), ov::float16(1), ov::float16(1), ov::float16(0), ov::float16(0), ov::float16(2), ov::float16(2), + ov::float16(0), ov::float16(1), ov::float16(1), ov::float16(1), ov::float16(2), ov::float16(1), ov::float16(1), ov::float16(2), }); std::vector expected_results = { - FLOAT16(0), FLOAT16(8), FLOAT16(5), FLOAT16(0), FLOAT16(1), FLOAT16(2), FLOAT16(0), FLOAT16(7), - FLOAT16(4), FLOAT16(10), FLOAT16(4), FLOAT16(5), FLOAT16(9), FLOAT16(0), FLOAT16(5), FLOAT16(5), - FLOAT16(4), FLOAT16(4), FLOAT16(7), FLOAT16(9), FLOAT16(5), FLOAT16(8), FLOAT16(6), FLOAT16(4), - FLOAT16(8), FLOAT16(5), FLOAT16(8), FLOAT16(1), FLOAT16(4), FLOAT16(7), FLOAT16(5), FLOAT16(0), - FLOAT16(0), FLOAT16(5), FLOAT16(7), FLOAT16(7), FLOAT16(2), FLOAT16(8), FLOAT16(5), FLOAT16(4), - FLOAT16(4), FLOAT16(1), FLOAT16(3), FLOAT16(9), FLOAT16(7), FLOAT16(0), FLOAT16(6), FLOAT16(3), - FLOAT16(9), FLOAT16(10), FLOAT16(5), FLOAT16(0), 
FLOAT16(9), FLOAT16(3), FLOAT16(4), FLOAT16(1), - FLOAT16(5), FLOAT16(9), FLOAT16(10), FLOAT16(5), FLOAT16(0), FLOAT16(5), FLOAT16(3), FLOAT16(4), - FLOAT16(3), FLOAT16(6), FLOAT16(2), FLOAT16(9), FLOAT16(10), FLOAT16(10), FLOAT16(5), FLOAT16(4), - FLOAT16(3), FLOAT16(2), FLOAT16(2), FLOAT16(4), FLOAT16(0), FLOAT16(0), FLOAT16(8), FLOAT16(8), - FLOAT16(4), FLOAT16(4), FLOAT16(7), FLOAT16(7), FLOAT16(9), FLOAT16(6), FLOAT16(4), FLOAT16(6), - FLOAT16(10), FLOAT16(9), FLOAT16(2), FLOAT16(10), FLOAT16(2), FLOAT16(7), FLOAT16(6), FLOAT16(9), - FLOAT16(1), FLOAT16(9), FLOAT16(2), FLOAT16(4), FLOAT16(10), FLOAT16(5), FLOAT16(3), FLOAT16(2), - FLOAT16(3), FLOAT16(6), FLOAT16(5), FLOAT16(0), FLOAT16(5), FLOAT16(8), FLOAT16(7), FLOAT16(8), - FLOAT16(0), FLOAT16(6), FLOAT16(2), FLOAT16(9), FLOAT16(6), FLOAT16(1), FLOAT16(7), FLOAT16(2), - FLOAT16(1), FLOAT16(3), FLOAT16(3), FLOAT16(7), FLOAT16(0), FLOAT16(7), FLOAT16(5), FLOAT16(9), - FLOAT16(8), FLOAT16(3), FLOAT16(10), FLOAT16(3), FLOAT16(1), FLOAT16(5), FLOAT16(4), FLOAT16(6), - FLOAT16(4), FLOAT16(5), FLOAT16(6), FLOAT16(4), FLOAT16(4), FLOAT16(10), FLOAT16(5), FLOAT16(1), - FLOAT16(3), FLOAT16(4), FLOAT16(2), FLOAT16(1), FLOAT16(7), FLOAT16(7), FLOAT16(5), FLOAT16(10), - FLOAT16(7), FLOAT16(1), FLOAT16(5), FLOAT16(10), FLOAT16(3), FLOAT16(1), FLOAT16(5), FLOAT16(4), - FLOAT16(2), FLOAT16(3), FLOAT16(7), FLOAT16(1), FLOAT16(7), FLOAT16(8), FLOAT16(5), FLOAT16(5), - FLOAT16(4), FLOAT16(4), FLOAT16(3), FLOAT16(3), FLOAT16(5), FLOAT16(10), FLOAT16(4), FLOAT16(2), - FLOAT16(2), FLOAT16(9), FLOAT16(7), FLOAT16(5), FLOAT16(3), FLOAT16(4), FLOAT16(8), FLOAT16(5), - FLOAT16(7), FLOAT16(4), FLOAT16(6), FLOAT16(8), FLOAT16(2), FLOAT16(7), FLOAT16(3), FLOAT16(5), + ov::float16(0), ov::float16(8), ov::float16(5), ov::float16(0), ov::float16(1), ov::float16(2), ov::float16(0), ov::float16(7), + ov::float16(4), ov::float16(10), ov::float16(4), ov::float16(5), ov::float16(9), ov::float16(0), ov::float16(5), ov::float16(5), + ov::float16(4), ov::float16(4), ov::float16(7), ov::float16(9), ov::float16(5), ov::float16(8), ov::float16(6), ov::float16(4), + ov::float16(8), ov::float16(5), ov::float16(8), ov::float16(1), ov::float16(4), ov::float16(7), ov::float16(5), ov::float16(0), + ov::float16(0), ov::float16(5), ov::float16(7), ov::float16(7), ov::float16(2), ov::float16(8), ov::float16(5), ov::float16(4), + ov::float16(4), ov::float16(1), ov::float16(3), ov::float16(9), ov::float16(7), ov::float16(0), ov::float16(6), ov::float16(3), + ov::float16(9), ov::float16(10), ov::float16(5), ov::float16(0), ov::float16(9), ov::float16(3), ov::float16(4), ov::float16(1), + ov::float16(5), ov::float16(9), ov::float16(10), ov::float16(5), ov::float16(0), ov::float16(5), ov::float16(3), ov::float16(4), + ov::float16(3), ov::float16(6), ov::float16(2), ov::float16(9), ov::float16(10), ov::float16(10), ov::float16(5), ov::float16(4), + ov::float16(3), ov::float16(2), ov::float16(2), ov::float16(4), ov::float16(0), ov::float16(0), ov::float16(8), ov::float16(8), + ov::float16(4), ov::float16(4), ov::float16(7), ov::float16(7), ov::float16(9), ov::float16(6), ov::float16(4), ov::float16(6), + ov::float16(10), ov::float16(9), ov::float16(2), ov::float16(10), ov::float16(2), ov::float16(7), ov::float16(6), ov::float16(9), + ov::float16(1), ov::float16(9), ov::float16(2), ov::float16(4), ov::float16(10), ov::float16(5), ov::float16(3), ov::float16(2), + ov::float16(3), ov::float16(6), ov::float16(5), ov::float16(0), ov::float16(5), ov::float16(8), ov::float16(7), ov::float16(8), + 
ov::float16(0), ov::float16(6), ov::float16(2), ov::float16(9), ov::float16(6), ov::float16(1), ov::float16(7), ov::float16(2), + ov::float16(1), ov::float16(3), ov::float16(3), ov::float16(7), ov::float16(0), ov::float16(7), ov::float16(5), ov::float16(9), + ov::float16(8), ov::float16(3), ov::float16(10), ov::float16(3), ov::float16(1), ov::float16(5), ov::float16(4), ov::float16(6), + ov::float16(4), ov::float16(5), ov::float16(6), ov::float16(4), ov::float16(4), ov::float16(10), ov::float16(5), ov::float16(1), + ov::float16(3), ov::float16(4), ov::float16(2), ov::float16(1), ov::float16(7), ov::float16(7), ov::float16(5), ov::float16(10), + ov::float16(7), ov::float16(1), ov::float16(5), ov::float16(10), ov::float16(3), ov::float16(1), ov::float16(5), ov::float16(4), + ov::float16(2), ov::float16(3), ov::float16(7), ov::float16(1), ov::float16(7), ov::float16(8), ov::float16(5), ov::float16(5), + ov::float16(4), ov::float16(4), ov::float16(3), ov::float16(3), ov::float16(5), ov::float16(10), ov::float16(4), ov::float16(2), + ov::float16(2), ov::float16(9), ov::float16(7), ov::float16(5), ov::float16(3), ov::float16(4), ov::float16(8), ov::float16(5), + ov::float16(7), ov::float16(4), ov::float16(6), ov::float16(8), ov::float16(2), ov::float16(7), ov::float16(3), ov::float16(5), }; DoTest(engine, input0, input1, expected_results, tensor(1, 2, 8, 4, 3), axis); @@ -596,396 +596,396 @@ TEST(gather_elements_gpu_fp16, d223442_i226442_a5) { auto input1 = engine.allocate_memory({ data_types::f16, format::bfwzyx, { 2, 2, 6, 4, 4, 2 } }); // indices set_values(input0, { - FLOAT16(0), FLOAT16(1), FLOAT16(8), - FLOAT16(5), FLOAT16(5), FLOAT16(2), - FLOAT16(0), FLOAT16(7), FLOAT16(7), - FLOAT16(10), FLOAT16(4), FLOAT16(5), - FLOAT16(9), FLOAT16(0), FLOAT16(0), - FLOAT16(5), FLOAT16(7), FLOAT16(0), - FLOAT16(4), FLOAT16(0), FLOAT16(4), - FLOAT16(7), FLOAT16(6), FLOAT16(10), - FLOAT16(9), FLOAT16(5), FLOAT16(1), - FLOAT16(7), FLOAT16(4), FLOAT16(7), - FLOAT16(10), FLOAT16(8), FLOAT16(2), - FLOAT16(0), FLOAT16(8), FLOAT16(3), - FLOAT16(6), FLOAT16(8), FLOAT16(10), - FLOAT16(4), FLOAT16(2), FLOAT16(10), - FLOAT16(7), FLOAT16(8), FLOAT16(7), - FLOAT16(0), FLOAT16(6), FLOAT16(9), - FLOAT16(2), FLOAT16(4), FLOAT16(8), - FLOAT16(5), FLOAT16(2), FLOAT16(3), - FLOAT16(3), FLOAT16(1), FLOAT16(5), - FLOAT16(9), FLOAT16(10), FLOAT16(0), - FLOAT16(9), FLOAT16(5), FLOAT16(5), - FLOAT16(3), FLOAT16(10), FLOAT16(5), - FLOAT16(2), FLOAT16(0), FLOAT16(10), - FLOAT16(0), FLOAT16(5), FLOAT16(4), - FLOAT16(3), FLOAT16(10), FLOAT16(5), - FLOAT16(5), FLOAT16(10), FLOAT16(0), - FLOAT16(8), FLOAT16(8), FLOAT16(9), - FLOAT16(1), FLOAT16(0), FLOAT16(7), - FLOAT16(9), FLOAT16(6), FLOAT16(8), - FLOAT16(7), FLOAT16(10), FLOAT16(9), - FLOAT16(2), FLOAT16(3), FLOAT16(3), - FLOAT16(5), FLOAT16(6), FLOAT16(9), - FLOAT16(4), FLOAT16(9), FLOAT16(2), - FLOAT16(4), FLOAT16(5), FLOAT16(5), - FLOAT16(3), FLOAT16(1), FLOAT16(1), - FLOAT16(6), FLOAT16(8), FLOAT16(0), - FLOAT16(5), FLOAT16(5), FLOAT16(10), - FLOAT16(8), FLOAT16(6), FLOAT16(9), - FLOAT16(6), FLOAT16(9), FLOAT16(1), - FLOAT16(2), FLOAT16(7), FLOAT16(1), - FLOAT16(1), FLOAT16(3), FLOAT16(0), - FLOAT16(4), FLOAT16(0), FLOAT16(7), - FLOAT16(10), FLOAT16(2), FLOAT16(1), - FLOAT16(3), FLOAT16(9), FLOAT16(7), - FLOAT16(1), FLOAT16(7), FLOAT16(4), - FLOAT16(4), FLOAT16(5), FLOAT16(1), - FLOAT16(6), FLOAT16(9), FLOAT16(6), - FLOAT16(10), FLOAT16(6), FLOAT16(1), - FLOAT16(10), FLOAT16(4), FLOAT16(1), - FLOAT16(6), FLOAT16(2), FLOAT16(5), - FLOAT16(5), FLOAT16(10), FLOAT16(1), - 
FLOAT16(2), FLOAT16(3), FLOAT16(6), - FLOAT16(1), FLOAT16(7), FLOAT16(6), - FLOAT16(8), FLOAT16(2), FLOAT16(5), - FLOAT16(4), FLOAT16(2), FLOAT16(0), - FLOAT16(9), FLOAT16(4), FLOAT16(1), - FLOAT16(10), FLOAT16(4), FLOAT16(1), - FLOAT16(9), FLOAT16(1), FLOAT16(1), - FLOAT16(0), FLOAT16(4), FLOAT16(2), - FLOAT16(1), FLOAT16(8), FLOAT16(5), - FLOAT16(3), FLOAT16(4), FLOAT16(8), - FLOAT16(10), FLOAT16(7), FLOAT16(2), - FLOAT16(7), FLOAT16(9), FLOAT16(2), - FLOAT16(9), FLOAT16(5), FLOAT16(5), - FLOAT16(6), FLOAT16(8), FLOAT16(8), - FLOAT16(5), FLOAT16(10), FLOAT16(6), - FLOAT16(4), FLOAT16(9), FLOAT16(7), - FLOAT16(7), FLOAT16(10), FLOAT16(10), - FLOAT16(9), FLOAT16(3), FLOAT16(5), - FLOAT16(5), FLOAT16(1), FLOAT16(4), - FLOAT16(6), FLOAT16(9), FLOAT16(4), - FLOAT16(8), FLOAT16(9), FLOAT16(7), - FLOAT16(8), FLOAT16(7), FLOAT16(8), - FLOAT16(0), FLOAT16(9), FLOAT16(5), - FLOAT16(5), FLOAT16(0), FLOAT16(7), - FLOAT16(5), FLOAT16(7), FLOAT16(7), - FLOAT16(2), FLOAT16(10), FLOAT16(9), - FLOAT16(9), FLOAT16(5), FLOAT16(1), - FLOAT16(4), FLOAT16(10), FLOAT16(2), - FLOAT16(4), FLOAT16(3), FLOAT16(5), - FLOAT16(9), FLOAT16(4), FLOAT16(5), - FLOAT16(8), FLOAT16(4), FLOAT16(2), - FLOAT16(10), FLOAT16(1), FLOAT16(6), - FLOAT16(6), FLOAT16(0), FLOAT16(0), - FLOAT16(8), FLOAT16(8), FLOAT16(3), - FLOAT16(4), FLOAT16(7), FLOAT16(7), - FLOAT16(2), FLOAT16(9), FLOAT16(7), - FLOAT16(9), FLOAT16(1), FLOAT16(0), - FLOAT16(8), FLOAT16(6), FLOAT16(2), - FLOAT16(2), FLOAT16(0), FLOAT16(4), - FLOAT16(10), FLOAT16(10), FLOAT16(4), - FLOAT16(2), FLOAT16(7), FLOAT16(3), - FLOAT16(8), FLOAT16(8), FLOAT16(4), - FLOAT16(3), FLOAT16(2), FLOAT16(0), - FLOAT16(2), FLOAT16(10), FLOAT16(2), - FLOAT16(9), FLOAT16(1), FLOAT16(4), - FLOAT16(6), FLOAT16(1), FLOAT16(9), - FLOAT16(1), FLOAT16(10), FLOAT16(2), - FLOAT16(2), FLOAT16(1), FLOAT16(2), - FLOAT16(6), FLOAT16(7), FLOAT16(8), - FLOAT16(7), FLOAT16(8), FLOAT16(7), - FLOAT16(6), FLOAT16(0), FLOAT16(6), - FLOAT16(2), FLOAT16(3), FLOAT16(7), - FLOAT16(1), FLOAT16(8), FLOAT16(5), - FLOAT16(6), FLOAT16(6), FLOAT16(3), - FLOAT16(7), FLOAT16(1), FLOAT16(1), - FLOAT16(5), FLOAT16(9), FLOAT16(8), - FLOAT16(6), FLOAT16(8), FLOAT16(3), - FLOAT16(1), FLOAT16(5), FLOAT16(3), - FLOAT16(6), FLOAT16(5), FLOAT16(4), - FLOAT16(2), FLOAT16(4), FLOAT16(4), - FLOAT16(4), FLOAT16(5), FLOAT16(4), - FLOAT16(3), FLOAT16(0), FLOAT16(4), - FLOAT16(2), FLOAT16(7), FLOAT16(7), - FLOAT16(5), FLOAT16(8), FLOAT16(7), - FLOAT16(10), FLOAT16(5), FLOAT16(10), - FLOAT16(3), FLOAT16(5), FLOAT16(5), - FLOAT16(7), FLOAT16(4), FLOAT16(6), - FLOAT16(10), FLOAT16(1), FLOAT16(7), - FLOAT16(3), FLOAT16(5), FLOAT16(5), - FLOAT16(9), FLOAT16(0), FLOAT16(3), - FLOAT16(7), FLOAT16(6), FLOAT16(10), - FLOAT16(2), FLOAT16(10), FLOAT16(2), - FLOAT16(9), FLOAT16(7), FLOAT16(5), - FLOAT16(8), FLOAT16(0), FLOAT16(1), - FLOAT16(7), FLOAT16(7), FLOAT16(4), - FLOAT16(6), FLOAT16(8), FLOAT16(10), - FLOAT16(7), FLOAT16(3), FLOAT16(8), + ov::float16(0), ov::float16(1), ov::float16(8), + ov::float16(5), ov::float16(5), ov::float16(2), + ov::float16(0), ov::float16(7), ov::float16(7), + ov::float16(10), ov::float16(4), ov::float16(5), + ov::float16(9), ov::float16(0), ov::float16(0), + ov::float16(5), ov::float16(7), ov::float16(0), + ov::float16(4), ov::float16(0), ov::float16(4), + ov::float16(7), ov::float16(6), ov::float16(10), + ov::float16(9), ov::float16(5), ov::float16(1), + ov::float16(7), ov::float16(4), ov::float16(7), + ov::float16(10), ov::float16(8), ov::float16(2), + ov::float16(0), ov::float16(8), ov::float16(3), + 
ov::float16(6), ov::float16(8), ov::float16(10), + ov::float16(4), ov::float16(2), ov::float16(10), + ov::float16(7), ov::float16(8), ov::float16(7), + ov::float16(0), ov::float16(6), ov::float16(9), + ov::float16(2), ov::float16(4), ov::float16(8), + ov::float16(5), ov::float16(2), ov::float16(3), + ov::float16(3), ov::float16(1), ov::float16(5), + ov::float16(9), ov::float16(10), ov::float16(0), + ov::float16(9), ov::float16(5), ov::float16(5), + ov::float16(3), ov::float16(10), ov::float16(5), + ov::float16(2), ov::float16(0), ov::float16(10), + ov::float16(0), ov::float16(5), ov::float16(4), + ov::float16(3), ov::float16(10), ov::float16(5), + ov::float16(5), ov::float16(10), ov::float16(0), + ov::float16(8), ov::float16(8), ov::float16(9), + ov::float16(1), ov::float16(0), ov::float16(7), + ov::float16(9), ov::float16(6), ov::float16(8), + ov::float16(7), ov::float16(10), ov::float16(9), + ov::float16(2), ov::float16(3), ov::float16(3), + ov::float16(5), ov::float16(6), ov::float16(9), + ov::float16(4), ov::float16(9), ov::float16(2), + ov::float16(4), ov::float16(5), ov::float16(5), + ov::float16(3), ov::float16(1), ov::float16(1), + ov::float16(6), ov::float16(8), ov::float16(0), + ov::float16(5), ov::float16(5), ov::float16(10), + ov::float16(8), ov::float16(6), ov::float16(9), + ov::float16(6), ov::float16(9), ov::float16(1), + ov::float16(2), ov::float16(7), ov::float16(1), + ov::float16(1), ov::float16(3), ov::float16(0), + ov::float16(4), ov::float16(0), ov::float16(7), + ov::float16(10), ov::float16(2), ov::float16(1), + ov::float16(3), ov::float16(9), ov::float16(7), + ov::float16(1), ov::float16(7), ov::float16(4), + ov::float16(4), ov::float16(5), ov::float16(1), + ov::float16(6), ov::float16(9), ov::float16(6), + ov::float16(10), ov::float16(6), ov::float16(1), + ov::float16(10), ov::float16(4), ov::float16(1), + ov::float16(6), ov::float16(2), ov::float16(5), + ov::float16(5), ov::float16(10), ov::float16(1), + ov::float16(2), ov::float16(3), ov::float16(6), + ov::float16(1), ov::float16(7), ov::float16(6), + ov::float16(8), ov::float16(2), ov::float16(5), + ov::float16(4), ov::float16(2), ov::float16(0), + ov::float16(9), ov::float16(4), ov::float16(1), + ov::float16(10), ov::float16(4), ov::float16(1), + ov::float16(9), ov::float16(1), ov::float16(1), + ov::float16(0), ov::float16(4), ov::float16(2), + ov::float16(1), ov::float16(8), ov::float16(5), + ov::float16(3), ov::float16(4), ov::float16(8), + ov::float16(10), ov::float16(7), ov::float16(2), + ov::float16(7), ov::float16(9), ov::float16(2), + ov::float16(9), ov::float16(5), ov::float16(5), + ov::float16(6), ov::float16(8), ov::float16(8), + ov::float16(5), ov::float16(10), ov::float16(6), + ov::float16(4), ov::float16(9), ov::float16(7), + ov::float16(7), ov::float16(10), ov::float16(10), + ov::float16(9), ov::float16(3), ov::float16(5), + ov::float16(5), ov::float16(1), ov::float16(4), + ov::float16(6), ov::float16(9), ov::float16(4), + ov::float16(8), ov::float16(9), ov::float16(7), + ov::float16(8), ov::float16(7), ov::float16(8), + ov::float16(0), ov::float16(9), ov::float16(5), + ov::float16(5), ov::float16(0), ov::float16(7), + ov::float16(5), ov::float16(7), ov::float16(7), + ov::float16(2), ov::float16(10), ov::float16(9), + ov::float16(9), ov::float16(5), ov::float16(1), + ov::float16(4), ov::float16(10), ov::float16(2), + ov::float16(4), ov::float16(3), ov::float16(5), + ov::float16(9), ov::float16(4), ov::float16(5), + ov::float16(8), ov::float16(4), ov::float16(2), + ov::float16(10), ov::float16(1), 
ov::float16(6), + ov::float16(6), ov::float16(0), ov::float16(0), + ov::float16(8), ov::float16(8), ov::float16(3), + ov::float16(4), ov::float16(7), ov::float16(7), + ov::float16(2), ov::float16(9), ov::float16(7), + ov::float16(9), ov::float16(1), ov::float16(0), + ov::float16(8), ov::float16(6), ov::float16(2), + ov::float16(2), ov::float16(0), ov::float16(4), + ov::float16(10), ov::float16(10), ov::float16(4), + ov::float16(2), ov::float16(7), ov::float16(3), + ov::float16(8), ov::float16(8), ov::float16(4), + ov::float16(3), ov::float16(2), ov::float16(0), + ov::float16(2), ov::float16(10), ov::float16(2), + ov::float16(9), ov::float16(1), ov::float16(4), + ov::float16(6), ov::float16(1), ov::float16(9), + ov::float16(1), ov::float16(10), ov::float16(2), + ov::float16(2), ov::float16(1), ov::float16(2), + ov::float16(6), ov::float16(7), ov::float16(8), + ov::float16(7), ov::float16(8), ov::float16(7), + ov::float16(6), ov::float16(0), ov::float16(6), + ov::float16(2), ov::float16(3), ov::float16(7), + ov::float16(1), ov::float16(8), ov::float16(5), + ov::float16(6), ov::float16(6), ov::float16(3), + ov::float16(7), ov::float16(1), ov::float16(1), + ov::float16(5), ov::float16(9), ov::float16(8), + ov::float16(6), ov::float16(8), ov::float16(3), + ov::float16(1), ov::float16(5), ov::float16(3), + ov::float16(6), ov::float16(5), ov::float16(4), + ov::float16(2), ov::float16(4), ov::float16(4), + ov::float16(4), ov::float16(5), ov::float16(4), + ov::float16(3), ov::float16(0), ov::float16(4), + ov::float16(2), ov::float16(7), ov::float16(7), + ov::float16(5), ov::float16(8), ov::float16(7), + ov::float16(10), ov::float16(5), ov::float16(10), + ov::float16(3), ov::float16(5), ov::float16(5), + ov::float16(7), ov::float16(4), ov::float16(6), + ov::float16(10), ov::float16(1), ov::float16(7), + ov::float16(3), ov::float16(5), ov::float16(5), + ov::float16(9), ov::float16(0), ov::float16(3), + ov::float16(7), ov::float16(6), ov::float16(10), + ov::float16(2), ov::float16(10), ov::float16(2), + ov::float16(9), ov::float16(7), ov::float16(5), + ov::float16(8), ov::float16(0), ov::float16(1), + ov::float16(7), ov::float16(7), ov::float16(4), + ov::float16(6), ov::float16(8), ov::float16(10), + ov::float16(7), ov::float16(3), ov::float16(8), }); set_values(input1, { - FLOAT16(0), FLOAT16(1), FLOAT16(2), FLOAT16(2), FLOAT16(2), FLOAT16(0), - FLOAT16(0), FLOAT16(0), FLOAT16(2), FLOAT16(0), FLOAT16(0), FLOAT16(0), - FLOAT16(1), FLOAT16(0), FLOAT16(1), FLOAT16(1), FLOAT16(2), FLOAT16(1), - FLOAT16(2), FLOAT16(1), FLOAT16(2), FLOAT16(1), FLOAT16(0), FLOAT16(2), - FLOAT16(1), FLOAT16(0), FLOAT16(1), FLOAT16(2), FLOAT16(0), FLOAT16(0), - FLOAT16(1), FLOAT16(2), FLOAT16(2), FLOAT16(1), FLOAT16(1), FLOAT16(1), - FLOAT16(1), FLOAT16(0), FLOAT16(2), FLOAT16(0), FLOAT16(2), FLOAT16(2), - FLOAT16(2), FLOAT16(2), FLOAT16(2), FLOAT16(0), FLOAT16(0), FLOAT16(2), - FLOAT16(1), FLOAT16(2), FLOAT16(2), FLOAT16(2), FLOAT16(2), FLOAT16(0), - FLOAT16(2), FLOAT16(0), FLOAT16(0), FLOAT16(0), FLOAT16(0), FLOAT16(2), - FLOAT16(2), FLOAT16(0), FLOAT16(1), FLOAT16(1), FLOAT16(2), FLOAT16(2), - FLOAT16(1), FLOAT16(1), FLOAT16(0), FLOAT16(2), FLOAT16(0), FLOAT16(0), - FLOAT16(0), FLOAT16(2), FLOAT16(2), FLOAT16(2), FLOAT16(1), FLOAT16(0), - FLOAT16(0), FLOAT16(2), FLOAT16(1), FLOAT16(2), FLOAT16(1), FLOAT16(2), - FLOAT16(0), FLOAT16(0), FLOAT16(1), FLOAT16(2), FLOAT16(0), FLOAT16(0), - FLOAT16(0), FLOAT16(1), FLOAT16(1), FLOAT16(2), FLOAT16(0), FLOAT16(0), - FLOAT16(2), FLOAT16(0), FLOAT16(0), FLOAT16(0), FLOAT16(1), 
FLOAT16(0), - FLOAT16(0), FLOAT16(2), FLOAT16(2), FLOAT16(0), FLOAT16(2), FLOAT16(0), - FLOAT16(0), FLOAT16(1), FLOAT16(1), FLOAT16(0), FLOAT16(1), FLOAT16(1), - FLOAT16(1), FLOAT16(0), FLOAT16(2), FLOAT16(1), FLOAT16(0), FLOAT16(2), - FLOAT16(0), FLOAT16(0), FLOAT16(1), FLOAT16(1), FLOAT16(2), FLOAT16(2), - FLOAT16(1), FLOAT16(1), FLOAT16(1), FLOAT16(0), FLOAT16(2), FLOAT16(1), - FLOAT16(0), FLOAT16(1), FLOAT16(0), FLOAT16(1), FLOAT16(2), FLOAT16(2), - FLOAT16(0), FLOAT16(1), FLOAT16(2), FLOAT16(2), FLOAT16(1), FLOAT16(0), - FLOAT16(1), FLOAT16(0), FLOAT16(2), FLOAT16(2), FLOAT16(1), FLOAT16(1), - FLOAT16(1), FLOAT16(0), FLOAT16(1), FLOAT16(2), FLOAT16(1), FLOAT16(1), - FLOAT16(1), FLOAT16(2), FLOAT16(1), FLOAT16(2), FLOAT16(0), FLOAT16(2), - FLOAT16(2), FLOAT16(1), FLOAT16(1), FLOAT16(2), FLOAT16(1), FLOAT16(1), - FLOAT16(2), FLOAT16(0), FLOAT16(1), FLOAT16(2), FLOAT16(2), FLOAT16(1), - FLOAT16(2), FLOAT16(2), FLOAT16(0), FLOAT16(1), FLOAT16(1), FLOAT16(1), - FLOAT16(0), FLOAT16(0), FLOAT16(2), FLOAT16(2), FLOAT16(0), FLOAT16(1), - FLOAT16(1), FLOAT16(1), FLOAT16(2), FLOAT16(1), FLOAT16(1), FLOAT16(0), - FLOAT16(2), FLOAT16(2), FLOAT16(2), FLOAT16(1), FLOAT16(0), FLOAT16(0), - FLOAT16(2), FLOAT16(1), FLOAT16(0), FLOAT16(0), FLOAT16(1), FLOAT16(1), - FLOAT16(0), FLOAT16(2), FLOAT16(2), FLOAT16(1), FLOAT16(1), FLOAT16(2), - FLOAT16(2), FLOAT16(1), FLOAT16(1), FLOAT16(0), FLOAT16(0), FLOAT16(2), - FLOAT16(2), FLOAT16(2), FLOAT16(1), FLOAT16(2), FLOAT16(0), FLOAT16(0), - FLOAT16(1), FLOAT16(0), FLOAT16(2), FLOAT16(2), FLOAT16(0), FLOAT16(2), - FLOAT16(1), FLOAT16(0), FLOAT16(0), FLOAT16(2), FLOAT16(1), FLOAT16(2), - FLOAT16(0), FLOAT16(2), FLOAT16(0), FLOAT16(2), FLOAT16(1), FLOAT16(0), - FLOAT16(0), FLOAT16(0), FLOAT16(1), FLOAT16(0), FLOAT16(0), FLOAT16(0), - FLOAT16(1), FLOAT16(0), FLOAT16(0), FLOAT16(2), FLOAT16(0), FLOAT16(1), - FLOAT16(0), FLOAT16(1), FLOAT16(2), FLOAT16(2), FLOAT16(0), FLOAT16(0), - FLOAT16(0), FLOAT16(0), FLOAT16(0), FLOAT16(1), FLOAT16(1), FLOAT16(2), - FLOAT16(1), FLOAT16(1), FLOAT16(1), FLOAT16(0), FLOAT16(2), FLOAT16(2), - FLOAT16(0), FLOAT16(0), FLOAT16(2), FLOAT16(2), FLOAT16(1), FLOAT16(1), - FLOAT16(1), FLOAT16(2), FLOAT16(2), FLOAT16(2), FLOAT16(1), FLOAT16(1), - FLOAT16(1), FLOAT16(0), FLOAT16(1), FLOAT16(0), FLOAT16(0), FLOAT16(0), - FLOAT16(2), FLOAT16(0), FLOAT16(2), FLOAT16(0), FLOAT16(2), FLOAT16(0), - FLOAT16(1), FLOAT16(2), FLOAT16(0), FLOAT16(1), FLOAT16(1), FLOAT16(0), - FLOAT16(0), FLOAT16(0), FLOAT16(0), FLOAT16(2), FLOAT16(1), FLOAT16(1), - FLOAT16(0), FLOAT16(0), FLOAT16(0), FLOAT16(0), FLOAT16(0), FLOAT16(2), - FLOAT16(0), FLOAT16(0), FLOAT16(2), FLOAT16(1), FLOAT16(1), FLOAT16(2), - FLOAT16(0), FLOAT16(2), FLOAT16(2), FLOAT16(0), FLOAT16(0), FLOAT16(1), - FLOAT16(0), FLOAT16(1), FLOAT16(2), FLOAT16(2), FLOAT16(2), FLOAT16(0), - FLOAT16(2), FLOAT16(0), FLOAT16(1), FLOAT16(0), FLOAT16(0), FLOAT16(1), - FLOAT16(2), FLOAT16(1), FLOAT16(2), FLOAT16(1), FLOAT16(1), FLOAT16(0), - FLOAT16(1), FLOAT16(2), FLOAT16(0), FLOAT16(1), FLOAT16(0), FLOAT16(1), - FLOAT16(1), FLOAT16(1), FLOAT16(0), FLOAT16(2), FLOAT16(2), FLOAT16(2), - FLOAT16(1), FLOAT16(0), FLOAT16(0), FLOAT16(0), FLOAT16(2), FLOAT16(1), - FLOAT16(0), FLOAT16(1), FLOAT16(0), FLOAT16(0), FLOAT16(0), FLOAT16(2), - FLOAT16(0), FLOAT16(0), FLOAT16(1), FLOAT16(0), FLOAT16(0), FLOAT16(2), - FLOAT16(0), FLOAT16(0), FLOAT16(2), FLOAT16(1), FLOAT16(1), FLOAT16(1), - FLOAT16(2), FLOAT16(2), FLOAT16(1), FLOAT16(0), FLOAT16(0), FLOAT16(0), - FLOAT16(2), FLOAT16(2), FLOAT16(1), FLOAT16(2), FLOAT16(2), 
FLOAT16(2), - FLOAT16(0), FLOAT16(2), FLOAT16(2), FLOAT16(0), FLOAT16(1), FLOAT16(0), - FLOAT16(2), FLOAT16(1), FLOAT16(2), FLOAT16(2), FLOAT16(1), FLOAT16(2), - FLOAT16(2), FLOAT16(1), FLOAT16(0), FLOAT16(2), FLOAT16(0), FLOAT16(1), - FLOAT16(2), FLOAT16(1), FLOAT16(0), FLOAT16(1), FLOAT16(0), FLOAT16(1), - FLOAT16(0), FLOAT16(1), FLOAT16(1), FLOAT16(2), FLOAT16(2), FLOAT16(2), - FLOAT16(1), FLOAT16(1), FLOAT16(1), FLOAT16(2), FLOAT16(0), FLOAT16(0), - FLOAT16(1), FLOAT16(0), FLOAT16(0), FLOAT16(0), FLOAT16(2), FLOAT16(1), - FLOAT16(0), FLOAT16(0), FLOAT16(1), FLOAT16(2), FLOAT16(2), FLOAT16(2), - FLOAT16(1), FLOAT16(0), FLOAT16(1), FLOAT16(0), FLOAT16(0), FLOAT16(0), - FLOAT16(1), FLOAT16(0), FLOAT16(2), FLOAT16(2), FLOAT16(1), FLOAT16(1), - FLOAT16(0), FLOAT16(2), FLOAT16(1), FLOAT16(1), FLOAT16(2), FLOAT16(2), - FLOAT16(0), FLOAT16(2), FLOAT16(0), FLOAT16(2), FLOAT16(2), FLOAT16(1), - FLOAT16(1), FLOAT16(1), FLOAT16(1), FLOAT16(2), FLOAT16(1), FLOAT16(0), - FLOAT16(0), FLOAT16(1), FLOAT16(2), FLOAT16(1), FLOAT16(0), FLOAT16(0), - FLOAT16(2), FLOAT16(1), FLOAT16(0), FLOAT16(1), FLOAT16(0), FLOAT16(2), - FLOAT16(2), FLOAT16(0), FLOAT16(0), FLOAT16(2), FLOAT16(2), FLOAT16(1), - FLOAT16(1), FLOAT16(0), FLOAT16(0), FLOAT16(2), FLOAT16(1), FLOAT16(1), - FLOAT16(0), FLOAT16(0), FLOAT16(0), FLOAT16(1), FLOAT16(0), FLOAT16(2), - FLOAT16(1), FLOAT16(2), FLOAT16(1), FLOAT16(0), FLOAT16(2), FLOAT16(2), - FLOAT16(2), FLOAT16(1), FLOAT16(0), FLOAT16(2), FLOAT16(0), FLOAT16(1), - FLOAT16(0), FLOAT16(2), FLOAT16(0), FLOAT16(1), FLOAT16(1), FLOAT16(1), - FLOAT16(1), FLOAT16(0), FLOAT16(2), FLOAT16(1), FLOAT16(2), FLOAT16(2), - FLOAT16(0), FLOAT16(2), FLOAT16(0), FLOAT16(2), FLOAT16(2), FLOAT16(2), - FLOAT16(2), FLOAT16(2), FLOAT16(0), FLOAT16(0), FLOAT16(0), FLOAT16(2), - FLOAT16(1), FLOAT16(1), FLOAT16(1), FLOAT16(1), FLOAT16(0), FLOAT16(1), - FLOAT16(0), FLOAT16(1), FLOAT16(1), FLOAT16(0), FLOAT16(0), FLOAT16(1), - FLOAT16(1), FLOAT16(1), FLOAT16(0), FLOAT16(2), FLOAT16(1), FLOAT16(2), - FLOAT16(2), FLOAT16(0), FLOAT16(0), FLOAT16(1), FLOAT16(0), FLOAT16(1), - FLOAT16(0), FLOAT16(2), FLOAT16(0), FLOAT16(2), FLOAT16(2), FLOAT16(2), - FLOAT16(2), FLOAT16(1), FLOAT16(1), FLOAT16(0), FLOAT16(0), FLOAT16(0), - FLOAT16(0), FLOAT16(2), FLOAT16(1), FLOAT16(1), FLOAT16(2), FLOAT16(2), - FLOAT16(0), FLOAT16(1), FLOAT16(0), FLOAT16(2), FLOAT16(0), FLOAT16(1), - FLOAT16(1), FLOAT16(2), FLOAT16(0), FLOAT16(1), FLOAT16(0), FLOAT16(1), - FLOAT16(2), FLOAT16(1), FLOAT16(1), FLOAT16(0), FLOAT16(2), FLOAT16(1), - FLOAT16(2), FLOAT16(0), FLOAT16(0), FLOAT16(2), FLOAT16(0), FLOAT16(0), - FLOAT16(0), FLOAT16(2), FLOAT16(2), FLOAT16(2), FLOAT16(0), FLOAT16(2), - FLOAT16(1), FLOAT16(1), FLOAT16(0), FLOAT16(0), FLOAT16(1), FLOAT16(1), - FLOAT16(2), FLOAT16(1), FLOAT16(1), FLOAT16(0), FLOAT16(2), FLOAT16(1), - FLOAT16(2), FLOAT16(0), FLOAT16(0), FLOAT16(2), FLOAT16(1), FLOAT16(2), - FLOAT16(1), FLOAT16(0), FLOAT16(0), FLOAT16(0), FLOAT16(1), FLOAT16(0), - FLOAT16(1), FLOAT16(1), FLOAT16(0), FLOAT16(2), FLOAT16(0), FLOAT16(0), - FLOAT16(1), FLOAT16(0), FLOAT16(2), FLOAT16(2), FLOAT16(0), FLOAT16(1), - FLOAT16(0), FLOAT16(1), FLOAT16(1), FLOAT16(0), FLOAT16(0), FLOAT16(0), - FLOAT16(2), FLOAT16(1), FLOAT16(2), FLOAT16(1), FLOAT16(0), FLOAT16(0), - FLOAT16(0), FLOAT16(1), FLOAT16(2), FLOAT16(1), FLOAT16(0), FLOAT16(1), - FLOAT16(1), FLOAT16(0), FLOAT16(2), FLOAT16(2), FLOAT16(0), FLOAT16(0), - FLOAT16(0), FLOAT16(1), FLOAT16(2), FLOAT16(2), FLOAT16(2), FLOAT16(0), - FLOAT16(0), FLOAT16(0), FLOAT16(1), FLOAT16(2), FLOAT16(0), 
FLOAT16(2), - FLOAT16(1), FLOAT16(1), FLOAT16(0), FLOAT16(2), FLOAT16(1), FLOAT16(2), - FLOAT16(0), FLOAT16(2), FLOAT16(1), FLOAT16(2), FLOAT16(0), FLOAT16(0), - FLOAT16(0), FLOAT16(1), FLOAT16(2), FLOAT16(2), FLOAT16(2), FLOAT16(1), - FLOAT16(1), FLOAT16(2), FLOAT16(2), FLOAT16(0), FLOAT16(1), FLOAT16(2), - FLOAT16(2), FLOAT16(2), FLOAT16(0), FLOAT16(0), FLOAT16(0), FLOAT16(0), - FLOAT16(0), FLOAT16(1), FLOAT16(2), FLOAT16(1), FLOAT16(2), FLOAT16(2), - FLOAT16(1), FLOAT16(1), FLOAT16(1), FLOAT16(2), FLOAT16(0), FLOAT16(2), - FLOAT16(1), FLOAT16(0), FLOAT16(2), FLOAT16(0), FLOAT16(1), FLOAT16(2), - FLOAT16(1), FLOAT16(1), FLOAT16(1), FLOAT16(2), FLOAT16(2), FLOAT16(1), - FLOAT16(0), FLOAT16(2), FLOAT16(2), FLOAT16(1), FLOAT16(1), FLOAT16(1), - FLOAT16(2), FLOAT16(0), FLOAT16(1), FLOAT16(1), FLOAT16(2), FLOAT16(0), - FLOAT16(1), FLOAT16(0), FLOAT16(1), FLOAT16(2), FLOAT16(2), FLOAT16(0), - FLOAT16(0), FLOAT16(1), FLOAT16(2), FLOAT16(2), FLOAT16(2), FLOAT16(2), - FLOAT16(1), FLOAT16(2), FLOAT16(1), FLOAT16(0), FLOAT16(2), FLOAT16(1), - FLOAT16(1), FLOAT16(1), FLOAT16(0), FLOAT16(2), FLOAT16(1), FLOAT16(2), + ov::float16(0), ov::float16(1), ov::float16(2), ov::float16(2), ov::float16(2), ov::float16(0), + ov::float16(0), ov::float16(0), ov::float16(2), ov::float16(0), ov::float16(0), ov::float16(0), + ov::float16(1), ov::float16(0), ov::float16(1), ov::float16(1), ov::float16(2), ov::float16(1), + ov::float16(2), ov::float16(1), ov::float16(2), ov::float16(1), ov::float16(0), ov::float16(2), + ov::float16(1), ov::float16(0), ov::float16(1), ov::float16(2), ov::float16(0), ov::float16(0), + ov::float16(1), ov::float16(2), ov::float16(2), ov::float16(1), ov::float16(1), ov::float16(1), + ov::float16(1), ov::float16(0), ov::float16(2), ov::float16(0), ov::float16(2), ov::float16(2), + ov::float16(2), ov::float16(2), ov::float16(2), ov::float16(0), ov::float16(0), ov::float16(2), + ov::float16(1), ov::float16(2), ov::float16(2), ov::float16(2), ov::float16(2), ov::float16(0), + ov::float16(2), ov::float16(0), ov::float16(0), ov::float16(0), ov::float16(0), ov::float16(2), + ov::float16(2), ov::float16(0), ov::float16(1), ov::float16(1), ov::float16(2), ov::float16(2), + ov::float16(1), ov::float16(1), ov::float16(0), ov::float16(2), ov::float16(0), ov::float16(0), + ov::float16(0), ov::float16(2), ov::float16(2), ov::float16(2), ov::float16(1), ov::float16(0), + ov::float16(0), ov::float16(2), ov::float16(1), ov::float16(2), ov::float16(1), ov::float16(2), + ov::float16(0), ov::float16(0), ov::float16(1), ov::float16(2), ov::float16(0), ov::float16(0), + ov::float16(0), ov::float16(1), ov::float16(1), ov::float16(2), ov::float16(0), ov::float16(0), + ov::float16(2), ov::float16(0), ov::float16(0), ov::float16(0), ov::float16(1), ov::float16(0), + ov::float16(0), ov::float16(2), ov::float16(2), ov::float16(0), ov::float16(2), ov::float16(0), + ov::float16(0), ov::float16(1), ov::float16(1), ov::float16(0), ov::float16(1), ov::float16(1), + ov::float16(1), ov::float16(0), ov::float16(2), ov::float16(1), ov::float16(0), ov::float16(2), + ov::float16(0), ov::float16(0), ov::float16(1), ov::float16(1), ov::float16(2), ov::float16(2), + ov::float16(1), ov::float16(1), ov::float16(1), ov::float16(0), ov::float16(2), ov::float16(1), + ov::float16(0), ov::float16(1), ov::float16(0), ov::float16(1), ov::float16(2), ov::float16(2), + ov::float16(0), ov::float16(1), ov::float16(2), ov::float16(2), ov::float16(1), ov::float16(0), + ov::float16(1), ov::float16(0), ov::float16(2), ov::float16(2), 
ov::float16(1), ov::float16(1), + ov::float16(1), ov::float16(0), ov::float16(1), ov::float16(2), ov::float16(1), ov::float16(1), + ov::float16(1), ov::float16(2), ov::float16(1), ov::float16(2), ov::float16(0), ov::float16(2), + ov::float16(2), ov::float16(1), ov::float16(1), ov::float16(2), ov::float16(1), ov::float16(1), + ov::float16(2), ov::float16(0), ov::float16(1), ov::float16(2), ov::float16(2), ov::float16(1), + ov::float16(2), ov::float16(2), ov::float16(0), ov::float16(1), ov::float16(1), ov::float16(1), + ov::float16(0), ov::float16(0), ov::float16(2), ov::float16(2), ov::float16(0), ov::float16(1), + ov::float16(1), ov::float16(1), ov::float16(2), ov::float16(1), ov::float16(1), ov::float16(0), + ov::float16(2), ov::float16(2), ov::float16(2), ov::float16(1), ov::float16(0), ov::float16(0), + ov::float16(2), ov::float16(1), ov::float16(0), ov::float16(0), ov::float16(1), ov::float16(1), + ov::float16(0), ov::float16(2), ov::float16(2), ov::float16(1), ov::float16(1), ov::float16(2), + ov::float16(2), ov::float16(1), ov::float16(1), ov::float16(0), ov::float16(0), ov::float16(2), + ov::float16(2), ov::float16(2), ov::float16(1), ov::float16(2), ov::float16(0), ov::float16(0), + ov::float16(1), ov::float16(0), ov::float16(2), ov::float16(2), ov::float16(0), ov::float16(2), + ov::float16(1), ov::float16(0), ov::float16(0), ov::float16(2), ov::float16(1), ov::float16(2), + ov::float16(0), ov::float16(2), ov::float16(0), ov::float16(2), ov::float16(1), ov::float16(0), + ov::float16(0), ov::float16(0), ov::float16(1), ov::float16(0), ov::float16(0), ov::float16(0), + ov::float16(1), ov::float16(0), ov::float16(0), ov::float16(2), ov::float16(0), ov::float16(1), + ov::float16(0), ov::float16(1), ov::float16(2), ov::float16(2), ov::float16(0), ov::float16(0), + ov::float16(0), ov::float16(0), ov::float16(0), ov::float16(1), ov::float16(1), ov::float16(2), + ov::float16(1), ov::float16(1), ov::float16(1), ov::float16(0), ov::float16(2), ov::float16(2), + ov::float16(0), ov::float16(0), ov::float16(2), ov::float16(2), ov::float16(1), ov::float16(1), + ov::float16(1), ov::float16(2), ov::float16(2), ov::float16(2), ov::float16(1), ov::float16(1), + ov::float16(1), ov::float16(0), ov::float16(1), ov::float16(0), ov::float16(0), ov::float16(0), + ov::float16(2), ov::float16(0), ov::float16(2), ov::float16(0), ov::float16(2), ov::float16(0), + ov::float16(1), ov::float16(2), ov::float16(0), ov::float16(1), ov::float16(1), ov::float16(0), + ov::float16(0), ov::float16(0), ov::float16(0), ov::float16(2), ov::float16(1), ov::float16(1), + ov::float16(0), ov::float16(0), ov::float16(0), ov::float16(0), ov::float16(0), ov::float16(2), + ov::float16(0), ov::float16(0), ov::float16(2), ov::float16(1), ov::float16(1), ov::float16(2), + ov::float16(0), ov::float16(2), ov::float16(2), ov::float16(0), ov::float16(0), ov::float16(1), + ov::float16(0), ov::float16(1), ov::float16(2), ov::float16(2), ov::float16(2), ov::float16(0), + ov::float16(2), ov::float16(0), ov::float16(1), ov::float16(0), ov::float16(0), ov::float16(1), + ov::float16(2), ov::float16(1), ov::float16(2), ov::float16(1), ov::float16(1), ov::float16(0), + ov::float16(1), ov::float16(2), ov::float16(0), ov::float16(1), ov::float16(0), ov::float16(1), + ov::float16(1), ov::float16(1), ov::float16(0), ov::float16(2), ov::float16(2), ov::float16(2), + ov::float16(1), ov::float16(0), ov::float16(0), ov::float16(0), ov::float16(2), ov::float16(1), + ov::float16(0), ov::float16(1), ov::float16(0), ov::float16(0), ov::float16(0), 
ov::float16(2), + ov::float16(0), ov::float16(0), ov::float16(1), ov::float16(0), ov::float16(0), ov::float16(2), + ov::float16(0), ov::float16(0), ov::float16(2), ov::float16(1), ov::float16(1), ov::float16(1), + ov::float16(2), ov::float16(2), ov::float16(1), ov::float16(0), ov::float16(0), ov::float16(0), + ov::float16(2), ov::float16(2), ov::float16(1), ov::float16(2), ov::float16(2), ov::float16(2), + ov::float16(0), ov::float16(2), ov::float16(2), ov::float16(0), ov::float16(1), ov::float16(0), + ov::float16(2), ov::float16(1), ov::float16(2), ov::float16(2), ov::float16(1), ov::float16(2), + ov::float16(2), ov::float16(1), ov::float16(0), ov::float16(2), ov::float16(0), ov::float16(1), + ov::float16(2), ov::float16(1), ov::float16(0), ov::float16(1), ov::float16(0), ov::float16(1), + ov::float16(0), ov::float16(1), ov::float16(1), ov::float16(2), ov::float16(2), ov::float16(2), + ov::float16(1), ov::float16(1), ov::float16(1), ov::float16(2), ov::float16(0), ov::float16(0), + ov::float16(1), ov::float16(0), ov::float16(0), ov::float16(0), ov::float16(2), ov::float16(1), + ov::float16(0), ov::float16(0), ov::float16(1), ov::float16(2), ov::float16(2), ov::float16(2), + ov::float16(1), ov::float16(0), ov::float16(1), ov::float16(0), ov::float16(0), ov::float16(0), + ov::float16(1), ov::float16(0), ov::float16(2), ov::float16(2), ov::float16(1), ov::float16(1), + ov::float16(0), ov::float16(2), ov::float16(1), ov::float16(1), ov::float16(2), ov::float16(2), + ov::float16(0), ov::float16(2), ov::float16(0), ov::float16(2), ov::float16(2), ov::float16(1), + ov::float16(1), ov::float16(1), ov::float16(1), ov::float16(2), ov::float16(1), ov::float16(0), + ov::float16(0), ov::float16(1), ov::float16(2), ov::float16(1), ov::float16(0), ov::float16(0), + ov::float16(2), ov::float16(1), ov::float16(0), ov::float16(1), ov::float16(0), ov::float16(2), + ov::float16(2), ov::float16(0), ov::float16(0), ov::float16(2), ov::float16(2), ov::float16(1), + ov::float16(1), ov::float16(0), ov::float16(0), ov::float16(2), ov::float16(1), ov::float16(1), + ov::float16(0), ov::float16(0), ov::float16(0), ov::float16(1), ov::float16(0), ov::float16(2), + ov::float16(1), ov::float16(2), ov::float16(1), ov::float16(0), ov::float16(2), ov::float16(2), + ov::float16(2), ov::float16(1), ov::float16(0), ov::float16(2), ov::float16(0), ov::float16(1), + ov::float16(0), ov::float16(2), ov::float16(0), ov::float16(1), ov::float16(1), ov::float16(1), + ov::float16(1), ov::float16(0), ov::float16(2), ov::float16(1), ov::float16(2), ov::float16(2), + ov::float16(0), ov::float16(2), ov::float16(0), ov::float16(2), ov::float16(2), ov::float16(2), + ov::float16(2), ov::float16(2), ov::float16(0), ov::float16(0), ov::float16(0), ov::float16(2), + ov::float16(1), ov::float16(1), ov::float16(1), ov::float16(1), ov::float16(0), ov::float16(1), + ov::float16(0), ov::float16(1), ov::float16(1), ov::float16(0), ov::float16(0), ov::float16(1), + ov::float16(1), ov::float16(1), ov::float16(0), ov::float16(2), ov::float16(1), ov::float16(2), + ov::float16(2), ov::float16(0), ov::float16(0), ov::float16(1), ov::float16(0), ov::float16(1), + ov::float16(0), ov::float16(2), ov::float16(0), ov::float16(2), ov::float16(2), ov::float16(2), + ov::float16(2), ov::float16(1), ov::float16(1), ov::float16(0), ov::float16(0), ov::float16(0), + ov::float16(0), ov::float16(2), ov::float16(1), ov::float16(1), ov::float16(2), ov::float16(2), + ov::float16(0), ov::float16(1), ov::float16(0), ov::float16(2), ov::float16(0), ov::float16(1), + 
ov::float16(1), ov::float16(2), ov::float16(0), ov::float16(1), ov::float16(0), ov::float16(1), + ov::float16(2), ov::float16(1), ov::float16(1), ov::float16(0), ov::float16(2), ov::float16(1), + ov::float16(2), ov::float16(0), ov::float16(0), ov::float16(2), ov::float16(0), ov::float16(0), + ov::float16(0), ov::float16(2), ov::float16(2), ov::float16(2), ov::float16(0), ov::float16(2), + ov::float16(1), ov::float16(1), ov::float16(0), ov::float16(0), ov::float16(1), ov::float16(1), + ov::float16(2), ov::float16(1), ov::float16(1), ov::float16(0), ov::float16(2), ov::float16(1), + ov::float16(2), ov::float16(0), ov::float16(0), ov::float16(2), ov::float16(1), ov::float16(2), + ov::float16(1), ov::float16(0), ov::float16(0), ov::float16(0), ov::float16(1), ov::float16(0), + ov::float16(1), ov::float16(1), ov::float16(0), ov::float16(2), ov::float16(0), ov::float16(0), + ov::float16(1), ov::float16(0), ov::float16(2), ov::float16(2), ov::float16(0), ov::float16(1), + ov::float16(0), ov::float16(1), ov::float16(1), ov::float16(0), ov::float16(0), ov::float16(0), + ov::float16(2), ov::float16(1), ov::float16(2), ov::float16(1), ov::float16(0), ov::float16(0), + ov::float16(0), ov::float16(1), ov::float16(2), ov::float16(1), ov::float16(0), ov::float16(1), + ov::float16(1), ov::float16(0), ov::float16(2), ov::float16(2), ov::float16(0), ov::float16(0), + ov::float16(0), ov::float16(1), ov::float16(2), ov::float16(2), ov::float16(2), ov::float16(0), + ov::float16(0), ov::float16(0), ov::float16(1), ov::float16(2), ov::float16(0), ov::float16(2), + ov::float16(1), ov::float16(1), ov::float16(0), ov::float16(2), ov::float16(1), ov::float16(2), + ov::float16(0), ov::float16(2), ov::float16(1), ov::float16(2), ov::float16(0), ov::float16(0), + ov::float16(0), ov::float16(1), ov::float16(2), ov::float16(2), ov::float16(2), ov::float16(1), + ov::float16(1), ov::float16(2), ov::float16(2), ov::float16(0), ov::float16(1), ov::float16(2), + ov::float16(2), ov::float16(2), ov::float16(0), ov::float16(0), ov::float16(0), ov::float16(0), + ov::float16(0), ov::float16(1), ov::float16(2), ov::float16(1), ov::float16(2), ov::float16(2), + ov::float16(1), ov::float16(1), ov::float16(1), ov::float16(2), ov::float16(0), ov::float16(2), + ov::float16(1), ov::float16(0), ov::float16(2), ov::float16(0), ov::float16(1), ov::float16(2), + ov::float16(1), ov::float16(1), ov::float16(1), ov::float16(2), ov::float16(2), ov::float16(1), + ov::float16(0), ov::float16(2), ov::float16(2), ov::float16(1), ov::float16(1), ov::float16(1), + ov::float16(2), ov::float16(0), ov::float16(1), ov::float16(1), ov::float16(2), ov::float16(0), + ov::float16(1), ov::float16(0), ov::float16(1), ov::float16(2), ov::float16(2), ov::float16(0), + ov::float16(0), ov::float16(1), ov::float16(2), ov::float16(2), ov::float16(2), ov::float16(2), + ov::float16(1), ov::float16(2), ov::float16(1), ov::float16(0), ov::float16(2), ov::float16(1), + ov::float16(1), ov::float16(1), ov::float16(0), ov::float16(2), ov::float16(1), ov::float16(2), }); std::vector expected_results = { - FLOAT16(0), FLOAT16(1), FLOAT16(8), FLOAT16(8), FLOAT16(8), FLOAT16(0), - FLOAT16(5), FLOAT16(5), FLOAT16(2), FLOAT16(5), FLOAT16(5), FLOAT16(5), - FLOAT16(7), FLOAT16(0), FLOAT16(7), FLOAT16(7), FLOAT16(7), FLOAT16(7), - FLOAT16(5), FLOAT16(4), FLOAT16(5), FLOAT16(4), FLOAT16(10), FLOAT16(5), - FLOAT16(0), FLOAT16(9), FLOAT16(0), FLOAT16(0), FLOAT16(9), FLOAT16(9), - FLOAT16(7), FLOAT16(0), FLOAT16(0), FLOAT16(7), FLOAT16(7), FLOAT16(7), - FLOAT16(0), FLOAT16(4), 
FLOAT16(4), FLOAT16(4), FLOAT16(4), FLOAT16(4), - FLOAT16(10), FLOAT16(10), FLOAT16(10), FLOAT16(7), FLOAT16(7), FLOAT16(10), - FLOAT16(5), FLOAT16(1), FLOAT16(1), FLOAT16(1), FLOAT16(1), FLOAT16(9), - FLOAT16(7), FLOAT16(7), FLOAT16(7), FLOAT16(7), FLOAT16(7), FLOAT16(7), - FLOAT16(2), FLOAT16(10), FLOAT16(8), FLOAT16(8), FLOAT16(2), FLOAT16(2), - FLOAT16(8), FLOAT16(8), FLOAT16(0), FLOAT16(3), FLOAT16(0), FLOAT16(0), - FLOAT16(6), FLOAT16(10), FLOAT16(10), FLOAT16(10), FLOAT16(8), FLOAT16(6), - FLOAT16(4), FLOAT16(10), FLOAT16(2), FLOAT16(10), FLOAT16(2), FLOAT16(10), - FLOAT16(7), FLOAT16(7), FLOAT16(8), FLOAT16(7), FLOAT16(7), FLOAT16(7), - FLOAT16(0), FLOAT16(6), FLOAT16(6), FLOAT16(9), FLOAT16(0), FLOAT16(0), - FLOAT16(8), FLOAT16(2), FLOAT16(2), FLOAT16(2), FLOAT16(4), FLOAT16(2), - FLOAT16(5), FLOAT16(3), FLOAT16(3), FLOAT16(5), FLOAT16(3), FLOAT16(5), - FLOAT16(3), FLOAT16(1), FLOAT16(1), FLOAT16(3), FLOAT16(1), FLOAT16(1), - FLOAT16(10), FLOAT16(9), FLOAT16(0), FLOAT16(10), FLOAT16(9), FLOAT16(0), - FLOAT16(9), FLOAT16(9), FLOAT16(5), FLOAT16(5), FLOAT16(5), FLOAT16(5), - FLOAT16(10), FLOAT16(10), FLOAT16(10), FLOAT16(3), FLOAT16(5), FLOAT16(10), - FLOAT16(2), FLOAT16(0), FLOAT16(2), FLOAT16(0), FLOAT16(10), FLOAT16(10), - FLOAT16(0), FLOAT16(5), FLOAT16(4), FLOAT16(4), FLOAT16(5), FLOAT16(0), - FLOAT16(10), FLOAT16(3), FLOAT16(5), FLOAT16(5), FLOAT16(10), FLOAT16(10), - FLOAT16(10), FLOAT16(5), FLOAT16(10), FLOAT16(0), FLOAT16(10), FLOAT16(10), - FLOAT16(8), FLOAT16(9), FLOAT16(8), FLOAT16(9), FLOAT16(8), FLOAT16(9), - FLOAT16(7), FLOAT16(0), FLOAT16(0), FLOAT16(7), FLOAT16(0), FLOAT16(0), - FLOAT16(8), FLOAT16(9), FLOAT16(6), FLOAT16(8), FLOAT16(8), FLOAT16(6), - FLOAT16(9), FLOAT16(9), FLOAT16(7), FLOAT16(10), FLOAT16(10), FLOAT16(10), - FLOAT16(2), FLOAT16(2), FLOAT16(3), FLOAT16(3), FLOAT16(2), FLOAT16(3), - FLOAT16(6), FLOAT16(6), FLOAT16(9), FLOAT16(6), FLOAT16(6), FLOAT16(5), - FLOAT16(2), FLOAT16(2), FLOAT16(2), FLOAT16(9), FLOAT16(4), FLOAT16(4), - FLOAT16(5), FLOAT16(5), FLOAT16(4), FLOAT16(4), FLOAT16(5), FLOAT16(5), - FLOAT16(3), FLOAT16(1), FLOAT16(1), FLOAT16(1), FLOAT16(1), FLOAT16(1), - FLOAT16(0), FLOAT16(8), FLOAT16(8), FLOAT16(6), FLOAT16(6), FLOAT16(0), - FLOAT16(10), FLOAT16(10), FLOAT16(5), FLOAT16(10), FLOAT16(5), FLOAT16(5), - FLOAT16(6), FLOAT16(8), FLOAT16(9), FLOAT16(9), FLOAT16(8), FLOAT16(9), - FLOAT16(9), FLOAT16(6), FLOAT16(6), FLOAT16(1), FLOAT16(9), FLOAT16(1), - FLOAT16(2), FLOAT16(1), FLOAT16(2), FLOAT16(1), FLOAT16(7), FLOAT16(2), - FLOAT16(1), FLOAT16(1), FLOAT16(3), FLOAT16(1), FLOAT16(1), FLOAT16(1), - FLOAT16(0), FLOAT16(4), FLOAT16(4), FLOAT16(7), FLOAT16(4), FLOAT16(0), - FLOAT16(10), FLOAT16(2), FLOAT16(1), FLOAT16(1), FLOAT16(10), FLOAT16(10), - FLOAT16(3), FLOAT16(3), FLOAT16(3), FLOAT16(9), FLOAT16(9), FLOAT16(7), - FLOAT16(7), FLOAT16(7), FLOAT16(7), FLOAT16(1), FLOAT16(4), FLOAT16(4), - FLOAT16(4), FLOAT16(4), FLOAT16(1), FLOAT16(1), FLOAT16(5), FLOAT16(5), - FLOAT16(9), FLOAT16(6), FLOAT16(6), FLOAT16(6), FLOAT16(9), FLOAT16(9), - FLOAT16(6), FLOAT16(10), FLOAT16(6), FLOAT16(10), FLOAT16(10), FLOAT16(10), - FLOAT16(1), FLOAT16(10), FLOAT16(1), FLOAT16(10), FLOAT16(1), FLOAT16(10), - FLOAT16(2), FLOAT16(5), FLOAT16(6), FLOAT16(2), FLOAT16(2), FLOAT16(6), - FLOAT16(5), FLOAT16(5), FLOAT16(5), FLOAT16(1), FLOAT16(10), FLOAT16(10), - FLOAT16(2), FLOAT16(2), FLOAT16(2), FLOAT16(2), FLOAT16(2), FLOAT16(6), - FLOAT16(1), FLOAT16(1), FLOAT16(6), FLOAT16(7), FLOAT16(7), FLOAT16(6), - FLOAT16(8), FLOAT16(5), FLOAT16(5), FLOAT16(8), 
FLOAT16(8), FLOAT16(2), - FLOAT16(4), FLOAT16(2), FLOAT16(0), FLOAT16(0), FLOAT16(0), FLOAT16(4), - FLOAT16(1), FLOAT16(9), FLOAT16(4), FLOAT16(9), FLOAT16(9), FLOAT16(4), - FLOAT16(1), FLOAT16(4), FLOAT16(1), FLOAT16(4), FLOAT16(4), FLOAT16(10), - FLOAT16(1), FLOAT16(1), FLOAT16(9), FLOAT16(1), FLOAT16(9), FLOAT16(1), - FLOAT16(4), FLOAT16(4), FLOAT16(0), FLOAT16(2), FLOAT16(2), FLOAT16(2), - FLOAT16(8), FLOAT16(1), FLOAT16(1), FLOAT16(1), FLOAT16(5), FLOAT16(8), - FLOAT16(3), FLOAT16(4), FLOAT16(3), FLOAT16(3), FLOAT16(3), FLOAT16(8), - FLOAT16(10), FLOAT16(10), FLOAT16(7), FLOAT16(10), FLOAT16(10), FLOAT16(2), - FLOAT16(7), FLOAT16(7), FLOAT16(2), FLOAT16(9), FLOAT16(9), FLOAT16(9), - FLOAT16(5), FLOAT16(5), FLOAT16(5), FLOAT16(9), FLOAT16(9), FLOAT16(9), - FLOAT16(8), FLOAT16(8), FLOAT16(8), FLOAT16(8), FLOAT16(8), FLOAT16(8), - FLOAT16(5), FLOAT16(6), FLOAT16(6), FLOAT16(5), FLOAT16(10), FLOAT16(5), - FLOAT16(7), FLOAT16(9), FLOAT16(7), FLOAT16(7), FLOAT16(9), FLOAT16(7), - FLOAT16(10), FLOAT16(10), FLOAT16(7), FLOAT16(10), FLOAT16(7), FLOAT16(10), - FLOAT16(5), FLOAT16(3), FLOAT16(9), FLOAT16(3), FLOAT16(9), FLOAT16(3), - FLOAT16(5), FLOAT16(1), FLOAT16(1), FLOAT16(4), FLOAT16(4), FLOAT16(4), - FLOAT16(9), FLOAT16(9), FLOAT16(9), FLOAT16(4), FLOAT16(6), FLOAT16(6), - FLOAT16(9), FLOAT16(8), FLOAT16(8), FLOAT16(8), FLOAT16(7), FLOAT16(9), - FLOAT16(8), FLOAT16(8), FLOAT16(7), FLOAT16(8), FLOAT16(8), FLOAT16(8), - FLOAT16(9), FLOAT16(0), FLOAT16(9), FLOAT16(0), FLOAT16(0), FLOAT16(0), - FLOAT16(0), FLOAT16(5), FLOAT16(7), FLOAT16(7), FLOAT16(0), FLOAT16(0), - FLOAT16(5), FLOAT16(7), FLOAT16(7), FLOAT16(7), FLOAT16(7), FLOAT16(7), - FLOAT16(2), FLOAT16(9), FLOAT16(2), FLOAT16(9), FLOAT16(9), FLOAT16(10), - FLOAT16(5), FLOAT16(5), FLOAT16(5), FLOAT16(1), FLOAT16(5), FLOAT16(9), - FLOAT16(4), FLOAT16(10), FLOAT16(2), FLOAT16(10), FLOAT16(4), FLOAT16(4), - FLOAT16(5), FLOAT16(3), FLOAT16(4), FLOAT16(3), FLOAT16(4), FLOAT16(5), - FLOAT16(5), FLOAT16(9), FLOAT16(9), FLOAT16(5), FLOAT16(5), FLOAT16(4), - FLOAT16(4), FLOAT16(8), FLOAT16(8), FLOAT16(2), FLOAT16(4), FLOAT16(4), - FLOAT16(10), FLOAT16(10), FLOAT16(10), FLOAT16(1), FLOAT16(10), FLOAT16(6), - FLOAT16(0), FLOAT16(0), FLOAT16(0), FLOAT16(6), FLOAT16(0), FLOAT16(0), - FLOAT16(3), FLOAT16(8), FLOAT16(8), FLOAT16(3), FLOAT16(8), FLOAT16(8), - FLOAT16(4), FLOAT16(7), FLOAT16(4), FLOAT16(7), FLOAT16(7), FLOAT16(7), - FLOAT16(9), FLOAT16(2), FLOAT16(7), FLOAT16(9), FLOAT16(7), FLOAT16(7), - FLOAT16(9), FLOAT16(0), FLOAT16(9), FLOAT16(0), FLOAT16(0), FLOAT16(0), - FLOAT16(2), FLOAT16(2), FLOAT16(8), FLOAT16(8), FLOAT16(8), FLOAT16(2), - FLOAT16(0), FLOAT16(0), FLOAT16(0), FLOAT16(0), FLOAT16(2), FLOAT16(0), - FLOAT16(10), FLOAT16(10), FLOAT16(10), FLOAT16(10), FLOAT16(10), FLOAT16(10), - FLOAT16(7), FLOAT16(7), FLOAT16(2), FLOAT16(3), FLOAT16(7), FLOAT16(3), - FLOAT16(4), FLOAT16(8), FLOAT16(8), FLOAT16(8), FLOAT16(8), FLOAT16(8), - FLOAT16(3), FLOAT16(0), FLOAT16(3), FLOAT16(0), FLOAT16(0), FLOAT16(0), - FLOAT16(2), FLOAT16(10), FLOAT16(10), FLOAT16(2), FLOAT16(2), FLOAT16(2), - FLOAT16(9), FLOAT16(4), FLOAT16(1), FLOAT16(1), FLOAT16(4), FLOAT16(4), - FLOAT16(6), FLOAT16(1), FLOAT16(6), FLOAT16(9), FLOAT16(6), FLOAT16(1), - FLOAT16(10), FLOAT16(2), FLOAT16(1), FLOAT16(10), FLOAT16(1), FLOAT16(10), - FLOAT16(2), FLOAT16(1), FLOAT16(1), FLOAT16(2), FLOAT16(2), FLOAT16(1), - FLOAT16(8), FLOAT16(6), FLOAT16(6), FLOAT16(8), FLOAT16(6), FLOAT16(6), - FLOAT16(7), FLOAT16(7), FLOAT16(7), FLOAT16(7), FLOAT16(7), FLOAT16(7), - FLOAT16(0), 
FLOAT16(0), FLOAT16(6), FLOAT16(6), FLOAT16(0), FLOAT16(0), - FLOAT16(7), FLOAT16(3), FLOAT16(3), FLOAT16(2), FLOAT16(7), FLOAT16(3), - FLOAT16(5), FLOAT16(1), FLOAT16(1), FLOAT16(5), FLOAT16(8), FLOAT16(5), - FLOAT16(6), FLOAT16(6), FLOAT16(6), FLOAT16(6), FLOAT16(6), FLOAT16(6), - FLOAT16(1), FLOAT16(1), FLOAT16(7), FLOAT16(1), FLOAT16(7), FLOAT16(7), - FLOAT16(9), FLOAT16(5), FLOAT16(8), FLOAT16(8), FLOAT16(5), FLOAT16(9), - FLOAT16(6), FLOAT16(8), FLOAT16(8), FLOAT16(6), FLOAT16(6), FLOAT16(6), - FLOAT16(3), FLOAT16(5), FLOAT16(3), FLOAT16(5), FLOAT16(1), FLOAT16(1), - FLOAT16(6), FLOAT16(5), FLOAT16(4), FLOAT16(5), FLOAT16(6), FLOAT16(5), - FLOAT16(4), FLOAT16(2), FLOAT16(4), FLOAT16(4), FLOAT16(2), FLOAT16(2), - FLOAT16(4), FLOAT16(5), FLOAT16(4), FLOAT16(4), FLOAT16(4), FLOAT16(4), - FLOAT16(3), FLOAT16(3), FLOAT16(0), FLOAT16(4), FLOAT16(3), FLOAT16(4), - FLOAT16(7), FLOAT16(7), FLOAT16(2), FLOAT16(7), FLOAT16(7), FLOAT16(7), - FLOAT16(5), FLOAT16(7), FLOAT16(8), FLOAT16(7), FLOAT16(5), FLOAT16(5), - FLOAT16(10), FLOAT16(5), FLOAT16(10), FLOAT16(10), FLOAT16(10), FLOAT16(5), - FLOAT16(5), FLOAT16(5), FLOAT16(5), FLOAT16(3), FLOAT16(5), FLOAT16(5), - FLOAT16(6), FLOAT16(6), FLOAT16(7), FLOAT16(7), FLOAT16(7), FLOAT16(7), - FLOAT16(10), FLOAT16(1), FLOAT16(7), FLOAT16(1), FLOAT16(7), FLOAT16(7), - FLOAT16(5), FLOAT16(5), FLOAT16(5), FLOAT16(5), FLOAT16(3), FLOAT16(5), - FLOAT16(0), FLOAT16(9), FLOAT16(3), FLOAT16(9), FLOAT16(0), FLOAT16(3), - FLOAT16(6), FLOAT16(6), FLOAT16(6), FLOAT16(10), FLOAT16(10), FLOAT16(6), - FLOAT16(2), FLOAT16(2), FLOAT16(2), FLOAT16(10), FLOAT16(10), FLOAT16(10), - FLOAT16(5), FLOAT16(9), FLOAT16(7), FLOAT16(7), FLOAT16(5), FLOAT16(9), - FLOAT16(0), FLOAT16(8), FLOAT16(0), FLOAT16(1), FLOAT16(1), FLOAT16(8), - FLOAT16(7), FLOAT16(7), FLOAT16(4), FLOAT16(4), FLOAT16(4), FLOAT16(4), - FLOAT16(8), FLOAT16(10), FLOAT16(8), FLOAT16(6), FLOAT16(10), FLOAT16(8), - FLOAT16(3), FLOAT16(3), FLOAT16(7), FLOAT16(8), FLOAT16(3), FLOAT16(8), + ov::float16(0), ov::float16(1), ov::float16(8), ov::float16(8), ov::float16(8), ov::float16(0), + ov::float16(5), ov::float16(5), ov::float16(2), ov::float16(5), ov::float16(5), ov::float16(5), + ov::float16(7), ov::float16(0), ov::float16(7), ov::float16(7), ov::float16(7), ov::float16(7), + ov::float16(5), ov::float16(4), ov::float16(5), ov::float16(4), ov::float16(10), ov::float16(5), + ov::float16(0), ov::float16(9), ov::float16(0), ov::float16(0), ov::float16(9), ov::float16(9), + ov::float16(7), ov::float16(0), ov::float16(0), ov::float16(7), ov::float16(7), ov::float16(7), + ov::float16(0), ov::float16(4), ov::float16(4), ov::float16(4), ov::float16(4), ov::float16(4), + ov::float16(10), ov::float16(10), ov::float16(10), ov::float16(7), ov::float16(7), ov::float16(10), + ov::float16(5), ov::float16(1), ov::float16(1), ov::float16(1), ov::float16(1), ov::float16(9), + ov::float16(7), ov::float16(7), ov::float16(7), ov::float16(7), ov::float16(7), ov::float16(7), + ov::float16(2), ov::float16(10), ov::float16(8), ov::float16(8), ov::float16(2), ov::float16(2), + ov::float16(8), ov::float16(8), ov::float16(0), ov::float16(3), ov::float16(0), ov::float16(0), + ov::float16(6), ov::float16(10), ov::float16(10), ov::float16(10), ov::float16(8), ov::float16(6), + ov::float16(4), ov::float16(10), ov::float16(2), ov::float16(10), ov::float16(2), ov::float16(10), + ov::float16(7), ov::float16(7), ov::float16(8), ov::float16(7), ov::float16(7), ov::float16(7), + ov::float16(0), ov::float16(6), ov::float16(6), ov::float16(9), 
ov::float16(0), ov::float16(0), + ov::float16(8), ov::float16(2), ov::float16(2), ov::float16(2), ov::float16(4), ov::float16(2), + ov::float16(5), ov::float16(3), ov::float16(3), ov::float16(5), ov::float16(3), ov::float16(5), + ov::float16(3), ov::float16(1), ov::float16(1), ov::float16(3), ov::float16(1), ov::float16(1), + ov::float16(10), ov::float16(9), ov::float16(0), ov::float16(10), ov::float16(9), ov::float16(0), + ov::float16(9), ov::float16(9), ov::float16(5), ov::float16(5), ov::float16(5), ov::float16(5), + ov::float16(10), ov::float16(10), ov::float16(10), ov::float16(3), ov::float16(5), ov::float16(10), + ov::float16(2), ov::float16(0), ov::float16(2), ov::float16(0), ov::float16(10), ov::float16(10), + ov::float16(0), ov::float16(5), ov::float16(4), ov::float16(4), ov::float16(5), ov::float16(0), + ov::float16(10), ov::float16(3), ov::float16(5), ov::float16(5), ov::float16(10), ov::float16(10), + ov::float16(10), ov::float16(5), ov::float16(10), ov::float16(0), ov::float16(10), ov::float16(10), + ov::float16(8), ov::float16(9), ov::float16(8), ov::float16(9), ov::float16(8), ov::float16(9), + ov::float16(7), ov::float16(0), ov::float16(0), ov::float16(7), ov::float16(0), ov::float16(0), + ov::float16(8), ov::float16(9), ov::float16(6), ov::float16(8), ov::float16(8), ov::float16(6), + ov::float16(9), ov::float16(9), ov::float16(7), ov::float16(10), ov::float16(10), ov::float16(10), + ov::float16(2), ov::float16(2), ov::float16(3), ov::float16(3), ov::float16(2), ov::float16(3), + ov::float16(6), ov::float16(6), ov::float16(9), ov::float16(6), ov::float16(6), ov::float16(5), + ov::float16(2), ov::float16(2), ov::float16(2), ov::float16(9), ov::float16(4), ov::float16(4), + ov::float16(5), ov::float16(5), ov::float16(4), ov::float16(4), ov::float16(5), ov::float16(5), + ov::float16(3), ov::float16(1), ov::float16(1), ov::float16(1), ov::float16(1), ov::float16(1), + ov::float16(0), ov::float16(8), ov::float16(8), ov::float16(6), ov::float16(6), ov::float16(0), + ov::float16(10), ov::float16(10), ov::float16(5), ov::float16(10), ov::float16(5), ov::float16(5), + ov::float16(6), ov::float16(8), ov::float16(9), ov::float16(9), ov::float16(8), ov::float16(9), + ov::float16(9), ov::float16(6), ov::float16(6), ov::float16(1), ov::float16(9), ov::float16(1), + ov::float16(2), ov::float16(1), ov::float16(2), ov::float16(1), ov::float16(7), ov::float16(2), + ov::float16(1), ov::float16(1), ov::float16(3), ov::float16(1), ov::float16(1), ov::float16(1), + ov::float16(0), ov::float16(4), ov::float16(4), ov::float16(7), ov::float16(4), ov::float16(0), + ov::float16(10), ov::float16(2), ov::float16(1), ov::float16(1), ov::float16(10), ov::float16(10), + ov::float16(3), ov::float16(3), ov::float16(3), ov::float16(9), ov::float16(9), ov::float16(7), + ov::float16(7), ov::float16(7), ov::float16(7), ov::float16(1), ov::float16(4), ov::float16(4), + ov::float16(4), ov::float16(4), ov::float16(1), ov::float16(1), ov::float16(5), ov::float16(5), + ov::float16(9), ov::float16(6), ov::float16(6), ov::float16(6), ov::float16(9), ov::float16(9), + ov::float16(6), ov::float16(10), ov::float16(6), ov::float16(10), ov::float16(10), ov::float16(10), + ov::float16(1), ov::float16(10), ov::float16(1), ov::float16(10), ov::float16(1), ov::float16(10), + ov::float16(2), ov::float16(5), ov::float16(6), ov::float16(2), ov::float16(2), ov::float16(6), + ov::float16(5), ov::float16(5), ov::float16(5), ov::float16(1), ov::float16(10), ov::float16(10), + ov::float16(2), ov::float16(2), ov::float16(2), 
ov::float16(2), ov::float16(2), ov::float16(6), + ov::float16(1), ov::float16(1), ov::float16(6), ov::float16(7), ov::float16(7), ov::float16(6), + ov::float16(8), ov::float16(5), ov::float16(5), ov::float16(8), ov::float16(8), ov::float16(2), + ov::float16(4), ov::float16(2), ov::float16(0), ov::float16(0), ov::float16(0), ov::float16(4), + ov::float16(1), ov::float16(9), ov::float16(4), ov::float16(9), ov::float16(9), ov::float16(4), + ov::float16(1), ov::float16(4), ov::float16(1), ov::float16(4), ov::float16(4), ov::float16(10), + ov::float16(1), ov::float16(1), ov::float16(9), ov::float16(1), ov::float16(9), ov::float16(1), + ov::float16(4), ov::float16(4), ov::float16(0), ov::float16(2), ov::float16(2), ov::float16(2), + ov::float16(8), ov::float16(1), ov::float16(1), ov::float16(1), ov::float16(5), ov::float16(8), + ov::float16(3), ov::float16(4), ov::float16(3), ov::float16(3), ov::float16(3), ov::float16(8), + ov::float16(10), ov::float16(10), ov::float16(7), ov::float16(10), ov::float16(10), ov::float16(2), + ov::float16(7), ov::float16(7), ov::float16(2), ov::float16(9), ov::float16(9), ov::float16(9), + ov::float16(5), ov::float16(5), ov::float16(5), ov::float16(9), ov::float16(9), ov::float16(9), + ov::float16(8), ov::float16(8), ov::float16(8), ov::float16(8), ov::float16(8), ov::float16(8), + ov::float16(5), ov::float16(6), ov::float16(6), ov::float16(5), ov::float16(10), ov::float16(5), + ov::float16(7), ov::float16(9), ov::float16(7), ov::float16(7), ov::float16(9), ov::float16(7), + ov::float16(10), ov::float16(10), ov::float16(7), ov::float16(10), ov::float16(7), ov::float16(10), + ov::float16(5), ov::float16(3), ov::float16(9), ov::float16(3), ov::float16(9), ov::float16(3), + ov::float16(5), ov::float16(1), ov::float16(1), ov::float16(4), ov::float16(4), ov::float16(4), + ov::float16(9), ov::float16(9), ov::float16(9), ov::float16(4), ov::float16(6), ov::float16(6), + ov::float16(9), ov::float16(8), ov::float16(8), ov::float16(8), ov::float16(7), ov::float16(9), + ov::float16(8), ov::float16(8), ov::float16(7), ov::float16(8), ov::float16(8), ov::float16(8), + ov::float16(9), ov::float16(0), ov::float16(9), ov::float16(0), ov::float16(0), ov::float16(0), + ov::float16(0), ov::float16(5), ov::float16(7), ov::float16(7), ov::float16(0), ov::float16(0), + ov::float16(5), ov::float16(7), ov::float16(7), ov::float16(7), ov::float16(7), ov::float16(7), + ov::float16(2), ov::float16(9), ov::float16(2), ov::float16(9), ov::float16(9), ov::float16(10), + ov::float16(5), ov::float16(5), ov::float16(5), ov::float16(1), ov::float16(5), ov::float16(9), + ov::float16(4), ov::float16(10), ov::float16(2), ov::float16(10), ov::float16(4), ov::float16(4), + ov::float16(5), ov::float16(3), ov::float16(4), ov::float16(3), ov::float16(4), ov::float16(5), + ov::float16(5), ov::float16(9), ov::float16(9), ov::float16(5), ov::float16(5), ov::float16(4), + ov::float16(4), ov::float16(8), ov::float16(8), ov::float16(2), ov::float16(4), ov::float16(4), + ov::float16(10), ov::float16(10), ov::float16(10), ov::float16(1), ov::float16(10), ov::float16(6), + ov::float16(0), ov::float16(0), ov::float16(0), ov::float16(6), ov::float16(0), ov::float16(0), + ov::float16(3), ov::float16(8), ov::float16(8), ov::float16(3), ov::float16(8), ov::float16(8), + ov::float16(4), ov::float16(7), ov::float16(4), ov::float16(7), ov::float16(7), ov::float16(7), + ov::float16(9), ov::float16(2), ov::float16(7), ov::float16(9), ov::float16(7), ov::float16(7), + ov::float16(9), ov::float16(0), ov::float16(9), 
ov::float16(0), ov::float16(0), ov::float16(0), + ov::float16(2), ov::float16(2), ov::float16(8), ov::float16(8), ov::float16(8), ov::float16(2), + ov::float16(0), ov::float16(0), ov::float16(0), ov::float16(0), ov::float16(2), ov::float16(0), + ov::float16(10), ov::float16(10), ov::float16(10), ov::float16(10), ov::float16(10), ov::float16(10), + ov::float16(7), ov::float16(7), ov::float16(2), ov::float16(3), ov::float16(7), ov::float16(3), + ov::float16(4), ov::float16(8), ov::float16(8), ov::float16(8), ov::float16(8), ov::float16(8), + ov::float16(3), ov::float16(0), ov::float16(3), ov::float16(0), ov::float16(0), ov::float16(0), + ov::float16(2), ov::float16(10), ov::float16(10), ov::float16(2), ov::float16(2), ov::float16(2), + ov::float16(9), ov::float16(4), ov::float16(1), ov::float16(1), ov::float16(4), ov::float16(4), + ov::float16(6), ov::float16(1), ov::float16(6), ov::float16(9), ov::float16(6), ov::float16(1), + ov::float16(10), ov::float16(2), ov::float16(1), ov::float16(10), ov::float16(1), ov::float16(10), + ov::float16(2), ov::float16(1), ov::float16(1), ov::float16(2), ov::float16(2), ov::float16(1), + ov::float16(8), ov::float16(6), ov::float16(6), ov::float16(8), ov::float16(6), ov::float16(6), + ov::float16(7), ov::float16(7), ov::float16(7), ov::float16(7), ov::float16(7), ov::float16(7), + ov::float16(0), ov::float16(0), ov::float16(6), ov::float16(6), ov::float16(0), ov::float16(0), + ov::float16(7), ov::float16(3), ov::float16(3), ov::float16(2), ov::float16(7), ov::float16(3), + ov::float16(5), ov::float16(1), ov::float16(1), ov::float16(5), ov::float16(8), ov::float16(5), + ov::float16(6), ov::float16(6), ov::float16(6), ov::float16(6), ov::float16(6), ov::float16(6), + ov::float16(1), ov::float16(1), ov::float16(7), ov::float16(1), ov::float16(7), ov::float16(7), + ov::float16(9), ov::float16(5), ov::float16(8), ov::float16(8), ov::float16(5), ov::float16(9), + ov::float16(6), ov::float16(8), ov::float16(8), ov::float16(6), ov::float16(6), ov::float16(6), + ov::float16(3), ov::float16(5), ov::float16(3), ov::float16(5), ov::float16(1), ov::float16(1), + ov::float16(6), ov::float16(5), ov::float16(4), ov::float16(5), ov::float16(6), ov::float16(5), + ov::float16(4), ov::float16(2), ov::float16(4), ov::float16(4), ov::float16(2), ov::float16(2), + ov::float16(4), ov::float16(5), ov::float16(4), ov::float16(4), ov::float16(4), ov::float16(4), + ov::float16(3), ov::float16(3), ov::float16(0), ov::float16(4), ov::float16(3), ov::float16(4), + ov::float16(7), ov::float16(7), ov::float16(2), ov::float16(7), ov::float16(7), ov::float16(7), + ov::float16(5), ov::float16(7), ov::float16(8), ov::float16(7), ov::float16(5), ov::float16(5), + ov::float16(10), ov::float16(5), ov::float16(10), ov::float16(10), ov::float16(10), ov::float16(5), + ov::float16(5), ov::float16(5), ov::float16(5), ov::float16(3), ov::float16(5), ov::float16(5), + ov::float16(6), ov::float16(6), ov::float16(7), ov::float16(7), ov::float16(7), ov::float16(7), + ov::float16(10), ov::float16(1), ov::float16(7), ov::float16(1), ov::float16(7), ov::float16(7), + ov::float16(5), ov::float16(5), ov::float16(5), ov::float16(5), ov::float16(3), ov::float16(5), + ov::float16(0), ov::float16(9), ov::float16(3), ov::float16(9), ov::float16(0), ov::float16(3), + ov::float16(6), ov::float16(6), ov::float16(6), ov::float16(10), ov::float16(10), ov::float16(6), + ov::float16(2), ov::float16(2), ov::float16(2), ov::float16(10), ov::float16(10), ov::float16(10), + ov::float16(5), ov::float16(9), ov::float16(7), 
ov::float16(7), ov::float16(5), ov::float16(9), + ov::float16(0), ov::float16(8), ov::float16(0), ov::float16(1), ov::float16(1), ov::float16(8), + ov::float16(7), ov::float16(7), ov::float16(4), ov::float16(4), ov::float16(4), ov::float16(4), + ov::float16(8), ov::float16(10), ov::float16(8), ov::float16(6), ov::float16(10), ov::float16(8), + ov::float16(3), ov::float16(3), ov::float16(7), ov::float16(8), ov::float16(3), ov::float16(8), }; DoTest(engine, input0, input1, expected_results, tensor(2, 2, 6, 4, 4, 2), axis); @@ -999,48 +999,48 @@ TEST(gather_elements_gpu_fp16, d124251_i124221_an3) { auto input1 = engine.allocate_memory({ data_types::f16, format::bfwzyx, { 1, 2, 4, 2, 2, 1 } }); // indices set_values(input0, { - FLOAT16(0), FLOAT16(1), FLOAT16(8), FLOAT16(5), - FLOAT16(5), FLOAT16(2), FLOAT16(0), FLOAT16(7), - FLOAT16(7), FLOAT16(10), FLOAT16(4), FLOAT16(5), - FLOAT16(9), FLOAT16(0), FLOAT16(0), FLOAT16(5), - FLOAT16(7), FLOAT16(0), FLOAT16(4), FLOAT16(0), - FLOAT16(4), FLOAT16(7), FLOAT16(6), FLOAT16(10), - FLOAT16(9), FLOAT16(5), FLOAT16(1), FLOAT16(7), - FLOAT16(4), FLOAT16(7), FLOAT16(10), FLOAT16(8), - FLOAT16(2), FLOAT16(0), FLOAT16(8), FLOAT16(3), - FLOAT16(6), FLOAT16(8), FLOAT16(10), FLOAT16(4), - FLOAT16(2), FLOAT16(10), FLOAT16(7), FLOAT16(8), - FLOAT16(7), FLOAT16(0), FLOAT16(6), FLOAT16(9), - FLOAT16(2), FLOAT16(4), FLOAT16(8), FLOAT16(5), - FLOAT16(2), FLOAT16(3), FLOAT16(3), FLOAT16(1), - FLOAT16(5), FLOAT16(9), FLOAT16(10), FLOAT16(0), - FLOAT16(9), FLOAT16(5), FLOAT16(5), FLOAT16(3), - FLOAT16(10), FLOAT16(5), FLOAT16(2), FLOAT16(0), - FLOAT16(10), FLOAT16(0), FLOAT16(5), FLOAT16(4), - FLOAT16(3), FLOAT16(10), FLOAT16(5), FLOAT16(5), - FLOAT16(10), FLOAT16(0), FLOAT16(8), FLOAT16(8), + ov::float16(0), ov::float16(1), ov::float16(8), ov::float16(5), + ov::float16(5), ov::float16(2), ov::float16(0), ov::float16(7), + ov::float16(7), ov::float16(10), ov::float16(4), ov::float16(5), + ov::float16(9), ov::float16(0), ov::float16(0), ov::float16(5), + ov::float16(7), ov::float16(0), ov::float16(4), ov::float16(0), + ov::float16(4), ov::float16(7), ov::float16(6), ov::float16(10), + ov::float16(9), ov::float16(5), ov::float16(1), ov::float16(7), + ov::float16(4), ov::float16(7), ov::float16(10), ov::float16(8), + ov::float16(2), ov::float16(0), ov::float16(8), ov::float16(3), + ov::float16(6), ov::float16(8), ov::float16(10), ov::float16(4), + ov::float16(2), ov::float16(10), ov::float16(7), ov::float16(8), + ov::float16(7), ov::float16(0), ov::float16(6), ov::float16(9), + ov::float16(2), ov::float16(4), ov::float16(8), ov::float16(5), + ov::float16(2), ov::float16(3), ov::float16(3), ov::float16(1), + ov::float16(5), ov::float16(9), ov::float16(10), ov::float16(0), + ov::float16(9), ov::float16(5), ov::float16(5), ov::float16(3), + ov::float16(10), ov::float16(5), ov::float16(2), ov::float16(0), + ov::float16(10), ov::float16(0), ov::float16(5), ov::float16(4), + ov::float16(3), ov::float16(10), ov::float16(5), ov::float16(5), + ov::float16(10), ov::float16(0), ov::float16(8), ov::float16(8), }); set_values(input1, { - FLOAT16(0), FLOAT16(2), FLOAT16(4), FLOAT16(3), - FLOAT16(4), FLOAT16(0), FLOAT16(0), FLOAT16(1), - FLOAT16(4), FLOAT16(0), FLOAT16(1), FLOAT16(0), - FLOAT16(1), FLOAT16(0), FLOAT16(1), FLOAT16(1), - FLOAT16(3), FLOAT16(1), FLOAT16(4), FLOAT16(2), - FLOAT16(4), FLOAT16(2), FLOAT16(1), FLOAT16(3), - FLOAT16(2), FLOAT16(1), FLOAT16(2), FLOAT16(4), - FLOAT16(1), FLOAT16(0), FLOAT16(2), FLOAT16(4), + ov::float16(0), ov::float16(2), ov::float16(4), 
ov::float16(3), + ov::float16(4), ov::float16(0), ov::float16(0), ov::float16(1), + ov::float16(4), ov::float16(0), ov::float16(1), ov::float16(0), + ov::float16(1), ov::float16(0), ov::float16(1), ov::float16(1), + ov::float16(3), ov::float16(1), ov::float16(4), ov::float16(2), + ov::float16(4), ov::float16(2), ov::float16(1), ov::float16(3), + ov::float16(2), ov::float16(1), ov::float16(2), ov::float16(4), + ov::float16(1), ov::float16(0), ov::float16(2), ov::float16(4), }); std::vector expected_results = { - FLOAT16(0), FLOAT16(0), FLOAT16(8), FLOAT16(7), - FLOAT16(6), FLOAT16(2), FLOAT16(0), FLOAT16(5), - FLOAT16(2), FLOAT16(1), FLOAT16(4), FLOAT16(5), - FLOAT16(9), FLOAT16(2), FLOAT16(0), FLOAT16(5), - FLOAT16(10), FLOAT16(4), FLOAT16(5), FLOAT16(0), - FLOAT16(10), FLOAT16(5), FLOAT16(3), FLOAT16(4), - FLOAT16(5), FLOAT16(4), FLOAT16(10), FLOAT16(5), - FLOAT16(2), FLOAT16(0), FLOAT16(5), FLOAT16(8), + ov::float16(0), ov::float16(0), ov::float16(8), ov::float16(7), + ov::float16(6), ov::float16(2), ov::float16(0), ov::float16(5), + ov::float16(2), ov::float16(1), ov::float16(4), ov::float16(5), + ov::float16(9), ov::float16(2), ov::float16(0), ov::float16(5), + ov::float16(10), ov::float16(4), ov::float16(5), ov::float16(0), + ov::float16(10), ov::float16(5), ov::float16(3), ov::float16(4), + ov::float16(5), ov::float16(4), ov::float16(10), ov::float16(5), + ov::float16(2), ov::float16(0), ov::float16(5), ov::float16(8), }; DoTest(engine, input0, input1, expected_results, tensor(1, 2, 4, 2, 2, 1), axis); @@ -1054,90 +1054,90 @@ TEST(gather_elements_gpu_fp16, d233113_i233115_a2) { auto input1 = engine.allocate_memory({ data_types::f16, format::bfwzyx, { 2, 3, 3, 1, 1, 5 } }); // indices set_values(input0, { - FLOAT16(0), FLOAT16(1), FLOAT16(8), - FLOAT16(5), FLOAT16(5), FLOAT16(2), - FLOAT16(0), FLOAT16(7), FLOAT16(7), - FLOAT16(10), FLOAT16(4), FLOAT16(5), - FLOAT16(9), FLOAT16(0), FLOAT16(0), - FLOAT16(5), FLOAT16(7), FLOAT16(0), - FLOAT16(4), FLOAT16(0), FLOAT16(4), - FLOAT16(7), FLOAT16(6), FLOAT16(10), - FLOAT16(9), FLOAT16(5), FLOAT16(1), - FLOAT16(7), FLOAT16(4), FLOAT16(7), - FLOAT16(10), FLOAT16(8), FLOAT16(2), - FLOAT16(0), FLOAT16(8), FLOAT16(3), - FLOAT16(6), FLOAT16(8), FLOAT16(10), - FLOAT16(4), FLOAT16(2), FLOAT16(10), - FLOAT16(7), FLOAT16(8), FLOAT16(7), - FLOAT16(0), FLOAT16(6), FLOAT16(9), - FLOAT16(2), FLOAT16(4), FLOAT16(8), - FLOAT16(5), FLOAT16(2), FLOAT16(3), + ov::float16(0), ov::float16(1), ov::float16(8), + ov::float16(5), ov::float16(5), ov::float16(2), + ov::float16(0), ov::float16(7), ov::float16(7), + ov::float16(10), ov::float16(4), ov::float16(5), + ov::float16(9), ov::float16(0), ov::float16(0), + ov::float16(5), ov::float16(7), ov::float16(0), + ov::float16(4), ov::float16(0), ov::float16(4), + ov::float16(7), ov::float16(6), ov::float16(10), + ov::float16(9), ov::float16(5), ov::float16(1), + ov::float16(7), ov::float16(4), ov::float16(7), + ov::float16(10), ov::float16(8), ov::float16(2), + ov::float16(0), ov::float16(8), ov::float16(3), + ov::float16(6), ov::float16(8), ov::float16(10), + ov::float16(4), ov::float16(2), ov::float16(10), + ov::float16(7), ov::float16(8), ov::float16(7), + ov::float16(0), ov::float16(6), ov::float16(9), + ov::float16(2), ov::float16(4), ov::float16(8), + ov::float16(5), ov::float16(2), ov::float16(3), }); set_values(input1, { - FLOAT16(0), FLOAT16(1), FLOAT16(2), - FLOAT16(2), FLOAT16(2), FLOAT16(0), - FLOAT16(0), FLOAT16(0), FLOAT16(2), - FLOAT16(0), FLOAT16(0), FLOAT16(0), - FLOAT16(1), FLOAT16(0), FLOAT16(1), - 
FLOAT16(1), FLOAT16(2), FLOAT16(1), - FLOAT16(2), FLOAT16(1), FLOAT16(2), - FLOAT16(1), FLOAT16(0), FLOAT16(2), - FLOAT16(1), FLOAT16(0), FLOAT16(1), - FLOAT16(2), FLOAT16(0), FLOAT16(0), - FLOAT16(1), FLOAT16(2), FLOAT16(2), - FLOAT16(1), FLOAT16(1), FLOAT16(1), - FLOAT16(1), FLOAT16(0), FLOAT16(2), - FLOAT16(0), FLOAT16(2), FLOAT16(2), - FLOAT16(2), FLOAT16(2), FLOAT16(2), - FLOAT16(0), FLOAT16(0), FLOAT16(2), - FLOAT16(1), FLOAT16(2), FLOAT16(2), - FLOAT16(2), FLOAT16(2), FLOAT16(0), - FLOAT16(2), FLOAT16(0), FLOAT16(0), - FLOAT16(0), FLOAT16(0), FLOAT16(2), - FLOAT16(2), FLOAT16(0), FLOAT16(1), - FLOAT16(1), FLOAT16(2), FLOAT16(2), - FLOAT16(1), FLOAT16(1), FLOAT16(0), - FLOAT16(2), FLOAT16(0), FLOAT16(0), - FLOAT16(0), FLOAT16(2), FLOAT16(2), - FLOAT16(2), FLOAT16(1), FLOAT16(0), - FLOAT16(0), FLOAT16(2), FLOAT16(1), - FLOAT16(2), FLOAT16(1), FLOAT16(2), - FLOAT16(0), FLOAT16(0), FLOAT16(1), - FLOAT16(2), FLOAT16(0), FLOAT16(2), + ov::float16(0), ov::float16(1), ov::float16(2), + ov::float16(2), ov::float16(2), ov::float16(0), + ov::float16(0), ov::float16(0), ov::float16(2), + ov::float16(0), ov::float16(0), ov::float16(0), + ov::float16(1), ov::float16(0), ov::float16(1), + ov::float16(1), ov::float16(2), ov::float16(1), + ov::float16(2), ov::float16(1), ov::float16(2), + ov::float16(1), ov::float16(0), ov::float16(2), + ov::float16(1), ov::float16(0), ov::float16(1), + ov::float16(2), ov::float16(0), ov::float16(0), + ov::float16(1), ov::float16(2), ov::float16(2), + ov::float16(1), ov::float16(1), ov::float16(1), + ov::float16(1), ov::float16(0), ov::float16(2), + ov::float16(0), ov::float16(2), ov::float16(2), + ov::float16(2), ov::float16(2), ov::float16(2), + ov::float16(0), ov::float16(0), ov::float16(2), + ov::float16(1), ov::float16(2), ov::float16(2), + ov::float16(2), ov::float16(2), ov::float16(0), + ov::float16(2), ov::float16(0), ov::float16(0), + ov::float16(0), ov::float16(0), ov::float16(2), + ov::float16(2), ov::float16(0), ov::float16(1), + ov::float16(1), ov::float16(2), ov::float16(2), + ov::float16(1), ov::float16(1), ov::float16(0), + ov::float16(2), ov::float16(0), ov::float16(0), + ov::float16(0), ov::float16(2), ov::float16(2), + ov::float16(2), ov::float16(1), ov::float16(0), + ov::float16(0), ov::float16(2), ov::float16(1), + ov::float16(2), ov::float16(1), ov::float16(2), + ov::float16(0), ov::float16(0), ov::float16(1), + ov::float16(2), ov::float16(0), ov::float16(2), }); std::vector expected_results = { - FLOAT16(0), FLOAT16(5), FLOAT16(7), - FLOAT16(0), FLOAT16(7), FLOAT16(8), - FLOAT16(0), FLOAT16(1), FLOAT16(7), - FLOAT16(0), FLOAT16(1), FLOAT16(8), - FLOAT16(5), FLOAT16(1), FLOAT16(2), - FLOAT16(9), FLOAT16(7), FLOAT16(0), - FLOAT16(5), FLOAT16(0), FLOAT16(0), - FLOAT16(9), FLOAT16(4), FLOAT16(0), - FLOAT16(9), FLOAT16(4), FLOAT16(0), - FLOAT16(5), FLOAT16(4), FLOAT16(5), - FLOAT16(7), FLOAT16(5), FLOAT16(1), - FLOAT16(7), FLOAT16(6), FLOAT16(10), - FLOAT16(7), FLOAT16(0), FLOAT16(1), - FLOAT16(4), FLOAT16(5), FLOAT16(1), - FLOAT16(9), FLOAT16(5), FLOAT16(1), - FLOAT16(7), FLOAT16(4), FLOAT16(3), - FLOAT16(10), FLOAT16(8), FLOAT16(3), - FLOAT16(0), FLOAT16(8), FLOAT16(7), - FLOAT16(0), FLOAT16(4), FLOAT16(7), - FLOAT16(7), FLOAT16(4), FLOAT16(3), - FLOAT16(7), FLOAT16(8), FLOAT16(10), - FLOAT16(4), FLOAT16(8), FLOAT16(7), - FLOAT16(4), FLOAT16(2), FLOAT16(10), - FLOAT16(7), FLOAT16(8), FLOAT16(10), - FLOAT16(6), FLOAT16(8), FLOAT16(7), - FLOAT16(5), FLOAT16(4), FLOAT16(9), - FLOAT16(0), FLOAT16(2), FLOAT16(8), - FLOAT16(5), FLOAT16(4), FLOAT16(3), 
- FLOAT16(0), FLOAT16(6), FLOAT16(8), - FLOAT16(5), FLOAT16(6), FLOAT16(3), + ov::float16(0), ov::float16(5), ov::float16(7), + ov::float16(0), ov::float16(7), ov::float16(8), + ov::float16(0), ov::float16(1), ov::float16(7), + ov::float16(0), ov::float16(1), ov::float16(8), + ov::float16(5), ov::float16(1), ov::float16(2), + ov::float16(9), ov::float16(7), ov::float16(0), + ov::float16(5), ov::float16(0), ov::float16(0), + ov::float16(9), ov::float16(4), ov::float16(0), + ov::float16(9), ov::float16(4), ov::float16(0), + ov::float16(5), ov::float16(4), ov::float16(5), + ov::float16(7), ov::float16(5), ov::float16(1), + ov::float16(7), ov::float16(6), ov::float16(10), + ov::float16(7), ov::float16(0), ov::float16(1), + ov::float16(4), ov::float16(5), ov::float16(1), + ov::float16(9), ov::float16(5), ov::float16(1), + ov::float16(7), ov::float16(4), ov::float16(3), + ov::float16(10), ov::float16(8), ov::float16(3), + ov::float16(0), ov::float16(8), ov::float16(7), + ov::float16(0), ov::float16(4), ov::float16(7), + ov::float16(7), ov::float16(4), ov::float16(3), + ov::float16(7), ov::float16(8), ov::float16(10), + ov::float16(4), ov::float16(8), ov::float16(7), + ov::float16(4), ov::float16(2), ov::float16(10), + ov::float16(7), ov::float16(8), ov::float16(10), + ov::float16(6), ov::float16(8), ov::float16(7), + ov::float16(5), ov::float16(4), ov::float16(9), + ov::float16(0), ov::float16(2), ov::float16(8), + ov::float16(5), ov::float16(4), ov::float16(3), + ov::float16(0), ov::float16(6), ov::float16(8), + ov::float16(5), ov::float16(6), ov::float16(3), }; DoTest(engine, input0, input1, expected_results, tensor(2, 3, 3, 1, 1, 5), axis, false); @@ -1151,90 +1151,90 @@ TEST(gather_elements_gpu_fp16, export_import) { auto input1 = engine.allocate_memory({ data_types::f16, format::bfwzyx, { 2, 3, 3, 1, 1, 5 } }); // indices set_values(input0, { - FLOAT16(0), FLOAT16(1), FLOAT16(8), - FLOAT16(5), FLOAT16(5), FLOAT16(2), - FLOAT16(0), FLOAT16(7), FLOAT16(7), - FLOAT16(10), FLOAT16(4), FLOAT16(5), - FLOAT16(9), FLOAT16(0), FLOAT16(0), - FLOAT16(5), FLOAT16(7), FLOAT16(0), - FLOAT16(4), FLOAT16(0), FLOAT16(4), - FLOAT16(7), FLOAT16(6), FLOAT16(10), - FLOAT16(9), FLOAT16(5), FLOAT16(1), - FLOAT16(7), FLOAT16(4), FLOAT16(7), - FLOAT16(10), FLOAT16(8), FLOAT16(2), - FLOAT16(0), FLOAT16(8), FLOAT16(3), - FLOAT16(6), FLOAT16(8), FLOAT16(10), - FLOAT16(4), FLOAT16(2), FLOAT16(10), - FLOAT16(7), FLOAT16(8), FLOAT16(7), - FLOAT16(0), FLOAT16(6), FLOAT16(9), - FLOAT16(2), FLOAT16(4), FLOAT16(8), - FLOAT16(5), FLOAT16(2), FLOAT16(3), + ov::float16(0), ov::float16(1), ov::float16(8), + ov::float16(5), ov::float16(5), ov::float16(2), + ov::float16(0), ov::float16(7), ov::float16(7), + ov::float16(10), ov::float16(4), ov::float16(5), + ov::float16(9), ov::float16(0), ov::float16(0), + ov::float16(5), ov::float16(7), ov::float16(0), + ov::float16(4), ov::float16(0), ov::float16(4), + ov::float16(7), ov::float16(6), ov::float16(10), + ov::float16(9), ov::float16(5), ov::float16(1), + ov::float16(7), ov::float16(4), ov::float16(7), + ov::float16(10), ov::float16(8), ov::float16(2), + ov::float16(0), ov::float16(8), ov::float16(3), + ov::float16(6), ov::float16(8), ov::float16(10), + ov::float16(4), ov::float16(2), ov::float16(10), + ov::float16(7), ov::float16(8), ov::float16(7), + ov::float16(0), ov::float16(6), ov::float16(9), + ov::float16(2), ov::float16(4), ov::float16(8), + ov::float16(5), ov::float16(2), ov::float16(3), }); set_values(input1, { - FLOAT16(0), FLOAT16(1), FLOAT16(2), - FLOAT16(2), 
FLOAT16(2), FLOAT16(0), - FLOAT16(0), FLOAT16(0), FLOAT16(2), - FLOAT16(0), FLOAT16(0), FLOAT16(0), - FLOAT16(1), FLOAT16(0), FLOAT16(1), - FLOAT16(1), FLOAT16(2), FLOAT16(1), - FLOAT16(2), FLOAT16(1), FLOAT16(2), - FLOAT16(1), FLOAT16(0), FLOAT16(2), - FLOAT16(1), FLOAT16(0), FLOAT16(1), - FLOAT16(2), FLOAT16(0), FLOAT16(0), - FLOAT16(1), FLOAT16(2), FLOAT16(2), - FLOAT16(1), FLOAT16(1), FLOAT16(1), - FLOAT16(1), FLOAT16(0), FLOAT16(2), - FLOAT16(0), FLOAT16(2), FLOAT16(2), - FLOAT16(2), FLOAT16(2), FLOAT16(2), - FLOAT16(0), FLOAT16(0), FLOAT16(2), - FLOAT16(1), FLOAT16(2), FLOAT16(2), - FLOAT16(2), FLOAT16(2), FLOAT16(0), - FLOAT16(2), FLOAT16(0), FLOAT16(0), - FLOAT16(0), FLOAT16(0), FLOAT16(2), - FLOAT16(2), FLOAT16(0), FLOAT16(1), - FLOAT16(1), FLOAT16(2), FLOAT16(2), - FLOAT16(1), FLOAT16(1), FLOAT16(0), - FLOAT16(2), FLOAT16(0), FLOAT16(0), - FLOAT16(0), FLOAT16(2), FLOAT16(2), - FLOAT16(2), FLOAT16(1), FLOAT16(0), - FLOAT16(0), FLOAT16(2), FLOAT16(1), - FLOAT16(2), FLOAT16(1), FLOAT16(2), - FLOAT16(0), FLOAT16(0), FLOAT16(1), - FLOAT16(2), FLOAT16(0), FLOAT16(2), + ov::float16(0), ov::float16(1), ov::float16(2), + ov::float16(2), ov::float16(2), ov::float16(0), + ov::float16(0), ov::float16(0), ov::float16(2), + ov::float16(0), ov::float16(0), ov::float16(0), + ov::float16(1), ov::float16(0), ov::float16(1), + ov::float16(1), ov::float16(2), ov::float16(1), + ov::float16(2), ov::float16(1), ov::float16(2), + ov::float16(1), ov::float16(0), ov::float16(2), + ov::float16(1), ov::float16(0), ov::float16(1), + ov::float16(2), ov::float16(0), ov::float16(0), + ov::float16(1), ov::float16(2), ov::float16(2), + ov::float16(1), ov::float16(1), ov::float16(1), + ov::float16(1), ov::float16(0), ov::float16(2), + ov::float16(0), ov::float16(2), ov::float16(2), + ov::float16(2), ov::float16(2), ov::float16(2), + ov::float16(0), ov::float16(0), ov::float16(2), + ov::float16(1), ov::float16(2), ov::float16(2), + ov::float16(2), ov::float16(2), ov::float16(0), + ov::float16(2), ov::float16(0), ov::float16(0), + ov::float16(0), ov::float16(0), ov::float16(2), + ov::float16(2), ov::float16(0), ov::float16(1), + ov::float16(1), ov::float16(2), ov::float16(2), + ov::float16(1), ov::float16(1), ov::float16(0), + ov::float16(2), ov::float16(0), ov::float16(0), + ov::float16(0), ov::float16(2), ov::float16(2), + ov::float16(2), ov::float16(1), ov::float16(0), + ov::float16(0), ov::float16(2), ov::float16(1), + ov::float16(2), ov::float16(1), ov::float16(2), + ov::float16(0), ov::float16(0), ov::float16(1), + ov::float16(2), ov::float16(0), ov::float16(2), }); std::vector expected_results = { - FLOAT16(0), FLOAT16(5), FLOAT16(7), - FLOAT16(0), FLOAT16(7), FLOAT16(8), - FLOAT16(0), FLOAT16(1), FLOAT16(7), - FLOAT16(0), FLOAT16(1), FLOAT16(8), - FLOAT16(5), FLOAT16(1), FLOAT16(2), - FLOAT16(9), FLOAT16(7), FLOAT16(0), - FLOAT16(5), FLOAT16(0), FLOAT16(0), - FLOAT16(9), FLOAT16(4), FLOAT16(0), - FLOAT16(9), FLOAT16(4), FLOAT16(0), - FLOAT16(5), FLOAT16(4), FLOAT16(5), - FLOAT16(7), FLOAT16(5), FLOAT16(1), - FLOAT16(7), FLOAT16(6), FLOAT16(10), - FLOAT16(7), FLOAT16(0), FLOAT16(1), - FLOAT16(4), FLOAT16(5), FLOAT16(1), - FLOAT16(9), FLOAT16(5), FLOAT16(1), - FLOAT16(7), FLOAT16(4), FLOAT16(3), - FLOAT16(10), FLOAT16(8), FLOAT16(3), - FLOAT16(0), FLOAT16(8), FLOAT16(7), - FLOAT16(0), FLOAT16(4), FLOAT16(7), - FLOAT16(7), FLOAT16(4), FLOAT16(3), - FLOAT16(7), FLOAT16(8), FLOAT16(10), - FLOAT16(4), FLOAT16(8), FLOAT16(7), - FLOAT16(4), FLOAT16(2), FLOAT16(10), - FLOAT16(7), FLOAT16(8), FLOAT16(10), - 
FLOAT16(6), FLOAT16(8), FLOAT16(7), - FLOAT16(5), FLOAT16(4), FLOAT16(9), - FLOAT16(0), FLOAT16(2), FLOAT16(8), - FLOAT16(5), FLOAT16(4), FLOAT16(3), - FLOAT16(0), FLOAT16(6), FLOAT16(8), - FLOAT16(5), FLOAT16(6), FLOAT16(3), + ov::float16(0), ov::float16(5), ov::float16(7), + ov::float16(0), ov::float16(7), ov::float16(8), + ov::float16(0), ov::float16(1), ov::float16(7), + ov::float16(0), ov::float16(1), ov::float16(8), + ov::float16(5), ov::float16(1), ov::float16(2), + ov::float16(9), ov::float16(7), ov::float16(0), + ov::float16(5), ov::float16(0), ov::float16(0), + ov::float16(9), ov::float16(4), ov::float16(0), + ov::float16(9), ov::float16(4), ov::float16(0), + ov::float16(5), ov::float16(4), ov::float16(5), + ov::float16(7), ov::float16(5), ov::float16(1), + ov::float16(7), ov::float16(6), ov::float16(10), + ov::float16(7), ov::float16(0), ov::float16(1), + ov::float16(4), ov::float16(5), ov::float16(1), + ov::float16(9), ov::float16(5), ov::float16(1), + ov::float16(7), ov::float16(4), ov::float16(3), + ov::float16(10), ov::float16(8), ov::float16(3), + ov::float16(0), ov::float16(8), ov::float16(7), + ov::float16(0), ov::float16(4), ov::float16(7), + ov::float16(7), ov::float16(4), ov::float16(3), + ov::float16(7), ov::float16(8), ov::float16(10), + ov::float16(4), ov::float16(8), ov::float16(7), + ov::float16(4), ov::float16(2), ov::float16(10), + ov::float16(7), ov::float16(8), ov::float16(10), + ov::float16(6), ov::float16(8), ov::float16(7), + ov::float16(5), ov::float16(4), ov::float16(9), + ov::float16(0), ov::float16(2), ov::float16(8), + ov::float16(5), ov::float16(4), ov::float16(3), + ov::float16(0), ov::float16(6), ov::float16(8), + ov::float16(5), ov::float16(6), ov::float16(3), }; DoTest(engine, input0, input1, expected_results, tensor(2, 3, 3, 1, 1, 5), axis, true); diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/gather_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/gather_gpu_test.cpp index e8e511d1e5665b..bbef9e78912d5b 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/gather_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/gather_gpu_test.cpp @@ -119,7 +119,7 @@ class gather8_test : public ::testing::TestWithParam { !memcmp(reorder_output_ptr.data(), planar_output_ptr.data(), get_linear_size(shape_out) * sizeof(T_dat))); } }; -using gather8_test_f16i32 = gather8_test; +using gather8_test_f16i32 = gather8_test; using gather8_test_f32i8 = gather8_test; using gather8_test_i32i32 = gather8_test; TEST_P(gather8_test_f16i32, gather8_test_f16i32) {} @@ -366,31 +366,31 @@ TEST(gather8_gpu_fp16, d323_axisY_bdim_m1) { bool negative_indexes = true; set_values(input1, { - FLOAT16(1.f), FLOAT16(2.f), FLOAT16(3.f), FLOAT16(4.f), FLOAT16(5.f), FLOAT16(6.f), FLOAT16(7.f), FLOAT16(8.f), - FLOAT16(9.f), FLOAT16(10.f), FLOAT16(11.f), FLOAT16(12.f), FLOAT16(13.f), FLOAT16(14.f), FLOAT16(15.f), FLOAT16(16.f), - FLOAT16(17.f), FLOAT16(18.f), FLOAT16(19.f), FLOAT16(20.f), FLOAT16(21.f), FLOAT16(22.f), FLOAT16(23.f), FLOAT16(24.f), + ov::float16(1.f), ov::float16(2.f), ov::float16(3.f), ov::float16(4.f), ov::float16(5.f), ov::float16(6.f), ov::float16(7.f), ov::float16(8.f), + ov::float16(9.f), ov::float16(10.f), ov::float16(11.f), ov::float16(12.f), ov::float16(13.f), ov::float16(14.f), ov::float16(15.f), ov::float16(16.f), + ov::float16(17.f), ov::float16(18.f), ov::float16(19.f), ov::float16(20.f), ov::float16(21.f), ov::float16(22.f), ov::float16(23.f), ov::float16(24.f), - FLOAT16(25.f), FLOAT16(26.f), FLOAT16(27.f), FLOAT16(28.f), 
FLOAT16(29.f), FLOAT16(30.f), FLOAT16(31.f), FLOAT16(32.f), - FLOAT16(33.f), FLOAT16(34.f), FLOAT16(35.f), FLOAT16(36.f), FLOAT16(37.f), FLOAT16(38.f), FLOAT16(39.f), FLOAT16(40.f), - FLOAT16(41.f), FLOAT16(42.f), FLOAT16(43.f), FLOAT16(44.f), FLOAT16(45.f), FLOAT16(46.f), FLOAT16(47.f), FLOAT16(48.f), + ov::float16(25.f), ov::float16(26.f), ov::float16(27.f), ov::float16(28.f), ov::float16(29.f), ov::float16(30.f), ov::float16(31.f), ov::float16(32.f), + ov::float16(33.f), ov::float16(34.f), ov::float16(35.f), ov::float16(36.f), ov::float16(37.f), ov::float16(38.f), ov::float16(39.f), ov::float16(40.f), + ov::float16(41.f), ov::float16(42.f), ov::float16(43.f), ov::float16(44.f), ov::float16(45.f), ov::float16(46.f), ov::float16(47.f), ov::float16(48.f), - FLOAT16(49.f), FLOAT16(50.f), FLOAT16(51.f), FLOAT16(52.f), FLOAT16(53.f), FLOAT16(54.f), FLOAT16(55.f), FLOAT16(56.f), - FLOAT16(57.f), FLOAT16(58.f), FLOAT16(59.f), FLOAT16(60.f), FLOAT16(61.f), FLOAT16(62.f), FLOAT16(63.f), FLOAT16(64.f), - FLOAT16(65.f), FLOAT16(66.f), FLOAT16(67.f), FLOAT16(68.f), FLOAT16(69.f), FLOAT16(70.f), FLOAT16(71.f), FLOAT16(72.f), + ov::float16(49.f), ov::float16(50.f), ov::float16(51.f), ov::float16(52.f), ov::float16(53.f), ov::float16(54.f), ov::float16(55.f), ov::float16(56.f), + ov::float16(57.f), ov::float16(58.f), ov::float16(59.f), ov::float16(60.f), ov::float16(61.f), ov::float16(62.f), ov::float16(63.f), ov::float16(64.f), + ov::float16(65.f), ov::float16(66.f), ov::float16(67.f), ov::float16(68.f), ov::float16(69.f), ov::float16(70.f), ov::float16(71.f), ov::float16(72.f), - FLOAT16(73.f), FLOAT16(74.f), FLOAT16(75.f), FLOAT16(76.f), FLOAT16(77.f), FLOAT16(78.f), FLOAT16(79.f), FLOAT16(80.f), - FLOAT16(81.f), FLOAT16(82.f), FLOAT16(83.f), FLOAT16(84.f), FLOAT16(85.f), FLOAT16(86.f), FLOAT16(87.f), FLOAT16(88.f), - FLOAT16(89.f), FLOAT16(90.f), FLOAT16(91.f), FLOAT16(92.f), FLOAT16(93.f), FLOAT16(94.f), FLOAT16(95.f), FLOAT16(96.f), + ov::float16(73.f), ov::float16(74.f), ov::float16(75.f), ov::float16(76.f), ov::float16(77.f), ov::float16(78.f), ov::float16(79.f), ov::float16(80.f), + ov::float16(81.f), ov::float16(82.f), ov::float16(83.f), ov::float16(84.f), ov::float16(85.f), ov::float16(86.f), ov::float16(87.f), ov::float16(88.f), + ov::float16(89.f), ov::float16(90.f), ov::float16(91.f), ov::float16(92.f), ov::float16(93.f), ov::float16(94.f), ov::float16(95.f), ov::float16(96.f), - FLOAT16(97.f), FLOAT16(98.f), FLOAT16(99.f), FLOAT16(100.f), FLOAT16(101.f), FLOAT16(102.f), FLOAT16(103.f), FLOAT16(104.f), - FLOAT16(105.f), FLOAT16(106.f), FLOAT16(107.f), FLOAT16(108.f), FLOAT16(109.f), FLOAT16(110.f), FLOAT16(111.f), FLOAT16(112.f), - FLOAT16(113.f), FLOAT16(114.f), FLOAT16(115.f), FLOAT16(116.f), FLOAT16(117.f), FLOAT16(118.f), FLOAT16(119.f), FLOAT16(120.f), + ov::float16(97.f), ov::float16(98.f), ov::float16(99.f), ov::float16(100.f), ov::float16(101.f), ov::float16(102.f), ov::float16(103.f), ov::float16(104.f), + ov::float16(105.f), ov::float16(106.f), ov::float16(107.f), ov::float16(108.f), ov::float16(109.f), ov::float16(110.f), ov::float16(111.f), ov::float16(112.f), + ov::float16(113.f), ov::float16(114.f), ov::float16(115.f), ov::float16(116.f), ov::float16(117.f), ov::float16(118.f), ov::float16(119.f), ov::float16(120.f), - FLOAT16(121.f), FLOAT16(122.f), FLOAT16(123.f), FLOAT16(124.f), FLOAT16(125.f), FLOAT16(126.f), FLOAT16(127.f), FLOAT16(128.f), - FLOAT16(129.f), FLOAT16(130.f), FLOAT16(131.f), FLOAT16(132.f), FLOAT16(133.f), FLOAT16(134.f), FLOAT16(135.f), FLOAT16(136.f), - 
FLOAT16(137.f), FLOAT16(138.f), FLOAT16(139.f), FLOAT16(140.f), FLOAT16(141.f), FLOAT16(142.f), FLOAT16(143.f), FLOAT16(144.f) + ov::float16(121.f), ov::float16(122.f), ov::float16(123.f), ov::float16(124.f), ov::float16(125.f), ov::float16(126.f), ov::float16(127.f), ov::float16(128.f), + ov::float16(129.f), ov::float16(130.f), ov::float16(131.f), ov::float16(132.f), ov::float16(133.f), ov::float16(134.f), ov::float16(135.f), ov::float16(136.f), + ov::float16(137.f), ov::float16(138.f), ov::float16(139.f), ov::float16(140.f), ov::float16(141.f), ov::float16(142.f), ov::float16(143.f), ov::float16(144.f) }); set_values(input2, { @@ -490,17 +490,17 @@ TEST(gather7_gpu_fp16, d222_axisX_bdim_m1) { int64_t batch_dim = -1; set_values(input1, { - FLOAT16(1.f), FLOAT16(2.f), FLOAT16(3.f), FLOAT16(4.f), FLOAT16(5.f), FLOAT16(6.f), FLOAT16(7.f), FLOAT16(8.f), - FLOAT16(9.f), FLOAT16(10.f), FLOAT16(11.f), FLOAT16(12.f), FLOAT16(13.f), FLOAT16(14.f), FLOAT16(15.f), FLOAT16(16.f), + ov::float16(1.f), ov::float16(2.f), ov::float16(3.f), ov::float16(4.f), ov::float16(5.f), ov::float16(6.f), ov::float16(7.f), ov::float16(8.f), + ov::float16(9.f), ov::float16(10.f), ov::float16(11.f), ov::float16(12.f), ov::float16(13.f), ov::float16(14.f), ov::float16(15.f), ov::float16(16.f), - FLOAT16(17.f), FLOAT16(18.f), FLOAT16(19.f), FLOAT16(20.f), FLOAT16(21.f), FLOAT16(22.f), FLOAT16(23.f), FLOAT16(24.f), - FLOAT16(25.f), FLOAT16(26.f), FLOAT16(27.f), FLOAT16(28.f), FLOAT16(29.f), FLOAT16(30.f), FLOAT16(31.f), FLOAT16(32.f), + ov::float16(17.f), ov::float16(18.f), ov::float16(19.f), ov::float16(20.f), ov::float16(21.f), ov::float16(22.f), ov::float16(23.f), ov::float16(24.f), + ov::float16(25.f), ov::float16(26.f), ov::float16(27.f), ov::float16(28.f), ov::float16(29.f), ov::float16(30.f), ov::float16(31.f), ov::float16(32.f), - FLOAT16(33.f), FLOAT16(34.f), FLOAT16(35.f), FLOAT16(36.f), FLOAT16(37.f), FLOAT16(38.f), FLOAT16(39.f), FLOAT16(40.f), - FLOAT16(41.f), FLOAT16(42.f), FLOAT16(43.f), FLOAT16(44.f), FLOAT16(45.f), FLOAT16(46.f), FLOAT16(47.f), FLOAT16(48.f), + ov::float16(33.f), ov::float16(34.f), ov::float16(35.f), ov::float16(36.f), ov::float16(37.f), ov::float16(38.f), ov::float16(39.f), ov::float16(40.f), + ov::float16(41.f), ov::float16(42.f), ov::float16(43.f), ov::float16(44.f), ov::float16(45.f), ov::float16(46.f), ov::float16(47.f), ov::float16(48.f), - FLOAT16(49.f), FLOAT16(50.f), FLOAT16(51.f), FLOAT16(52.f), FLOAT16(53.f), FLOAT16(54.f), FLOAT16(55.f), FLOAT16(56.f), - FLOAT16(57.f), FLOAT16(58.f), FLOAT16(59.f), FLOAT16(60.f), FLOAT16(61.f), FLOAT16(62.f), FLOAT16(63.f), FLOAT16(64.f), + ov::float16(49.f), ov::float16(50.f), ov::float16(51.f), ov::float16(52.f), ov::float16(53.f), ov::float16(54.f), ov::float16(55.f), ov::float16(56.f), + ov::float16(57.f), ov::float16(58.f), ov::float16(59.f), ov::float16(60.f), ov::float16(61.f), ov::float16(62.f), ov::float16(63.f), ov::float16(64.f), }); set_values(input2, { @@ -584,31 +584,31 @@ TEST(gather7_gpu_fp16, d323_axisY_bdim_m1) { int64_t batch_dim = -1; set_values(input1, { - FLOAT16(1.f), FLOAT16(2.f), FLOAT16(3.f), FLOAT16(4.f), FLOAT16(5.f), FLOAT16(6.f), FLOAT16(7.f), FLOAT16(8.f), - FLOAT16(9.f), FLOAT16(10.f), FLOAT16(11.f), FLOAT16(12.f), FLOAT16(13.f), FLOAT16(14.f), FLOAT16(15.f), FLOAT16(16.f), - FLOAT16(17.f), FLOAT16(18.f), FLOAT16(19.f), FLOAT16(20.f), FLOAT16(21.f), FLOAT16(22.f), FLOAT16(23.f), FLOAT16(24.f), + ov::float16(1.f), ov::float16(2.f), ov::float16(3.f), ov::float16(4.f), ov::float16(5.f), ov::float16(6.f), 
ov::float16(7.f), ov::float16(8.f), + ov::float16(9.f), ov::float16(10.f), ov::float16(11.f), ov::float16(12.f), ov::float16(13.f), ov::float16(14.f), ov::float16(15.f), ov::float16(16.f), + ov::float16(17.f), ov::float16(18.f), ov::float16(19.f), ov::float16(20.f), ov::float16(21.f), ov::float16(22.f), ov::float16(23.f), ov::float16(24.f), - FLOAT16(25.f), FLOAT16(26.f), FLOAT16(27.f), FLOAT16(28.f), FLOAT16(29.f), FLOAT16(30.f), FLOAT16(31.f), FLOAT16(32.f), - FLOAT16(33.f), FLOAT16(34.f), FLOAT16(35.f), FLOAT16(36.f), FLOAT16(37.f), FLOAT16(38.f), FLOAT16(39.f), FLOAT16(40.f), - FLOAT16(41.f), FLOAT16(42.f), FLOAT16(43.f), FLOAT16(44.f), FLOAT16(45.f), FLOAT16(46.f), FLOAT16(47.f), FLOAT16(48.f), + ov::float16(25.f), ov::float16(26.f), ov::float16(27.f), ov::float16(28.f), ov::float16(29.f), ov::float16(30.f), ov::float16(31.f), ov::float16(32.f), + ov::float16(33.f), ov::float16(34.f), ov::float16(35.f), ov::float16(36.f), ov::float16(37.f), ov::float16(38.f), ov::float16(39.f), ov::float16(40.f), + ov::float16(41.f), ov::float16(42.f), ov::float16(43.f), ov::float16(44.f), ov::float16(45.f), ov::float16(46.f), ov::float16(47.f), ov::float16(48.f), - FLOAT16(49.f), FLOAT16(50.f), FLOAT16(51.f), FLOAT16(52.f), FLOAT16(53.f), FLOAT16(54.f), FLOAT16(55.f), FLOAT16(56.f), - FLOAT16(57.f), FLOAT16(58.f), FLOAT16(59.f), FLOAT16(60.f), FLOAT16(61.f), FLOAT16(62.f), FLOAT16(63.f), FLOAT16(64.f), - FLOAT16(65.f), FLOAT16(66.f), FLOAT16(67.f), FLOAT16(68.f), FLOAT16(69.f), FLOAT16(70.f), FLOAT16(71.f), FLOAT16(72.f), + ov::float16(49.f), ov::float16(50.f), ov::float16(51.f), ov::float16(52.f), ov::float16(53.f), ov::float16(54.f), ov::float16(55.f), ov::float16(56.f), + ov::float16(57.f), ov::float16(58.f), ov::float16(59.f), ov::float16(60.f), ov::float16(61.f), ov::float16(62.f), ov::float16(63.f), ov::float16(64.f), + ov::float16(65.f), ov::float16(66.f), ov::float16(67.f), ov::float16(68.f), ov::float16(69.f), ov::float16(70.f), ov::float16(71.f), ov::float16(72.f), - FLOAT16(73.f), FLOAT16(74.f), FLOAT16(75.f), FLOAT16(76.f), FLOAT16(77.f), FLOAT16(78.f), FLOAT16(79.f), FLOAT16(80.f), - FLOAT16(81.f), FLOAT16(82.f), FLOAT16(83.f), FLOAT16(84.f), FLOAT16(85.f), FLOAT16(86.f), FLOAT16(87.f), FLOAT16(88.f), - FLOAT16(89.f), FLOAT16(90.f), FLOAT16(91.f), FLOAT16(92.f), FLOAT16(93.f), FLOAT16(94.f), FLOAT16(95.f), FLOAT16(96.f), + ov::float16(73.f), ov::float16(74.f), ov::float16(75.f), ov::float16(76.f), ov::float16(77.f), ov::float16(78.f), ov::float16(79.f), ov::float16(80.f), + ov::float16(81.f), ov::float16(82.f), ov::float16(83.f), ov::float16(84.f), ov::float16(85.f), ov::float16(86.f), ov::float16(87.f), ov::float16(88.f), + ov::float16(89.f), ov::float16(90.f), ov::float16(91.f), ov::float16(92.f), ov::float16(93.f), ov::float16(94.f), ov::float16(95.f), ov::float16(96.f), - FLOAT16(97.f), FLOAT16(98.f), FLOAT16(99.f), FLOAT16(100.f), FLOAT16(101.f), FLOAT16(102.f), FLOAT16(103.f), FLOAT16(104.f), - FLOAT16(105.f), FLOAT16(106.f), FLOAT16(107.f), FLOAT16(108.f), FLOAT16(109.f), FLOAT16(110.f), FLOAT16(111.f), FLOAT16(112.f), - FLOAT16(113.f), FLOAT16(114.f), FLOAT16(115.f), FLOAT16(116.f), FLOAT16(117.f), FLOAT16(118.f), FLOAT16(119.f), FLOAT16(120.f), + ov::float16(97.f), ov::float16(98.f), ov::float16(99.f), ov::float16(100.f), ov::float16(101.f), ov::float16(102.f), ov::float16(103.f), ov::float16(104.f), + ov::float16(105.f), ov::float16(106.f), ov::float16(107.f), ov::float16(108.f), ov::float16(109.f), ov::float16(110.f), ov::float16(111.f), ov::float16(112.f), + 
ov::float16(113.f), ov::float16(114.f), ov::float16(115.f), ov::float16(116.f), ov::float16(117.f), ov::float16(118.f), ov::float16(119.f), ov::float16(120.f), - FLOAT16(121.f), FLOAT16(122.f), FLOAT16(123.f), FLOAT16(124.f), FLOAT16(125.f), FLOAT16(126.f), FLOAT16(127.f), FLOAT16(128.f), - FLOAT16(129.f), FLOAT16(130.f), FLOAT16(131.f), FLOAT16(132.f), FLOAT16(133.f), FLOAT16(134.f), FLOAT16(135.f), FLOAT16(136.f), - FLOAT16(137.f), FLOAT16(138.f), FLOAT16(139.f), FLOAT16(140.f), FLOAT16(141.f), FLOAT16(142.f), FLOAT16(143.f), FLOAT16(144.f) + ov::float16(121.f), ov::float16(122.f), ov::float16(123.f), ov::float16(124.f), ov::float16(125.f), ov::float16(126.f), ov::float16(127.f), ov::float16(128.f), + ov::float16(129.f), ov::float16(130.f), ov::float16(131.f), ov::float16(132.f), ov::float16(133.f), ov::float16(134.f), ov::float16(135.f), ov::float16(136.f), + ov::float16(137.f), ov::float16(138.f), ov::float16(139.f), ov::float16(140.f), ov::float16(141.f), ov::float16(142.f), ov::float16(143.f), ov::float16(144.f) }); set_values(input2, { @@ -702,21 +702,21 @@ TEST(gather7_gpu_fp16, d44_axisY_bdim1) { int64_t batch_dim = 1; set_values(input1, { - FLOAT16(84.f), FLOAT16( 7.f), FLOAT16(10.f), FLOAT16(69.f), FLOAT16(13.f), - FLOAT16(47.f), FLOAT16(75.f), FLOAT16( 8.f), FLOAT16(65.f), FLOAT16(28.f), - FLOAT16( 5.f), FLOAT16(12.f), FLOAT16(56.f), FLOAT16(54.f), FLOAT16( 9.f), + ov::float16(84.f), ov::float16( 7.f), ov::float16(10.f), ov::float16(69.f), ov::float16(13.f), + ov::float16(47.f), ov::float16(75.f), ov::float16( 8.f), ov::float16(65.f), ov::float16(28.f), + ov::float16( 5.f), ov::float16(12.f), ov::float16(56.f), ov::float16(54.f), ov::float16( 9.f), - FLOAT16(31.f), FLOAT16(12.f), FLOAT16(71.f), FLOAT16(55.f), FLOAT16( 8.f), - FLOAT16(73.f), FLOAT16(16.f), FLOAT16(29.f), FLOAT16(81.f), FLOAT16(81.f), - FLOAT16(75.f), FLOAT16( 8.f), FLOAT16(74.f), FLOAT16(75.f), FLOAT16(51.f), + ov::float16(31.f), ov::float16(12.f), ov::float16(71.f), ov::float16(55.f), ov::float16( 8.f), + ov::float16(73.f), ov::float16(16.f), ov::float16(29.f), ov::float16(81.f), ov::float16(81.f), + ov::float16(75.f), ov::float16( 8.f), ov::float16(74.f), ov::float16(75.f), ov::float16(51.f), - FLOAT16( 7.f), FLOAT16(29.f), FLOAT16( 6.f), FLOAT16(72.f), FLOAT16(18.f), - FLOAT16(38.f), FLOAT16(54.f), FLOAT16(19.f), FLOAT16(70.f), FLOAT16(16.f), - FLOAT16(74.f), FLOAT16(40.f), FLOAT16(72.f), FLOAT16(88.f), FLOAT16(24.f), + ov::float16( 7.f), ov::float16(29.f), ov::float16( 6.f), ov::float16(72.f), ov::float16(18.f), + ov::float16(38.f), ov::float16(54.f), ov::float16(19.f), ov::float16(70.f), ov::float16(16.f), + ov::float16(74.f), ov::float16(40.f), ov::float16(72.f), ov::float16(88.f), ov::float16(24.f), - FLOAT16(14.f), FLOAT16(75.f), FLOAT16(74.f), FLOAT16(82.f), FLOAT16(25.f), - FLOAT16(48.f), FLOAT16(13.f), FLOAT16(71.f), FLOAT16(92.f), FLOAT16( 9.f), - FLOAT16(73.f), FLOAT16( 8.f), FLOAT16(80.f), FLOAT16(27.f), FLOAT16(64.f) + ov::float16(14.f), ov::float16(75.f), ov::float16(74.f), ov::float16(82.f), ov::float16(25.f), + ov::float16(48.f), ov::float16(13.f), ov::float16(71.f), ov::float16(92.f), ov::float16( 9.f), + ov::float16(73.f), ov::float16( 8.f), ov::float16(80.f), ov::float16(27.f), ov::float16(64.f) }); set_values(input2, { @@ -791,9 +791,9 @@ TEST(gather7_gpu_fp16, d32_axisF_bdim_m1) { size_t batch_dim = -1; set_values(input1, { - FLOAT16(1.f), FLOAT16(2.f), - FLOAT16(3.f), FLOAT16(4.f), - FLOAT16(5.f), FLOAT16(6.f) + ov::float16(1.f), ov::float16(2.f), + ov::float16(3.f), ov::float16(4.f), + 
ov::float16(5.f), ov::float16(6.f) }); set_values(input2, { @@ -854,9 +854,9 @@ TEST(gather7_gpu_fp16, d32_axisF_bdim1) { int64_t batch_dim = 1; set_values(input1, { - FLOAT16(1.f), FLOAT16(2.f), - FLOAT16(3.f), FLOAT16(4.f), - FLOAT16(5.f), FLOAT16(6.f) + ov::float16(1.f), ov::float16(2.f), + ov::float16(3.f), ov::float16(4.f), + ov::float16(5.f), ov::float16(6.f) }); set_values(input2, { @@ -916,9 +916,9 @@ TEST(gather7_gpu_fp16, d32_axisF_bdim0) { size_t batch_dim = 0; set_values(input1, { - FLOAT16(1.f), FLOAT16(2.f), - FLOAT16(3.f), FLOAT16(4.f), - FLOAT16(5.f), FLOAT16(6.f) + ov::float16(1.f), ov::float16(2.f), + ov::float16(3.f), ov::float16(4.f), + ov::float16(5.f), ov::float16(6.f) }); set_values(input2, { @@ -985,8 +985,8 @@ TEST(gather_gpu_fp16, d14_axisB) { int64_t axis = 0; set_values(input1, { - FLOAT16(1.0f), FLOAT16(2.0f), - FLOAT16(3.0f), FLOAT16(4.0f) + ov::float16(1.0f), ov::float16(2.0f), + ov::float16(3.0f), ov::float16(4.0f) }); set_values(input2, { @@ -1044,11 +1044,11 @@ TEST(gather_gpu_fp16, d222_axisB) { int64_t axis = 0; set_values(input1, { - FLOAT16(1.f), FLOAT16(2.f), FLOAT16(3.f), - FLOAT16(4.f), FLOAT16(5.f), FLOAT16(6.f), + ov::float16(1.f), ov::float16(2.f), ov::float16(3.f), + ov::float16(4.f), ov::float16(5.f), ov::float16(6.f), - FLOAT16(7.f), FLOAT16(8.f), FLOAT16(9.f), - FLOAT16(10.f), FLOAT16(11.f), FLOAT16(12.f) + ov::float16(7.f), ov::float16(8.f), ov::float16(9.f), + ov::float16(10.f), ov::float16(11.f), ov::float16(12.f) }); set_values(input2, { @@ -1106,11 +1106,11 @@ TEST(gather_gpu_fp16, d22_axisY) { int64_t axis = 2; set_values(input1, { - FLOAT16(1.f), FLOAT16(2.f), FLOAT16(3.f), - FLOAT16(4.f), FLOAT16(5.f), FLOAT16(6.f), + ov::float16(1.f), ov::float16(2.f), ov::float16(3.f), + ov::float16(4.f), ov::float16(5.f), ov::float16(6.f), - FLOAT16(7.f), FLOAT16(8.f), FLOAT16(9.f), - FLOAT16(10.f), FLOAT16(11.f), FLOAT16(12.f) + ov::float16(7.f), ov::float16(8.f), ov::float16(9.f), + ov::float16(10.f), ov::float16(11.f), ov::float16(12.f) }); set_values(input2, { @@ -1167,11 +1167,11 @@ TEST(gather_gpu_fp16, d22_axisF) { int64_t axis = 1; set_values(input1, { - FLOAT16(1.f), FLOAT16(2.f), FLOAT16(3.f), - FLOAT16(4.f), FLOAT16(5.f), FLOAT16(6.f), + ov::float16(1.f), ov::float16(2.f), ov::float16(3.f), + ov::float16(4.f), ov::float16(5.f), ov::float16(6.f), - FLOAT16(7.f), FLOAT16(8.f), FLOAT16(9.f), - FLOAT16(10.f), FLOAT16(11.f), FLOAT16(12.f) + ov::float16(7.f), ov::float16(8.f), ov::float16(9.f), + ov::float16(10.f), ov::float16(11.f), ov::float16(12.f) }); set_values(input2, { diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/gather_nd_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/gather_nd_gpu_test.cpp index 44d23a8703032d..78d031680f4b1c 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/gather_nd_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/gather_nd_gpu_test.cpp @@ -106,33 +106,33 @@ TEST(gather_nd_gpu_fp16, d23322_i231312_ir6_batch2) { // expected output dim: v5{6,1,3,1,2}, v8{2,3,1,3,1,2} set_values(input0, { - FLOAT16(11), FLOAT16(12), FLOAT16(13), FLOAT16(14), FLOAT16(15), FLOAT16(16), FLOAT16(11), FLOAT16(12), FLOAT16(13), FLOAT16(14), FLOAT16(15), FLOAT16(16), - FLOAT16(21), FLOAT16(22), FLOAT16(23), FLOAT16(24), FLOAT16(25), FLOAT16(26), FLOAT16(21), FLOAT16(22), FLOAT16(23), FLOAT16(24), FLOAT16(25), FLOAT16(26), - FLOAT16(31), FLOAT16(32), FLOAT16(33), FLOAT16(34), FLOAT16(35), FLOAT16(36), FLOAT16(31), FLOAT16(32), FLOAT16(33), FLOAT16(34), FLOAT16(35), FLOAT16(36), + ov::float16(11), 
ov::float16(12), ov::float16(13), ov::float16(14), ov::float16(15), ov::float16(16), ov::float16(11), ov::float16(12), ov::float16(13), ov::float16(14), ov::float16(15), ov::float16(16), + ov::float16(21), ov::float16(22), ov::float16(23), ov::float16(24), ov::float16(25), ov::float16(26), ov::float16(21), ov::float16(22), ov::float16(23), ov::float16(24), ov::float16(25), ov::float16(26), + ov::float16(31), ov::float16(32), ov::float16(33), ov::float16(34), ov::float16(35), ov::float16(36), ov::float16(31), ov::float16(32), ov::float16(33), ov::float16(34), ov::float16(35), ov::float16(36), - FLOAT16(11), FLOAT16(12), FLOAT16(13), FLOAT16(14), FLOAT16(15), FLOAT16(16), FLOAT16(11), FLOAT16(12), FLOAT16(13), FLOAT16(14), FLOAT16(15), FLOAT16(16), - FLOAT16(21), FLOAT16(22), FLOAT16(23), FLOAT16(24), FLOAT16(25), FLOAT16(26), FLOAT16(21), FLOAT16(22), FLOAT16(23), FLOAT16(24), FLOAT16(25), FLOAT16(26), - FLOAT16(31), FLOAT16(32), FLOAT16(33), FLOAT16(34), FLOAT16(35), FLOAT16(36), FLOAT16(31), FLOAT16(32), FLOAT16(33), FLOAT16(34), FLOAT16(35), FLOAT16(36), + ov::float16(11), ov::float16(12), ov::float16(13), ov::float16(14), ov::float16(15), ov::float16(16), ov::float16(11), ov::float16(12), ov::float16(13), ov::float16(14), ov::float16(15), ov::float16(16), + ov::float16(21), ov::float16(22), ov::float16(23), ov::float16(24), ov::float16(25), ov::float16(26), ov::float16(21), ov::float16(22), ov::float16(23), ov::float16(24), ov::float16(25), ov::float16(26), + ov::float16(31), ov::float16(32), ov::float16(33), ov::float16(34), ov::float16(35), ov::float16(36), ov::float16(31), ov::float16(32), ov::float16(33), ov::float16(34), ov::float16(35), ov::float16(36), }); set_values(input1, { - FLOAT16(2), FLOAT16(1), FLOAT16(1), FLOAT16(1), FLOAT16(1), FLOAT16(1), - FLOAT16(1), FLOAT16(0), FLOAT16(2), FLOAT16(0), FLOAT16(2), FLOAT16(0), - FLOAT16(0), FLOAT16(1), FLOAT16(0), FLOAT16(1), FLOAT16(0), FLOAT16(1), + ov::float16(2), ov::float16(1), ov::float16(1), ov::float16(1), ov::float16(1), ov::float16(1), + ov::float16(1), ov::float16(0), ov::float16(2), ov::float16(0), ov::float16(2), ov::float16(0), + ov::float16(0), ov::float16(1), ov::float16(0), ov::float16(1), ov::float16(0), ov::float16(1), - FLOAT16(2), FLOAT16(0), FLOAT16(1), FLOAT16(0), FLOAT16(1), FLOAT16(0), - FLOAT16(1), FLOAT16(1), FLOAT16(2), FLOAT16(1), FLOAT16(2), FLOAT16(1), - FLOAT16(0), FLOAT16(0), FLOAT16(1), FLOAT16(0), FLOAT16(2), FLOAT16(0), + ov::float16(2), ov::float16(0), ov::float16(1), ov::float16(0), ov::float16(1), ov::float16(0), + ov::float16(1), ov::float16(1), ov::float16(2), ov::float16(1), ov::float16(2), ov::float16(1), + ov::float16(0), ov::float16(0), ov::float16(1), ov::float16(0), ov::float16(2), ov::float16(0), }); std::vector expected_results = { - FLOAT16(15), FLOAT16(16), FLOAT16(11), FLOAT16(12), FLOAT16(11), FLOAT16(12), - FLOAT16(25), FLOAT16(26), FLOAT16(23), FLOAT16(24), FLOAT16(23), FLOAT16(24), - FLOAT16(33), FLOAT16(34), FLOAT16(33), FLOAT16(34), FLOAT16(33), FLOAT16(34), + ov::float16(15), ov::float16(16), ov::float16(11), ov::float16(12), ov::float16(11), ov::float16(12), + ov::float16(25), ov::float16(26), ov::float16(23), ov::float16(24), ov::float16(23), ov::float16(24), + ov::float16(33), ov::float16(34), ov::float16(33), ov::float16(34), ov::float16(33), ov::float16(34), - FLOAT16(13), FLOAT16(14), FLOAT16(15), FLOAT16(16), FLOAT16(15), FLOAT16(16), - FLOAT16(21), FLOAT16(22), FLOAT16(25), FLOAT16(26), FLOAT16(25), FLOAT16(26), - FLOAT16(31), FLOAT16(32), FLOAT16(35), FLOAT16(36), 
FLOAT16(33), FLOAT16(34), + ov::float16(13), ov::float16(14), ov::float16(15), ov::float16(16), ov::float16(15), ov::float16(16), + ov::float16(21), ov::float16(22), ov::float16(25), ov::float16(26), ov::float16(25), ov::float16(26), + ov::float16(31), ov::float16(32), ov::float16(35), ov::float16(36), ov::float16(33), ov::float16(34), }; DoTestV5(engine, input0, input1, expected_results, indices_rank, batch_dims, format::bfzyx, {6, 1, 2, 1, 3}); @@ -149,33 +149,33 @@ TEST(gather_nd_gpu_fp16, d231322_i231321_ir6_batch5) { // expected output dim: v5{36}, v8{2, 3, 2, 3, 1} set_values(input0, { - FLOAT16(11), FLOAT16(12), FLOAT16(13), FLOAT16(14), FLOAT16(15), FLOAT16(16), FLOAT16(17), FLOAT16(18), FLOAT16(19), FLOAT16(10), FLOAT16(21), FLOAT16(18), - FLOAT16(21), FLOAT16(22), FLOAT16(23), FLOAT16(24), FLOAT16(25), FLOAT16(26), FLOAT16(27), FLOAT16(28), FLOAT16(29), FLOAT16(20), FLOAT16(27), FLOAT16(28), - FLOAT16(31), FLOAT16(32), FLOAT16(33), FLOAT16(34), FLOAT16(35), FLOAT16(36), FLOAT16(37), FLOAT16(38), FLOAT16(39), FLOAT16(30), FLOAT16(31), FLOAT16(30), + ov::float16(11), ov::float16(12), ov::float16(13), ov::float16(14), ov::float16(15), ov::float16(16), ov::float16(17), ov::float16(18), ov::float16(19), ov::float16(10), ov::float16(21), ov::float16(18), + ov::float16(21), ov::float16(22), ov::float16(23), ov::float16(24), ov::float16(25), ov::float16(26), ov::float16(27), ov::float16(28), ov::float16(29), ov::float16(20), ov::float16(27), ov::float16(28), + ov::float16(31), ov::float16(32), ov::float16(33), ov::float16(34), ov::float16(35), ov::float16(36), ov::float16(37), ov::float16(38), ov::float16(39), ov::float16(30), ov::float16(31), ov::float16(30), - FLOAT16(11), FLOAT16(12), FLOAT16(13), FLOAT16(14), FLOAT16(15), FLOAT16(16), FLOAT16(17), FLOAT16(18), FLOAT16(19), FLOAT16(10), FLOAT16(17), FLOAT16(18), - FLOAT16(21), FLOAT16(22), FLOAT16(23), FLOAT16(24), FLOAT16(25), FLOAT16(26), FLOAT16(27), FLOAT16(28), FLOAT16(29), FLOAT16(20), FLOAT16(27), FLOAT16(28), - FLOAT16(31), FLOAT16(32), FLOAT16(33), FLOAT16(34), FLOAT16(35), FLOAT16(36), FLOAT16(37), FLOAT16(38), FLOAT16(39), FLOAT16(30), FLOAT16(29), FLOAT16(30), + ov::float16(11), ov::float16(12), ov::float16(13), ov::float16(14), ov::float16(15), ov::float16(16), ov::float16(17), ov::float16(18), ov::float16(19), ov::float16(10), ov::float16(17), ov::float16(18), + ov::float16(21), ov::float16(22), ov::float16(23), ov::float16(24), ov::float16(25), ov::float16(26), ov::float16(27), ov::float16(28), ov::float16(29), ov::float16(20), ov::float16(27), ov::float16(28), + ov::float16(31), ov::float16(32), ov::float16(33), ov::float16(34), ov::float16(35), ov::float16(36), ov::float16(37), ov::float16(38), ov::float16(39), ov::float16(30), ov::float16(29), ov::float16(30), }); set_values(input1, { - FLOAT16(1), FLOAT16(1), FLOAT16(1), FLOAT16(1), FLOAT16(1), FLOAT16(1), - FLOAT16(0), FLOAT16(0), FLOAT16(0), FLOAT16(0), FLOAT16(0), FLOAT16(0), - FLOAT16(1), FLOAT16(0), FLOAT16(0), FLOAT16(1), FLOAT16(1), FLOAT16(0), + ov::float16(1), ov::float16(1), ov::float16(1), ov::float16(1), ov::float16(1), ov::float16(1), + ov::float16(0), ov::float16(0), ov::float16(0), ov::float16(0), ov::float16(0), ov::float16(0), + ov::float16(1), ov::float16(0), ov::float16(0), ov::float16(1), ov::float16(1), ov::float16(0), - FLOAT16(1), FLOAT16(1), FLOAT16(1), FLOAT16(1), FLOAT16(1), FLOAT16(1), - FLOAT16(0), FLOAT16(0), FLOAT16(0), FLOAT16(0), FLOAT16(0), FLOAT16(0), - FLOAT16(1), FLOAT16(0), FLOAT16(0), FLOAT16(1), FLOAT16(1), FLOAT16(0), + 
ov::float16(1), ov::float16(1), ov::float16(1), ov::float16(1), ov::float16(1), ov::float16(1), + ov::float16(0), ov::float16(0), ov::float16(0), ov::float16(0), ov::float16(0), ov::float16(0), + ov::float16(1), ov::float16(0), ov::float16(0), ov::float16(1), ov::float16(1), ov::float16(0), }); std::vector expected_results = { - FLOAT16(12), FLOAT16(14), FLOAT16(16), FLOAT16(18), FLOAT16(10), FLOAT16(18), - FLOAT16(21), FLOAT16(23), FLOAT16(25), FLOAT16(27), FLOAT16(29), FLOAT16(27), - FLOAT16(32), FLOAT16(33), FLOAT16(35), FLOAT16(38), FLOAT16(30), FLOAT16(31), + ov::float16(12), ov::float16(14), ov::float16(16), ov::float16(18), ov::float16(10), ov::float16(18), + ov::float16(21), ov::float16(23), ov::float16(25), ov::float16(27), ov::float16(29), ov::float16(27), + ov::float16(32), ov::float16(33), ov::float16(35), ov::float16(38), ov::float16(30), ov::float16(31), - FLOAT16(12), FLOAT16(14), FLOAT16(16), FLOAT16(18), FLOAT16(10), FLOAT16(18), - FLOAT16(21), FLOAT16(23), FLOAT16(25), FLOAT16(27), FLOAT16(29), FLOAT16(27), - FLOAT16(32), FLOAT16(33), FLOAT16(35), FLOAT16(38), FLOAT16(30), FLOAT16(29), + ov::float16(12), ov::float16(14), ov::float16(16), ov::float16(18), ov::float16(10), ov::float16(18), + ov::float16(21), ov::float16(23), ov::float16(25), ov::float16(27), ov::float16(29), ov::float16(27), + ov::float16(32), ov::float16(33), ov::float16(35), ov::float16(38), ov::float16(30), ov::float16(29), }; DoTestV5(engine, input0, input1, expected_results, indices_rank, batch_dims, format::bfyx, {36, 1, 1, 1}); @@ -192,33 +192,33 @@ TEST(gather_nd_gpu_fp16, d23322_i23321_ir5_batch4) { // expected output dim: v5{36}, v8{2,3,2,3} set_values(input0, { - FLOAT16(11), FLOAT16(12), FLOAT16(13), FLOAT16(14), FLOAT16(15), FLOAT16(16), FLOAT16(17), FLOAT16(18), FLOAT16(19), FLOAT16(10), FLOAT16(21), FLOAT16(18), - FLOAT16(21), FLOAT16(22), FLOAT16(23), FLOAT16(24), FLOAT16(25), FLOAT16(26), FLOAT16(27), FLOAT16(28), FLOAT16(29), FLOAT16(20), FLOAT16(27), FLOAT16(28), - FLOAT16(31), FLOAT16(32), FLOAT16(33), FLOAT16(34), FLOAT16(35), FLOAT16(36), FLOAT16(37), FLOAT16(38), FLOAT16(39), FLOAT16(30), FLOAT16(31), FLOAT16(30), + ov::float16(11), ov::float16(12), ov::float16(13), ov::float16(14), ov::float16(15), ov::float16(16), ov::float16(17), ov::float16(18), ov::float16(19), ov::float16(10), ov::float16(21), ov::float16(18), + ov::float16(21), ov::float16(22), ov::float16(23), ov::float16(24), ov::float16(25), ov::float16(26), ov::float16(27), ov::float16(28), ov::float16(29), ov::float16(20), ov::float16(27), ov::float16(28), + ov::float16(31), ov::float16(32), ov::float16(33), ov::float16(34), ov::float16(35), ov::float16(36), ov::float16(37), ov::float16(38), ov::float16(39), ov::float16(30), ov::float16(31), ov::float16(30), - FLOAT16(11), FLOAT16(12), FLOAT16(13), FLOAT16(14), FLOAT16(15), FLOAT16(16), FLOAT16(17), FLOAT16(18), FLOAT16(19), FLOAT16(10), FLOAT16(17), FLOAT16(18), - FLOAT16(21), FLOAT16(22), FLOAT16(23), FLOAT16(24), FLOAT16(25), FLOAT16(26), FLOAT16(27), FLOAT16(28), FLOAT16(29), FLOAT16(20), FLOAT16(27), FLOAT16(28), - FLOAT16(31), FLOAT16(32), FLOAT16(33), FLOAT16(34), FLOAT16(35), FLOAT16(36), FLOAT16(37), FLOAT16(38), FLOAT16(39), FLOAT16(30), FLOAT16(29), FLOAT16(30), + ov::float16(11), ov::float16(12), ov::float16(13), ov::float16(14), ov::float16(15), ov::float16(16), ov::float16(17), ov::float16(18), ov::float16(19), ov::float16(10), ov::float16(17), ov::float16(18), + ov::float16(21), ov::float16(22), ov::float16(23), ov::float16(24), ov::float16(25), 
ov::float16(26), ov::float16(27), ov::float16(28), ov::float16(29), ov::float16(20), ov::float16(27), ov::float16(28), + ov::float16(31), ov::float16(32), ov::float16(33), ov::float16(34), ov::float16(35), ov::float16(36), ov::float16(37), ov::float16(38), ov::float16(39), ov::float16(30), ov::float16(29), ov::float16(30), }); set_values(input1, { - FLOAT16(1), FLOAT16(1), FLOAT16(1), FLOAT16(1), FLOAT16(1), FLOAT16(1), - FLOAT16(0), FLOAT16(0), FLOAT16(0), FLOAT16(0), FLOAT16(0), FLOAT16(0), - FLOAT16(1), FLOAT16(0), FLOAT16(0), FLOAT16(1), FLOAT16(1), FLOAT16(0), + ov::float16(1), ov::float16(1), ov::float16(1), ov::float16(1), ov::float16(1), ov::float16(1), + ov::float16(0), ov::float16(0), ov::float16(0), ov::float16(0), ov::float16(0), ov::float16(0), + ov::float16(1), ov::float16(0), ov::float16(0), ov::float16(1), ov::float16(1), ov::float16(0), - FLOAT16(1), FLOAT16(1), FLOAT16(1), FLOAT16(1), FLOAT16(1), FLOAT16(1), - FLOAT16(0), FLOAT16(0), FLOAT16(0), FLOAT16(0), FLOAT16(0), FLOAT16(0), - FLOAT16(1), FLOAT16(0), FLOAT16(0), FLOAT16(1), FLOAT16(1), FLOAT16(0), + ov::float16(1), ov::float16(1), ov::float16(1), ov::float16(1), ov::float16(1), ov::float16(1), + ov::float16(0), ov::float16(0), ov::float16(0), ov::float16(0), ov::float16(0), ov::float16(0), + ov::float16(1), ov::float16(0), ov::float16(0), ov::float16(1), ov::float16(1), ov::float16(0), }); std::vector expected_results = { - FLOAT16(12), FLOAT16(14), FLOAT16(16), FLOAT16(18), FLOAT16(10), FLOAT16(18), - FLOAT16(21), FLOAT16(23), FLOAT16(25), FLOAT16(27), FLOAT16(29), FLOAT16(27), - FLOAT16(32), FLOAT16(33), FLOAT16(35), FLOAT16(38), FLOAT16(30), FLOAT16(31), + ov::float16(12), ov::float16(14), ov::float16(16), ov::float16(18), ov::float16(10), ov::float16(18), + ov::float16(21), ov::float16(23), ov::float16(25), ov::float16(27), ov::float16(29), ov::float16(27), + ov::float16(32), ov::float16(33), ov::float16(35), ov::float16(38), ov::float16(30), ov::float16(31), - FLOAT16(12), FLOAT16(14), FLOAT16(16), FLOAT16(18), FLOAT16(10), FLOAT16(18), - FLOAT16(21), FLOAT16(23), FLOAT16(25), FLOAT16(27), FLOAT16(29), FLOAT16(27), - FLOAT16(32), FLOAT16(33), FLOAT16(35), FLOAT16(38), FLOAT16(30), FLOAT16(29), + ov::float16(12), ov::float16(14), ov::float16(16), ov::float16(18), ov::float16(10), ov::float16(18), + ov::float16(21), ov::float16(23), ov::float16(25), ov::float16(27), ov::float16(29), ov::float16(27), + ov::float16(32), ov::float16(33), ov::float16(35), ov::float16(38), ov::float16(30), ov::float16(29), }; DoTestV5(engine, input0, input1, expected_results, indices_rank, batch_dims, format::bfyx, { 36, 1, 1, 1 }); @@ -236,33 +236,33 @@ TEST(gather_nd_gpu_fp16, d23223_i2321_ir4_batch3) { // expected output dim: v5{12,3} v8{2,3,3,2} set_values(input0, { - FLOAT16(11), FLOAT16(12), FLOAT16(13), FLOAT16(14), FLOAT16(15), FLOAT16(16), FLOAT16(17), FLOAT16(18),FLOAT16(15), FLOAT16(16), FLOAT16(17), FLOAT16(18), - FLOAT16(21), FLOAT16(22), FLOAT16(23), FLOAT16(24), FLOAT16(25), FLOAT16(26), FLOAT16(27), FLOAT16(28),FLOAT16(25), FLOAT16(26), FLOAT16(27), FLOAT16(28), - FLOAT16(29), FLOAT16(30), FLOAT16(31), FLOAT16(32), FLOAT16(33), FLOAT16(34), FLOAT16(35), FLOAT16(36),FLOAT16(33), FLOAT16(34), FLOAT16(35), FLOAT16(36), + ov::float16(11), ov::float16(12), ov::float16(13), ov::float16(14), ov::float16(15), ov::float16(16), ov::float16(17), ov::float16(18),ov::float16(15), ov::float16(16), ov::float16(17), ov::float16(18), + ov::float16(21), ov::float16(22), ov::float16(23), ov::float16(24), ov::float16(25), ov::float16(26), 
ov::float16(27), ov::float16(28),ov::float16(25), ov::float16(26), ov::float16(27), ov::float16(28), + ov::float16(29), ov::float16(30), ov::float16(31), ov::float16(32), ov::float16(33), ov::float16(34), ov::float16(35), ov::float16(36),ov::float16(33), ov::float16(34), ov::float16(35), ov::float16(36), - FLOAT16(11), FLOAT16(12), FLOAT16(13), FLOAT16(14), FLOAT16(15), FLOAT16(16), FLOAT16(17), FLOAT16(18),FLOAT16(15), FLOAT16(16), FLOAT16(17), FLOAT16(18), - FLOAT16(21), FLOAT16(22), FLOAT16(23), FLOAT16(24), FLOAT16(25), FLOAT16(26), FLOAT16(27), FLOAT16(28),FLOAT16(25), FLOAT16(26), FLOAT16(27), FLOAT16(28), - FLOAT16(29), FLOAT16(30), FLOAT16(31), FLOAT16(32), FLOAT16(33), FLOAT16(34), FLOAT16(35), FLOAT16(36),FLOAT16(33), FLOAT16(34), FLOAT16(35), FLOAT16(36), + ov::float16(11), ov::float16(12), ov::float16(13), ov::float16(14), ov::float16(15), ov::float16(16), ov::float16(17), ov::float16(18),ov::float16(15), ov::float16(16), ov::float16(17), ov::float16(18), + ov::float16(21), ov::float16(22), ov::float16(23), ov::float16(24), ov::float16(25), ov::float16(26), ov::float16(27), ov::float16(28),ov::float16(25), ov::float16(26), ov::float16(27), ov::float16(28), + ov::float16(29), ov::float16(30), ov::float16(31), ov::float16(32), ov::float16(33), ov::float16(34), ov::float16(35), ov::float16(36),ov::float16(33), ov::float16(34), ov::float16(35), ov::float16(36), }); set_values(input1, { - FLOAT16(1), FLOAT16(1), - FLOAT16(1), FLOAT16(0), - FLOAT16(1), FLOAT16(1), + ov::float16(1), ov::float16(1), + ov::float16(1), ov::float16(0), + ov::float16(1), ov::float16(1), - FLOAT16(0), FLOAT16(0), - FLOAT16(0), FLOAT16(1), - FLOAT16(0), FLOAT16(0), + ov::float16(0), ov::float16(0), + ov::float16(0), ov::float16(1), + ov::float16(0), ov::float16(0), }); std::vector expected_results = { - FLOAT16(14), FLOAT16(15), FLOAT16(16), FLOAT16(16), FLOAT16(17), FLOAT16(18), - FLOAT16(24), FLOAT16(25), FLOAT16(26), FLOAT16(27), FLOAT16(28), FLOAT16(25), - FLOAT16(32), FLOAT16(33), FLOAT16(34), FLOAT16(34), FLOAT16(35), FLOAT16(36), + ov::float16(14), ov::float16(15), ov::float16(16), ov::float16(16), ov::float16(17), ov::float16(18), + ov::float16(24), ov::float16(25), ov::float16(26), ov::float16(27), ov::float16(28), ov::float16(25), + ov::float16(32), ov::float16(33), ov::float16(34), ov::float16(34), ov::float16(35), ov::float16(36), - FLOAT16(11), FLOAT16(12), FLOAT16(13), FLOAT16(17), FLOAT16(18), FLOAT16(15), - FLOAT16(21), FLOAT16(22), FLOAT16(23), FLOAT16(26), FLOAT16(27), FLOAT16(28), - FLOAT16(29), FLOAT16(30), FLOAT16(31), FLOAT16(35), FLOAT16(36), FLOAT16(33), + ov::float16(11), ov::float16(12), ov::float16(13), ov::float16(17), ov::float16(18), ov::float16(15), + ov::float16(21), ov::float16(22), ov::float16(23), ov::float16(26), ov::float16(27), ov::float16(28), + ov::float16(29), ov::float16(30), ov::float16(31), ov::float16(35), ov::float16(36), ov::float16(33), }; DoTestV5(engine, input0, input1, expected_results, indices_rank, batch_dims, format::bfyx, { 12, 3, 1, 1 }); @@ -279,33 +279,33 @@ TEST(gather_nd_gpu_fp16, d2342_i2312_ir4_batch2) { // expected output dim: v5{6,1}, v8(2,3,1) set_values(input0, { - FLOAT16(11), FLOAT16(12), FLOAT16(13), FLOAT16(14), FLOAT16(15), FLOAT16(16), FLOAT16(17), FLOAT16(18), - FLOAT16(21), FLOAT16(22), FLOAT16(23), FLOAT16(24), FLOAT16(25), FLOAT16(26), FLOAT16(27), FLOAT16(28), - FLOAT16(29), FLOAT16(30), FLOAT16(31), FLOAT16(32), FLOAT16(33), FLOAT16(34), FLOAT16(35), FLOAT16(36), + ov::float16(11), ov::float16(12), ov::float16(13), ov::float16(14), 
ov::float16(15), ov::float16(16), ov::float16(17), ov::float16(18), + ov::float16(21), ov::float16(22), ov::float16(23), ov::float16(24), ov::float16(25), ov::float16(26), ov::float16(27), ov::float16(28), + ov::float16(29), ov::float16(30), ov::float16(31), ov::float16(32), ov::float16(33), ov::float16(34), ov::float16(35), ov::float16(36), - FLOAT16(11), FLOAT16(12), FLOAT16(13), FLOAT16(14), FLOAT16(15), FLOAT16(16), FLOAT16(17), FLOAT16(18), - FLOAT16(21), FLOAT16(22), FLOAT16(23), FLOAT16(24), FLOAT16(25), FLOAT16(26), FLOAT16(27), FLOAT16(28), - FLOAT16(29), FLOAT16(30), FLOAT16(31), FLOAT16(32), FLOAT16(33), FLOAT16(34), FLOAT16(35), FLOAT16(36), + ov::float16(11), ov::float16(12), ov::float16(13), ov::float16(14), ov::float16(15), ov::float16(16), ov::float16(17), ov::float16(18), + ov::float16(21), ov::float16(22), ov::float16(23), ov::float16(24), ov::float16(25), ov::float16(26), ov::float16(27), ov::float16(28), + ov::float16(29), ov::float16(30), ov::float16(31), ov::float16(32), ov::float16(33), ov::float16(34), ov::float16(35), ov::float16(36), }); set_values(input1, { - FLOAT16(1), FLOAT16(1), - FLOAT16(0), FLOAT16(0), - FLOAT16(2), FLOAT16(1), + ov::float16(1), ov::float16(1), + ov::float16(0), ov::float16(0), + ov::float16(2), ov::float16(1), - FLOAT16(0), FLOAT16(0), - FLOAT16(2), FLOAT16(1), - FLOAT16(2), FLOAT16(0), + ov::float16(0), ov::float16(0), + ov::float16(2), ov::float16(1), + ov::float16(2), ov::float16(0), }); std::vector expected_results = { - FLOAT16(14), - FLOAT16(21), - FLOAT16(34), + ov::float16(14), + ov::float16(21), + ov::float16(34), - FLOAT16(11), - FLOAT16(26), - FLOAT16(33), + ov::float16(11), + ov::float16(26), + ov::float16(33), }; DoTestV5(engine, input0, input1, expected_results, indices_rank, batch_dims, format::bfyx, { 6, 1, 1, 1 }); @@ -322,34 +322,34 @@ TEST(gather_nd_gpu_fp16, d234_i2311_ir4_batch2) { // expected output dim: v5{6,1,1}, v8{2,3,1,1} set_values(input0, { - FLOAT16(1), FLOAT16(2), FLOAT16(3), FLOAT16(4), - FLOAT16(5), FLOAT16(6), FLOAT16(7), FLOAT16(8), - FLOAT16(9), FLOAT16(10), FLOAT16(11), FLOAT16(12), + ov::float16(1), ov::float16(2), ov::float16(3), ov::float16(4), + ov::float16(5), ov::float16(6), ov::float16(7), ov::float16(8), + ov::float16(9), ov::float16(10), ov::float16(11), ov::float16(12), - FLOAT16(13), FLOAT16(14), FLOAT16(15), FLOAT16(16), - FLOAT16(17), FLOAT16(18), FLOAT16(19), FLOAT16(20), - FLOAT16(21), FLOAT16(22), FLOAT16(23), FLOAT16(24), + ov::float16(13), ov::float16(14), ov::float16(15), ov::float16(16), + ov::float16(17), ov::float16(18), ov::float16(19), ov::float16(20), + ov::float16(21), ov::float16(22), ov::float16(23), ov::float16(24), }); set_values(input1, { - FLOAT16(1), - FLOAT16(0), - FLOAT16(2), + ov::float16(1), + ov::float16(0), + ov::float16(2), - FLOAT16(0), - FLOAT16(2), - FLOAT16(2), + ov::float16(0), + ov::float16(2), + ov::float16(2), }); std::vector expected_results = { - FLOAT16(2), - FLOAT16(5), - FLOAT16(11), + ov::float16(2), + ov::float16(5), + ov::float16(11), - FLOAT16(13), - FLOAT16(19), - FLOAT16(23), + ov::float16(13), + ov::float16(19), + ov::float16(23), }; DoTestV5(engine, input0, input1, expected_results, indices_rank, batch_dims, format::bfyx, { 6, 1, 1, 1 }); @@ -366,24 +366,24 @@ TEST(gather_nd_gpu_fp16, d234_i21_ir2_batch1) { // expected output dim: v5{2,4,1,1}, v8{2,4,1,1} set_values(input0, { - FLOAT16(1), FLOAT16(2), FLOAT16(3), FLOAT16(4), - FLOAT16(5), FLOAT16(6), FLOAT16(7), FLOAT16(8), - FLOAT16(9), FLOAT16(10), FLOAT16(11), FLOAT16(12), + ov::float16(1), 
ov::float16(2), ov::float16(3), ov::float16(4), + ov::float16(5), ov::float16(6), ov::float16(7), ov::float16(8), + ov::float16(9), ov::float16(10), ov::float16(11), ov::float16(12), - FLOAT16(13), FLOAT16(14), FLOAT16(15), FLOAT16(16), - FLOAT16(17), FLOAT16(18), FLOAT16(19), FLOAT16(20), - FLOAT16(21), FLOAT16(22), FLOAT16(23), FLOAT16(24), + ov::float16(13), ov::float16(14), ov::float16(15), ov::float16(16), + ov::float16(17), ov::float16(18), ov::float16(19), ov::float16(20), + ov::float16(21), ov::float16(22), ov::float16(23), ov::float16(24), }); set_values(input1, { - FLOAT16(1), - FLOAT16(0), + ov::float16(1), + ov::float16(0), }); std::vector expected_results = { - FLOAT16(5), FLOAT16(6), FLOAT16(7), FLOAT16(8), - FLOAT16(13), FLOAT16(14), FLOAT16(15), FLOAT16(16), + ov::float16(5), ov::float16(6), ov::float16(7), ov::float16(8), + ov::float16(13), ov::float16(14), ov::float16(15), ov::float16(16), }; DoTestV5(engine, input0, input1, expected_results, indices_rank, batch_dims, format::bfyx, { 2, 4, 1, 1 }); @@ -400,18 +400,18 @@ TEST(gather_nd_gpu_fp16, d22_i21_ir2_batch1) { // expected output dim: v5{2,1,1}, v8{2,1,1} set_values(input0, { - FLOAT16(1), FLOAT16(2), - FLOAT16(3), FLOAT16(4), + ov::float16(1), ov::float16(2), + ov::float16(3), ov::float16(4), }); set_values(input1, { - FLOAT16(1), - FLOAT16(0), + ov::float16(1), + ov::float16(0), }); std::vector expected_results = { - FLOAT16(2), - FLOAT16(3), + ov::float16(2), + ov::float16(3), }; DoTestV5(engine, input0, input1, expected_results, indices_rank, batch_dims, format::bfyx, { 2, 1, 1, 1 }); @@ -428,36 +428,36 @@ TEST(gather_nd_gpu_fp16, d3223_i321113_ir6_batch0) { // expected output dim: 323111 set_values(input0, { - FLOAT16(11), FLOAT16(12), FLOAT16(13), FLOAT16(14), FLOAT16(15), FLOAT16(16), - FLOAT16(21), FLOAT16(22), FLOAT16(23), FLOAT16(24), FLOAT16(25), FLOAT16(26), + ov::float16(11), ov::float16(12), ov::float16(13), ov::float16(14), ov::float16(15), ov::float16(16), + ov::float16(21), ov::float16(22), ov::float16(23), ov::float16(24), ov::float16(25), ov::float16(26), - FLOAT16(31), FLOAT16(32), FLOAT16(33), FLOAT16(34), FLOAT16(35), FLOAT16(36), - FLOAT16(41), FLOAT16(42), FLOAT16(43), FLOAT16(44), FLOAT16(45), FLOAT16(46), + ov::float16(31), ov::float16(32), ov::float16(33), ov::float16(34), ov::float16(35), ov::float16(36), + ov::float16(41), ov::float16(42), ov::float16(43), ov::float16(44), ov::float16(45), ov::float16(46), - FLOAT16(51), FLOAT16(52), FLOAT16(53), FLOAT16(54), FLOAT16(55), FLOAT16(56), - FLOAT16(61), FLOAT16(62), FLOAT16(63), FLOAT16(64), FLOAT16(65), FLOAT16(66), + ov::float16(51), ov::float16(52), ov::float16(53), ov::float16(54), ov::float16(55), ov::float16(56), + ov::float16(61), ov::float16(62), ov::float16(63), ov::float16(64), ov::float16(65), ov::float16(66), }); set_values(input1, { - FLOAT16(2), FLOAT16(1), FLOAT16(1), - FLOAT16(1), FLOAT16(0), FLOAT16(0), + ov::float16(2), ov::float16(1), ov::float16(1), + ov::float16(1), ov::float16(0), ov::float16(0), - FLOAT16(0), FLOAT16(1), FLOAT16(0), - FLOAT16(2), FLOAT16(0), FLOAT16(1), + ov::float16(0), ov::float16(1), ov::float16(0), + ov::float16(2), ov::float16(0), ov::float16(1), - FLOAT16(1), FLOAT16(1), FLOAT16(0), - FLOAT16(0), FLOAT16(0), FLOAT16(0), + ov::float16(1), ov::float16(1), ov::float16(0), + ov::float16(0), ov::float16(0), ov::float16(0), }); std::vector expected_results = { - FLOAT16(64), FLOAT16(65), FLOAT16(66), - FLOAT16(31), FLOAT16(32), FLOAT16(33), + ov::float16(64), ov::float16(65), ov::float16(66), + 
ov::float16(31), ov::float16(32), ov::float16(33), - FLOAT16(21), FLOAT16(22), FLOAT16(23), - FLOAT16(54), FLOAT16(55), FLOAT16(56), + ov::float16(21), ov::float16(22), ov::float16(23), + ov::float16(54), ov::float16(55), ov::float16(56), - FLOAT16(41), FLOAT16(42), FLOAT16(43), - FLOAT16(11), FLOAT16(12), FLOAT16(13), + ov::float16(41), ov::float16(42), ov::float16(43), + ov::float16(11), ov::float16(12), ov::float16(13), }; DoTestV5(engine, input0, input1, expected_results, indices_rank, batch_dims, format::bfwzyx, { 3, 2, 3, 1, 1, 1 }); @@ -474,36 +474,36 @@ TEST(gather_nd_gpu_fp16, d3221_i32312_ir3_batch0) { // expected output dim: 32213 set_values(input0, { - FLOAT16(11), FLOAT16(12), FLOAT16(13), FLOAT16(14), FLOAT16(15), FLOAT16(16), - FLOAT16(21), FLOAT16(22), FLOAT16(23), FLOAT16(24), FLOAT16(25), FLOAT16(26), + ov::float16(11), ov::float16(12), ov::float16(13), ov::float16(14), ov::float16(15), ov::float16(16), + ov::float16(21), ov::float16(22), ov::float16(23), ov::float16(24), ov::float16(25), ov::float16(26), - FLOAT16(31), FLOAT16(32), FLOAT16(33), FLOAT16(34), FLOAT16(35), FLOAT16(36), - FLOAT16(41), FLOAT16(42), FLOAT16(43), FLOAT16(44), FLOAT16(45), FLOAT16(46), + ov::float16(31), ov::float16(32), ov::float16(33), ov::float16(34), ov::float16(35), ov::float16(36), + ov::float16(41), ov::float16(42), ov::float16(43), ov::float16(44), ov::float16(45), ov::float16(46), - FLOAT16(51), FLOAT16(52), FLOAT16(53), FLOAT16(54), FLOAT16(55), FLOAT16(56), - FLOAT16(61), FLOAT16(62), FLOAT16(63), FLOAT16(64), FLOAT16(65), FLOAT16(66), + ov::float16(51), ov::float16(52), ov::float16(53), ov::float16(54), ov::float16(55), ov::float16(56), + ov::float16(61), ov::float16(62), ov::float16(63), ov::float16(64), ov::float16(65), ov::float16(66), }); set_values(input1, { - FLOAT16(2), FLOAT16(1), - FLOAT16(1), FLOAT16(0), + ov::float16(2), ov::float16(1), + ov::float16(1), ov::float16(0), - FLOAT16(0), FLOAT16(1), - FLOAT16(2), FLOAT16(0), + ov::float16(0), ov::float16(1), + ov::float16(2), ov::float16(0), - FLOAT16(1), FLOAT16(1), - FLOAT16(0), FLOAT16(0), + ov::float16(1), ov::float16(1), + ov::float16(0), ov::float16(0), }); std::vector expected_results = { - FLOAT16(61), FLOAT16(62), FLOAT16(63), FLOAT16(64), FLOAT16(65), FLOAT16(66), - FLOAT16(31), FLOAT16(32), FLOAT16(33), FLOAT16(34), FLOAT16(35), FLOAT16(36), + ov::float16(61), ov::float16(62), ov::float16(63), ov::float16(64), ov::float16(65), ov::float16(66), + ov::float16(31), ov::float16(32), ov::float16(33), ov::float16(34), ov::float16(35), ov::float16(36), - FLOAT16(21), FLOAT16(22), FLOAT16(23), FLOAT16(24), FLOAT16(25), FLOAT16(26), - FLOAT16(51), FLOAT16(52), FLOAT16(53), FLOAT16(54), FLOAT16(55), FLOAT16(56), + ov::float16(21), ov::float16(22), ov::float16(23), ov::float16(24), ov::float16(25), ov::float16(26), + ov::float16(51), ov::float16(52), ov::float16(53), ov::float16(54), ov::float16(55), ov::float16(56), - FLOAT16(41), FLOAT16(42), FLOAT16(43), FLOAT16(44), FLOAT16(45), FLOAT16(46), - FLOAT16(11), FLOAT16(12), FLOAT16(13), FLOAT16(14), FLOAT16(15), FLOAT16(16), + ov::float16(41), ov::float16(42), ov::float16(43), ov::float16(44), ov::float16(45), ov::float16(46), + ov::float16(11), ov::float16(12), ov::float16(13), ov::float16(14), ov::float16(15), ov::float16(16), }; DoTestV5(engine, input0, input1, expected_results, indices_rank, batch_dims, format::bfzyx, { 3, 2, 2, 1, 3 }); @@ -520,36 +520,36 @@ TEST(gather_nd_gpu_fp16, d3231_i32312_ir3_batch0) { // expected output dim: {3,2,2,1} set_values(input0, { - 
FLOAT16(11), FLOAT16(12), FLOAT16(13), FLOAT16(14), FLOAT16(15), FLOAT16(16), - FLOAT16(21), FLOAT16(22), FLOAT16(23), FLOAT16(24), FLOAT16(25), FLOAT16(26), + ov::float16(11), ov::float16(12), ov::float16(13), ov::float16(14), ov::float16(15), ov::float16(16), + ov::float16(21), ov::float16(22), ov::float16(23), ov::float16(24), ov::float16(25), ov::float16(26), - FLOAT16(31), FLOAT16(32), FLOAT16(33), FLOAT16(34), FLOAT16(35), FLOAT16(36), - FLOAT16(41), FLOAT16(42), FLOAT16(43), FLOAT16(44), FLOAT16(45), FLOAT16(46), + ov::float16(31), ov::float16(32), ov::float16(33), ov::float16(34), ov::float16(35), ov::float16(36), + ov::float16(41), ov::float16(42), ov::float16(43), ov::float16(44), ov::float16(45), ov::float16(46), - FLOAT16(51), FLOAT16(52), FLOAT16(53), FLOAT16(54), FLOAT16(55), FLOAT16(56), - FLOAT16(61), FLOAT16(62), FLOAT16(63), FLOAT16(64), FLOAT16(65), FLOAT16(66), + ov::float16(51), ov::float16(52), ov::float16(53), ov::float16(54), ov::float16(55), ov::float16(56), + ov::float16(61), ov::float16(62), ov::float16(63), ov::float16(64), ov::float16(65), ov::float16(66), }); set_values(input1, { - FLOAT16(2), FLOAT16(1), FLOAT16(1), - FLOAT16(1), FLOAT16(0), FLOAT16(2), + ov::float16(2), ov::float16(1), ov::float16(1), + ov::float16(1), ov::float16(0), ov::float16(2), - FLOAT16(0), FLOAT16(1), FLOAT16(0), - FLOAT16(2), FLOAT16(0), FLOAT16(1), + ov::float16(0), ov::float16(1), ov::float16(0), + ov::float16(2), ov::float16(0), ov::float16(1), - FLOAT16(1), FLOAT16(1), FLOAT16(2), - FLOAT16(0), FLOAT16(0), FLOAT16(0), + ov::float16(1), ov::float16(1), ov::float16(2), + ov::float16(0), ov::float16(0), ov::float16(0), }); std::vector expected_results = { - FLOAT16(63), FLOAT16(64), - FLOAT16(35), FLOAT16(36), + ov::float16(63), ov::float16(64), + ov::float16(35), ov::float16(36), - FLOAT16(21), FLOAT16(22), - FLOAT16(53), FLOAT16(54), + ov::float16(21), ov::float16(22), + ov::float16(53), ov::float16(54), - FLOAT16(45), FLOAT16(46), - FLOAT16(11), FLOAT16(12), + ov::float16(45), ov::float16(46), + ov::float16(11), ov::float16(12), }; DoTestV5(engine, input0, input1, expected_results, indices_rank, batch_dims, format::bfyx, { 3, 2, 2, 1 }); @@ -566,31 +566,31 @@ TEST(gather_nd_gpu_fp16, d3112_i3221_ir4_batch0) { // expected output dim: {3,2,2,1,1,2} set_values(input0, { - FLOAT16(1), FLOAT16(2), - FLOAT16(7), FLOAT16(8), - FLOAT16(13), FLOAT16(14), + ov::float16(1), ov::float16(2), + ov::float16(7), ov::float16(8), + ov::float16(13), ov::float16(14), }); set_values(input1, { - FLOAT16(2), FLOAT16(1), - FLOAT16(0), FLOAT16(1), + ov::float16(2), ov::float16(1), + ov::float16(0), ov::float16(1), - FLOAT16(2), FLOAT16(1), - FLOAT16(0), FLOAT16(1), + ov::float16(2), ov::float16(1), + ov::float16(0), ov::float16(1), - FLOAT16(2), FLOAT16(1), - FLOAT16(0), FLOAT16(1), + ov::float16(2), ov::float16(1), + ov::float16(0), ov::float16(1), }); std::vector expected_results = { - FLOAT16(13), FLOAT16(14), FLOAT16(7), FLOAT16(8), - FLOAT16(1), FLOAT16(2), FLOAT16(7), FLOAT16(8), + ov::float16(13), ov::float16(14), ov::float16(7), ov::float16(8), + ov::float16(1), ov::float16(2), ov::float16(7), ov::float16(8), - FLOAT16(13), FLOAT16(14), FLOAT16(7), FLOAT16(8), - FLOAT16(1), FLOAT16(2), FLOAT16(7), FLOAT16(8), + ov::float16(13), ov::float16(14), ov::float16(7), ov::float16(8), + ov::float16(1), ov::float16(2), ov::float16(7), ov::float16(8), - FLOAT16(13), FLOAT16(14), FLOAT16(7), FLOAT16(8), - FLOAT16(1), FLOAT16(2), FLOAT16(7), FLOAT16(8), + ov::float16(13), ov::float16(14), ov::float16(7), 
ov::float16(8), + ov::float16(1), ov::float16(2), ov::float16(7), ov::float16(8), }; DoTestV5(engine, input0, input1, expected_results, indices_rank, batch_dims, format::bfwzyx, { 3, 2, 2, 1, 1, 2 }); @@ -607,39 +607,39 @@ TEST(gather_nd_gpu_fp16, d3332_i3223_ir4_batch0) { // expected output dim: {3,2,3,2} set_values(input0, { - FLOAT16(1), FLOAT16(2), FLOAT16(3), FLOAT16(4), FLOAT16(5), FLOAT16(6), - FLOAT16(7), FLOAT16(8), FLOAT16(9), FLOAT16(10), FLOAT16(11), FLOAT16(12), - FLOAT16(13), FLOAT16(14), FLOAT16(15), FLOAT16(16), FLOAT16(17), FLOAT16(18), + ov::float16(1), ov::float16(2), ov::float16(3), ov::float16(4), ov::float16(5), ov::float16(6), + ov::float16(7), ov::float16(8), ov::float16(9), ov::float16(10), ov::float16(11), ov::float16(12), + ov::float16(13), ov::float16(14), ov::float16(15), ov::float16(16), ov::float16(17), ov::float16(18), - FLOAT16(19), FLOAT16(20), FLOAT16(21), FLOAT16(22), FLOAT16(23), FLOAT16(24), - FLOAT16(25), FLOAT16(26), FLOAT16(27), FLOAT16(28), FLOAT16(29), FLOAT16(30), - FLOAT16(31), FLOAT16(32), FLOAT16(33), FLOAT16(34), FLOAT16(35), FLOAT16(36), + ov::float16(19), ov::float16(20), ov::float16(21), ov::float16(22), ov::float16(23), ov::float16(24), + ov::float16(25), ov::float16(26), ov::float16(27), ov::float16(28), ov::float16(29), ov::float16(30), + ov::float16(31), ov::float16(32), ov::float16(33), ov::float16(34), ov::float16(35), ov::float16(36), - FLOAT16(41), FLOAT16(42), FLOAT16(43), FLOAT16(44), FLOAT16(45), FLOAT16(46), - FLOAT16(51), FLOAT16(52), FLOAT16(53), FLOAT16(54), FLOAT16(55), FLOAT16(56), - FLOAT16(61), FLOAT16(62), FLOAT16(63), FLOAT16(64), FLOAT16(65), FLOAT16(66), + ov::float16(41), ov::float16(42), ov::float16(43), ov::float16(44), ov::float16(45), ov::float16(46), + ov::float16(51), ov::float16(52), ov::float16(53), ov::float16(54), ov::float16(55), ov::float16(56), + ov::float16(61), ov::float16(62), ov::float16(63), ov::float16(64), ov::float16(65), ov::float16(66), }); set_values(input1, { - FLOAT16(2), FLOAT16(0), FLOAT16(0), FLOAT16(2), FLOAT16(2), FLOAT16(0), - FLOAT16(1), FLOAT16(0), FLOAT16(0), FLOAT16(1), FLOAT16(1), FLOAT16(0), + ov::float16(2), ov::float16(0), ov::float16(0), ov::float16(2), ov::float16(2), ov::float16(0), + ov::float16(1), ov::float16(0), ov::float16(0), ov::float16(1), ov::float16(1), ov::float16(0), - FLOAT16(1), FLOAT16(0), FLOAT16(1), FLOAT16(1), FLOAT16(1), FLOAT16(1), - FLOAT16(2), FLOAT16(0), FLOAT16(0), FLOAT16(2), FLOAT16(1), FLOAT16(0), + ov::float16(1), ov::float16(0), ov::float16(1), ov::float16(1), ov::float16(1), ov::float16(1), + ov::float16(2), ov::float16(0), ov::float16(0), ov::float16(2), ov::float16(1), ov::float16(0), - FLOAT16(1), FLOAT16(1), FLOAT16(1), FLOAT16(0), FLOAT16(1), FLOAT16(1), - FLOAT16(1), FLOAT16(2), FLOAT16(1), FLOAT16(0), FLOAT16(2), FLOAT16(1), + ov::float16(1), ov::float16(1), ov::float16(1), ov::float16(0), ov::float16(1), ov::float16(1), + ov::float16(1), ov::float16(2), ov::float16(1), ov::float16(0), ov::float16(2), ov::float16(1), }); std::vector expected_results = { - FLOAT16(41), FLOAT16(42), FLOAT16(43), FLOAT16(61), FLOAT16(62), FLOAT16(63), - FLOAT16(19), FLOAT16(20), FLOAT16(21), FLOAT16(25), FLOAT16(26), FLOAT16(27), + ov::float16(41), ov::float16(42), ov::float16(43), ov::float16(61), ov::float16(62), ov::float16(63), + ov::float16(19), ov::float16(20), ov::float16(21), ov::float16(25), ov::float16(26), ov::float16(27), - FLOAT16(22), FLOAT16(23), FLOAT16(24), FLOAT16(28), FLOAT16(29), FLOAT16(30), - FLOAT16(41), FLOAT16(42), FLOAT16(43), 
FLOAT16(51), FLOAT16(52), FLOAT16(53), + ov::float16(22), ov::float16(23), ov::float16(24), ov::float16(28), ov::float16(29), ov::float16(30), + ov::float16(41), ov::float16(42), ov::float16(43), ov::float16(51), ov::float16(52), ov::float16(53), - FLOAT16(28), FLOAT16(29), FLOAT16(30), FLOAT16(10), FLOAT16(11), FLOAT16(12), - FLOAT16(34), FLOAT16(35), FLOAT16(36), FLOAT16(16), FLOAT16(17), FLOAT16(18), + ov::float16(28), ov::float16(29), ov::float16(30), ov::float16(10), ov::float16(11), ov::float16(12), + ov::float16(34), ov::float16(35), ov::float16(36), ov::float16(16), ov::float16(17), ov::float16(18), }; DoTestV5(engine, input0, input1, expected_results, indices_rank, batch_dims, format::bfyx, { 3, 2, 3, 2 }); @@ -656,39 +656,39 @@ TEST(gather_nd_gpu_fp16, d3323_i322_ir3_batch0) { // expected output dim: {3,2,3,2} set_values(input0, { - FLOAT16(1), FLOAT16(2), FLOAT16(3), FLOAT16(4), FLOAT16(5), FLOAT16(6), - FLOAT16(7), FLOAT16(8), FLOAT16(9), FLOAT16(10), FLOAT16(11), FLOAT16(12), - FLOAT16(13), FLOAT16(14), FLOAT16(15), FLOAT16(16), FLOAT16(17), FLOAT16(18), + ov::float16(1), ov::float16(2), ov::float16(3), ov::float16(4), ov::float16(5), ov::float16(6), + ov::float16(7), ov::float16(8), ov::float16(9), ov::float16(10), ov::float16(11), ov::float16(12), + ov::float16(13), ov::float16(14), ov::float16(15), ov::float16(16), ov::float16(17), ov::float16(18), - FLOAT16(19), FLOAT16(20), FLOAT16(21), FLOAT16(22), FLOAT16(23), FLOAT16(24), - FLOAT16(25), FLOAT16(26), FLOAT16(27), FLOAT16(28), FLOAT16(29), FLOAT16(30), - FLOAT16(31), FLOAT16(32), FLOAT16(33), FLOAT16(34), FLOAT16(35), FLOAT16(36), + ov::float16(19), ov::float16(20), ov::float16(21), ov::float16(22), ov::float16(23), ov::float16(24), + ov::float16(25), ov::float16(26), ov::float16(27), ov::float16(28), ov::float16(29), ov::float16(30), + ov::float16(31), ov::float16(32), ov::float16(33), ov::float16(34), ov::float16(35), ov::float16(36), - FLOAT16(41), FLOAT16(42), FLOAT16(43), FLOAT16(44), FLOAT16(45), FLOAT16(46), - FLOAT16(51), FLOAT16(52), FLOAT16(53), FLOAT16(54), FLOAT16(55), FLOAT16(56), - FLOAT16(61), FLOAT16(62), FLOAT16(63), FLOAT16(64), FLOAT16(65), FLOAT16(66), + ov::float16(41), ov::float16(42), ov::float16(43), ov::float16(44), ov::float16(45), ov::float16(46), + ov::float16(51), ov::float16(52), ov::float16(53), ov::float16(54), ov::float16(55), ov::float16(56), + ov::float16(61), ov::float16(62), ov::float16(63), ov::float16(64), ov::float16(65), ov::float16(66), }); set_values(input1, { - FLOAT16(2), FLOAT16(0), - FLOAT16(2), FLOAT16(1), + ov::float16(2), ov::float16(0), + ov::float16(2), ov::float16(1), - FLOAT16(1), FLOAT16(2), - FLOAT16(1), FLOAT16(0), + ov::float16(1), ov::float16(2), + ov::float16(1), ov::float16(0), - FLOAT16(0), FLOAT16(1), - FLOAT16(0), FLOAT16(2), + ov::float16(0), ov::float16(1), + ov::float16(0), ov::float16(2), }); std::vector expected_results = { - FLOAT16(41), FLOAT16(42), FLOAT16(43), FLOAT16(44), FLOAT16(45), FLOAT16(46), - FLOAT16(51), FLOAT16(52), FLOAT16(53), FLOAT16(54), FLOAT16(55), FLOAT16(56), + ov::float16(41), ov::float16(42), ov::float16(43), ov::float16(44), ov::float16(45), ov::float16(46), + ov::float16(51), ov::float16(52), ov::float16(53), ov::float16(54), ov::float16(55), ov::float16(56), - FLOAT16(31), FLOAT16(32), FLOAT16(33), FLOAT16(34), FLOAT16(35), FLOAT16(36), - FLOAT16(19), FLOAT16(20), FLOAT16(21), FLOAT16(22), FLOAT16(23), FLOAT16(24), + ov::float16(31), ov::float16(32), ov::float16(33), ov::float16(34), ov::float16(35), ov::float16(36), + 
ov::float16(19), ov::float16(20), ov::float16(21), ov::float16(22), ov::float16(23), ov::float16(24), - FLOAT16(7), FLOAT16(8), FLOAT16(9), FLOAT16(10), FLOAT16(11), FLOAT16(12), - FLOAT16(13), FLOAT16(14), FLOAT16(15), FLOAT16(16), FLOAT16(17), FLOAT16(18), + ov::float16(7), ov::float16(8), ov::float16(9), ov::float16(10), ov::float16(11), ov::float16(12), + ov::float16(13), ov::float16(14), ov::float16(15), ov::float16(16), ov::float16(17), ov::float16(18), }; DoTestV5(engine, input0, input1, expected_results, indices_rank, batch_dims, format::bfyx, { 3, 2, 3, 2 }); @@ -705,17 +705,17 @@ TEST(gather_nd_gpu_fp16, d22_i21_ir2_batch0) { // expected output dim: {2,2,1,1} set_values(input0, { - FLOAT16(1), FLOAT16(2), - FLOAT16(3), FLOAT16(4) + ov::float16(1), ov::float16(2), + ov::float16(3), ov::float16(4) }); set_values(input1, { - FLOAT16(1), FLOAT16(0), + ov::float16(1), ov::float16(0), }); std::vector expected_results = { - FLOAT16(3), FLOAT16(4), - FLOAT16(1), FLOAT16(2), + ov::float16(3), ov::float16(4), + ov::float16(1), ov::float16(2), }; DoTestV5(engine, input0, input1, expected_results, indices_rank, batch_dims, format::bfyx, { 2, 2, 1, 1 }); @@ -732,20 +732,20 @@ TEST(gather_nd_gpu_fp16, d22_i32_ir2_batch0) { // expected output dim: {3,1,1} set_values(input0, { - FLOAT16(1), FLOAT16(2), - FLOAT16(3), FLOAT16(4) + ov::float16(1), ov::float16(2), + ov::float16(3), ov::float16(4) }); set_values(input1, { - FLOAT16(0), FLOAT16(0), - FLOAT16(1), FLOAT16(0), - FLOAT16(1), FLOAT16(1), + ov::float16(0), ov::float16(0), + ov::float16(1), ov::float16(0), + ov::float16(1), ov::float16(1), }); std::vector expected_results = { - FLOAT16(1), - FLOAT16(3), - FLOAT16(4), + ov::float16(1), + ov::float16(3), + ov::float16(4), }; DoTestV5(engine,input0, input1, expected_results, indices_rank, batch_dims, format::bfyx, { 3, 1, 1, 1 }); @@ -762,20 +762,20 @@ TEST(gather_nd_gpu_fp16, export_import) { // expected output dim: {3,1,1} set_values(input0, { - FLOAT16(1), FLOAT16(2), - FLOAT16(3), FLOAT16(4) + ov::float16(1), ov::float16(2), + ov::float16(3), ov::float16(4) }); set_values(input1, { - FLOAT16(0), FLOAT16(0), - FLOAT16(1), FLOAT16(0), - FLOAT16(1), FLOAT16(1), + ov::float16(0), ov::float16(0), + ov::float16(1), ov::float16(0), + ov::float16(1), ov::float16(1), }); std::vector expected_results = { - FLOAT16(1), - FLOAT16(3), - FLOAT16(4), + ov::float16(1), + ov::float16(3), + ov::float16(4), }; DoTestV5(engine,input0, input1, expected_results, indices_rank, batch_dims, format::bfyx, { 3, 1, 1, 1 }, true); @@ -794,39 +794,39 @@ TEST(gather_nd_gpu_fp16, dynamic_r4) { auto input2 = engine.allocate_memory(layout{ov::PartialShape(in2_shape), data_types::f16, format::bfyx}); // Indexes set_values(input1, { - FLOAT16(1), FLOAT16(2), FLOAT16(3), FLOAT16(4), FLOAT16(5), FLOAT16(6), - FLOAT16(7), FLOAT16(8), FLOAT16(9), FLOAT16(10), FLOAT16(11), FLOAT16(12), - FLOAT16(13), FLOAT16(14), FLOAT16(15), FLOAT16(16), FLOAT16(17), FLOAT16(18), + ov::float16(1), ov::float16(2), ov::float16(3), ov::float16(4), ov::float16(5), ov::float16(6), + ov::float16(7), ov::float16(8), ov::float16(9), ov::float16(10), ov::float16(11), ov::float16(12), + ov::float16(13), ov::float16(14), ov::float16(15), ov::float16(16), ov::float16(17), ov::float16(18), - FLOAT16(19), FLOAT16(20), FLOAT16(21), FLOAT16(22), FLOAT16(23), FLOAT16(24), - FLOAT16(25), FLOAT16(26), FLOAT16(27), FLOAT16(28), FLOAT16(29), FLOAT16(30), - FLOAT16(31), FLOAT16(32), FLOAT16(33), FLOAT16(34), FLOAT16(35), FLOAT16(36), + ov::float16(19), 
ov::float16(20), ov::float16(21), ov::float16(22), ov::float16(23), ov::float16(24), + ov::float16(25), ov::float16(26), ov::float16(27), ov::float16(28), ov::float16(29), ov::float16(30), + ov::float16(31), ov::float16(32), ov::float16(33), ov::float16(34), ov::float16(35), ov::float16(36), - FLOAT16(41), FLOAT16(42), FLOAT16(43), FLOAT16(44), FLOAT16(45), FLOAT16(46), - FLOAT16(51), FLOAT16(52), FLOAT16(53), FLOAT16(54), FLOAT16(55), FLOAT16(56), - FLOAT16(61), FLOAT16(62), FLOAT16(63), FLOAT16(64), FLOAT16(65), FLOAT16(66), + ov::float16(41), ov::float16(42), ov::float16(43), ov::float16(44), ov::float16(45), ov::float16(46), + ov::float16(51), ov::float16(52), ov::float16(53), ov::float16(54), ov::float16(55), ov::float16(56), + ov::float16(61), ov::float16(62), ov::float16(63), ov::float16(64), ov::float16(65), ov::float16(66), }); set_values(input2, { - FLOAT16(2), FLOAT16(0), FLOAT16(0), FLOAT16(2), FLOAT16(2), FLOAT16(0), - FLOAT16(1), FLOAT16(0), FLOAT16(0), FLOAT16(1), FLOAT16(1), FLOAT16(0), + ov::float16(2), ov::float16(0), ov::float16(0), ov::float16(2), ov::float16(2), ov::float16(0), + ov::float16(1), ov::float16(0), ov::float16(0), ov::float16(1), ov::float16(1), ov::float16(0), - FLOAT16(1), FLOAT16(0), FLOAT16(1), FLOAT16(1), FLOAT16(1), FLOAT16(1), - FLOAT16(2), FLOAT16(0), FLOAT16(0), FLOAT16(2), FLOAT16(1), FLOAT16(0), + ov::float16(1), ov::float16(0), ov::float16(1), ov::float16(1), ov::float16(1), ov::float16(1), + ov::float16(2), ov::float16(0), ov::float16(0), ov::float16(2), ov::float16(1), ov::float16(0), - FLOAT16(1), FLOAT16(1), FLOAT16(1), FLOAT16(0), FLOAT16(1), FLOAT16(1), - FLOAT16(1), FLOAT16(2), FLOAT16(1), FLOAT16(0), FLOAT16(2), FLOAT16(1), + ov::float16(1), ov::float16(1), ov::float16(1), ov::float16(0), ov::float16(1), ov::float16(1), + ov::float16(1), ov::float16(2), ov::float16(1), ov::float16(0), ov::float16(2), ov::float16(1), }); std::vector expected_results = { - FLOAT16(41), FLOAT16(42), FLOAT16(43), FLOAT16(61), FLOAT16(62), FLOAT16(63), - FLOAT16(19), FLOAT16(20), FLOAT16(21), FLOAT16(25), FLOAT16(26), FLOAT16(27), + ov::float16(41), ov::float16(42), ov::float16(43), ov::float16(61), ov::float16(62), ov::float16(63), + ov::float16(19), ov::float16(20), ov::float16(21), ov::float16(25), ov::float16(26), ov::float16(27), - FLOAT16(22), FLOAT16(23), FLOAT16(24), FLOAT16(28), FLOAT16(29), FLOAT16(30), - FLOAT16(41), FLOAT16(42), FLOAT16(43), FLOAT16(51), FLOAT16(52), FLOAT16(53), + ov::float16(22), ov::float16(23), ov::float16(24), ov::float16(28), ov::float16(29), ov::float16(30), + ov::float16(41), ov::float16(42), ov::float16(43), ov::float16(51), ov::float16(52), ov::float16(53), - FLOAT16(28), FLOAT16(29), FLOAT16(30), FLOAT16(10), FLOAT16(11), FLOAT16(12), - FLOAT16(34), FLOAT16(35), FLOAT16(36), FLOAT16(16), FLOAT16(17), FLOAT16(18), + ov::float16(28), ov::float16(29), ov::float16(30), ov::float16(10), ov::float16(11), ov::float16(12), + ov::float16(34), ov::float16(35), ov::float16(36), ov::float16(16), ov::float16(17), ov::float16(18), }; auto expected_fmt = format::bfyx; @@ -882,33 +882,33 @@ TEST(gather_nd_gpu_fp16, dynamic_r5) { auto input2 = engine.allocate_memory(layout{ov::PartialShape(in2_shape), data_types::f16, format::bfzyx}); // Indexes set_values(input1, { - FLOAT16(11), FLOAT16(12), FLOAT16(13), FLOAT16(14), FLOAT16(15), FLOAT16(16), FLOAT16(17), FLOAT16(18), FLOAT16(19), FLOAT16(10), FLOAT16(21), FLOAT16(18), - FLOAT16(21), FLOAT16(22), FLOAT16(23), FLOAT16(24), FLOAT16(25), FLOAT16(26), FLOAT16(27), FLOAT16(28), 
FLOAT16(29), FLOAT16(20), FLOAT16(27), FLOAT16(28), - FLOAT16(31), FLOAT16(32), FLOAT16(33), FLOAT16(34), FLOAT16(35), FLOAT16(36), FLOAT16(37), FLOAT16(38), FLOAT16(39), FLOAT16(30), FLOAT16(31), FLOAT16(30), + ov::float16(11), ov::float16(12), ov::float16(13), ov::float16(14), ov::float16(15), ov::float16(16), ov::float16(17), ov::float16(18), ov::float16(19), ov::float16(10), ov::float16(21), ov::float16(18), + ov::float16(21), ov::float16(22), ov::float16(23), ov::float16(24), ov::float16(25), ov::float16(26), ov::float16(27), ov::float16(28), ov::float16(29), ov::float16(20), ov::float16(27), ov::float16(28), + ov::float16(31), ov::float16(32), ov::float16(33), ov::float16(34), ov::float16(35), ov::float16(36), ov::float16(37), ov::float16(38), ov::float16(39), ov::float16(30), ov::float16(31), ov::float16(30), - FLOAT16(11), FLOAT16(12), FLOAT16(13), FLOAT16(14), FLOAT16(15), FLOAT16(16), FLOAT16(17), FLOAT16(18), FLOAT16(19), FLOAT16(10), FLOAT16(17), FLOAT16(18), - FLOAT16(21), FLOAT16(22), FLOAT16(23), FLOAT16(24), FLOAT16(25), FLOAT16(26), FLOAT16(27), FLOAT16(28), FLOAT16(29), FLOAT16(20), FLOAT16(27), FLOAT16(28), - FLOAT16(31), FLOAT16(32), FLOAT16(33), FLOAT16(34), FLOAT16(35), FLOAT16(36), FLOAT16(37), FLOAT16(38), FLOAT16(39), FLOAT16(30), FLOAT16(29), FLOAT16(30), + ov::float16(11), ov::float16(12), ov::float16(13), ov::float16(14), ov::float16(15), ov::float16(16), ov::float16(17), ov::float16(18), ov::float16(19), ov::float16(10), ov::float16(17), ov::float16(18), + ov::float16(21), ov::float16(22), ov::float16(23), ov::float16(24), ov::float16(25), ov::float16(26), ov::float16(27), ov::float16(28), ov::float16(29), ov::float16(20), ov::float16(27), ov::float16(28), + ov::float16(31), ov::float16(32), ov::float16(33), ov::float16(34), ov::float16(35), ov::float16(36), ov::float16(37), ov::float16(38), ov::float16(39), ov::float16(30), ov::float16(29), ov::float16(30), }); set_values(input2, { - FLOAT16(1), FLOAT16(1), FLOAT16(1), FLOAT16(1), FLOAT16(1), FLOAT16(1), - FLOAT16(0), FLOAT16(0), FLOAT16(0), FLOAT16(0), FLOAT16(0), FLOAT16(0), - FLOAT16(1), FLOAT16(0), FLOAT16(0), FLOAT16(1), FLOAT16(1), FLOAT16(0), + ov::float16(1), ov::float16(1), ov::float16(1), ov::float16(1), ov::float16(1), ov::float16(1), + ov::float16(0), ov::float16(0), ov::float16(0), ov::float16(0), ov::float16(0), ov::float16(0), + ov::float16(1), ov::float16(0), ov::float16(0), ov::float16(1), ov::float16(1), ov::float16(0), - FLOAT16(1), FLOAT16(1), FLOAT16(1), FLOAT16(1), FLOAT16(1), FLOAT16(1), - FLOAT16(0), FLOAT16(0), FLOAT16(0), FLOAT16(0), FLOAT16(0), FLOAT16(0), - FLOAT16(1), FLOAT16(0), FLOAT16(0), FLOAT16(1), FLOAT16(1), FLOAT16(0), + ov::float16(1), ov::float16(1), ov::float16(1), ov::float16(1), ov::float16(1), ov::float16(1), + ov::float16(0), ov::float16(0), ov::float16(0), ov::float16(0), ov::float16(0), ov::float16(0), + ov::float16(1), ov::float16(0), ov::float16(0), ov::float16(1), ov::float16(1), ov::float16(0), }); std::vector expected_results = { - FLOAT16(12), FLOAT16(14), FLOAT16(16), FLOAT16(18), FLOAT16(10), FLOAT16(18), - FLOAT16(21), FLOAT16(23), FLOAT16(25), FLOAT16(27), FLOAT16(29), FLOAT16(27), - FLOAT16(32), FLOAT16(33), FLOAT16(35), FLOAT16(38), FLOAT16(30), FLOAT16(31), + ov::float16(12), ov::float16(14), ov::float16(16), ov::float16(18), ov::float16(10), ov::float16(18), + ov::float16(21), ov::float16(23), ov::float16(25), ov::float16(27), ov::float16(29), ov::float16(27), + ov::float16(32), ov::float16(33), ov::float16(35), ov::float16(38), ov::float16(30), 
ov::float16(31), - FLOAT16(12), FLOAT16(14), FLOAT16(16), FLOAT16(18), FLOAT16(10), FLOAT16(18), - FLOAT16(21), FLOAT16(23), FLOAT16(25), FLOAT16(27), FLOAT16(29), FLOAT16(27), - FLOAT16(32), FLOAT16(33), FLOAT16(35), FLOAT16(38), FLOAT16(30), FLOAT16(29), + ov::float16(12), ov::float16(14), ov::float16(16), ov::float16(18), ov::float16(10), ov::float16(18), + ov::float16(21), ov::float16(23), ov::float16(25), ov::float16(27), ov::float16(29), ov::float16(27), + ov::float16(32), ov::float16(33), ov::float16(35), ov::float16(38), ov::float16(30), ov::float16(29), }; auto expected_fmt = format::bfyx; @@ -952,4 +952,4 @@ TEST(gather_nd_gpu_fp16, dynamic_r5) { for (size_t i = 0; i < expected_results.size(); ++i) { EXPECT_EQ(expected_results[i], half_to_float(output_ptr[i])) << i; } -} \ No newline at end of file +} diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/gather_tree_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/gather_tree_gpu_test.cpp index 8c7e9fa22e6280..ad40ff142086a0 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/gather_tree_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/gather_tree_gpu_test.cpp @@ -168,7 +168,7 @@ struct gather_tree_test : public ::testing::TestWithParam > { public: void test() { - const auto data_type = type_to_data_type::value; + const auto data_type = ov::element::from(); Params params; format::type plain_layout; format::type target_layout; diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/gemm_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/gemm_gpu_test.cpp index ef6031465d2532..a90edc00a2db98 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/gemm_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/gemm_gpu_test.cpp @@ -1436,7 +1436,7 @@ class GemmOneDNNTest : public ::testing::TestWithParam { VF rnd_vec = rg.generate_random_1d(s.count(), -200, 200); set_values(prim, rnd_vec); } else if (l.data_type == cldnn::data_types::f16) { - VF rnd_vec = rg.generate_random_1d(s.count(), -1, 1); + VF rnd_vec = rg.generate_random_1d(s.count(), -1, 1); set_values(prim, rnd_vec); } else { VF rnd_vec = rg.generate_random_1d(s.count(), -1, 1); @@ -1604,7 +1604,7 @@ INSTANTIATE_TEST_SUITE_P(gemm_gpu, gemm_uint8_simple_tests_onednn, ::testing::Va gemm_base_test_params{ CASE_GEMM_UINT8_ONEDNN_4, "" }, })); -class gemm_fp16_simple_tests_onednn : public ::GemmBaseOneDNNTest {}; +class gemm_fp16_simple_tests_onednn : public ::GemmBaseOneDNNTest {}; TEST_P(gemm_fp16_simple_tests_onednn, basic) { auto p = GetParam(); execute(p); } INSTANTIATE_TEST_SUITE_P(gemm_gpu, gemm_fp16_simple_tests_onednn, ::testing::ValuesIn(std::vector { @@ -1654,7 +1654,7 @@ INSTANTIATE_TEST_SUITE_P(gemm_gpu, gemm_uint8_transposition_tests_onednn, ::test gemm_base_test_params{ CASE_GEMM_UINT8_TT_TRANSPOSITION_LEFTOVERS_ONEDNN, "" }, })); -class gemm_fp16_transposition_tests_onednn : public ::GemmBaseOneDNNTest {}; +class gemm_fp16_transposition_tests_onednn : public ::GemmBaseOneDNNTest {}; TEST_P(gemm_fp16_transposition_tests_onednn, basic) { auto p = GetParam(); execute(p); } INSTANTIATE_TEST_SUITE_P(gemm_gpu, gemm_fp16_transposition_tests_onednn, ::testing::ValuesIn(std::vector { @@ -1684,7 +1684,7 @@ INSTANTIATE_TEST_SUITE_P(gemm_gpu, gemm_int8_broadcasting_tests_onednn, ::testin gemm_base_test_params{ CASE_GEMM_INT8_BROADCASTING_ONEDNN_4, "" }, })); -class gemm_fp16_broadcasting_tests_onednn : public ::GemmBaseOneDNNTest {}; +class gemm_fp16_broadcasting_tests_onednn : public ::GemmBaseOneDNNTest {}; 
TEST_P(gemm_fp16_broadcasting_tests_onednn, basic) { auto p = GetParam(); execute(p); } INSTANTIATE_TEST_SUITE_P(gemm_gpu, gemm_fp16_broadcasting_tests_onednn, ::testing::ValuesIn(std::vector { @@ -1724,7 +1724,7 @@ INSTANTIATE_TEST_SUITE_P(gemm_gpu, gemm_uint8_combo_tests_onednn, ::testing::Val gemm_base_test_params{ CASE_GEMM_UINT8_COMBO_ONEDNN_4, "" }, })); -class gemm_fp16_combo_tests_onednn : public ::GemmBaseOneDNNTest {}; +class gemm_fp16_combo_tests_onednn : public ::GemmBaseOneDNNTest {}; TEST_P(gemm_fp16_combo_tests_onednn, basic) { auto p = GetParam(); execute(p); } INSTANTIATE_TEST_SUITE_P(gemm_gpu, gemm_fp16_combo_tests_onednn, ::testing::ValuesIn(std::vector { @@ -1854,7 +1854,7 @@ INSTANTIATE_TEST_SUITE_P(gemm_gpu, gemm_fp32_tiled_nn_broadcast_tests, ::testing gemm_base_test_params{ CASE_GEMM_FP32_TILED_NN_BROADCAST_4, "gemm_tiled_opt" }, })); -class gemm_fp16_tiled_nn_tests : public ::GemmBaseTest {}; +class gemm_fp16_tiled_nn_tests : public ::GemmBaseTest {}; TEST_P(gemm_fp16_tiled_nn_tests, basic) { auto p = GetParam(); execute(p); } INSTANTIATE_TEST_SUITE_P(gemm_gpu, gemm_fp16_tiled_nn_tests, ::testing::ValuesIn(std::vector { @@ -1864,7 +1864,7 @@ INSTANTIATE_TEST_SUITE_P(gemm_gpu, gemm_fp16_tiled_nn_tests, ::testing::ValuesIn gemm_base_test_params{ CASE_GEMM_FP16_TILED_NN_4, "gemm_tiled_opt" }, })); -class gemm_fp16_tiled_nt_tests : public ::GemmBaseTest {}; +class gemm_fp16_tiled_nt_tests : public ::GemmBaseTest {}; TEST_P(gemm_fp16_tiled_nt_tests, basic) { auto p = GetParam(); execute(p); } INSTANTIATE_TEST_SUITE_P(gemm_gpu, gemm_fp16_tiled_nt_tests, ::testing::ValuesIn(std::vector { @@ -1874,7 +1874,7 @@ INSTANTIATE_TEST_SUITE_P(gemm_gpu, gemm_fp16_tiled_nt_tests, ::testing::ValuesIn gemm_base_test_params{ CASE_GEMM_FP16_TILED_NT_4, "gemm_tiled_opt" }, })); -class gemm_fp16_tiled_tn_tests : public ::GemmBaseTest {}; +class gemm_fp16_tiled_tn_tests : public ::GemmBaseTest {}; TEST_P(gemm_fp16_tiled_tn_tests, basic) { auto p = GetParam(); execute(p); } INSTANTIATE_TEST_SUITE_P(gemm_gpu, gemm_fp16_tiled_tn_tests, ::testing::ValuesIn(std::vector { @@ -1884,7 +1884,7 @@ INSTANTIATE_TEST_SUITE_P(gemm_gpu, gemm_fp16_tiled_tn_tests, ::testing::ValuesIn gemm_base_test_params{ CASE_GEMM_FP16_TILED_TN_4, "gemm_tiled_opt" }, })); -class gemm_fp16_tiled_tt_tests : public ::GemmBaseTest {}; +class gemm_fp16_tiled_tt_tests : public ::GemmBaseTest {}; TEST_P(gemm_fp16_tiled_tt_tests, basic) { auto p = GetParam(); execute(p); } INSTANTIATE_TEST_SUITE_P(gemm_gpu, gemm_fp16_tiled_tt_tests, ::testing::ValuesIn(std::vector { @@ -1894,7 +1894,7 @@ INSTANTIATE_TEST_SUITE_P(gemm_gpu, gemm_fp16_tiled_tt_tests, ::testing::ValuesIn gemm_base_test_params{ CASE_GEMM_FP16_TILED_TT_4, "gemm_tiled_opt" }, })); -class gemm_fp16_tiled_nn_broadcast_tests : public ::GemmBaseTest {}; +class gemm_fp16_tiled_nn_broadcast_tests : public ::GemmBaseTest {}; TEST_P(gemm_fp16_tiled_nn_broadcast_tests, basic) { auto p = GetParam(); execute(p); } INSTANTIATE_TEST_SUITE_P(gemm_gpu, gemm_fp16_tiled_nn_broadcast_tests, ::testing::ValuesIn(std::vector { diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/generate_proposals_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/generate_proposals_gpu_test.cpp index 5435c6b96ddad4..10e5dd3d27c141 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/generate_proposals_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/generate_proposals_gpu_test.cpp @@ -162,7 +162,7 @@ float getError() { } template<> -float getError() { +float getError() { 
return 0.2; } @@ -286,8 +286,8 @@ struct generate_proposals_test std::tie(param, data_layout, is_caching_test) = this->GetParam(); const bool need_reorder = data_layout != format::bfyx; - const auto data_type = type_to_data_type::value; - const auto rois_num_type = type_to_data_type::value; + const auto data_type = ov::element::from(); + const auto rois_num_type = ov::element::from(); auto& engine = get_test_engine(); std::shared_ptr stream = get_test_stream_ptr();; @@ -442,7 +442,7 @@ INSTANTIATE_TEST_SUITE_P( ::testing::Values(false) )); -using f16_i32 = generate_proposals_test; +using f16_i32 = generate_proposals_test; TEST_P(f16_i32, f16_i32) { test(); } @@ -450,12 +450,12 @@ INSTANTIATE_TEST_SUITE_P( generate_proposals_gpu_test, f16_i32, ::testing::Combine( - ::testing::ValuesIn(getGenerateProposalsParams()), + ::testing::ValuesIn(getGenerateProposalsParams()), ::testing::ValuesIn(layouts), ::testing::Values(false) )); -using f16_i64 = generate_proposals_test; +using f16_i64 = generate_proposals_test; TEST_P(f16_i64, f16_i64) { test(); } @@ -463,7 +463,7 @@ INSTANTIATE_TEST_SUITE_P( generate_proposals_gpu_test, f16_i64, ::testing::Combine( - ::testing::ValuesIn(getGenerateProposalsParams()), + ::testing::ValuesIn(getGenerateProposalsParams()), ::testing::ValuesIn(layouts), ::testing::Values(false) )); @@ -472,7 +472,7 @@ INSTANTIATE_TEST_SUITE_P( export_import_generate_proposals_gpu_test, f16_i64, ::testing::Combine( - ::testing::Values(getGenerateProposalsParams()[0]), + ::testing::Values(getGenerateProposalsParams()[0]), ::testing::Values(layouts[0]), ::testing::Values(true) )); diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/grid_sample_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/grid_sample_gpu_test.cpp index 66b226d649ec22..ec8cf82d9750ef 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/grid_sample_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/grid_sample_gpu_test.cpp @@ -41,7 +41,7 @@ float getError() { } template <> -float getError() { +float getError() { return 0.5f; } @@ -55,8 +55,8 @@ struct grid_sample_gpu_test : public testing::TestWithParam>::GetParam(); auto& engine = get_test_engine(); - const auto data_data_type = type_to_data_type::value; - const auto grid_data_type = type_to_data_type::value; + const auto data_data_type = ov::element::from(); + const auto grid_data_type = ov::element::from(); const auto plane_format = format::bfyx; const layout data_layout(data_data_type, plane_format, tensor(plane_format, p.data_shape)); @@ -674,7 +674,7 @@ TEST_P(grid_sample_gpu_test_float_float, test) { ASSERT_NO_FATAL_FAILURE(test()); } -using grid_sample_gpu_test_FLOAT16_FLOAT16 = grid_sample_gpu_test; +using grid_sample_gpu_test_FLOAT16_FLOAT16 = grid_sample_gpu_test; TEST_P(grid_sample_gpu_test_FLOAT16_FLOAT16, test) { ASSERT_NO_FATAL_FAILURE(test()); } @@ -688,7 +688,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_grid_sample_gpu_test_float_float, INSTANTIATE_TEST_SUITE_P(smoke_grid_sample_gpu_test_FLOAT16_FLOAT16, grid_sample_gpu_test_FLOAT16_FLOAT16, - testing::Combine(testing::ValuesIn(getParamsToCheckLogic()), + testing::Combine(testing::ValuesIn(getParamsToCheckLogic()), testing::Values(format::bfyx), testing::Values(RUN_CACHING_TEST)), grid_sample_gpu_test_FLOAT16_FLOAT16::PrintToStringParamName); @@ -696,7 +696,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_grid_sample_gpu_test_FLOAT16_FLOAT16, #ifndef RUN_ALL_MODEL_CACHING_TESTS INSTANTIATE_TEST_SUITE_P(smoke_grid_sample_gpu_test_FLOAT16_FLOAT16_cached, grid_sample_gpu_test_FLOAT16_FLOAT16, - 
testing::Combine(testing::ValuesIn(getNearestParamsOddDimensionsOuterGrids()), + testing::Combine(testing::ValuesIn(getNearestParamsOddDimensionsOuterGrids()), testing::Values(format::bfyx), testing::Values(true)), grid_sample_gpu_test_FLOAT16_FLOAT16::PrintToStringParamName); diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/hash_key_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/hash_key_gpu_test.cpp index 367277fa0e061b..b76e45428f9850 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/hash_key_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/hash_key_gpu_test.cpp @@ -44,7 +44,7 @@ class check_hash_value: public ::testing::Test { const auto params_hash = prim_inst->get_impl_params()->hash(); ASSERT_EQ(primitive_hash, 4145865612957978777UL); - ASSERT_EQ(params_hash, 14779472302025859443UL); + ASSERT_EQ(params_hash, 13330229854511334999UL); } void test_fc_basic(bool is_caching_test) { @@ -72,10 +72,10 @@ class check_hash_value: public ::testing::Test { const auto params_hash = primitve->type->get_fake_aligned_params(*prim_inst->get_impl_params()).hash(); if (!engine.get_device_info().supports_immad) { ASSERT_EQ(primitive_hash, 6924775129729406941UL); - ASSERT_EQ(params_hash, 15366394052020805414UL); + ASSERT_EQ(params_hash, 8142839956977133460UL); } else { ASSERT_EQ(primitive_hash, 6924775129729406941UL); - ASSERT_EQ(params_hash, 8552673460001178483UL); + ASSERT_EQ(params_hash, 9266224209991282259UL); } } @@ -105,7 +105,7 @@ class check_hash_value: public ::testing::Test { const auto params_hash = prim_inst->get_impl_params()->hash(); ASSERT_EQ(primitive_hash, 93320679543770233UL); - ASSERT_EQ(params_hash, 16130855364209139301UL); + ASSERT_EQ(params_hash, 1542578941420280552UL); } void test_gemm_basic(bool is_caching_test) { @@ -128,7 +128,7 @@ class check_hash_value: public ::testing::Test { const auto primitive_hash = primitve->hash(); const auto params_hash = prim_inst->get_impl_params()->hash(); ASSERT_EQ(primitive_hash, 8009877756431655269UL); - ASSERT_EQ(params_hash, 16181383969029667789UL); + ASSERT_EQ(params_hash, 12585836190897043350UL); } void test_permute_basic(bool is_caching_test) { @@ -149,7 +149,7 @@ class check_hash_value: public ::testing::Test { const auto params_hash = prim_inst->get_impl_params()->hash(); ASSERT_EQ(primitive_hash, 4658575237077439700UL); - ASSERT_EQ(params_hash, 5773472682005147183UL); + ASSERT_EQ(params_hash, 10588150284756843899UL); } void test_reorder_basic(bool is_caching_test) { @@ -176,7 +176,7 @@ class check_hash_value: public ::testing::Test { const auto params_hash = prim_inst->get_impl_params()->hash(); ASSERT_EQ(primitive_hash, 16293979194373117693UL); - ASSERT_EQ(params_hash, 550629972043680951UL); + ASSERT_EQ(params_hash, 14231564068060955575UL); } void test_reshape_basic(bool is_caching_test) { @@ -200,7 +200,7 @@ class check_hash_value: public ::testing::Test { const auto params_hash = prim_inst->get_impl_params()->hash(); ASSERT_EQ(primitive_hash, 1534749073560581535UL); - ASSERT_EQ(params_hash, 2578847666139139067UL); + ASSERT_EQ(params_hash, 4349925423879269352UL); } void test_conv_basic(bool is_caching_test) { @@ -225,7 +225,7 @@ class check_hash_value: public ::testing::Test { const auto params_hash = prim_inst->get_impl_params()->hash(); ASSERT_EQ(primitive_hash, 13549661972131371304UL); - ASSERT_EQ(params_hash, 2971412112872172751UL); + ASSERT_EQ(params_hash, 7127098854451559675UL); } void test_quantize_basic(bool is_caching_test) { @@ -255,7 +255,7 @@ class check_hash_value: public 
::testing::Test { const auto primitive_hash = primitve->hash(); const auto params_hash = prim_inst->get_impl_params()->hash(); ASSERT_EQ(primitive_hash, 4135863035456568493UL); - ASSERT_EQ(params_hash, 881730825593882400UL); + ASSERT_EQ(params_hash, 5990757629995899044UL); } }; diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/lrn_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/lrn_gpu_test.cpp index fcbe4eaa7d3e76..a89e07138c6d94 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/lrn_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/lrn_gpu_test.cpp @@ -164,7 +164,7 @@ void test_fp16_basic1(bool is_caching_test) { } TEST(lrn_fp16_gpu, basic1) { - test_fp16_basic1(false); + test_fp16_basic1(false); } template @@ -272,7 +272,7 @@ TEST(lrn_fp32_gpu, basic2_cached) { } TEST(lrn_fp16_gpu, basic1_cached) { - test_fp16_basic1(true); + test_fp16_basic1(true); } #endif TEST(lrn_fp32_gpu, basic3_cached) { diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/lru_caches_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/lru_caches_gpu_test.cpp index 735786132dace7..69beb343e766eb 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/lru_caches_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/lru_caches_gpu_test.cpp @@ -133,8 +133,8 @@ struct ImplHasher { } // namespace TEST(lru_cache, collisions) { - auto l1 = layout{{1, 3, 80, 80}, data_types::f32, format::bfyx}; - auto l2 = layout{{1, 3, 81, 141}, data_types::f32, format::bfyx}; + auto l1 = layout{{1, 3, 40, 20}, data_types::f32, format::bfyx}; + auto l2 = layout{{1, 3, 39, 83}, data_types::f32, format::bfyx}; auto input1_prim = std::make_shared("input1", l1); auto input2_prim = std::make_shared("input2", l2); diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/lstm_dynamic_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/lstm_dynamic_gpu_test.cpp index ac4f990477dca3..a6a180a1ce7788 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/lstm_dynamic_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/lstm_dynamic_gpu_test.cpp @@ -160,7 +160,7 @@ VVVVF lstm_dynamic_input_ref(VVVVF& input, VVVVF& weights, VVVVF& bi } // Convert back to output data type before storing it into the output buffer. 
Currently, the output - // data type may be float or FLOAT16 (half) + // data type may be float or ov::float16 (half) tempOut[b][0][0][h] = (T)(std::tanh(val) * sigmoid(fp32_ot)); tempOut[b][1][0][h] = (T)val; } @@ -207,7 +207,7 @@ struct lstm_dynamic_input_layer_test : public ::testing::Test void SetUp() override { rg.set_seed(GET_SUITE_NAME); } - + void input_single_layer_generic_test(int32_t direction, int32_t batch_size, int32_t max_sequence_len, int32_t input_size, int32_t hidden_size, std::vector dynamic_lengths, bool has_bias = false) { @@ -539,7 +539,7 @@ struct lstm_dynamic_single_layer_test : public ::testing::Test } }; -typedef ::testing::Types lstm_dynamic_test_types; +typedef ::testing::Types lstm_dynamic_test_types; TYPED_TEST_SUITE(lstm_dynamic_single_layer_test, lstm_dynamic_test_types); TYPED_TEST_SUITE(lstm_dynamic_input_layer_test, lstm_dynamic_test_types); diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/lstm_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/lstm_gpu_test.cpp index 4e47738d4777a5..9d6cbc48aeddbc 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/lstm_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/lstm_gpu_test.cpp @@ -116,7 +116,7 @@ VVVVF lstm_elt_reference(VVVVF& tempGEMM, VVVVF& cell, } // Convert back to output data type before storing it into the output buffer. Currently, the output - // data type may be float or FLOAT16 (half) + // data type may be float or ov::float16 (half) tempOut[b][0][0][h] = (T)(std::tanh(val) * sigmoid(fp32_ot)); tempOut[b][1][0][h] = (T)val; } @@ -418,12 +418,12 @@ void generic_lstm_custom_gpu_test(int sequence_len, int direction, int batch_siz hasBias, hasInitialHidden, hasInitialCell); auto& engine = get_test_engine(); - memory::ptr input = engine.allocate_memory({ type_to_data_type::value, format::bfyx,{ batch_size, sequence_len, input_size, 1 } }); - memory::ptr weights = engine.allocate_memory({ type_to_data_type::value, format::bfyx,{ 1, direction, input_size, 4 * hidden_size } }); - memory::ptr recurrent = engine.allocate_memory({ type_to_data_type::value, format::bfyx,{ 1, direction, hidden_size, 4 * hidden_size } }); - memory::ptr biases = engine.allocate_memory({ type_to_data_type::value, format::bfyx,{ 1, 1, 4 * hidden_size, direction } }); - memory::ptr hidden = engine.allocate_memory({ type_to_data_type::value, format::bfyx,{ batch_size, direction, hidden_size, 1 } }); - memory::ptr cell = engine.allocate_memory({ type_to_data_type::value, format::bfyx,{ batch_size, direction, hidden_size, 1 } }); + memory::ptr input = engine.allocate_memory({ ov::element::from(), format::bfyx,{ batch_size, sequence_len, input_size, 1 } }); + memory::ptr weights = engine.allocate_memory({ ov::element::from(), format::bfyx,{ 1, direction, input_size, 4 * hidden_size } }); + memory::ptr recurrent = engine.allocate_memory({ ov::element::from(), format::bfyx,{ 1, direction, hidden_size, 4 * hidden_size } }); + memory::ptr biases = engine.allocate_memory({ ov::element::from(), format::bfyx,{ 1, 1, 4 * hidden_size, direction } }); + memory::ptr hidden = engine.allocate_memory({ ov::element::from(), format::bfyx,{ batch_size, direction, hidden_size, 1 } }); + memory::ptr cell = engine.allocate_memory({ ov::element::from(), format::bfyx,{ batch_size, direction, hidden_size, 1 } }); set_values(input, ref_input_vec); set_values(weights, ref_weights_vec); set_values(recurrent, ref_recurrent_vec); @@ -680,12 +680,12 @@ void lstm_gpu_output_test(const lstm_output_selection& output_selection, int dir 
auto& engine = get_test_engine(); - memory::ptr input = engine.allocate_memory({ type_to_data_type::value, format::bfyx, {batch_size, sequence_len, input_size, 1} }); - memory::ptr weights = engine.allocate_memory({ type_to_data_type::value, format::bfyx, { 1, directions, input_size , 4 * hidden_size } }); - memory::ptr recurrent = engine.allocate_memory({ type_to_data_type::value, format::bfyx, { 1, directions, hidden_size, 4 * hidden_size } }); - memory::ptr biases = engine.allocate_memory({ type_to_data_type::value, format::bfyx, { 1, 1, 4 * hidden_size, directions } }); - memory::ptr hidden = engine.allocate_memory({ type_to_data_type::value, format::bfyx, { batch_size, 1, hidden_size, directions } }); - memory::ptr cell = engine.allocate_memory({ type_to_data_type::value, format::bfyx, { batch_size, 1, hidden_size, directions } }); + memory::ptr input = engine.allocate_memory({ ov::element::from(), format::bfyx, {batch_size, sequence_len, input_size, 1} }); + memory::ptr weights = engine.allocate_memory({ ov::element::from(), format::bfyx, { 1, directions, input_size , 4 * hidden_size } }); + memory::ptr recurrent = engine.allocate_memory({ ov::element::from(), format::bfyx, { 1, directions, hidden_size, 4 * hidden_size } }); + memory::ptr biases = engine.allocate_memory({ ov::element::from(), format::bfyx, { 1, 1, 4 * hidden_size, directions } }); + memory::ptr hidden = engine.allocate_memory({ ov::element::from(), format::bfyx, { batch_size, 1, hidden_size, directions } }); + memory::ptr cell = engine.allocate_memory({ ov::element::from(), format::bfyx, { batch_size, 1, hidden_size, directions } }); set_values(input, ref_input_vec); set_values(weights, ref_weights_vec); @@ -844,12 +844,12 @@ void lstm_gpu_format_test(const cldnn::format& format, int directions, bool is_c auto& engine = get_test_engine(); - memory::ptr input = engine.allocate_memory({ type_to_data_type::value,format, {batch_size, sequence_len, input_size, 1} }); - memory::ptr weights = engine.allocate_memory({ type_to_data_type::value, format::bfyx, { 1, directions, input_size , 4 * hidden_size } }); - memory::ptr recurrent = engine.allocate_memory({ type_to_data_type::value, format::bfyx, { 1, directions, hidden_size, 4 * hidden_size } }); - memory::ptr biases = engine.allocate_memory({ type_to_data_type::value, format::bfyx, { 1, 1, 4 * hidden_size, directions } }); - memory::ptr hidden = engine.allocate_memory({ type_to_data_type::value, format, { batch_size, 1, hidden_size, directions } }); - memory::ptr cell = engine.allocate_memory({ type_to_data_type::value, format, { batch_size, 1, hidden_size, directions } }); + memory::ptr input = engine.allocate_memory({ ov::element::from(),format, {batch_size, sequence_len, input_size, 1} }); + memory::ptr weights = engine.allocate_memory({ ov::element::from(), format::bfyx, { 1, directions, input_size , 4 * hidden_size } }); + memory::ptr recurrent = engine.allocate_memory({ ov::element::from(), format::bfyx, { 1, directions, hidden_size, 4 * hidden_size } }); + memory::ptr biases = engine.allocate_memory({ ov::element::from(), format::bfyx, { 1, 1, 4 * hidden_size, directions } }); + memory::ptr hidden = engine.allocate_memory({ ov::element::from(), format, { batch_size, 1, hidden_size, directions } }); + memory::ptr cell = engine.allocate_memory({ ov::element::from(), format, { batch_size, 1, hidden_size, directions } }); set_values(input, ref_input_vec); set_values(weights, ref_weights_vec); @@ -1025,12 +1025,12 @@ void lstm_gpu_users_test(bool is_caching_test = 
false) { auto& engine = get_test_engine(); - memory::ptr input = engine.allocate_memory({ type_to_data_type::value, format::bfyx, {batch_size, sequence_len, input_size, 1} }); - memory::ptr weights = engine.allocate_memory({ type_to_data_type::value, format::bfyx, { 1, directions, input_size , 4 * hidden_size } }); - memory::ptr recurrent = engine.allocate_memory({ type_to_data_type::value, format::bfyx, { 1, directions, hidden_size, 4 * hidden_size } }); - memory::ptr biases = engine.allocate_memory({ type_to_data_type::value, format::bfyx, { 1, 1, 4 * hidden_size, directions } }); - memory::ptr hidden = engine.allocate_memory({ type_to_data_type::value, format::bfyx, { batch_size, 1, hidden_size, directions } }); - memory::ptr cell = engine.allocate_memory({ type_to_data_type::value, format::bfyx, { batch_size, 1, hidden_size, directions } }); + memory::ptr input = engine.allocate_memory({ ov::element::from(), format::bfyx, {batch_size, sequence_len, input_size, 1} }); + memory::ptr weights = engine.allocate_memory({ ov::element::from(), format::bfyx, { 1, directions, input_size , 4 * hidden_size } }); + memory::ptr recurrent = engine.allocate_memory({ ov::element::from(), format::bfyx, { 1, directions, hidden_size, 4 * hidden_size } }); + memory::ptr biases = engine.allocate_memory({ ov::element::from(), format::bfyx, { 1, 1, 4 * hidden_size, directions } }); + memory::ptr hidden = engine.allocate_memory({ ov::element::from(), format::bfyx, { batch_size, 1, hidden_size, directions } }); + memory::ptr cell = engine.allocate_memory({ ov::element::from(), format::bfyx, { batch_size, 1, hidden_size, directions } }); set_values(input, ref_input_vec); set_values(weights, ref_weights_vec); @@ -1150,7 +1150,7 @@ void lstm_gpu_concatenated_input_test(int layers, int sequence_len, int directio auto& engine = get_test_engine(); - memory::ptr input = engine.allocate_memory({ type_to_data_type::value, format::bfyx, {batch_size, sequence_len, input_size, 1} }); + memory::ptr input = engine.allocate_memory({ ov::element::from(), format::bfyx, {batch_size, sequence_len, input_size, 1} }); set_values(input, ref_input_vec); std::vector weights; @@ -1159,20 +1159,20 @@ void lstm_gpu_concatenated_input_test(int layers, int sequence_len, int directio std::vector hidden; std::vector cell; for (int i = 0; i < layers; ++i) { - weights.push_back(engine.allocate_memory({ type_to_data_type::value, format::bfyx, { 1, direction, i == 0 ? input_size : hidden_size, 4 * hidden_size } })); + weights.push_back(engine.allocate_memory({ ov::element::from(), format::bfyx, { 1, direction, i == 0 ? 
input_size : hidden_size, 4 * hidden_size } })); set_values(weights[i], ref_weights_vec[i]); - recurrent.push_back(engine.allocate_memory({ type_to_data_type::value, format::bfyx, { 1, direction, hidden_size, 4 * hidden_size } })); + recurrent.push_back(engine.allocate_memory({ ov::element::from(), format::bfyx, { 1, direction, hidden_size, 4 * hidden_size } })); set_values(recurrent[i], ref_recurrent_vec[i]); if (has_bias) { - biases.push_back(engine.allocate_memory({ type_to_data_type::value, format::bfyx, { 1, 1, 4 * hidden_size, direction } })); + biases.push_back(engine.allocate_memory({ ov::element::from(), format::bfyx, { 1, 1, 4 * hidden_size, direction } })); set_values(biases[i], ref_bias_vec[i]); } if (has_initial_hidden) { - hidden.push_back(engine.allocate_memory({ type_to_data_type::value, format::bfyx, { batch_size, 1, hidden_size, direction } })); + hidden.push_back(engine.allocate_memory({ ov::element::from(), format::bfyx, { batch_size, 1, hidden_size, direction } })); set_values(hidden[i], ref_hidden_vec[i]); } if (has_initial_cell) { - cell.push_back(engine.allocate_memory({ type_to_data_type::value, format::bfyx, { batch_size, 1, hidden_size, direction} })); + cell.push_back(engine.allocate_memory({ ov::element::from(), format::bfyx, { batch_size, 1, hidden_size, direction} })); set_values(cell[i], ref_cell_vec[i]); } } @@ -1390,7 +1390,7 @@ void lstm_gpu_chain_test(int batch_size, int input_size, int hidden_size, auto& engine = get_test_engine(); tensor input_tensor = { batch_size, sequence_len, input_size, 1 }; - layout layout = { type_to_data_type::value, cldnn::format::bfyx, input_tensor }; + layout layout = { ov::element::from(), cldnn::format::bfyx, input_tensor }; memory::ptr input = engine.allocate_memory(layout); set_values(input, ref_input_vec); @@ -1410,27 +1410,27 @@ void lstm_gpu_chain_test(int batch_size, int input_size, int hidden_size, std::vector per_chain_cell; for (size_t layer = 0; layer < layers; layer++) { - per_chain_weights.push_back(engine.allocate_memory({ type_to_data_type::value, format::bfyx, {1, directions, layer == 0 ? input_size : hidden_size, 4 * hidden_size} })); + per_chain_weights.push_back(engine.allocate_memory({ ov::element::from(), format::bfyx, {1, directions, layer == 0 ? 
input_size : hidden_size, 4 * hidden_size} })); set_values(per_chain_weights[layer], ref_weights_vec[chain][layer]); - per_chain_recurrent.push_back(engine.allocate_memory({ type_to_data_type::value, format::bfyx, {1, directions, hidden_size, 4 * hidden_size} })); + per_chain_recurrent.push_back(engine.allocate_memory({ ov::element::from(), format::bfyx, {1, directions, hidden_size, 4 * hidden_size} })); set_values(per_chain_recurrent[layer], ref_recurrent_vec[chain][layer]); if (has_bias) { - per_chain_biases.push_back(engine.allocate_memory({ type_to_data_type::value, format::bfyx, {1, 1, 4 * hidden_size, directions} })); + per_chain_biases.push_back(engine.allocate_memory({ ov::element::from(), format::bfyx, {1, 1, 4 * hidden_size, directions} })); set_values(per_chain_biases[layer], ref_bias_vec[chain][layer]); } if (has_initial_hidden) { - per_chain_hidden.push_back(engine.allocate_memory({ type_to_data_type::value, format::bfyx, {1, 1, hidden_size, directions} })); + per_chain_hidden.push_back(engine.allocate_memory({ ov::element::from(), format::bfyx, {1, 1, hidden_size, directions} })); set_values(per_chain_hidden[layer], ref_hidden_vec[chain][layer]); } if (has_initial_cell) { - per_chain_cell.push_back(engine.allocate_memory({ type_to_data_type::value, format::bfyx, {1, 1, hidden_size, directions} })); + per_chain_cell.push_back(engine.allocate_memory({ ov::element::from(), format::bfyx, {1, 1, hidden_size, directions} })); set_values(per_chain_cell[layer], ref_cell_vec[chain][layer]); } } @@ -1935,115 +1935,115 @@ TEST(lstm_gpu, generic_lstm_chained_stacked_bidirectional_f32) { // FP16 Half precision tests TEST(lstm_gemm_gpu, generic_lstm_gemm_test_f16) { - generic_lstm_gemm_gpu_test(1, 1, 3, 6, 2, true, true); + generic_lstm_gemm_gpu_test(1, 1, 3, 6, 2, true, true); } TEST(lstm_gemm_gpu, generic_lstm_gemm_no_bias_f16) { - generic_lstm_gemm_gpu_test(1, 1, 3, 6, 2, false, true); + generic_lstm_gemm_gpu_test(1, 1, 3, 6, 2, false, true); } TEST(lstm_gemm_gpu, generic_lstm_gemm_no_hidden_f16) { - generic_lstm_gemm_gpu_test(1, 1, 3, 6, 2, true, false); + generic_lstm_gemm_gpu_test(1, 1, 3, 6, 2, true, false); } TEST(lstm_gemm_gpu, generic_lstm_gemm_no_hidden_bias_f16) { - generic_lstm_gemm_gpu_test(1, 1, 3, 6, 2, false, false); + generic_lstm_gemm_gpu_test(1, 1, 3, 6, 2, false, false); } TEST(DISABLED_lstm_elt_gpu, generic_lstm_elt_test_clip_f16) { - generic_lstm_elt_gpu_test(1, 1, 4, 6, 3, true, 0.3f, false); + generic_lstm_elt_gpu_test(1, 1, 4, 6, 3, true, 0.3f, false); } TEST(lstm_elt_gpu, generic_lstm_elt_test_input_forget_f16) { - generic_lstm_elt_gpu_test(1, 1, 4, 6, 3, true, 0.f, true); + generic_lstm_elt_gpu_test(1, 1, 4, 6, 3, true, 0.f, true); } TEST(DISABLED_lstm_elt_gpu, generic_lstm_elt_test_clip_input_forget_f16) { - generic_lstm_elt_gpu_test(1, 1, 4, 6, 3, true, 0.5f, true); + generic_lstm_elt_gpu_test(1, 1, 4, 6, 3, true, 0.5f, true); } TEST(lstm_elt_gpu, generic_lstm_elt_test_f16) { - generic_lstm_elt_gpu_test(1, 1, 4, 6, 3, true, 0.f, false); + generic_lstm_elt_gpu_test(1, 1, 4, 6, 3, true, 0.f, false); } TEST(lstm_elt_gpu, generic_lstm_elt_no_cell_f16) { - generic_lstm_elt_gpu_test(1, 1, 4, 6, 3, false, 0.f, false); + generic_lstm_elt_gpu_test(1, 1, 4, 6, 3, false, 0.f, false); } TEST(lstm_gpu, generic_lstm_f16) { - generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, true, true, true, 0, false); + generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, true, true, true, 0, false); } TEST(lstm_gpu, generic_lstm_no_bias_f16) { - generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, false, true, true, 0, 
false); + generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, false, true, true, 0, false); } TEST(lstm_gpu, generic_lstm_no_hidden_f16) { - generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, true, false, true, 0, false); + generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, true, false, true, 0, false); } TEST(lstm_gpu, generic_lstm_no_bias_hidden_f16) { - generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, false, false, true, 0, false); + generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, false, false, true, 0, false); } TEST(lstm_gpu, generic_lstm_no_cell_f16) { - generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, true, true, false, 0, false); + generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, true, true, false, 0, false); } TEST(lstm_gpu, generic_lstm_no_bias_cell_f16) { - generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, false, true, false, 0, false); + generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, false, true, false, 0, false); } TEST(lstm_gpu, generic_lstm_no_hidden_cell_f16) { - generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, true, false, false, 0, false); + generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, true, false, false, 0, false); } TEST(lstm_gpu, generic_lstm_no_bias_hidden_cell_f16) { - generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, false, false, false, 0, false); + generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, false, false, false, 0, false); } TEST(DISABLED_lstm_gpu, generic_lstm_clip_f16) { - generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, true, true, true, 0.3f, 0); + generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, true, true, true, 0.3f, 0); } TEST(lstm_gpu, generic_lstm_input_forget_f16) { - generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, true, true, true, 0.f, 1); + generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, true, true, true, 0.f, 1); } TEST(DISABLED_lstm_gpu, generic_lstm_clip_input_forget_f16) { - generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, true, true, true, 0.3f, 1); + generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, true, true, true, 0.3f, 1); } TEST(lstm_gpu, generic_lstm_offset_order_ifoz_f16) { default_offset_type = lstm_weights_order::ifoz; - generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, true, true, true, 0, false); + generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, true, true, true, 0, false); default_offset_type = lstm_weights_order::iofz; } TEST(lstm_gpu, generic_lstm_canonical_f16) { - generic_lstm_gpu_test(1, 1, 1, 1, 1, 1, true, true, true, 0, false); + generic_lstm_gpu_test(1, 1, 1, 1, 1, 1, true, true, true, 0, false); } // bidirectional support TEST(lstm_gpu, generic_lstm_bi_bias_f16) { - generic_lstm_gpu_test(1, 7, 2, 2, 3, 4, true, false, false, 0, false); + generic_lstm_gpu_test(1, 7, 2, 2, 3, 4, true, false, false, 0, false); } TEST(lstm_gpu, generic_lstm_bi_bias_hidden_f16) { - generic_lstm_gpu_test(1, 7, 2, 2, 3, 4, true, true, false, 0, false); + generic_lstm_gpu_test(1, 7, 2, 2, 3, 4, true, true, false, 0, false); } TEST(lstm_gpu, generic_lstm_bi_bias_hidden_cell_f16) { - generic_lstm_gpu_test(1, 7, 2, 2, 3, 4, true, true, true, 0, false); + generic_lstm_gpu_test(1, 7, 2, 2, 3, 4, true, true, true, 0, false); } // multi-layer support TEST(lstm_gpu, generic_lstm_stacked_seq_f16) { - generic_lstm_gpu_test(4, 7, 1, 3, 3, 2, true, true, true, 0, false); + generic_lstm_gpu_test(4, 7, 1, 3, 3, 2, true, true, true, 0, false); } TEST(lstm_gpu, generic_lstm_stacked_bi_f16) { - generic_lstm_gpu_test(4, 7, 2, 3, 3, 2, true, true, true, 0, false); + generic_lstm_gpu_test(4, 7, 2, 3, 3, 2, true, true, true, 0, false); } // TODO: Add tests for the following: @@ -2300,112 +2300,112 @@ TEST(lstm_gpu, generic_lstm_chained_stacked_bidirectional_f32_cached) { // FP16 Half precision tests TEST(lstm_gemm_gpu, 
generic_lstm_gemm_test_f16_cached) { - generic_lstm_gemm_gpu_test(1, 1, 3, 6, 2, true, true, true); + generic_lstm_gemm_gpu_test(1, 1, 3, 6, 2, true, true, true); } TEST(lstm_gemm_gpu, generic_lstm_gemm_no_bias_f16_cached) { - generic_lstm_gemm_gpu_test(1, 1, 3, 6, 2, false, true, true); + generic_lstm_gemm_gpu_test(1, 1, 3, 6, 2, false, true, true); } TEST(lstm_gemm_gpu, generic_lstm_gemm_no_hidden_f16_cached) { - generic_lstm_gemm_gpu_test(1, 1, 3, 6, 2, true, false, true); + generic_lstm_gemm_gpu_test(1, 1, 3, 6, 2, true, false, true); } TEST(lstm_gemm_gpu, generic_lstm_gemm_no_hidden_bias_f16_cached) { - generic_lstm_gemm_gpu_test(1, 1, 3, 6, 2, false, false, true); + generic_lstm_gemm_gpu_test(1, 1, 3, 6, 2, false, false, true); } TEST(DISABLED_lstm_elt_gpu, generic_lstm_elt_test_clip_f16_cached) { - generic_lstm_elt_gpu_test(1, 1, 4, 6, 3, true, 0.3f, false, true); + generic_lstm_elt_gpu_test(1, 1, 4, 6, 3, true, 0.3f, false, true); } TEST(lstm_elt_gpu, generic_lstm_elt_test_input_forget_f16_cached) { - generic_lstm_elt_gpu_test(1, 1, 4, 6, 3, true, 0.f, true, true); + generic_lstm_elt_gpu_test(1, 1, 4, 6, 3, true, 0.f, true, true); } TEST(DISABLED_lstm_elt_gpu, generic_lstm_elt_test_clip_input_forget_f16_cached) { - generic_lstm_elt_gpu_test(1, 1, 4, 6, 3, true, 0.5f, true, true); + generic_lstm_elt_gpu_test(1, 1, 4, 6, 3, true, 0.5f, true, true); } TEST(lstm_elt_gpu, generic_lstm_elt_test_f16_cached) { - generic_lstm_elt_gpu_test(1, 1, 4, 6, 3, true, 0.f, false, true); + generic_lstm_elt_gpu_test(1, 1, 4, 6, 3, true, 0.f, false, true); } TEST(lstm_elt_gpu, generic_lstm_elt_no_cell_f16_cached) { - generic_lstm_elt_gpu_test(1, 1, 4, 6, 3, false, 0.f, false, true); + generic_lstm_elt_gpu_test(1, 1, 4, 6, 3, false, 0.f, false, true); } TEST(lstm_gpu, generic_lstm_f16_cached) { - generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, true, true, true, 0, false, true); + generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, true, true, true, 0, false, true); } TEST(lstm_gpu, generic_lstm_no_bias_f16_cached) { - generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, false, true, true, 0, false, true); + generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, false, true, true, 0, false, true); } TEST(lstm_gpu, generic_lstm_no_hidden_f16_cached) { - generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, true, false, true, 0, false, true); + generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, true, false, true, 0, false, true); } TEST(lstm_gpu, generic_lstm_no_bias_hidden_f16_cached) { - generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, false, false, true, 0, false, true); + generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, false, false, true, 0, false, true); } TEST(lstm_gpu, generic_lstm_no_cell_f16_cached) { - generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, true, true, false, 0, false, true); + generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, true, true, false, 0, false, true); } TEST(lstm_gpu, generic_lstm_no_bias_cell_f16_cached) { - generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, false, true, false, 0, false, true); + generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, false, true, false, 0, false, true); } TEST(lstm_gpu, generic_lstm_no_hidden_cell_f16_cached) { - generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, true, false, false, 0, false, true); + generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, true, false, false, 0, false, true); } TEST(lstm_gpu, generic_lstm_no_bias_hidden_cell_f16_cached) { - generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, false, false, false, 0, false, true); + generic_lstm_gpu_test(1, 7, 1, 5, 4, 3, false, false, false, 0, false, true); } TEST(DISABLED_lstm_gpu, generic_lstm_clip_f16_cached) { - generic_lstm_gpu_test(1, 7, 
1, 3, 3, 2, true, true, true, 0.3f, 0, true); + generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, true, true, true, 0.3f, 0, true); } TEST(DISABLED_lstm_gpu, generic_lstm_input_forget_f16_cached) { - generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, true, true, true, 0.f, 1, true); + generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, true, true, true, 0.f, 1, true); } TEST(DISABLED_lstm_gpu, generic_lstm_clip_input_forget_f16_cached) { - generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, true, true, true, 0.3f, 1, true); + generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, true, true, true, 0.3f, 1, true); } TEST(lstm_gpu, generic_lstm_offset_order_ifoz_f16_cached) { default_offset_type = lstm_weights_order::ifoz; - generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, true, true, true, 0, false, true); + generic_lstm_gpu_test(1, 7, 1, 3, 3, 2, true, true, true, 0, false, true); default_offset_type = lstm_weights_order::iofz; } TEST(lstm_gpu, generic_lstm_canonical_f16_cached) { - generic_lstm_gpu_test(1, 1, 1, 1, 1, 1, true, true, true, 0, false, true); + generic_lstm_gpu_test(1, 1, 1, 1, 1, 1, true, true, true, 0, false, true); } // bidirectional support TEST(lstm_gpu, generic_lstm_bi_bias_f16_cached) { - generic_lstm_gpu_test(1, 7, 2, 2, 3, 4, true, false, false, 0, false, true); + generic_lstm_gpu_test(1, 7, 2, 2, 3, 4, true, false, false, 0, false, true); } TEST(lstm_gpu, generic_lstm_bi_bias_hidden_f16_cached) { - generic_lstm_gpu_test(1, 7, 2, 2, 3, 4, true, true, false, 0, false, true); + generic_lstm_gpu_test(1, 7, 2, 2, 3, 4, true, true, false, 0, false, true); } TEST(lstm_gpu, generic_lstm_bi_bias_hidden_cell_f16_cached) { - generic_lstm_gpu_test(1, 7, 2, 2, 3, 4, true, true, true, 0, false, true); + generic_lstm_gpu_test(1, 7, 2, 2, 3, 4, true, true, true, 0, false, true); } TEST(lstm_gpu, generic_lstm_stacked_seq_f16_cached) { - generic_lstm_gpu_test(4, 7, 1, 3, 3, 2, true, true, true, 0, false, true); + generic_lstm_gpu_test(4, 7, 1, 3, 3, 2, true, true, true, 0, false, true); } #endif TEST(lstm_gpu, generic_lstm_stacked_bi_f16_cached) { - generic_lstm_gpu_test(4, 7, 2, 3, 3, 2, true, true, true, 0, false, true); + generic_lstm_gpu_test(4, 7, 2, 3, 3, 2, true, true, true, 0, false, true); } diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/matrix_nms_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/matrix_nms_gpu_test.cpp index 0adeae76c21f6d..f44b7590888b82 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/matrix_nms_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/matrix_nms_gpu_test.cpp @@ -59,7 +59,7 @@ struct matrix_nms_gpu_test : public testing::TestWithParam::GetParam(); - const auto data_type = type_to_data_type::value; + const auto data_type = ov::element::from(); const auto plain_format = format::bfyx; auto& engine = get_test_engine(); @@ -666,26 +666,27 @@ INSTANTIATE_MATRIX_NMS_TEST_SUITE(float, get_matrix_nms_top_k_inputs) INSTANTIATE_MATRIX_NMS_TEST_SUITE(float, get_matrix_nms_single_box_inputs) INSTANTIATE_MATRIX_NMS_TEST_SUITE(float, get_matrix_nms_no_output_inputs) -INSTANTIATE_MATRIX_NMS_TEST_SUITE(FLOAT16, get_matrix_nms_smoke_inputs) -INSTANTIATE_MATRIX_NMS_TEST_SUITE(FLOAT16, get_matrix_nms_gaussian_inputs) -INSTANTIATE_MATRIX_NMS_TEST_SUITE(FLOAT16, get_matrix_nms_two_batches_two_classes_inputs) -INSTANTIATE_MATRIX_NMS_TEST_SUITE(FLOAT16, get_matrix_nms_by_keep_top_k_inputs) -INSTANTIATE_MATRIX_NMS_TEST_SUITE(FLOAT16, get_matrix_nms_two_batches_two_classes_by_classid_cross_batch_inputs) -INSTANTIATE_MATRIX_NMS_TEST_SUITE(FLOAT16, 
get_matrix_nms_two_batches_two_classes_by_score_cross_batch_inputs) -INSTANTIATE_MATRIX_NMS_TEST_SUITE(FLOAT16, get_matrix_nms_background_inputs) -INSTANTIATE_MATRIX_NMS_TEST_SUITE(FLOAT16, get_matrix_nms_flipped_coordinates_inputs) -INSTANTIATE_MATRIX_NMS_TEST_SUITE(FLOAT16, get_matrix_nms_post_threshold_inputs) -INSTANTIATE_MATRIX_NMS_TEST_SUITE(FLOAT16, get_matrix_nms_identical_boxes_inputs) -INSTANTIATE_MATRIX_NMS_TEST_SUITE(FLOAT16, get_matrix_nms_top_k_inputs) -INSTANTIATE_MATRIX_NMS_TEST_SUITE(FLOAT16, get_matrix_nms_single_box_inputs) -INSTANTIATE_MATRIX_NMS_TEST_SUITE(FLOAT16, get_matrix_nms_no_output_inputs) +using ov::float16; +INSTANTIATE_MATRIX_NMS_TEST_SUITE(float16, get_matrix_nms_smoke_inputs) +INSTANTIATE_MATRIX_NMS_TEST_SUITE(float16, get_matrix_nms_gaussian_inputs) +INSTANTIATE_MATRIX_NMS_TEST_SUITE(float16, get_matrix_nms_two_batches_two_classes_inputs) +INSTANTIATE_MATRIX_NMS_TEST_SUITE(float16, get_matrix_nms_by_keep_top_k_inputs) +INSTANTIATE_MATRIX_NMS_TEST_SUITE(float16, get_matrix_nms_two_batches_two_classes_by_classid_cross_batch_inputs) +INSTANTIATE_MATRIX_NMS_TEST_SUITE(float16, get_matrix_nms_two_batches_two_classes_by_score_cross_batch_inputs) +INSTANTIATE_MATRIX_NMS_TEST_SUITE(float16, get_matrix_nms_background_inputs) +INSTANTIATE_MATRIX_NMS_TEST_SUITE(float16, get_matrix_nms_flipped_coordinates_inputs) +INSTANTIATE_MATRIX_NMS_TEST_SUITE(float16, get_matrix_nms_post_threshold_inputs) +INSTANTIATE_MATRIX_NMS_TEST_SUITE(float16, get_matrix_nms_identical_boxes_inputs) +INSTANTIATE_MATRIX_NMS_TEST_SUITE(float16, get_matrix_nms_top_k_inputs) +INSTANTIATE_MATRIX_NMS_TEST_SUITE(float16, get_matrix_nms_single_box_inputs) +INSTANTIATE_MATRIX_NMS_TEST_SUITE(float16, get_matrix_nms_no_output_inputs) #ifndef RUN_ALL_MODEL_CACHING_TESTS -INSTANTIATE_TEST_SUITE_P(matrix_nms_test_FLOAT16get_matrix_nms_smoke_inputs_cached, - matrix_nms_gpu_test_FLOAT16get_matrix_nms_smoke_inputs, +INSTANTIATE_TEST_SUITE_P(matrix_nms_test_float16get_matrix_nms_smoke_inputs_cached, + matrix_nms_gpu_test_float16get_matrix_nms_smoke_inputs, testing::Combine(testing::Values(get_matrix_nms_smoke_inputs()), testing::ValuesIn(layout_formats), testing::Values(true)), - matrix_nms_gpu_test_FLOAT16get_matrix_nms_smoke_inputs::PrintToStringParamName); + matrix_nms_gpu_test_float16get_matrix_nms_smoke_inputs::PrintToStringParamName); #endif #undef INSTANTIATE_MATRIX_NMS_TEST_SUITE diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/multiclass_nms_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/multiclass_nms_gpu_test.cpp index d5fe3121996c65..62c64e6a07b052 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/multiclass_nms_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/multiclass_nms_gpu_test.cpp @@ -27,7 +27,7 @@ float getError() { } template<> -float getError() { +float getError() { return 0.2; } @@ -66,8 +66,8 @@ struct multiclass_nms_test : public ::testing::TestWithParam& formats = {format::bfyx}) { const MulticlassNmsParams param = testing::TestWithParam>::GetParam(); - auto data_type = type_to_data_type::value; - auto index_data_type = type_to_data_type::value; + auto data_type = ov::element::from(); + auto index_data_type = ov::element::from(); constexpr auto plain_format = format::bfyx; for (const auto target_format : formats) { @@ -240,14 +240,14 @@ struct PrintToStringParamName { const auto &p = info.param; std::ostringstream result; result << p.test_name << "_"; - result << "InputType=" << data_type_traits::name(type_to_data_type::value) << "_"; - result 
<< "DataType=" << data_type_traits::name(type_to_data_type::value); + result << "InputType=" << ov::element::Type(ov::element::from()) << "_"; + result << "DataType=" << ov::element::Type(ov::element::from()); return result.str(); } }; using multiclass_nms_test_f32_i32 = multiclass_nms_test; -using multiclass_nms_test_f16_i64 = multiclass_nms_test; +using multiclass_nms_test_f16_i64 = multiclass_nms_test; using multiclass_nms_test_blocked = multiclass_nms_test; TEST_P(multiclass_nms_test_f32_i32, basic) { @@ -848,7 +848,7 @@ INSTANTIATE_TEST_SUITE_P(multiclass_nms_gpu_test, INSTANTIATE_TEST_SUITE_P(multiclass_nms_gpu_test, multiclass_nms_test_f16_i64, - ::testing::ValuesIn(getMulticlassNmsParams()), + ::testing::ValuesIn(getMulticlassNmsParams()), PrintToStringParamName()); INSTANTIATE_TEST_SUITE_P(multiclass_nms_gpu_test_blocked, @@ -864,7 +864,7 @@ INSTANTIATE_TEST_SUITE_P(multiclass_nms_gpu_test_cached, INSTANTIATE_TEST_SUITE_P(multiclass_nms_gpu_test_cached, multiclass_nms_test_f16_i64, - ::testing::ValuesIn(getMulticlassNmsParams(true)), + ::testing::ValuesIn(getMulticlassNmsParams(true)), PrintToStringParamName()); #endif INSTANTIATE_TEST_SUITE_P(multiclass_nms_gpu_test_blocked_cached, diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/mvn_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/mvn_gpu_test.cpp index 6723436a6ab3b5..e3a8e02bee0872 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/mvn_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/mvn_gpu_test.cpp @@ -74,7 +74,7 @@ void mvn_compute_mean_within_channels(cldnn::memory::ptr output, bool normalize_ cldnn::mem_lock buff(output, get_test_stream()); - float err_margin = output->get_layout().data_type == data_types::f32 ? 1e-03F : 1e-02F; + float err_margin = output->get_layout().data_type == data_types::f32 ? 1e-03F : 2e-02F; for (uint32_t b = 0; b < batch_size; ++b) { for (uint32_t f = 0; f < feature_size; ++f) { @@ -113,7 +113,7 @@ void test_mvn_test_across_channels_outside_sqrt_bfyx(bool is_caching_test) { auto& engine = get_test_engine(); - cldnn::data_types input_data_type = std::is_same::value ? data_types::f16 : data_types::f32; + cldnn::data_types input_data_type = std::is_same::value ? data_types::f16 : data_types::f32; auto input = engine.allocate_memory({input_data_type, format::bfyx, {7, 10, 17, 13}}); @@ -147,7 +147,7 @@ void test_mvn_test_across_channels_inside_sqrt_bfyx(bool is_caching_test) { auto& engine = get_test_engine(); - cldnn::data_types input_data_type = std::is_same::value ? data_types::f16 : data_types::f32; + cldnn::data_types input_data_type = std::is_same::value ? 
data_types::f16 : data_types::f32; auto input = engine.allocate_memory({input_data_type, format::bfyx, {7, 10, 17, 13}}); @@ -174,11 +174,11 @@ TEST(mvn_gpu_test, mvn_test_across_channels_inside_sqrt_bfyx) { } TEST(mvn_gpu_test, mvn_test_across_channels_outside_sqrt_bfyx_fp16) { - test_mvn_test_across_channels_outside_sqrt_bfyx(false); + test_mvn_test_across_channels_outside_sqrt_bfyx(false); } TEST(mvn_gpu_test, mvn_test_across_channels_inside_sqrt_bfyx_fp16) { - test_mvn_test_across_channels_inside_sqrt_bfyx(false); + test_mvn_test_across_channels_inside_sqrt_bfyx(false); } TEST(mvn_gpu_test, mvn_test_across_channels_outside_sqrt_bfyx_normalize_variance) { @@ -244,7 +244,7 @@ TEST(mvn_gpu_test, mvn_test_across_channels_outside_sqrt_bfyx_normalize_variance auto input = engine.allocate_memory({data_types::f16, format::bfyx, {7, 10, 17, 13}}); - tests::set_random_values(input, true, 8, 100); + tests::set_random_values(input, true, 8, 100); topology topology; topology.add(input_layout("input", input->get_layout())); @@ -259,7 +259,7 @@ TEST(mvn_gpu_test, mvn_test_across_channels_outside_sqrt_bfyx_normalize_variance ASSERT_EQ(outputs.begin()->first, "mvn"); auto output = outputs.begin()->second.get_memory(); - mvn_compute_mean_across_channels(output, true); + mvn_compute_mean_across_channels(output, true); } TEST(mvn_gpu_test, mvn_test_across_channels_inside_sqrt_bfyx_normalize_variance_fp16) { @@ -271,7 +271,7 @@ TEST(mvn_gpu_test, mvn_test_across_channels_inside_sqrt_bfyx_normalize_variance_ auto input = engine.allocate_memory({data_types::f16, format::bfyx, {7, 10, 17, 13}}); - tests::set_random_values(input, true, 8, 100); + tests::set_random_values(input, true, 8, 100); topology topology; topology.add(input_layout("input", input->get_layout())); @@ -286,7 +286,7 @@ TEST(mvn_gpu_test, mvn_test_across_channels_inside_sqrt_bfyx_normalize_variance_ ASSERT_EQ(outputs.begin()->first, "mvn"); auto output = outputs.begin()->second.get_memory(); - mvn_compute_mean_across_channels(output, true); + mvn_compute_mean_across_channels(output, true); } TEST(mvn_gpu_test, dynamic_across_channels_inside_sqrt_bfyx_normalize_variance_fp16) { @@ -300,7 +300,7 @@ TEST(mvn_gpu_test, dynamic_across_channels_inside_sqrt_bfyx_normalize_variance_f auto in_layout = layout{ov::PartialShape::dynamic(in_shape.size()), data_types::f16, format::bfyx}; auto input = engine.allocate_memory(layout{ov::PartialShape(in_shape), data_types::f16, format::bfyx}); - tests::set_random_values(input, true, 8, 100); + tests::set_random_values(input, true, 8, 100); topology topology; topology.add(input_layout("input", in_layout)); @@ -321,7 +321,7 @@ TEST(mvn_gpu_test, dynamic_across_channels_inside_sqrt_bfyx_normalize_variance_f ASSERT_EQ(outputs.begin()->first, "mvn"); auto output = outputs.begin()->second.get_memory(); - mvn_compute_mean_across_channels(output, true); + mvn_compute_mean_across_channels(output, true); } TEST(mvn_gpu_test, mvn_test_within_channels_outside_sqrt_bfyx) { @@ -387,7 +387,7 @@ TEST(mvn_gpu_test, mvn_test_within_channels_outside_sqrt_bfyx_fp16) { auto input = engine.allocate_memory({data_types::f16, format::bfyx, {7, 10, 17, 13}}); - tests::set_random_values(input, true, 8, 100); + tests::set_random_values(input, true, 8, 100); topology topology; topology.add(input_layout("input", input->get_layout())); @@ -402,7 +402,7 @@ TEST(mvn_gpu_test, mvn_test_within_channels_outside_sqrt_bfyx_fp16) { ASSERT_EQ(outputs.begin()->first, "mvn"); auto output = outputs.begin()->second.get_memory(); - 
mvn_compute_mean_within_channels(output, false); + mvn_compute_mean_within_channels(output, false); } TEST(mvn_gpu_test, mvn_test_within_channels_inside_sqrt_bfyx_fp16) { @@ -414,7 +414,7 @@ TEST(mvn_gpu_test, mvn_test_within_channels_inside_sqrt_bfyx_fp16) { auto input = engine.allocate_memory({data_types::f16, format::bfyx, {7, 10, 17, 13}}); - tests::set_random_values(input, true, 8, 100); + tests::set_random_values(input, true, 8, 100); topology topology; topology.add(input_layout("input", input->get_layout())); @@ -429,7 +429,7 @@ TEST(mvn_gpu_test, mvn_test_within_channels_inside_sqrt_bfyx_fp16) { ASSERT_EQ(outputs.begin()->first, "mvn"); auto output = outputs.begin()->second.get_memory(); - mvn_compute_mean_within_channels(output, false); + mvn_compute_mean_within_channels(output, false); } TEST(mvn_gpu_test, mvn_test_within_channels_outside_sqrt_bfyx_normalize_variance) { @@ -495,7 +495,7 @@ TEST(mvn_gpu_test, mvn_test_within_channels_outside_sqrt_bfyx_normalize_variance auto input = engine.allocate_memory({data_types::f16, format::bfyx, {7, 10, 17, 13}}); - tests::set_random_values(input, true, 8, 100); + tests::set_random_values(input, true, 8, 100); topology topology; topology.add(input_layout("input", input->get_layout())); @@ -510,7 +510,7 @@ TEST(mvn_gpu_test, mvn_test_within_channels_outside_sqrt_bfyx_normalize_variance ASSERT_EQ(outputs.begin()->first, "mvn"); auto output = outputs.begin()->second.get_memory(); - mvn_compute_mean_within_channels(output, true); + mvn_compute_mean_within_channels(output, true); } TEST(mvn_gpu_test, mvn_test_within_channels_inside_sqrt_bfyx_normalize_variance_fp16) { @@ -522,7 +522,7 @@ TEST(mvn_gpu_test, mvn_test_within_channels_inside_sqrt_bfyx_normalize_variance_ auto input = engine.allocate_memory({data_types::f16, format::bfyx, {7, 10, 17, 13}}); - tests::set_random_values(input, true, 8, 100); + tests::set_random_values(input, true, 8, 100); topology topology; topology.add(input_layout("input", input->get_layout())); @@ -537,7 +537,7 @@ TEST(mvn_gpu_test, mvn_test_within_channels_inside_sqrt_bfyx_normalize_variance_ ASSERT_EQ(outputs.begin()->first, "mvn"); auto output = outputs.begin()->second.get_memory(); - mvn_compute_mean_within_channels(output, true); + mvn_compute_mean_within_channels(output, true); } TEST(mvn_gpu_test, dynamic_within_channels_inside_sqrt_bfyx_normalize_variance_fp16) { @@ -551,7 +551,7 @@ TEST(mvn_gpu_test, dynamic_within_channels_inside_sqrt_bfyx_normalize_variance_f auto in_layout = layout{ov::PartialShape::dynamic(in_shape.size()), data_types::f16, format::bfyx}; auto input = engine.allocate_memory(layout{ov::PartialShape(in_shape), data_types::f16, format::bfyx}); - tests::set_random_values(input, true, 8, 100); + tests::set_random_values(input, true, 8, 100); topology topology; topology.add(input_layout("input", in_layout)); @@ -572,7 +572,7 @@ TEST(mvn_gpu_test, dynamic_within_channels_inside_sqrt_bfyx_normalize_variance_f ASSERT_EQ(outputs.begin()->first, "mvn"); auto output = outputs.begin()->second.get_memory(); - mvn_compute_mean_within_channels(output, true); + mvn_compute_mean_within_channels(output, true); } struct mvn_basic_test_params { @@ -634,9 +634,9 @@ struct mvn_random_test : ::testing::TestWithParam { } } else if (output->get_layout().data_type == data_types::f16) { if (across_channels) { - mvn_compute_mean_across_channels(output, normalize_variance); + mvn_compute_mean_across_channels(output, normalize_variance); } else { - mvn_compute_mean_within_channels(output, normalize_variance); + 
mvn_compute_mean_within_channels(output, normalize_variance); } } } @@ -652,7 +652,7 @@ struct mvn_random_test : ::testing::TestWithParam { fill_random_data(input, -127, 127); break; case data_types::f16: - fill_random_data(input, -127, 127); + fill_random_data(input, -127, 127); break; case data_types::i8: fill_random_data(input, -127, 127); @@ -848,7 +848,7 @@ struct mvn_random_test_bsv32 : ::testing::TestWithParam { fill_random_data(input, -127, 127); break; case data_types::f16: - fill_random_data(input, -127, 127, 1); + fill_random_data(input, -127, 127, 1); break; case data_types::i8: fill_random_data(input, -127, 127, 1); @@ -900,7 +900,7 @@ struct mvn_random_test_bsv32 : ::testing::TestWithParam { if(output_dtype == data_types::f32) { compare_outputs(output, output_opt); } else if (output_dtype == data_types::f16) { - compare_outputs(output, output_opt); + compare_outputs(output, output_opt); } else if (output_dtype == data_types::i8) { compare_outputs(output, output_opt); } else if (output_dtype == data_types::u8) { @@ -958,11 +958,11 @@ TEST(mvn_gpu_test, mvn_test_across_channels_inside_sqrt_bfyx_cached) { } TEST(mvn_gpu_test, mvn_test_across_channels_outside_sqrt_bfyx_fp16_cached) { - test_mvn_test_across_channels_outside_sqrt_bfyx(true); + test_mvn_test_across_channels_outside_sqrt_bfyx(true); } TEST(mvn_gpu_test, mvn_test_across_channels_inside_sqrt_bfyx_fp16_cached) { - test_mvn_test_across_channels_inside_sqrt_bfyx(true); + test_mvn_test_across_channels_inside_sqrt_bfyx(true); } TEST_P(mvn_random_test, random_cached) { diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/non_max_suppression_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/non_max_suppression_test.cpp index 245db179d50feb..909149b05e32fa 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/non_max_suppression_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/non_max_suppression_test.cpp @@ -51,6 +51,9 @@ struct non_max_suppression_basic : public testing::Test { // 1 1 1 0.2 -- iou 0.29 // 1 1 0 0.1 -- iou 0.43 using DataType = typename TypeWithLayout::Type; + static const format::type layout_format = TypeWithLayout::layout; + const data_types data_type = ov::element::from(); + const int batch_size = 2; const int classes_num = 2; const int boxes_num = 3; @@ -79,11 +82,12 @@ struct non_max_suppression_basic : public testing::Test { }; const layout boxes_layout = layout(ov::PartialShape{batch_size, boxes_num, 4}, - type_to_data_type::value, + data_type, format::bfyx); const layout scores_layout = layout(ov::PartialShape{batch_size, classes_num, boxes_num}, - type_to_data_type::value, + data_type, format::bfyx); + const layout selected_scores_layout = layout(ov::PartialShape{selected_indices_num, 3}, data_type, layout_format); const layout valid_outputs_layout = layout(ov::PartialShape{1}, cldnn::data_types::i32, layout_format); @@ -109,8 +113,6 @@ struct non_max_suppression_basic : public testing::Test { return mem; } - static const format::type layout_format = TypeWithLayout::layout; - static const data_types data_type = type_to_data_type::value; const int pad = -1; @@ -329,10 +331,10 @@ struct non_max_suppression_basic : public testing::Test { ASSERT_FLOAT_EQ(expected_second_out[i], second_output_ptr[i]); } } else { - cldnn::mem_lock second_output_ptr(plane_scores_mem, get_test_stream()); + cldnn::mem_lock second_output_ptr(plane_scores_mem, get_test_stream()); for (size_t i = 0; i < expected_second_out.size(); ++i) { - ASSERT_NEAR(expected_second_out[i], 
half_to_float(second_output_ptr[i]), 0.0002f); + ASSERT_NEAR(expected_second_out[i], static_cast(second_output_ptr[i]), 0.0002f); } } @@ -449,10 +451,10 @@ struct non_max_suppression_basic : public testing::Test { ASSERT_FLOAT_EQ(expected_second_out[i], second_output_ptr[i]); } } else { - cldnn::mem_lock second_output_ptr(plane_scores_mem, get_test_stream()); + cldnn::mem_lock second_output_ptr(plane_scores_mem, get_test_stream()); for (size_t i = 0; i < expected_second_out.size(); ++i) { - ASSERT_NEAR(expected_second_out[i], half_to_float(second_output_ptr[i]), 0.0002f); + ASSERT_NEAR(expected_second_out[i], static_cast(second_output_ptr[i]), 0.0002f); } } @@ -643,12 +645,12 @@ using nms_types = testing::Types, TypeWithLayoutFormat, - TypeWithLayoutFormat, - TypeWithLayoutFormat, - TypeWithLayoutFormat, - TypeWithLayoutFormat, - TypeWithLayoutFormat, - TypeWithLayoutFormat>; + TypeWithLayoutFormat, + TypeWithLayoutFormat, + TypeWithLayoutFormat, + TypeWithLayoutFormat, + TypeWithLayoutFormat, + TypeWithLayoutFormat>; TYPED_TEST_SUITE(non_max_suppression_basic, nms_types); diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/non_zero_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/non_zero_gpu_test.cpp index b3aee2a31d645e..a4543197bcbd0a 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/non_zero_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/non_zero_gpu_test.cpp @@ -69,11 +69,11 @@ TEST(test_count_non_zero, 4d_fp32_1_2_1_5) { } TEST(test_count_non_zero, 5d_fp16_1_3_2_1_2) { - std::vector in_data = { + std::vector in_data = { 0.1f, 0.2f, 0.3f, 0.0f, 12.1f, 11.1f, 0.0f, 0.0f, 0.1f, 0.9f, 0.10f, 0.001f }; - test_count_non_zero(layout{ov::PartialShape{1, 3, 2, 1, 2}, data_types::f16, format::bfzyx}, in_data); + test_count_non_zero(layout{ov::PartialShape{1, 3, 2, 1, 2}, data_types::f16, format::bfzyx}, in_data); } TEST(test_count_non_zero, 2d_int32_1_256) { @@ -216,7 +216,7 @@ TEST(test_gather_non_zero, 4d_fp32_2_4_3_2) { test_gather_non_zero(layout{ov::PartialShape{2, 4, 3, 2}, data_types::f32, format::bfyx}, in_data); } TEST(test_gather_non_zero, 4d_fp16_2_4_3_2) { - std::vector in_data = { + std::vector in_data = { 0.1f, 0.2f, 0.3f, 0.0f, 12.0f, 2.0f, 0.4f, 0.1f, 1.9f, 0.10f, 1.0f, 0.0f, 0.1f, 0.2f, 0.0f, 100.0f, 0.0001f, 0.0f, 2.9f, 0.2f, 4.0f, 0.0f, 9.1f, 0.9f, @@ -224,7 +224,7 @@ TEST(test_gather_non_zero, 4d_fp16_2_4_3_2) { 4.0f, 0.0f, 3.1f, 0.9f, 0.10f, 49.2f, 0.0f, 0.3f, 100.0f, 0.4f, 0.1f, 0.9f, 0.1f, 33.12f, 12.1f, 0.0001f }; - test_gather_non_zero(layout{ov::PartialShape{2, 4, 3, 2}, data_types::f16, format::bfyx}, in_data); + test_gather_non_zero(layout{ov::PartialShape{2, 4, 3, 2}, data_types::f16, format::bfyx}, in_data); } TEST(test_gather_non_zero, 5d_fp32_1_3_3_2_2) { @@ -351,7 +351,7 @@ void test_non_zero(layout in_layout, std::vector in_data) { } TEST(test_non_zero, 1d_fp16_48) { - std::vector in_data = { + std::vector in_data = { 0.1f, 0.2f, 0.3f, 0.0f, 12.0f, 2.0f, 0.4f, 0.1f, 1.9f, 0.10f, 1.0f, 0.0f, 0.1f, 0.2f, 0.0f, 100.0f, 0.0001f, 0.0f, 2.9f, 0.2f, 4.0f, 0.0f, 9.1f, 0.9f, @@ -359,7 +359,7 @@ TEST(test_non_zero, 1d_fp16_48) { 4.0f, 0.0f, 3.1f, 0.9f, 0.10f, 49.2f, 0.0f, 0.3f, 100.0f, 0.4f, 0.1f, 0.9f, 0.1f, 33.12f, 12.1f, 0.0001f }; - test_non_zero(layout{ov::PartialShape{48}, data_types::f16, format::bfyx}, in_data); + test_non_zero(layout{ov::PartialShape{48}, data_types::f16, format::bfyx}, in_data); } TEST(test_non_zero, 2d_fp32_2_34) { @@ -387,7 +387,7 @@ TEST(test_non_zero, 3d_fp16_4_3_4) { } TEST(test_non_zero, 
4d_fp16_2_4_3_2) { - std::vector in_data = { + std::vector in_data = { 0.1f, 0.2f, 0.3f, 0.0f, 12.0f, 2.0f, 0.4f, 0.1f, 1.9f, 0.10f, 1.0f, 0.0f, 0.1f, 0.2f, 0.0f, 100.0f, 0.0001f, 0.0f, 2.9f, 0.2f, 4.0f, 0.0f, 9.1f, 0.9f, @@ -395,7 +395,7 @@ TEST(test_non_zero, 4d_fp16_2_4_3_2) { 4.0f, 0.0f, 3.1f, 0.9f, 0.10f, 49.2f, 0.0f, 0.3f, 100.0f, 0.4f, 0.1f, 0.9f, 0.1f, 33.12f, 12.1f, 0.0001f }; - test_non_zero(layout{ov::PartialShape{2, 4, 3, 2}, data_types::f16, format::bfyx}, in_data); + test_non_zero(layout{ov::PartialShape{2, 4, 3, 2}, data_types::f16, format::bfyx}, in_data); } TEST(test_non_zero, 5d_fp32_1_3_3_2_2) { diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/normalizel2_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/normalizel2_gpu_test.cpp index b9b6f620a43842..597e3fd9a022f0 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/normalizel2_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/normalizel2_gpu_test.cpp @@ -28,18 +28,16 @@ struct normalize_input_types { static const auto format = layoutFormat; using type = DataType; using output_type = typename std::conditional::value, DataType, float>::type; - static const data_types data_type = type_to_data_type::value; - static const data_types output_data_type = type_to_data_type::value; static const bool normalize_type = across_spatial; }; template struct normalize_basic : public testing::Test { static const auto format = NormalizeInput::format; - static const auto data_type = NormalizeInput::data_type; - static const auto output_data_type = NormalizeInput::output_data_type; using input_type = typename NormalizeInput::type; using output_type = typename NormalizeInput::output_type; + const ov::element::Type data_type = ov::element::from(); + const ov::element::Type output_data_type = ov::element::from(); static const bool across_spatial = NormalizeInput::normalize_type; const std::vector get_expected_result() { return get_expected_result(std::integral_constant()); @@ -91,7 +89,7 @@ struct normalize_basic : public testing::Test { auto output = outputs.at("plane_normalize2").get_memory(); if (this->data_type == data_types::f16) { - cldnn::mem_lock output_ptr(output, get_test_stream()); + cldnn::mem_lock output_ptr(output, get_test_stream()); auto expected_results = this->get_expected_result(); for (size_t i = 0; i < expected_results.size(); ++i) { ASSERT_NEAR(expected_results[i], output_ptr[i], 0.001); @@ -159,12 +157,12 @@ using format_types = testing::Types, normalize_input_types, normalize_input_types, - normalize_input_types, - normalize_input_types, - normalize_input_types, - normalize_input_types, - normalize_input_types, - normalize_input_types, + normalize_input_types, + normalize_input_types, + normalize_input_types, + normalize_input_types, + normalize_input_types, + normalize_input_types, normalize_input_types, normalize_input_types, normalize_input_types, @@ -179,12 +177,12 @@ using format_types = testing::Types, normalize_input_types, normalize_input_types, - normalize_input_types, - normalize_input_types, - normalize_input_types, - normalize_input_types, - normalize_input_types, - normalize_input_types, + normalize_input_types, + normalize_input_types, + normalize_input_types, + normalize_input_types, + normalize_input_types, + normalize_input_types, normalize_input_types, normalize_input_types, normalize_input_types, diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/one_hot_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/one_hot_gpu_test.cpp index 
baf6e095aa151c..520727e1874679 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/one_hot_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/one_hot_gpu_test.cpp @@ -79,7 +79,7 @@ void generic_one_hot_test_int(cldnn::format test_input_fmt, int input_b, int inp auto& engine = get_test_engine(); tensor input_tensor(input_b, input_f, input_x, input_y); - auto input = engine.allocate_memory({ type_to_data_type::value, test_input_fmt, input_tensor }); + auto input = engine.allocate_memory({ ov::element::from(), test_input_fmt, input_tensor }); set_values(input, input_rnd_vec); topology topology; diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/permute_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/permute_gpu_test.cpp index bc8df804a1efb4..7be1609c64a9c9 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/permute_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/permute_gpu_test.cpp @@ -1904,7 +1904,7 @@ void TiledPermuteTest::compare_value(float a, float b) const { // f16 format template<> -void TiledPermuteTest::compare_value(FLOAT16 a, FLOAT16 b) const { +void TiledPermuteTest::compare_value(ov::float16 a, ov::float16 b) const { ASSERT_FLOAT_EQ(static_cast(a), static_cast(b)); } @@ -1923,9 +1923,9 @@ template void TiledPermuteTest::run_test(const std::vector& sizes, cldnn::format format_fsv, const std::string & permute_opt, std::vector permute_order, bool is_caching_test) { - // convert half_t to FLOAT16 + // convert ov::float16 to ov::float16 using type_ = typename data_type_to_type::type; - using type = typename std::conditional::value, FLOAT16, type_>::type; + using type = typename std::conditional::value, ov::float16, type_>::type; std::vector internal_sizes(sizes); std::swap(internal_sizes.at(2), internal_sizes.back()); @@ -2317,9 +2317,9 @@ struct TiledPerformancePermuteTest : TiledPermuteTest const std::string & kernel_name, std::vector permute_order) { auto& engine = get_test_engine(); - // convert half_t to FLOAT16 + // convert ov::float16 to ov::float16 using type_ = typename data_type_to_type::type; - using type = typename std::conditional::value, FLOAT16, type_>::type; + using type = typename std::conditional::value, ov::float16, type_>::type; std::vector internal_sizes(sizes); std::swap(internal_sizes.at(2), internal_sizes.back()); @@ -2394,7 +2394,7 @@ struct TiledPerformancePermuteTest : TiledPermuteTest auto output_layout_ref = network_ref.get_program()->get_node("output").get_output_layout(); auto output_layout_opt = network_tile.get_program()->get_node("output").get_output_layout(); std::string frm_str = cldnn::format(format).to_string(); - std::string input_type = data_type_traits::name(Data_Type); + std::string input_type = ov::element::Type(Data_Type).get_type_name(); std::cout << "Exectued time " << " " << "permute_ref" << " " << " input(" << tensor.to_string() << ") output(" << output_layout_ref.to_string() << ") " @@ -2421,4 +2421,3 @@ INSTANTIATE_TEST_SUITE_P(, TiledPerformancePermuteTest, {{1, 256, 128, 256}, format::bfyx}, {{1, 256, 256, 128}, format::b_fs_yx_fsv16}, })); - diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/pooling_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/pooling_gpu_test.cpp index 944e6a0d9bd2ad..246d5d420c3435 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/pooling_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/pooling_gpu_test.cpp @@ -275,9 +275,9 @@ TEST(pooling_forward_gpu, basic_max_yxfb_f32_wsiz3x3_wstr1x1_i3x3x1x1_nopad) { 
TEST(pooling_forward_gpu, basic_max_pooling_int8) { auto& engine = get_test_engine(); - layout in_layout = { type_to_data_type::value, format::byxf, { 1, 1, 3, 3 } }; - layout out_layout = { type_to_data_type::value, format::byxf, { 1, 1, 1, 1 } }; - layout byte_layout = { type_to_data_type::value, format::bfyx, { 1, 1, 3, 3 } }; + layout in_layout = { ov::element::from(), format::byxf, { 1, 1, 3, 3 } }; + layout out_layout = { ov::element::from(), format::byxf, { 1, 1, 1, 1 } }; + layout byte_layout = { ov::element::from(), format::bfyx, { 1, 1, 3, 3 } }; std::initializer_list input_f = { 1.0f, -2.5f, 3.1f, -4.0f, 5.03f, -6.99f, 7.0f, -8.0f, 9.5f }; std::list final_results = { 9.0f }; @@ -318,9 +318,9 @@ TEST(pooling_forward_gpu, basic_max_pooling_int8) { TEST(pooling_forward_gpu, basic_avg_pooling_int8) { auto& engine = get_test_engine(); - layout in_layout = { type_to_data_type::value, format::byxf, { 1, 1, 3, 3 } }; - layout out_layout = { type_to_data_type::value, format::byxf, { 1, 1, 1, 1 } }; - layout byte_layout = { type_to_data_type::value, format::bfyx, { 1, 1, 3, 3 } }; + layout in_layout = { ov::element::from(), format::byxf, { 1, 1, 3, 3 } }; + layout out_layout = { ov::element::from(), format::byxf, { 1, 1, 1, 1 } }; + layout byte_layout = { ov::element::from(), format::bfyx, { 1, 1, 3, 3 } }; std::initializer_list input_f = { 2.0f, -2.5f, 5.1f, -4.0f, 8.03f, -6.99f, 17.0f, -8.0f, 19.5f }; // Average pooling returns fp32 by default for int8 inputs auto final_result = 0.0f; @@ -1258,22 +1258,22 @@ TEST(pooling_forward_gpu, bfyx_average_without_padding_i1x1_w3x3_s1x1_o1x1) //bfyx fp16 TEST(pooling_forward_gpu, bfyx_average_without_padding_i3x3_w2x2_s2x2_fp16) { - generic_average_wo_padding_test(format::bfyx, (tensor) spatial(2, 2), (tensor) spatial(3, 3), {2, 2}, {2, 2}, {0, 0}); + generic_average_wo_padding_test(format::bfyx, (tensor) spatial(2, 2), (tensor) spatial(3, 3), {2, 2}, {2, 2}, {0, 0}); } TEST(pooling_forward_gpu, bfyx_average_without_padding_i3x3_w2x2_s2x2_o1x1_fp16) { - generic_average_wo_padding_test(format::bfyx, (tensor) spatial(2, 2), (tensor) spatial(3, 3), {2, 2}, {2, 2}, {1, 1}); + generic_average_wo_padding_test(format::bfyx, (tensor) spatial(2, 2), (tensor) spatial(3, 3), {2, 2}, {2, 2}, {1, 1}); } TEST(pooling_forward_gpu, bfyx_average_without_padding_i3x3_w2x2_s3x3_o1x1_fp16) { - generic_average_wo_padding_test(format::bfyx, (tensor) spatial(2, 2), (tensor) spatial(3, 3), {3, 3}, {2, 2}, {1, 1}); + generic_average_wo_padding_test(format::bfyx, (tensor) spatial(2, 2), (tensor) spatial(3, 3), {3, 3}, {2, 2}, {1, 1}); } TEST(pooling_forward_gpu, bfyx_average_without_padding_i1x1_w3x3_s1x1_o1x1_fp16) { - generic_average_wo_padding_test(format::bfyx, (tensor) spatial(1, 1), (tensor) spatial(1, 1), {3, 3}, {1, 1}, {1, 1}); + generic_average_wo_padding_test(format::bfyx, (tensor) spatial(1, 1), (tensor) spatial(1, 1), {3, 3}, {1, 1}, {1, 1}); } //yxfb fp32 @@ -1300,22 +1300,22 @@ TEST(pooling_forward_gpu, yxfb_average_without_padding_i1x1_w3x3_s1x1_o1x1) //yxfb fp16 TEST(pooling_forward_gpu, yxfb_average_without_padding_i3x3_w2x2_s2x2_fp16) { - generic_average_wo_padding_test(format::yxfb, (tensor) spatial(2, 2), (tensor) spatial(3, 3), {2, 2}, {2, 2}, {0, 0}); + generic_average_wo_padding_test(format::yxfb, (tensor) spatial(2, 2), (tensor) spatial(3, 3), {2, 2}, {2, 2}, {0, 0}); } TEST(pooling_forward_gpu, yxfb_average_without_padding_i3x3_w2x2_s2x2_o1x1_fp16) { - generic_average_wo_padding_test(format::yxfb, (tensor) spatial(2, 2), (tensor) spatial(3, 3), 
{2, 2}, {2, 2}, {1, 1}); + generic_average_wo_padding_test(format::yxfb, (tensor) spatial(2, 2), (tensor) spatial(3, 3), {2, 2}, {2, 2}, {1, 1}); } TEST(pooling_forward_gpu, yxfb_average_without_padding_i3x3_w2x2_s3x3_o1x1_fp16) { - generic_average_wo_padding_test(format::yxfb, (tensor) spatial(2, 2), (tensor) spatial(3, 3), {3, 3}, {2, 2}, {1, 1}); + generic_average_wo_padding_test(format::yxfb, (tensor) spatial(2, 2), (tensor) spatial(3, 3), {3, 3}, {2, 2}, {1, 1}); } TEST(pooling_forward_gpu, yxfb_average_without_padding_i1x1_w3x3_s1x1_o1x1_fp16) { - generic_average_wo_padding_test(format::yxfb, (tensor) spatial(1, 1), (tensor) spatial(1, 1), {3, 3}, {1, 1}, {1, 1}); + generic_average_wo_padding_test(format::yxfb, (tensor) spatial(1, 1), (tensor) spatial(1, 1), {3, 3}, {1, 1}, {1, 1}); } //bfzyx fp32 @@ -1347,27 +1347,27 @@ TEST(pooling_forward_gpu, bfzyx_average_without_padding_i3x3x3_w3x3x3_s3x3x3) //bfzyx fp16 TEST(pooling_forward_gpu, bfzyx_average_without_padding_i3x3x3_w2x2x2_s2x2x2_fp16) { - generic_average_wo_padding_test(format::bfzyx, (tensor) spatial(2, 2, 2), (tensor) spatial(3, 3, 3), {2, 2, 2}, {2, 2, 2}, {0, 0, 0}); + generic_average_wo_padding_test(format::bfzyx, (tensor) spatial(2, 2, 2), (tensor) spatial(3, 3, 3), {2, 2, 2}, {2, 2, 2}, {0, 0, 0}); } TEST(pooling_forward_gpu, bfzyx_average_without_padding_i3x3x3_w2x2x2_s2x2x2_o1x1x1_fp16) { - generic_average_wo_padding_test(format::bfzyx, (tensor) spatial(2, 2, 2), (tensor) spatial(3, 3, 3), {2, 2, 2}, {2, 2, 2}, {1, 1, 1}); + generic_average_wo_padding_test(format::bfzyx, (tensor) spatial(2, 2, 2), (tensor) spatial(3, 3, 3), {2, 2, 2}, {2, 2, 2}, {1, 1, 1}); } TEST(pooling_forward_gpu, bfzyx_average_without_padding_i3x3x3_w2x2x3_s3x3x3_o1x1x1_fp16) { - generic_average_wo_padding_test(format::bfzyx, (tensor) spatial(2, 2, 2), (tensor) spatial(3, 3, 3), {3, 3, 3}, {2, 2, 2}, {1, 1, 1}); + generic_average_wo_padding_test(format::bfzyx, (tensor) spatial(2, 2, 2), (tensor) spatial(3, 3, 3), {3, 3, 3}, {2, 2, 2}, {1, 1, 1}); } TEST(pooling_forward_gpu, bfzyx_average_without_padding_i1x1x1_w3x3x3_s1x1x1_o1x1x1_fp16) { - generic_average_wo_padding_test(format::bfzyx, (tensor) spatial(1, 1, 1), (tensor) spatial(1, 1, 1), {3, 3, 3}, {1, 1, 1}, {1, 1, 1}); + generic_average_wo_padding_test(format::bfzyx, (tensor) spatial(1, 1, 1), (tensor) spatial(1, 1, 1), {3, 3, 3}, {1, 1, 1}, {1, 1, 1}); } TEST(pooling_forward_gpu, bfzyx_average_without_padding_i3x3x3_w3x3x3_s3x3x3_fp16) { - generic_average_wo_padding_test(format::bfzyx, (tensor) spatial(1, 1, 1), (tensor) spatial(3, 3, 3), {3, 3, 3}, {3, 3, 3}, {0, 0, 0}); + generic_average_wo_padding_test(format::bfzyx, (tensor) spatial(1, 1, 1), (tensor) spatial(3, 3, 3), {3, 3, 3}, {3, 3, 3}, {0, 0, 0}); } TEST(pooling_forward_gpu, b_fs_yx_fsv4) @@ -1525,7 +1525,7 @@ TEST(pooling_forward_gpu, fs_b_yx_fsv32_avg_3x3_input_2x2_pool_1x1_stride_2x2_ou topology.add(reorder("reorder_after_pooling", input_info("avg_pooling"), layout(data_types::f16, format::bfyx, { 1, 1, 2, 2 }))); network network(engine, topology, get_test_default_config(engine)); - set_values(input_prim, { FLOAT16(-0.5f), FLOAT16(1.0f), FLOAT16(0.5f), FLOAT16(2.0f), FLOAT16(1.5f), FLOAT16(-0.5f), FLOAT16(4.0f), FLOAT16(-1.0f), FLOAT16(3.5f) }); + set_values(input_prim, { ov::float16(-0.5f), ov::float16(1.0f), ov::float16(0.5f), ov::float16(2.0f), ov::float16(1.5f), ov::float16(-0.5f), ov::float16(4.0f), ov::float16(-1.0f), ov::float16(3.5f) }); network.set_input_data("input", input_prim); auto outputs = network.execute(); @@ 
-1534,7 +1534,7 @@ TEST(pooling_forward_gpu, fs_b_yx_fsv32_avg_3x3_input_2x2_pool_1x1_stride_2x2_ou auto output_prim = outputs.begin()->second.get_memory(); - cldnn::mem_lock output_ptr(output_prim, get_test_stream()); + cldnn::mem_lock output_ptr(output_prim, get_test_stream()); ASSERT_EQ(1.0f, float(output_ptr[0])); ASSERT_EQ(0.625f, float(output_ptr[1])); @@ -1577,7 +1577,7 @@ TEST(pooling_forward_gpu, fs_b_yx_fsv32_avg_3x3_input_2x2_pool_2x2_stride) topology.add(reorder("reorder_after_pooling", input_info("avg_pooling"), layout(data_types::f16, format::bfyx, { 1, 1, 3, 3 }))); network network(engine, topology, get_test_default_config(engine)); - set_values(input_prim, { FLOAT16(-0.5f), FLOAT16(1.0f), FLOAT16(0.5f), FLOAT16(2.0f), FLOAT16(1.5f), FLOAT16(-0.5f), FLOAT16(4.0f), FLOAT16(-1.0f), FLOAT16(3.5f) }); + set_values(input_prim, { ov::float16(-0.5f), ov::float16(1.0f), ov::float16(0.5f), ov::float16(2.0f), ov::float16(1.5f), ov::float16(-0.5f), ov::float16(4.0f), ov::float16(-1.0f), ov::float16(3.5f) }); network.set_input_data("input", input_prim); auto outputs = network.execute(); @@ -1585,7 +1585,7 @@ TEST(pooling_forward_gpu, fs_b_yx_fsv32_avg_3x3_input_2x2_pool_2x2_stride) ASSERT_EQ(outputs.begin()->first, "reorder_after_pooling"); auto output_prim = outputs.begin()->second.get_memory(); - cldnn::mem_lock output_ptr(output_prim, get_test_stream()); + cldnn::mem_lock output_ptr(output_prim, get_test_stream()); ASSERT_EQ(1.0f, float(output_ptr[0])); ASSERT_EQ(0.f, float(output_ptr[1])); @@ -1643,10 +1643,10 @@ TEST(pooling_forward_gpu, fs_b_yx_fsv32_avg_2x2x3x3_input_2x2_pool_2x2_stride) topology.add(reorder("reorder_after_pooling", input_info("avg_pooling"), layout(data_types::f16, format::bfyx, { batch_count, features_count, out_y, out_x }))); network network(engine, topology, get_test_default_config(engine)); - set_values(input_prim, { FLOAT16(-0.5f), FLOAT16(1.0f), FLOAT16(0.5f), FLOAT16(2.0f), FLOAT16(1.5f), FLOAT16(-0.5f), FLOAT16(4.0f), FLOAT16(-1.0f), FLOAT16(3.5f), //B0F0 - FLOAT16(-0.5f), FLOAT16(1.0f), FLOAT16(0.5f), FLOAT16(2.0f), FLOAT16(1.5f), FLOAT16(-0.5f), FLOAT16(4.0f), FLOAT16(-1.0f), FLOAT16(3.5f), //B0F1 - FLOAT16(-0.5f), FLOAT16(1.0f), FLOAT16(0.5f), FLOAT16(2.0f), FLOAT16(1.5f), FLOAT16(-0.5f), FLOAT16(4.0f), FLOAT16(-1.0f), FLOAT16(3.5f), //B1F0 - FLOAT16(-0.5f), FLOAT16(1.0f), FLOAT16(0.5f), FLOAT16(2.0f), FLOAT16(1.5f), FLOAT16(-0.5f), FLOAT16(4.0f), FLOAT16(-1.0f), FLOAT16(3.5f) });//B1F1 + set_values(input_prim, { ov::float16(-0.5f), ov::float16(1.0f), ov::float16(0.5f), ov::float16(2.0f), ov::float16(1.5f), ov::float16(-0.5f), ov::float16(4.0f), ov::float16(-1.0f), ov::float16(3.5f), //B0F0 + ov::float16(-0.5f), ov::float16(1.0f), ov::float16(0.5f), ov::float16(2.0f), ov::float16(1.5f), ov::float16(-0.5f), ov::float16(4.0f), ov::float16(-1.0f), ov::float16(3.5f), //B0F1 + ov::float16(-0.5f), ov::float16(1.0f), ov::float16(0.5f), ov::float16(2.0f), ov::float16(1.5f), ov::float16(-0.5f), ov::float16(4.0f), ov::float16(-1.0f), ov::float16(3.5f), //B1F0 + ov::float16(-0.5f), ov::float16(1.0f), ov::float16(0.5f), ov::float16(2.0f), ov::float16(1.5f), ov::float16(-0.5f), ov::float16(4.0f), ov::float16(-1.0f), ov::float16(3.5f) });//B1F1 network.set_input_data("input", input_prim); auto outputs = network.execute(); @@ -1655,7 +1655,7 @@ TEST(pooling_forward_gpu, fs_b_yx_fsv32_avg_2x2x3x3_input_2x2_pool_2x2_stride) auto output_prim = outputs.begin()->second.get_memory(); - cldnn::mem_lock output_ptr(output_prim, get_test_stream()); + cldnn::mem_lock 
output_ptr(output_prim, get_test_stream()); ASSERT_EQ((int)output_ptr.size(), batch_count * features_count*out_x*out_y); @@ -1716,9 +1716,9 @@ TEST(pooling_forward_gpu, fs_b_yx_fsv32_max_1x1x3x3_input_2x2_pool_2x2_stride_2x network network(engine, topology, get_test_default_config(engine)); set_values(input_prim, { - FLOAT16(1.50f), FLOAT16(-1.00f), FLOAT16(-0.50f), - FLOAT16(1.00f), FLOAT16(-1.00f), FLOAT16(-1.00f), - FLOAT16(-1.00f), FLOAT16(-1.00f), FLOAT16(-0.50f) + ov::float16(1.50f), ov::float16(-1.00f), ov::float16(-0.50f), + ov::float16(1.00f), ov::float16(-1.00f), ov::float16(-1.00f), + ov::float16(-1.00f), ov::float16(-1.00f), ov::float16(-0.50f) }); network.set_input_data("input_prim", input_prim); @@ -1738,7 +1738,7 @@ TEST(pooling_forward_gpu, fs_b_yx_fsv32_max_1x1x3x3_input_2x2_pool_2x2_stride_2x ASSERT_EQ((int)output_prim->get_layout().count(), 4); ASSERT_EQ((int)output_prim->get_layout().get_buffer_size().count(), 16); - cldnn::mem_lock output_ptr(output_prim, get_test_stream()); + cldnn::mem_lock output_ptr(output_prim, get_test_stream()); for (size_t i = 0; i < expected.size(); ++i) { ASSERT_EQ(expected[i], float(output_ptr[i])); @@ -1789,11 +1789,11 @@ TEST(pooling_forward_gpu, fs_b_yx_fsv32_max_1x1x5x5_input_2x2_pool_2x2_stride_2x network network(engine, topology, get_test_default_config(engine)); set_values(input_prim, { - FLOAT16(1.f), FLOAT16(2.f), FLOAT16(3.f), FLOAT16(4.f), FLOAT16(5.f), - FLOAT16(6.f), FLOAT16(1.50f), FLOAT16(-1.00f), FLOAT16(-0.50f), FLOAT16(7.f), - FLOAT16(8.f), FLOAT16(1.00f), FLOAT16(-1.00f), FLOAT16(-1.00f), FLOAT16(9.f), - FLOAT16(10.f), FLOAT16(-1.00f), FLOAT16(-1.00f), FLOAT16(-0.50f), FLOAT16(11.f), - FLOAT16(12.f), FLOAT16(13.f), FLOAT16(14.f), FLOAT16(15.f), FLOAT16(16.f) + ov::float16(1.f), ov::float16(2.f), ov::float16(3.f), ov::float16(4.f), ov::float16(5.f), + ov::float16(6.f), ov::float16(1.50f), ov::float16(-1.00f), ov::float16(-0.50f), ov::float16(7.f), + ov::float16(8.f), ov::float16(1.00f), ov::float16(-1.00f), ov::float16(-1.00f), ov::float16(9.f), + ov::float16(10.f), ov::float16(-1.00f), ov::float16(-1.00f), ov::float16(-0.50f), ov::float16(11.f), + ov::float16(12.f), ov::float16(13.f), ov::float16(14.f), ov::float16(15.f), ov::float16(16.f) }); network.set_input_data("input_prim", input_prim); @@ -1814,7 +1814,7 @@ TEST(pooling_forward_gpu, fs_b_yx_fsv32_max_1x1x5x5_input_2x2_pool_2x2_stride_2x ASSERT_EQ((int)output_prim->get_layout().count(), 9); ASSERT_EQ((int)output_prim->get_layout().get_buffer_size().count(), 25); - cldnn::mem_lock output_ptr(output_prim, get_test_stream()); + cldnn::mem_lock output_ptr(output_prim, get_test_stream()); for (size_t i = 0; i < expected.size(); ++i) { ASSERT_EQ(expected[i], float(output_ptr[i])); @@ -1844,10 +1844,10 @@ TEST(pooling_forward_gpu, fs_b_yx_fsv32_avg_65x5x6x7_input_3x3_pool_4x4_stride_3 const tensor input_tensor(batches, features, x_input, y_input); - std::vector input_data(batches*features*x_input*y_input); + std::vector input_data(batches*features*x_input*y_input); for (size_t i = 0; i < input_data.size(); i++) { - input_data[i] = FLOAT16((float)i/float(input_data.size())); + input_data[i] = ov::float16((float)i/float(input_data.size())); } auto input_prim = engine.allocate_memory({ data_types::f16,format::bfyx,input_tensor }); @@ -1866,7 +1866,7 @@ TEST(pooling_forward_gpu, fs_b_yx_fsv32_avg_65x5x6x7_input_3x3_pool_4x4_stride_3 golden_network.set_input_data("input", input_prim); auto outputs = golden_network.execute(); - cldnn::mem_lock 
output_ptr(outputs.begin()->second.get_memory(), get_test_stream()); + cldnn::mem_lock output_ptr(outputs.begin()->second.get_memory(), get_test_stream()); for (size_t i = 0; i < output_ptr.size(); i++) { golden_results.push_back(float(output_ptr[i])); @@ -1884,7 +1884,7 @@ TEST(pooling_forward_gpu, fs_b_yx_fsv32_avg_65x5x6x7_input_3x3_pool_4x4_stride_3 fsv32_network.set_input_data("input", input_prim); auto outputs = fsv32_network.execute(); - cldnn::mem_lock output_ptr(outputs.begin()->second.get_memory(), get_test_stream()); + cldnn::mem_lock output_ptr(outputs.begin()->second.get_memory(), get_test_stream()); for (size_t i = 0; i < output_ptr.size(); i++) { fsv32_results.push_back(float(output_ptr[i])); @@ -2007,11 +2007,11 @@ class pooling_test_base { format::type input_format() { return _input_fmt; } data_types input_type() { - return type_to_data_type::value; + return ov::element::from(); } data_types output_type() { - return type_to_data_type::value; + return ov::element::from(); } pooling_mode pool_mode() { return Mode; } @@ -2246,12 +2246,12 @@ class pooling_scale_random_test_base : public pooling_random_test_base(); + auto test_case = pooling_random_test_base(); ASSERT_NO_FATAL_FAILURE(test_case.run_random(GetParam(), false)); } TEST_P(pooling_random_test_fp16_fp32, max_fp16) { - auto test_case = pooling_random_test_base(); + auto test_case = pooling_random_test_base(); ASSERT_NO_FATAL_FAILURE(test_case.run_random(GetParam(), false)); } @@ -2960,7 +2960,7 @@ class pooling_test : public tests::generic_test } else { - prepare_input_for_test_typed(inputs); + prepare_input_for_test_typed(inputs); } } @@ -3188,7 +3188,7 @@ class pooling_test : public tests::generic_test } else { - return generate_reference_typed(inputs); + return generate_reference_typed(inputs); } } @@ -3219,9 +3219,9 @@ TEST(pooling_forward_gpu_onednn, basic_max_pooling_int8) { auto& engine = get_test_engine(); if (!engine.get_device_info().supports_immad) return; - layout in_layout = { type_to_data_type::value, format::byxf, { 1, 1, 3, 3 } }; - layout out_layout = { type_to_data_type::value, format::byxf, { 1, 1, 1, 1 } }; - layout byte_layout = { type_to_data_type::value, format::bfyx, { 1, 1, 3, 3 } }; + layout in_layout = { ov::element::from(), format::byxf, { 1, 1, 3, 3 } }; + layout out_layout = { ov::element::from(), format::byxf, { 1, 1, 1, 1 } }; + layout byte_layout = { ov::element::from(), format::bfyx, { 1, 1, 3, 3 } }; std::initializer_list input_f = { 1.0f, -2.5f, 3.1f, -4.0f, 5.03f, -6.99f, 7.0f, -8.0f, 9.5f }; std::list final_results = { 9.0f }; @@ -3288,12 +3288,12 @@ TEST_P(pooling_random_test, avg_u8_cached) { } TEST_P(pooling_random_test_fp16_fp32, avg_fp16_cached) { - auto test_case = pooling_random_test_base(); + auto test_case = pooling_random_test_base(); ASSERT_NO_FATAL_FAILURE(test_case.run_random(GetParam(), true)); } TEST_P(pooling_random_test_fp16_fp32, max_fp16_cached) { - auto test_case = pooling_random_test_base(); + auto test_case = pooling_random_test_base(); ASSERT_NO_FATAL_FAILURE(test_case.run_random(GetParam(), true)); } diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/prior_box_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/prior_box_gpu_test.cpp index d4f59eed3d2520..61d4ae8e20efc4 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/prior_box_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/prior_box_gpu_test.cpp @@ -41,8 +41,8 @@ template class PriorBoxGPUTest : public ::testing::TestWithParam> { public: void execute(bool 
is_caching_test) { - const auto input_data_type = type_to_data_type::value; - const auto output_data_type = type_to_data_type::value; + const auto input_data_type = ov::element::from(); + const auto output_data_type = ov::element::from(); const auto plain_format = format::bfyx; format::type target_format; diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/propagate_constants_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/propagate_constants_gpu_test.cpp index 0a95c5051881d3..eeaed4027a7786 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/propagate_constants_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/propagate_constants_gpu_test.cpp @@ -51,9 +51,9 @@ void test_copy_dependecies_from_nodes(bool is_caching_test) { } TEST(propagate_constants, copy_dependecies_from_nodes) { - test_copy_dependecies_from_nodes(false); + test_copy_dependecies_from_nodes(false); } TEST(propagate_constants, copy_dependecies_from_nodes_cached) { - test_copy_dependecies_from_nodes(true); + test_copy_dependecies_from_nodes(true); } diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/proposal_cpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/proposal_cpu_test.cpp index 7e9953b64a87e6..8b6d4cdecfb94c 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/proposal_cpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/proposal_cpu_test.cpp @@ -67,9 +67,9 @@ class TestRunnerProposal template TestRunnerProposal::TestRunnerProposal(cldnn::tensor image_info_size, bool is_caching_test) : - _cls_scores_layout(cldnn::type_to_data_type::value, format::bfyx, { 1, 18, 23, 14 } ), - _bbox_pred_layout(cldnn::type_to_data_type::value, format::bfyx, { 1, 36, 23, 14 } ), - _image_info_layout(cldnn::type_to_data_type::value, format::bfyx, image_info_size), + _cls_scores_layout(ov::element::from(), format::bfyx, { 1, 18, 23, 14 } ), + _bbox_pred_layout(ov::element::from(), format::bfyx, { 1, 36, 23, 14 } ), + _image_info_layout(ov::element::from(), format::bfyx, image_info_size), _test_layer(layer_name, cls_scores_name, bbox_pred_name, @@ -156,7 +156,7 @@ TEST(proposal, basic) { } TEST(proposal, fp16) { - test_proposal_basic({ 1, 3, 1, 1 }, false); + test_proposal_basic({ 1, 3, 1, 1 }, false); } TEST(proposal, img_info_batched) { @@ -186,11 +186,11 @@ void test_proposal_basic_two_types(cldnn::tensor image_info_size, bool is_cachin } TEST(proposal, scores_fp16_im_info_fp32) { - test_proposal_basic_two_types({ 1, 3, 1, 1 }, false); + test_proposal_basic_two_types({ 1, 3, 1, 1 }, false); } TEST(proposal, scores_fp32_im_info_fp16) { - test_proposal_basic_two_types({ 1, 3, 1, 1 }, false); + test_proposal_basic_two_types({ 1, 3, 1, 1 }, false); } #ifdef RUN_ALL_MODEL_CACHING_TESTS TEST(proposal, basic_cached) { @@ -198,7 +198,7 @@ TEST(proposal, basic_cached) { } TEST(proposal, fp16_cached) { - test_proposal_basic({ 1, 3, 1, 1 }, true); + test_proposal_basic({ 1, 3, 1, 1 }, true); } TEST(proposal, img_info_batched_cached) { @@ -210,9 +210,9 @@ TEST(proposal, img_info_batch_only_cached) { } TEST(proposal, scores_fp16_im_info_fp32_cached) { - test_proposal_basic_two_types({ 1, 3, 1, 1 }, true); + test_proposal_basic_two_types({ 1, 3, 1, 1 }, true); } #endif // RUN_ALL_MODEL_CACHING_TESTS TEST(proposal, scores_fp32_im_info_fp16_cached) { - test_proposal_basic_two_types({ 1, 3, 1, 1 }, true); + test_proposal_basic_two_types({ 1, 3, 1, 1 }, true); } diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/pyramid_roi_align_gpu_test.cpp 
b/src/plugins/intel_gpu/tests/unit/test_cases/pyramid_roi_align_gpu_test.cpp index 51f047aaa70f64..4cec9e2b18aac5 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/pyramid_roi_align_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/pyramid_roi_align_gpu_test.cpp @@ -13,7 +13,7 @@ using namespace ::tests; template struct pyramid_roi_align_typed_test : testing::Test { - static const data_types data_type = type_to_data_type::value; + const data_types data_type = ov::element::from(); using Type = T; void execute(bool is_caching_test) { @@ -124,7 +124,7 @@ struct pyramid_roi_align_typed_test : testing::Test { } } }; -using pyramid_roi_align_types = testing::Types; +using pyramid_roi_align_types = testing::Types; TYPED_TEST_SUITE(pyramid_roi_align_typed_test, pyramid_roi_align_types); diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/quantize_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/quantize_gpu_test.cpp index 4c689ac3c08834..3058d3b389ee89 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/quantize_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/quantize_gpu_test.cpp @@ -209,7 +209,7 @@ TEST(quantize_gpu, quantize_levels_2_output_broadcast_inputs_1_ch8_binary_pack) data("input_high", input_thresh), data("output_low", output_low), data("output_high", output_high), - quantize("quantize", input_info("input"), input_info("input_low"), input_info("input_high"), input_info("output_low"), input_info("output_high"), 2, data_types::bin), + quantize("quantize", input_info("input"), input_info("input_low"), input_info("input_high"), input_info("output_low"), input_info("output_high"), 2, data_types::u1), reorder("reorder", input_info("quantize"), layout{data_types::f32, format::bfyx, tensor{1,8,2,2}}) ); @@ -760,7 +760,7 @@ struct quantize_random_test : testing::TestWithParam(mem, -127, 127, 2); break; case data_types::f16: - fill_random_typed(mem, -127, 127, 2); + fill_random_typed(mem, -127, 127, 2); break; case data_types::i8: fill_random_typed(mem, -127, 127, 1); @@ -859,7 +859,7 @@ struct quantize_random_test : testing::TestWithParam(input, input_opt); } else if (params.input_type == data_types::f16) { - fill_typed(input, input_opt); + fill_typed(input, input_opt); } else if (params.input_type == data_types::i8) { fill_typed(input, input_opt); } else if (params.input_type == data_types::u8) { @@ -896,7 +896,7 @@ struct quantize_random_test : testing::TestWithParam(output, output_opt); } else if (params.output_type == data_types::f16) { - compare_outputs(output, output_opt); + compare_outputs(output, output_opt); } else if (params.output_type == data_types::i8) { compare_outputs(output, output_opt); } else if (params.output_type == data_types::u8) { diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/random_uniform_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/random_uniform_gpu_test.cpp index 62950ea1985cf5..873cf831dbccff 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/random_uniform_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/random_uniform_gpu_test.cpp @@ -31,7 +31,7 @@ struct random_uniform_gpu_test : public ::testing::TestWithParam::value; + auto data_type = ov::element::from(); RandomUniformParams params = testing::TestWithParam >::GetParam(); auto &engine = get_test_engine(); @@ -88,7 +88,7 @@ struct PrintToStringParamName { }; template<> -std::string PrintToStringParamName::operator()(const testing::TestParamInfo > ¶m) { +std::string PrintToStringParamName::operator()(const 
testing::TestParamInfo > ¶m) { std::stringstream buf; buf << "output_tensor_" << param.param.output_shape << "_min_value_" << static_cast(param.param.min_val) @@ -101,7 +101,7 @@ std::string PrintToStringParamName::operator()(const testing::TestParamInfo; using random_uniform_gpu_test_i64 = random_uniform_gpu_test; using random_uniform_gpu_test_f32 = random_uniform_gpu_test; -using random_uniform_gpu_test_f16 = random_uniform_gpu_test; +using random_uniform_gpu_test_f16 = random_uniform_gpu_test; TEST_P(random_uniform_gpu_test_i32, random_int32) { ASSERT_NO_FATAL_FAILURE(test(false)); @@ -173,20 +173,20 @@ INSTANTIATE_TEST_SUITE_P(smoke_random_uniform_f32, INSTANTIATE_TEST_SUITE_P(smoke_random_uniform_f16, random_uniform_gpu_test_f16, ::testing::Values( - RandomUniformParams{ov::Shape{1, 1, 4, 2, 3}, half_t(-1.5), - half_t(-1.0), 150, 10, - {half_t(-1.19726562), half_t(-1.09667969), - half_t(-1.08398438), half_t(-1.30859375), - half_t(-1.48242188), half_t(-1.45898438), - half_t(-1.22851562), half_t(-1.08300781), - half_t(-1.33203125), half_t(-1.14062500), - half_t(-1.42285156), half_t(-1.43554688), - half_t(-1.32617188), half_t(-1.06542969), - half_t(-1.29296875), half_t(-1.21386719), - half_t(-1.21289062), half_t(-1.03027344), - half_t(-1.17187500), half_t(-1.08886719), - half_t(-1.08789062), half_t(-1.43359375), - half_t(-1.17773438), half_t(-1.16992188)} + RandomUniformParams{ov::Shape{1, 1, 4, 2, 3}, ov::float16(-1.5), + ov::float16(-1.0), 150, 10, + {ov::float16(-1.19726562), ov::float16(-1.09667969), + ov::float16(-1.08398438), ov::float16(-1.30859375), + ov::float16(-1.48242188), ov::float16(-1.45898438), + ov::float16(-1.22851562), ov::float16(-1.08300781), + ov::float16(-1.33203125), ov::float16(-1.14062500), + ov::float16(-1.42285156), ov::float16(-1.43554688), + ov::float16(-1.32617188), ov::float16(-1.06542969), + ov::float16(-1.29296875), ov::float16(-1.21386719), + ov::float16(-1.21289062), ov::float16(-1.03027344), + ov::float16(-1.17187500), ov::float16(-1.08886719), + ov::float16(-1.08789062), ov::float16(-1.43359375), + ov::float16(-1.17773438), ov::float16(-1.16992188)} } ), PrintToStringParamName()); diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/range_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/range_gpu_test.cpp index 5460afb2a087ad..4b505185cd2f83 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/range_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/range_gpu_test.cpp @@ -68,7 +68,7 @@ struct range_test_params { }; std::ostream& operator<<(std::ostream& ost, const range_test_params& params) { - ost << data_type_traits::name(params.d_types) << ","; + ost << ov::element::Type(params.d_types) << ","; ost << "{start:" << params.start << ",stop:" << params.stop << ",step:" << params.step << "},"; ost << " use_new_shape_infer(" << (params.use_new_shape_infer?"True":"False") << ")"; return ost; @@ -105,9 +105,9 @@ void doSmokeRange_fp16(range_test_params& params) { auto stop_val = static_cast(params.stop); auto step_val = static_cast(params.step); - tests::set_values(args.start.p, { float_to_half(start_val) }); - tests::set_values(args.stop.p, { float_to_half(stop_val) }); - tests::set_values(args.step.p, { float_to_half(step_val) }); + tests::set_values(args.start.p, { ov::float16(start_val).to_bits() }); + tests::set_values(args.stop.p, { ov::float16(stop_val).to_bits() }); + tests::set_values(args.step.p, { ov::float16(step_val).to_bits() }); auto outLen = (stop_val - start_val) / step_val; diff --git 
a/src/plugins/intel_gpu/tests/unit/test_cases/reduce_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/reduce_gpu_test.cpp index f14cc66aaf5529..b14ad20c958740 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/reduce_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/reduce_gpu_test.cpp @@ -415,7 +415,7 @@ struct input_data_type { template <> struct input_data_type { - using type = FLOAT16; + using type = ov::float16; }; template <> @@ -435,7 +435,7 @@ struct output_data_type { template <> struct output_data_type { - using type = FLOAT16; + using type = ov::float16; }; template <> @@ -1774,7 +1774,7 @@ TEST(reduce_gpu, b_fs_yx_fsv16_max_dynamic) { topology.add(input_layout("input", in_layout)); topology.add(reorder("reorder", input_info("input"), used_layout)); - topology.add(reduce("reduce", input_info("reorder"), reduce_mode::max, {1}, 0)); + topology.add(reduce("reduce", input_info("reorder"), reduce_mode::max, {1}, 0)); ExecutionConfig config = get_test_default_config(engine); config.set_property(ov::intel_gpu::allow_new_shape_infer(true)); diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/region_yolo_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/region_yolo_gpu_test.cpp index ec868c8ff6acc9..11aac6ab995236 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/region_yolo_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/region_yolo_gpu_test.cpp @@ -228,22 +228,22 @@ TEST(region_yolo_gpu_fp32, byxf_softmax) { TEST(region_yolo_gpu_fp16, bfyx) { region_yolo_test_params params{{ 1, 33, 52, 52 }, { 0, 1, 2 }, 4, 6, 3, 1, 3, data_types::f16, format::bfyx, false}; - runRegionTest(params); + runRegionTest(params); } TEST(region_yolo_gpu_fp16, bfyx_softmax) { region_yolo_test_params params{{ 1, 33, 52, 52 }, { 0, 1, 2 }, 4, 6, 3, 1, 3, data_types::f16, format::bfyx, true}; - runRegionTest(params); + runRegionTest(params); } TEST(region_yolo_gpu_fp16, byxf) { region_yolo_test_params params{{ 1, 33, 52, 52 }, { 0, 1, 2 }, 4, 6, 3, 1, 3, data_types::f16, format::byxf, false}; - runRegionTest(params); + runRegionTest(params); } TEST(region_yolo_gpu_fp16, byxf_softmax) { region_yolo_test_params params{{ 1, 33, 52, 52 }, { 0, 1, 2 }, 4, 6, 3, 1, 3, data_types::f16, format::byxf, true}; - runRegionTest(params); + runRegionTest(params); } #ifdef RUN_ALL_MODEL_CACHING_TESTS @@ -269,20 +269,20 @@ TEST(region_yolo_gpu_fp32, byxf_softmax_cached) { TEST(region_yolo_gpu_fp16, bfyx_cached) { region_yolo_test_params params{{ 1, 33, 52, 52 }, { 0, 1, 2 }, 4, 6, 3, 1, 3, data_types::f16, format::bfyx, false}; - runRegionTest(params, true); + runRegionTest(params, true); } TEST(region_yolo_gpu_fp16, bfyx_softmax_cached) { region_yolo_test_params params{{ 1, 33, 52, 52 }, { 0, 1, 2 }, 4, 6, 3, 1, 3, data_types::f16, format::bfyx, true}; - runRegionTest(params, true); + runRegionTest(params, true); } TEST(region_yolo_gpu_fp16, byxf_cached) { region_yolo_test_params params{{ 1, 33, 52, 52 }, { 0, 1, 2 }, 4, 6, 3, 1, 3, data_types::f16, format::byxf, false}; - runRegionTest(params, true); + runRegionTest(params, true); } #endif // RUN_ALL_MODEL_CACHING_TESTS TEST(region_yolo_gpu_fp16, byxf_softmax_cached) { region_yolo_test_params params{{ 1, 33, 52, 52 }, { 0, 1, 2 }, 4, 6, 3, 1, 3, data_types::f16, format::byxf, true}; - runRegionTest(params, true); + runRegionTest(params, true); } diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/reorder_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/reorder_gpu_test.cpp index 
669162d73ae2e8..3d71384fca884b 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/reorder_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/reorder_gpu_test.cpp @@ -629,17 +629,17 @@ TEST(reorder_gpu_f16, basic_subtract_f32_output_f32) { auto subtract = engine.allocate_memory({ data_types::f32, format::byxf, { 1, 2, 2, 2 } }); set_values(input, { - half_t(1.f), half_t(0.f), - half_t(5.f), half_t(1.5f), + ov::float16(1.f), ov::float16(0.f), + ov::float16(5.f), ov::float16(1.5f), - half_t(2.f), half_t(0.f), - half_t(6.f), half_t(5.2f), + ov::float16(2.f), ov::float16(0.f), + ov::float16(6.f), ov::float16(5.2f), - half_t(3.f), half_t(0.5f), - half_t(7.f), half_t(12.f), + ov::float16(3.f), ov::float16(0.5f), + ov::float16(7.f), ov::float16(12.f), - half_t(4.f), half_t(-0.5f), - half_t(8.f), half_t(8.f) + ov::float16(4.f), ov::float16(-0.5f), + ov::float16(8.f), ov::float16(8.f) }); set_values(subtract, { @@ -723,17 +723,17 @@ TEST(reorder_gpu_f16, basic_subtract_value) { std::vector subtract_val = { 0.5, 2.5 }; set_values(input, { - half_t(1.f), half_t(0.f), - half_t(5.f), half_t(1.5f), + ov::float16(1.f), ov::float16(0.f), + ov::float16(5.f), ov::float16(1.5f), - half_t(2.f), half_t(0.f), - half_t(6.f), half_t(5.2f), + ov::float16(2.f), ov::float16(0.f), + ov::float16(6.f), ov::float16(5.2f), - half_t(3.f), half_t(0.5f), - half_t(7.f), half_t(12.f), + ov::float16(3.f), ov::float16(0.5f), + ov::float16(7.f), ov::float16(12.f), - half_t(4.f), half_t(-0.5f), - half_t(8.f), half_t(8.f) + ov::float16(4.f), ov::float16(-0.5f), + ov::float16(8.f), ov::float16(8.f) }); topology topology; @@ -749,20 +749,20 @@ TEST(reorder_gpu_f16, basic_subtract_value) { auto output = outputs.begin()->second.get_memory(); - half_t answers[16] = { half_t(0.5f), half_t(1.5f), - half_t(2.5f), half_t(3.5f), + ov::float16 answers[16] = { ov::float16(0.5f), ov::float16(1.5f), + ov::float16(2.5f), ov::float16(3.5f), - half_t(2.5f), half_t(3.5f), - half_t(4.5f), half_t(5.5f), + ov::float16(2.5f), ov::float16(3.5f), + ov::float16(4.5f), ov::float16(5.5f), - half_t(-0.5f), half_t(-0.5f), - half_t(0.f), half_t(-1.f), + ov::float16(-0.5f), ov::float16(-0.5f), + ov::float16(0.f), ov::float16(-1.f), - half_t(-1.f), half_t(2.7f), - half_t(9.5f), half_t(5.5f) + ov::float16(-1.f), ov::float16(2.7f), + ov::float16(9.5f), ov::float16(5.5f) }; - cldnn::mem_lock output_ptr(output, get_test_stream()); + cldnn::mem_lock output_ptr(output, get_test_stream()); for (int i = 0; i < 16; i++) { ASSERT_TRUE(are_equal(static_cast(answers[i]), static_cast(output_ptr[i]))); @@ -788,19 +788,19 @@ TEST(reorder_gpu, basic_convert_f16_f32_f16) { return; } - std::vector expected_values; + std::vector expected_values; expected_values.resize(0xF804); for (int i = 0; i < 0x7C00; ++i) - expected_values[i] = half_t(i, 0); // norms/denorms/zero (positive). + expected_values[i] = ov::float16::from_bits(i); // norms/denorms/zero (positive). for (int i = 0x7C00; i < 0xF800; ++i) - expected_values[i] = half_t(i + 0x0400, 0); // norms/denorms (negative). - expected_values[0x7C00] = half_t(0x0000, 0); // NOTE: do not do final test for negative 0 (-0). + expected_values[i] = ov::float16::from_bits(i + 0x0400); // norms/denorms (negative). + expected_values[0x7C00] = ov::float16::from_bits(0x0000); // NOTE: do not do final test for negative 0 (-0). // Special values. 
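// ---------------------------------------------------------------------------
// Sketch for context (not part of the diff): ov::float16::from_bits() replaces
// the old half_t(bits, 0) raw-bit constructor used below, and to_bits() reads
// the pattern back. Standalone illustration assuming only the OpenVINO float16
// header; the bit patterns are standard IEEE fp16 encodings.
#include <cstdint>
#include <iostream>
#include "openvino/core/type/float16.hpp"

int main() {
    const ov::float16 one      = ov::float16::from_bits(0x3C00);  // 1.0
    const ov::float16 pos_inf  = ov::float16::from_bits(0x7C00);  // +infinity
    const ov::float16 neg_zero = ov::float16::from_bits(0x8000);  // -0

    // A value survives the bit-level round trip exactly, which is what the
    // surrounding f16 -> f32 -> f16 reorder test relies on.
    std::cout << std::hex
              << one.to_bits() << " "                        // 3c00
              << neg_zero.to_bits() << " "                   // 8000
              << static_cast<float>(pos_inf) << std::endl;   // inf
    return 0;
}
// ---------------------------------------------------------------------------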
- expected_values[0xF800] = half_t(0x7C00, 0); // +infinity - expected_values[0xF801] = half_t(0xFC00, 0); // -infinity + expected_values[0xF800] = ov::float16::from_bits(0x7C00); // +infinity + expected_values[0xF801] = ov::float16::from_bits(0xFC00); // -infinity // Special values (ambiguous ones). - expected_values[0xF802] = half_t(0x8000, 0); // -0 - expected_values[0xF803] = half_t(0xFC12, 0); // A NaN (sample: -NaN.0x12). + expected_values[0xF802] = ov::float16::from_bits(0x8000); // -0 + expected_values[0xF803] = ov::float16::from_bits(0xFC12); // A NaN (sample: -NaN.0x12). auto input = engine.allocate_memory({ data_types::f16, format::yxfb, { 1, static_cast(expected_values.size()) / 4, 2, 2 } }); layout interm_layout( data_types::f32, format::byxf, { 1, static_cast(expected_values.size()) / 4, 2, 2 }); @@ -846,7 +846,7 @@ TEST(reorder_gpu, basic_convert_f16_f32_f16) { ASSERT_TRUE(std::isnan(interm_ptr[0xF803])); auto output = outputs.at("reorder_f32_f16").get_memory(); - cldnn::mem_lock output_ptr(output, get_test_stream()); + cldnn::mem_lock output_ptr(output, get_test_stream()); for (int i = 0; i < 0xF802; ++i) // NOTE: do not test for possibly ambiguous values of floating point (-0, NaNs). { ASSERT_TRUE(are_equal(static_cast(expected_values[i]), static_cast(output_ptr[i]))); @@ -856,8 +856,8 @@ TEST(reorder_gpu, basic_convert_f16_f32_f16) { TEST(reorder_gpu, basic_convert_int8) { auto& engine = get_test_engine(); - layout in_layout = { type_to_data_type::value,format::byxf,{ 1, 1, 3, 3 } }; - layout byte_layout = { type_to_data_type::value, format::bfyx,{ 1, 1, 3, 3 } }; + layout in_layout = { ov::element::from(),format::byxf,{ 1, 1, 3, 3 } }; + layout byte_layout = { ov::element::from(), format::bfyx,{ 1, 1, 3, 3 } }; std::initializer_list input_f = { 1.0f, -2.5f, 3.1f, -4.0f, 5.03f, -6.99f, 7.0f, -8.0f, 9.0f }; std::list final_results = { 1.0f, -2.0f, 3.0f, -4.0f, 5.0f, -6.0f, 7.0f, -8.0f, 9.0f }; @@ -900,8 +900,8 @@ TEST(reorder_gpu, basic_convert_int8) { TEST(reorder_gpu, basic_convert_uint8) { auto& engine = get_test_engine(); - layout in_layout = { type_to_data_type::value,format::byxf,{ 1, 1, 3, 3 } }; - layout byte_layout = { type_to_data_type::value, format::bfyx,{ 1, 1, 3, 3 } }; + layout in_layout = { ov::element::from(),format::byxf,{ 1, 1, 3, 3 } }; + layout byte_layout = { ov::element::from(), format::bfyx,{ 1, 1, 3, 3 } }; std::initializer_list input_f = { 1.0f, -2.5f, 3.1f, -4.0f, 5.03f, -6.99f, 7.0f, -8.0f, 9.0f }; std::list final_results = { 1.0f, 254.0f, 3.0f, 252.0f, 5.0f, 250.0f, 7.0f, 248.0f, 9.0f }; @@ -968,8 +968,8 @@ TEST(reorder_gpu, basic_convert_uint8rgbabyxf_to_fp32_bfyx) { 255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240, 239, 238, 237, 236 }; - layout in_layout = { type_to_data_type::value,format::byxf,{ 1, 4, kernel_size,kernel_size } }; - layout output_layout = { type_to_data_type::value, format::bfyx, { 1, 4, kernel_size,kernel_size } }; + layout in_layout = { ov::element::from(),format::byxf,{ 1, 4, kernel_size,kernel_size } }; + layout output_layout = { ov::element::from(), format::bfyx, { 1, 4, kernel_size,kernel_size } }; // Allocate memory for input image. 
auto input_memory = engine.allocate_memory(in_layout); @@ -1306,22 +1306,22 @@ TEST(reorder_gpu_f32, dynamic_bfyx_to_bfyx_dynamic_padding_x) { data_types::f16, format::bfyx, padding({0, 0, 2, 0}, {0, 0, 1, 0}, 0.0f, dyn_pad_dims)}); - set_values(input_mem, { - FLOAT16(0.f), FLOAT16(0.f), // padding - FLOAT16(1.f), FLOAT16(2.f), // data - FLOAT16(0.f), // padding - - FLOAT16(0.f), FLOAT16(0.f), // padding - FLOAT16(3.f), FLOAT16(4.f), // data - FLOAT16(0.f), // padding - - FLOAT16(0.f), FLOAT16(0.f), // padding - FLOAT16(5.f), FLOAT16(6.f), // data - FLOAT16(0.f), // padding - - FLOAT16(0.f), FLOAT16(0.f), // padding - FLOAT16(7.f), FLOAT16(8.f), // data - FLOAT16(0.f), // padding + set_values(input_mem, { + ov::float16(0.f), ov::float16(0.f), // padding + ov::float16(1.f), ov::float16(2.f), // data + ov::float16(0.f), // padding + + ov::float16(0.f), ov::float16(0.f), // padding + ov::float16(3.f), ov::float16(4.f), // data + ov::float16(0.f), // padding + + ov::float16(0.f), ov::float16(0.f), // padding + ov::float16(5.f), ov::float16(6.f), // data + ov::float16(0.f), // padding + + ov::float16(0.f), ov::float16(0.f), // padding + ov::float16(7.f), ov::float16(8.f), // data + ov::float16(0.f), // padding }); network.set_input_data("input", input_mem); @@ -1368,20 +1368,20 @@ TEST(reorder_gpu_f32, dynamic_bfyx_to_bfyx_dynamic_padding_f) { data_types::f16, format::bfyx, padding({0, 2, 0, 0}, {0, 1, 0, 0}, 0.0f, dyn_pad_dims)}); - set_values(input_mem, { - FLOAT16(0.f), FLOAT16(0.f), // f before - FLOAT16(0.f), FLOAT16(0.f), // f before - FLOAT16(1.f), FLOAT16(2.f), // b0 f0 - FLOAT16(3.f), FLOAT16(4.f), // b0 f1 - FLOAT16(5.f), FLOAT16(6.f), // b0 f2 - FLOAT16(0.f), FLOAT16(0.f), // f after - - FLOAT16(0.f), FLOAT16(0.f), // f before - FLOAT16(0.f), FLOAT16(0.f), // f before - FLOAT16(11.f), FLOAT16(22.f), // b1 f0 - FLOAT16(33.f), FLOAT16(44.f), // b1 f1 - FLOAT16(55.f), FLOAT16(66.f), // b1 f2 - FLOAT16(0.f), FLOAT16(0.f), // f after + set_values(input_mem, { + ov::float16(0.f), ov::float16(0.f), // f before + ov::float16(0.f), ov::float16(0.f), // f before + ov::float16(1.f), ov::float16(2.f), // b0 f0 + ov::float16(3.f), ov::float16(4.f), // b0 f1 + ov::float16(5.f), ov::float16(6.f), // b0 f2 + ov::float16(0.f), ov::float16(0.f), // f after + + ov::float16(0.f), ov::float16(0.f), // f before + ov::float16(0.f), ov::float16(0.f), // f before + ov::float16(11.f), ov::float16(22.f), // b1 f0 + ov::float16(33.f), ov::float16(44.f), // b1 f1 + ov::float16(55.f), ov::float16(66.f), // b1 f2 + ov::float16(0.f), ov::float16(0.f), // f after }); network.set_input_data("input", input_mem); @@ -1407,18 +1407,18 @@ TEST(reorder_gpu_f32, dynamic_bfyx_to_bfzyx) { layout in_layout{ov::PartialShape::dynamic(in_shape.size()), data_types::f16, format::bfyx}; auto input = engine.allocate_memory({ov::PartialShape(in_shape), data_types::f16, format::bfyx}); - set_values(input, { - FLOAT16(1.f), FLOAT16(0.f), - FLOAT16(5.f), FLOAT16(1.5f), + set_values(input, { + ov::float16(1.f), ov::float16(0.f), + ov::float16(5.f), ov::float16(1.5f), - FLOAT16(2.f), FLOAT16(0.f), - FLOAT16(6.f), FLOAT16(5.2f), + ov::float16(2.f), ov::float16(0.f), + ov::float16(6.f), ov::float16(5.2f), - FLOAT16(3.f), FLOAT16(0.5f), - FLOAT16(7.f), FLOAT16(12.f), + ov::float16(3.f), ov::float16(0.5f), + ov::float16(7.f), ov::float16(12.f), - FLOAT16(4.f), FLOAT16(-0.5f), - FLOAT16(8.f), FLOAT16(8.f) + ov::float16(4.f), ov::float16(-0.5f), + ov::float16(8.f), ov::float16(8.f) }); topology topology( @@ -2075,7 +2075,7 @@ 
TEST(reorder_gpu_binary, binary_output) config.set_property(ov::intel_gpu::optimize_data(true)); auto input = engine.allocate_memory({ data_types::f32, format::bfyx,{ 2, 2, 2, 2 } }); - layout output_layout(data_types::bin, format::b_fs_yx_32fp, { 2, 2, 2, 2 }); + layout output_layout(data_types::u1, format::b_fs_yx_32fp, { 2, 2, 2, 2 }); // Data is supposed to be quantized to {0,1} values set_values(input, { @@ -2122,7 +2122,7 @@ TEST(reorder_gpu_binary, binary_input) ov::intel_gpu::ExecutionConfig config = get_test_default_config(engine); config.set_property(ov::intel_gpu::optimize_data(true)); - auto input = engine.allocate_memory({ data_types::bin, format::b_fs_yx_32fp,{ 2, 2, 2, 2 } }); + auto input = engine.allocate_memory({ data_types::u1, format::b_fs_yx_32fp,{ 2, 2, 2, 2 } }); layout output_layout(data_types::f32, format::bfyx, { 2, 2, 2, 2 }); // Data is supposed to be quantized to {0,1} values @@ -2606,10 +2606,10 @@ TEST(reorder_image2d_rgba_to_bfyx_gpu, basic) 50.0f, 253.0f, }; - cldnn::mem_lock output_ptr (output, get_test_stream()); + cldnn::mem_lock output_ptr (output, get_test_stream()); for (int i = 0; i < 12; i++) { - ASSERT_NEAR(FLOAT16(answers[i] / 255.f), output_ptr[i], 1e-3f); + ASSERT_NEAR(ov::float16(answers[i] / 255.f), output_ptr[i], 1e-3f); } } @@ -2621,15 +2621,15 @@ TEST(reorder_bfyx_to_image2d_rgba_gpu, basic) auto input = engine.allocate_memory({ data_types::f16, format::bfyx, { 1, 3, 2, 2 } }); layout output_layout(data_types::u8, format::image_2d_rgba, { 1, 3, 2, 2 }); - set_values(input, { - FLOAT16(1.0f / 255.f), FLOAT16(2.0f / 255.f), - FLOAT16(124.0f / 255.f), FLOAT16(251.0f / 255.f), + set_values(input, { + ov::float16(1.0f / 255.f), ov::float16(2.0f / 255.f), + ov::float16(124.0f / 255.f), ov::float16(251.0f / 255.f), - FLOAT16(0.0f / 255.f), FLOAT16(111.0f / 255.f), - FLOAT16(125.0f / 255.f), FLOAT16(252.0f / 255.f), + ov::float16(0.0f / 255.f), ov::float16(111.0f / 255.f), + ov::float16(125.0f / 255.f), ov::float16(252.0f / 255.f), - FLOAT16(5.0f / 255.f), FLOAT16(123.0f / 255.f), - FLOAT16(50.0f / 255.f), FLOAT16(253.0f / 255.f), + ov::float16(5.0f / 255.f), ov::float16(123.0f / 255.f), + ov::float16(50.0f / 255.f), ov::float16(253.0f / 255.f), }); topology topology( @@ -2751,18 +2751,18 @@ class reorder_test : public tests::generic_test } else { - return generate_reference_typed(inputs); + return generate_reference_typed(inputs); } } else { if (*layer_params->output_data_types[0] == data_types::f32) { - return generate_reference_typed(inputs); + return generate_reference_typed(inputs); } else { - return generate_reference_typed(inputs); + return generate_reference_typed(inputs); } } } @@ -2859,7 +2859,7 @@ class ReorderTest : public ::testing::TestWithParam { cldnn::memory::ptr get_mem(cldnn::layout l) { auto prim = engine.allocate_memory(l); tensor s = l.get_tensor(); - if (l.data_type == data_types::bin) { + if (l.data_type == data_types::u1) { VF rnd_vec = rg.generate_random_1d(s.count() / 32, min_random, max_random); set_values(prim, rnd_vec); } else if (l.data_type == data_types::i8 || l.data_type == data_types::u8) { @@ -3148,8 +3148,8 @@ TEST(reorder_onednn_gpu, basic_convert_int8) { auto& engine = get_test_engine(); if (!engine.get_device_info().supports_immad) return; - layout in_layout = { type_to_data_type::value, format::byxf, { 1, 1, 3, 3 } }; - layout byte_layout = { type_to_data_type::value, format::bfyx, { 1, 1, 3, 3 } }; + layout in_layout = { ov::element::from(), format::byxf, { 1, 1, 3, 3 } }; + layout byte_layout = { 
ov::element::from(), format::bfyx, { 1, 1, 3, 3 } }; std::initializer_list input_f = { 1.0f, -2.6f, 3.1f, -4.0f, 5.03f, -6.99f, 7.0f, -8.0f, 9.0f }; std::list final_results = { 1.0f, -3.0f, 3.0f, -4.0f, 5.0f, -7.0f, 7.0f, -8.0f, 9.0f }; diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/reorg_yolo_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/reorg_yolo_gpu_test.cpp index edaecf60a34508..f7356f03938844 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/reorg_yolo_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/reorg_yolo_gpu_test.cpp @@ -54,7 +54,7 @@ float getError() { } template<> -float getError() { +float getError() { return 0.2; } @@ -305,7 +305,7 @@ struct reorg_yolo_test private: void run_test(const ReorgYoloParams& params, const format::type target_format, bool is_caching_test) { - const auto data_type = type_to_data_type::value; + const auto data_type = ov::element::from(); const format::type plain_format = format::bfyx; auto& engine = get_test_engine(); @@ -336,7 +336,7 @@ struct reorg_yolo_test using test_f32 = reorg_yolo_test; -using test_f16 = reorg_yolo_test; +using test_f16 = reorg_yolo_test; TEST_P(test_f32, basic) { test(false); @@ -359,7 +359,7 @@ INSTANTIATE_TEST_SUITE_P(reorg_yolo_f32, INSTANTIATE_TEST_SUITE_P(reorg_yolo_f16, test_f16, ::testing::Combine( - ::testing::ValuesIn(generateParams()), + ::testing::ValuesIn(generateParams()), ::testing::ValuesIn(dataFormats), ::testing::Values(false)), PrintToStringParamName()); diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/resample_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/resample_gpu_test.cpp index 6f241399134fa7..16c911fca45a2e 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/resample_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/resample_gpu_test.cpp @@ -344,7 +344,7 @@ struct resample_random_test : testing::TestWithParam(mem, -127, 127, 2); break; case data_types::f16: - fill_random_typed(mem, -127, 127, 2); + fill_random_typed(mem, -127, 127, 2); break; case data_types::i8: fill_random_typed(mem, -127, 127, 1); @@ -455,7 +455,7 @@ struct resample_random_test : testing::TestWithParam(input, output, 0); } else if (dt == data_types::f16) { - compare_nearest_typed(input, output, 0); + compare_nearest_typed(input, output, 0); } else if (dt == data_types::i8) { compare_nearest_typed(input, output, 0); } else if (dt == data_types::u8) { @@ -588,7 +588,7 @@ struct caffe_resample_random_test : testing::TestWithParam(mem, -127, 127, 2); break; case data_types::f16: - fill_random_typed(mem, -127, 127, 2); + fill_random_typed(mem, -127, 127, 2); break; case data_types::i8: fill_random_typed(mem, -127, 127, 1); @@ -680,7 +680,7 @@ struct caffe_resample_random_test : testing::TestWithParam(output, output_opt); } else if (params.input_type == data_types::f16) { - compare_outputs(output, output_opt); + compare_outputs(output, output_opt); } else if (params.input_type == data_types::i8) { compare_outputs(output, output_opt); } else if (params.input_type == data_types::u8) { @@ -1982,7 +1982,7 @@ struct resample_opt_random_test : testing::TestWithParam(mem, -127, 127, 2); break; case data_types::f16: - fill_random_typed(mem, -127, 127, 2); + fill_random_typed(mem, -127, 127, 2); break; case data_types::i8: fill_random_typed(mem, -127, 127, 1); @@ -2018,7 +2018,7 @@ struct resample_opt_random_test : testing::TestWithParam::value) { + if (std::is_same::value) { ASSERT_NEAR(static_cast(opt_out_val), static_cast(ref_out_val), 1.e-1f); } else { 
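// ---------------------------------------------------------------------------
// Sketch for context (not part of the diff): the updated check above appears to
// key the comparison on whether T is ov::float16, allowing fp16 outputs a small
// absolute tolerance while comparing other types exactly. Standalone
// illustration without gtest; the helper name and tolerance are illustrative.
#include <cmath>
#include <iostream>
#include <type_traits>
#include "openvino/core/type/float16.hpp"

template <typename T>
bool outputs_match(T reference, T actual) {
    if (std::is_same<T, ov::float16>::value) {
        // fp16 results are allowed a small absolute error.
        return std::fabs(static_cast<float>(reference) - static_cast<float>(actual)) <= 1e-1f;
    }
    // Everything else must match exactly.
    return reference == actual;
}

int main() {
    std::cout << outputs_match(ov::float16(1.0f), ov::float16(1.05f)) << " "  // 1 (within tolerance)
              << outputs_match(3, 3) << std::endl;                            // 1 (exact match)
    return 0;
}
// ---------------------------------------------------------------------------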
ASSERT_EQ(opt_out_val, ref_out_val); @@ -2085,7 +2085,7 @@ struct resample_opt_random_test : testing::TestWithParam(output, output_opt); } else if (params.input_type == data_types::f16) { - compare_outputs(output, output_opt); + compare_outputs(output, output_opt); } else if (params.input_type == data_types::i8) { compare_outputs(output, output_opt); } else if (params.input_type == data_types::u8) { @@ -2178,7 +2178,7 @@ struct resample_opt_random_test_ext : resample_opt_random_test } exectime /= r; std::string frm_str = format(working_format).to_string(); - std::string input_type = data_type_traits::name(params.input_type); + std::string input_type = ov::element::Type(params.input_type).get_type_name(); std::string is_opt = (do_planar == true) ? " not optimazed " : " optimized "; std::string mode; switch (params.operation_type) { diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/reshape_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/reshape_gpu_test.cpp index e3cdc480c9f082..0d7c6cbe271f41 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/reshape_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/reshape_gpu_test.cpp @@ -34,7 +34,7 @@ void generic_reshape_test(format fmt, tensor const& input_size, tensor const& re //allocate input memory auto data_type = data_types::f32; - if (std::is_same::value) + if (std::is_same::value) data_type = data_types::f16; else if (std::is_same::value) data_type = data_types::i8; @@ -140,7 +140,7 @@ TEST(reshape_gpu_f32, basic_2dim_in_place) { } TEST(reshape_gpu_f16, basic_2dim_in_place) { - generic_reshape_test( + generic_reshape_test( format::bfyx, tensor(1, 1, 2, 2), tensor(1, 1, 1, 4), @@ -180,7 +180,7 @@ TEST(reshape_gpu_f32, basic_4dim_in_place) { } TEST(reshape_gpu_f16, basic_4dim_in_place) { - generic_reshape_test( + generic_reshape_test( format::yxfb, tensor(9, 9, 2, 4), tensor(3, 4, 27, 2), @@ -214,7 +214,7 @@ TEST(reshpape_gpu_f32, basic_2dim_output_padd) { } TEST(reshape_gpu_f16, basic_2dim_output_padd) { - generic_reshape_test( + generic_reshape_test( format::byxf, tensor(1, 1, 3, 4), tensor(1, 1, 2, 6), @@ -263,7 +263,7 @@ TEST(reshape_gpu_f32, basic_2dim_input_padd) { } TEST(reshape_gpu_f16, basic_2dim_input_padd) { - generic_reshape_test( + generic_reshape_test( format::fyxb, tensor(1, 1, 3, 3), tensor(1, 1, 1, 9), @@ -309,7 +309,7 @@ TEST(reshape_gpu_f32, basic_2dim_input_output_padd) { } TEST(reshape_gpu_f16, basic_2dim_input_output_padd) { - generic_reshape_test( + generic_reshape_test( format::byxf, tensor(1, 1, 6, 6), tensor(1, 1, 3, 12), @@ -359,7 +359,7 @@ TEST(reshpape_gpu_f32, basic_4dim_output_padd) { } TEST(reshape_gpu_f16, basic_4dim_output_padd) { - generic_reshape_test( + generic_reshape_test( format::bfyx, tensor(5, 4, 2, 2), tensor(40, 2, 1, 1), @@ -378,7 +378,7 @@ TEST(reshape_gpu_f32, basic_4dim_input_padd) { } TEST(reshape_gpu_f16, basic_4dim_input_padd) { - generic_reshape_test( + generic_reshape_test( format::yxfb, tensor(2, 32, 8, 8), tensor(8, 128, 1, 4), @@ -397,7 +397,7 @@ TEST(reshape_gpu_f32, basic_4dim_input_output_padd) { } TEST(reshape_gpu_f16, basic_4dim_input_output_padd) { - generic_reshape_test( + generic_reshape_test( format::byxf, tensor(32, 3, 227, 227), tensor(8, 12, 227, 227), @@ -1005,7 +1005,7 @@ TEST(reshape_gpu_f32, basic_2dim_in_place_cached) { } TEST(reshape_gpu_f16, basic_2dim_in_place_cached) { - generic_reshape_test( + generic_reshape_test( format::bfyx, tensor(1, 1, 2, 2), tensor(1, 1, 1, 4), @@ -1060,7 +1060,7 @@ TEST(reshape_gpu_f32, 
basic_4dim_in_place_cached) { } TEST(reshape_gpu_f16, basic_4dim_in_place_cached) { - generic_reshape_test( + generic_reshape_test( format::yxfb, tensor(9, 9, 2, 4), tensor(3, 4, 27, 2), @@ -1104,7 +1104,7 @@ TEST(reshpape_gpu_f32, basic_2dim_output_padd_cached) { } TEST(reshape_gpu_f16, basic_2dim_output_padd_cached) { - generic_reshape_test( + generic_reshape_test( format::byxf, tensor(1, 1, 3, 4), tensor(1, 1, 2, 6), @@ -1159,7 +1159,7 @@ TEST(reshape_gpu_f32, basic_2dim_input_padd_cached) { } TEST(reshape_gpu_f16, basic_2dim_input_padd_cached) { - generic_reshape_test( + generic_reshape_test( format::fyxb, tensor(1, 1, 3, 3), tensor(1, 1, 1, 9), @@ -1214,7 +1214,7 @@ TEST(reshape_gpu_f32, basic_2dim_input_output_padd_cached) { } TEST(reshape_gpu_f16, basic_2dim_input_output_padd_cached) { - generic_reshape_test( + generic_reshape_test( format::byxf, tensor(1, 1, 6, 6), tensor(1, 1, 3, 12), @@ -1269,7 +1269,7 @@ TEST(reshpape_gpu_f32, basic_4dim_output_padd_cached) { } TEST(reshape_gpu_f16, basic_4dim_output_padd_cached) { - generic_reshape_test( + generic_reshape_test( format::bfyx, tensor(5, 4, 2, 2), tensor(40, 2, 1, 1), @@ -1291,7 +1291,7 @@ TEST(reshape_gpu_f32, basic_4dim_input_padd_cached) { } TEST(reshape_gpu_f16, basic_4dim_input_padd_cached) { - generic_reshape_test( + generic_reshape_test( format::yxfb, tensor(2, 32, 8, 8), tensor(8, 128, 1, 4), @@ -1313,7 +1313,7 @@ TEST(reshape_gpu_f32, basic_4dim_input_output_padd_cached) { } TEST(reshape_gpu_f16, basic_4dim_input_output_padd_cached) { - generic_reshape_test( + generic_reshape_test( format::byxf, tensor(32, 3, 227, 227), tensor(8, 12, 227, 227), diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/reverse_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/reverse_gpu_test.cpp index cb512fcd839463..c58f448d0d6662 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/reverse_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/reverse_gpu_test.cpp @@ -44,7 +44,7 @@ template struct reverse_gpu_test : public ::testing::TestWithParam> { public: void test(bool is_caching_test = false) { - auto data_type = type_to_data_type::value; + auto data_type = ov::element::from(); ReverseParams params = testing::TestWithParam>::GetParam(); auto& engine = get_test_engine(); @@ -67,11 +67,11 @@ struct reverse_gpu_test : public ::testing::TestWithParam if (reorder_needed) { const std::string r_reverse_input_id = "r_reverse_input"; const std::string r_axes_id = "r_reverse_axes"; - tp.add(reorder(r_reverse_input_id, input_info(reverse_input_id), params.input_format, type_to_data_type::value)); - tp.add(reorder(r_axes_id, input_info(axes_id), params.input_format, type_to_data_type::value)); + tp.add(reorder(r_reverse_input_id, input_info(reverse_input_id), params.input_format, ov::element::from())); + tp.add(reorder(r_axes_id, input_info(axes_id), params.input_format, ov::element::from())); tp.add(reverse(reverse_id, input_info(r_reverse_input_id), input_info(r_axes_id), mode)); ouput_op_name = "reversed_result"; - tp.add(reorder(ouput_op_name, input_info(reverse_id), fmt, type_to_data_type::value)); + tp.add(reorder(ouput_op_name, input_info(reverse_id), fmt, ov::element::from())); } else { tp.add(reverse(reverse_id, input_info(reverse_input_id), input_info(axes_id), mode)); } @@ -115,8 +115,8 @@ using reverse_gpu_test_int8_mask = reverse_gpu_test; using reverse_gpu_test_int8_index = reverse_gpu_test; using reverse_gpu_test_uint8_mask = reverse_gpu_test; using reverse_gpu_test_uint8_index = reverse_gpu_test; -using 
reverse_gpu_test_f16_mask = reverse_gpu_test; -using reverse_gpu_test_f16_index = reverse_gpu_test; +using reverse_gpu_test_f16_mask = reverse_gpu_test; +using reverse_gpu_test_f16_index = reverse_gpu_test; TEST_P(reverse_gpu_test_int32_mask, reverse_i32_mask) { ASSERT_NO_FATAL_FAILURE(test()); @@ -291,74 +291,74 @@ std::vector> generateIndexParams() { } template <> -std::vector> generateMaskParams() { - std::vector> params; +std::vector> generateMaskParams() { + std::vector> params; for (const auto fmt : four_d_formats) { // reverse_2d_1_mask params.push_back({tensor(batch(4), feature(3)), fmt, - std::vector{half_t(0), - half_t(1), - half_t(2), - half_t(3), - half_t(4), - half_t(5), - half_t(6), - half_t(7), - half_t(8), - half_t(9), - half_t(10), - half_t(11)}, + std::vector{ov::float16(0), + ov::float16(1), + ov::float16(2), + ov::float16(3), + ov::float16(4), + ov::float16(5), + ov::float16(6), + ov::float16(7), + ov::float16(8), + ov::float16(9), + ov::float16(10), + ov::float16(11)}, {false, true}, - std::vector{half_t(2), - half_t(1), - half_t(0), - half_t(5), - half_t(4), - half_t(3), - half_t(8), - half_t(7), - half_t(6), - half_t(11), - half_t(10), - half_t(9)}}); + std::vector{ov::float16(2), + ov::float16(1), + ov::float16(0), + ov::float16(5), + ov::float16(4), + ov::float16(3), + ov::float16(8), + ov::float16(7), + ov::float16(6), + ov::float16(11), + ov::float16(10), + ov::float16(9)}}); } return params; } template <> -std::vector> generateIndexParams() { - std::vector> params; +std::vector> generateIndexParams() { + std::vector> params; for (const auto fmt : four_d_formats) { // reverse_2d_1_mask params.push_back({tensor(batch(4), feature(3)), fmt, - std::vector{half_t(0), - half_t(1), - half_t(2), - half_t(3), - half_t(4), - half_t(5), - half_t(6), - half_t(7), - half_t(8), - half_t(9), - half_t(10), - half_t(11)}, + std::vector{ov::float16(0), + ov::float16(1), + ov::float16(2), + ov::float16(3), + ov::float16(4), + ov::float16(5), + ov::float16(6), + ov::float16(7), + ov::float16(8), + ov::float16(9), + ov::float16(10), + ov::float16(11)}, {1}, - std::vector{half_t(2), - half_t(1), - half_t(0), - half_t(5), - half_t(4), - half_t(3), - half_t(8), - half_t(7), - half_t(6), - half_t(11), - half_t(10), - half_t(9)}}); + std::vector{ov::float16(2), + ov::float16(1), + ov::float16(0), + ov::float16(5), + ov::float16(4), + ov::float16(3), + ov::float16(8), + ov::float16(7), + ov::float16(6), + ov::float16(11), + ov::float16(10), + ov::float16(9)}}); } return params; } @@ -390,7 +390,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_reverse_uint8_mask, INSTANTIATE_TEST_SUITE_P(smoke_reverse_f16_mask, reverse_gpu_test_f16_mask, - ::testing::ValuesIn(generateMaskParams()), + ::testing::ValuesIn(generateMaskParams()), PrintToStringParamName()); INSTANTIATE_TEST_SUITE_P(smoke_reverse_i32_index, @@ -420,7 +420,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_reverse_uint8_index, INSTANTIATE_TEST_SUITE_P(smoke_reverse_f16_index, reverse_gpu_test_f16_index, - ::testing::ValuesIn(generateIndexParams()), + ::testing::ValuesIn(generateIndexParams()), PrintToStringParamName()); #ifdef RUN_ALL_MODEL_CACHING_TESTS diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/reverse_sequence_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/reverse_sequence_gpu_test.cpp index d3491b814dd864..6874839a5ee1d9 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/reverse_sequence_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/reverse_sequence_gpu_test.cpp @@ -306,7 +306,7 @@ 
TEST(reverese_sequence_gpu_test, fp16_d2_2_ba1_sa0) { int32_t seq_axis = 0; set_values(input, { - FLOAT16(0.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f) + ov::float16(0.0f), ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f) }); set_values(seq_lengths, { @@ -348,11 +348,11 @@ TEST(reverese_sequence_gpu_test, fp16x2_d2_2_ba1_sa0) { int32_t seq_axis = 0; set_values(input, { - FLOAT16(0.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f) + ov::float16(0.0f), ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f) }); set_values(seq_lengths, { - FLOAT16(1.0f), FLOAT16(2.0f) + ov::float16(1.0f), ov::float16(2.0f) }); topology topology; @@ -390,9 +390,9 @@ TEST(reverese_sequence_gpu_test, fp16_d3_3_3_ba0_sa1) { int32_t seq_axis = 1; set_values(input, { - FLOAT16(0.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), FLOAT16(9.0f), - FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), FLOAT16(13.0f), FLOAT16(14.0f), FLOAT16(15.0f), FLOAT16(16.0f), FLOAT16(17.0f), FLOAT16(18.0f), FLOAT16(19.0f), - FLOAT16(20.0f), FLOAT16(21.0f), FLOAT16(22.0f), FLOAT16(23.0f), FLOAT16(24.0f), FLOAT16(25.0f), FLOAT16(26.0f) + ov::float16(0.0f), ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), ov::float16(9.0f), + ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), ov::float16(13.0f), ov::float16(14.0f), ov::float16(15.0f), ov::float16(16.0f), ov::float16(17.0f), ov::float16(18.0f), ov::float16(19.0f), + ov::float16(20.0f), ov::float16(21.0f), ov::float16(22.0f), ov::float16(23.0f), ov::float16(24.0f), ov::float16(25.0f), ov::float16(26.0f) }); set_values(seq_lengths, { @@ -436,9 +436,9 @@ TEST(reverese_sequence_gpu_test, fp16_d3_3_3_ba2_sa0) { int32_t seq_axis = 0; set_values(input, { - FLOAT16(0.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), FLOAT16(9.0f), - FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), FLOAT16(13.0f), FLOAT16(14.0f), FLOAT16(15.0f), FLOAT16(16.0f), FLOAT16(17.0f), FLOAT16(18.0f), FLOAT16(19.0f), - FLOAT16(20.0f), FLOAT16(21.0f), FLOAT16(22.0f), FLOAT16(23.0f), FLOAT16(24.0f), FLOAT16(25.0f), FLOAT16(26.0f) + ov::float16(0.0f), ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), ov::float16(9.0f), + ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), ov::float16(13.0f), ov::float16(14.0f), ov::float16(15.0f), ov::float16(16.0f), ov::float16(17.0f), ov::float16(18.0f), ov::float16(19.0f), + ov::float16(20.0f), ov::float16(21.0f), ov::float16(22.0f), ov::float16(23.0f), ov::float16(24.0f), ov::float16(25.0f), ov::float16(26.0f) }); set_values(seq_lengths, { @@ -482,9 +482,9 @@ TEST(reverese_sequence_gpu_test, fp16_d2_2_3_2ba0_sa3) { int32_t seq_axis = 3; set_values(input, { - FLOAT16(0.0f), FLOAT16( 1.0f), FLOAT16( 2.0f), FLOAT16( 3.0f), FLOAT16( 4.0f), FLOAT16( 5.0f), FLOAT16( 6.0f), FLOAT16( 7.0f), FLOAT16( 8.0f), FLOAT16( 9.0f), - FLOAT16(10.0f), FLOAT16( 11.0f), FLOAT16( 12.0f), FLOAT16( 13.0f), FLOAT16( 14.0f), FLOAT16( 15.0f), FLOAT16( 16.0f), FLOAT16( 17.0f), FLOAT16( 18.0f), FLOAT16( 19.0f), - FLOAT16(20.0f), FLOAT16( 21.0f), FLOAT16( 22.0f), FLOAT16( 23.0f) + ov::float16(0.0f), ov::float16( 1.0f), ov::float16( 2.0f), ov::float16( 3.0f), ov::float16( 4.0f), ov::float16( 5.0f), ov::float16( 6.0f), ov::float16( 7.0f), 
ov::float16( 8.0f), ov::float16( 9.0f), + ov::float16(10.0f), ov::float16( 11.0f), ov::float16( 12.0f), ov::float16( 13.0f), ov::float16( 14.0f), ov::float16( 15.0f), ov::float16( 16.0f), ov::float16( 17.0f), ov::float16( 18.0f), ov::float16( 19.0f), + ov::float16(20.0f), ov::float16( 21.0f), ov::float16( 22.0f), ov::float16( 23.0f) }); set_values(seq_lengths, { @@ -529,9 +529,9 @@ TEST(reverese_sequence_gpu_test, fp16_d2_2_3_2ba0_sa2) { int32_t seq_axis = 2; set_values(input, { - FLOAT16(0.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), FLOAT16(9.0f), - FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), FLOAT16(13.0f), FLOAT16(14.0f), FLOAT16(15.0f), FLOAT16(16.0f), FLOAT16(17.0f), FLOAT16(18.0f), FLOAT16(19.0f), - FLOAT16(20.0f), FLOAT16(21.0f), FLOAT16(22.0f), FLOAT16(23.0f) + ov::float16(0.0f), ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), ov::float16(9.0f), + ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), ov::float16(13.0f), ov::float16(14.0f), ov::float16(15.0f), ov::float16(16.0f), ov::float16(17.0f), ov::float16(18.0f), ov::float16(19.0f), + ov::float16(20.0f), ov::float16(21.0f), ov::float16(22.0f), ov::float16(23.0f) }); set_values(seq_lengths, { @@ -576,9 +576,9 @@ TEST(reverese_sequence_gpu_test, fp16_d2_2_3_2ba2_sa0) { int32_t seq_axis = 0; set_values(input, { - FLOAT16(0.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), FLOAT16(9.0f), - FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), FLOAT16(13.0f), FLOAT16(14.0f), FLOAT16(15.0f), FLOAT16(16.0f), FLOAT16(17.0f), FLOAT16(18.0f), FLOAT16(19.0f), - FLOAT16(20.0f), FLOAT16(21.0f), FLOAT16(22.0f), FLOAT16(23.0f) + ov::float16(0.0f), ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), ov::float16(9.0f), + ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), ov::float16(13.0f), ov::float16(14.0f), ov::float16(15.0f), ov::float16(16.0f), ov::float16(17.0f), ov::float16(18.0f), ov::float16(19.0f), + ov::float16(20.0f), ov::float16(21.0f), ov::float16(22.0f), ov::float16(23.0f) }); set_values(seq_lengths, { diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/roi_align_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/roi_align_gpu_test.cpp index 353607db567f00..eef424d8081744 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/roi_align_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/roi_align_gpu_test.cpp @@ -22,8 +22,8 @@ template struct roi_align_test : public testing::Test { using TD = typename Types::DataType; using TI = typename Types::IndexType; - const data_types device_data_type = type_to_data_type::value; - const data_types device_ind_type = type_to_data_type::value; + const data_types device_data_type = ov::element::from(); + const data_types device_ind_type = ov::element::from(); const cldnn::format::type blocked_format = Types::format; const cldnn::format::type plain_format = format::bfyx; @@ -113,26 +113,26 @@ struct roi_align_test : public testing::Test { // it's a bit overloaded with the cartesian product of types and formats, but that's the lesser evil // since we have specific type for expected values that are tied to specific input modes // so that Combine approach could avoid manual combinations but it would be 
much more complicated -using roi_align_test_types = testing::Types, - TypesWithFormat, - TypesWithFormat, - TypesWithFormat, - TypesWithFormat, - TypesWithFormat, - - TypesWithFormat, - TypesWithFormat, - TypesWithFormat, - TypesWithFormat, - TypesWithFormat, - TypesWithFormat, - - TypesWithFormat, - TypesWithFormat, - TypesWithFormat, - TypesWithFormat, - TypesWithFormat, - TypesWithFormat, +using roi_align_test_types = testing::Types, + TypesWithFormat, + TypesWithFormat, + TypesWithFormat, + TypesWithFormat, + TypesWithFormat, + + TypesWithFormat, + TypesWithFormat, + TypesWithFormat, + TypesWithFormat, + TypesWithFormat, + TypesWithFormat, + + TypesWithFormat, + TypesWithFormat, + TypesWithFormat, + TypesWithFormat, + TypesWithFormat, + TypesWithFormat, TypesWithFormat, TypesWithFormat, diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/roi_pooling_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/roi_pooling_gpu_test.cpp index 1d9e6098e2c67d..d7b133bf9d2da8 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/roi_pooling_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/roi_pooling_gpu_test.cpp @@ -126,7 +126,7 @@ struct roi_pooling_gpu_test : public testing::TestWithParam>::GetParam(); auto& engine = get_test_engine(); - const auto data_type = type_to_data_type::value; + const auto data_type = ov::element::from(); const auto plane_format = format::bfyx; std::vector> inputs; @@ -165,7 +165,7 @@ struct roi_pooling_gpu_test : public testing::TestWithParamget_layout())); - topology.add(reorder("reordered_" + input.first, input_info(input.first), fmt, type_to_data_type::value)); + topology.add(reorder("reordered_" + input.first, input_info(input.first), fmt, ov::element::from())); } topology.add(roi_pooling("roi_pooling", @@ -183,7 +183,7 @@ struct roi_pooling_gpu_test : public testing::TestWithParam::value)); + topology.add(reorder("reordered_roi_pooling", input_info("roi_pooling"), plane_format, ov::element::from())); cldnn::network::ptr network = get_network(engine, topology, get_test_default_config(engine), get_test_stream_ptr(), is_caching_test); @@ -225,7 +225,7 @@ struct roi_pooling_gpu_test : public testing::TestWithParamget_layout())); - topology.add(reorder("reordered_input", input_info("input"), input_format, type_to_data_type::value)); + topology.add(reorder("reordered_input", input_info("input"), input_format, ov::element::from())); topology.add(roll("roll", input_info("reordered_input"), tensor(input_format, p.shift))); - topology.add(reorder("reordered_roll", input_info("roll"), plane_format, type_to_data_type::value)); + topology.add(reorder("reordered_roll", input_info("roll"), plane_format, ov::element::from())); cldnn::network::ptr network = get_network(engine, topology, get_test_default_config(engine), get_test_stream_ptr(), is_caching_test); network->set_input_data("input", input); @@ -71,7 +71,7 @@ struct roll_test : testing::TestWithParam> { auto& p = std::get<0>(info.param); std::ostringstream result; result << "InputShape=" << vec2str(p.input_shape) << "_"; - result << "Precision=" << data_type_traits::name(type_to_data_type::value) << "_"; + result << "Precision=" << ov::element::Type(ov::element::from()) << "_"; result << "Shift=" << vec2str(p.shift) << "_"; result << "Format=" << std::get<1>(info.param); return result.str(); @@ -250,9 +250,10 @@ INSTANTIATE_ROLL_TEST_SUITE(uint8_t, getRollParams6D, formats6d) INSTANTIATE_ROLL_TEST_SUITE(int32_t, getRollParams6D, formats6d) INSTANTIATE_ROLL_TEST_SUITE(int64_t, getRollParams6D, 
formats6d) -INSTANTIATE_ROLL_TEST_SUITE(FLOAT16, getRollFloatingPointParams, formats4d) +using ov::float16; +INSTANTIATE_ROLL_TEST_SUITE(float16, getRollFloatingPointParams, formats4d) INSTANTIATE_ROLL_TEST_SUITE(float, getRollFloatingPointParams, formats4d) -INSTANTIATE_ROLL_TEST_SUITE(FLOAT16, getRollFloatingPointAdditionalLogic, {format::bfyx}) +INSTANTIATE_ROLL_TEST_SUITE(float16, getRollFloatingPointAdditionalLogic, {format::bfyx}) INSTANTIATE_ROLL_TEST_SUITE(float, getRollFloatingPointAdditionalLogic, {format::bfyx}) #undef INSTANTIATE_ROLL_TEST_SUITE @@ -279,9 +280,9 @@ INSTANTIATE_ROLL_TEST_SUITE_CACHED(int8_t, getRollParams6D) INSTANTIATE_ROLL_TEST_SUITE_CACHED(uint8_t, getRollParams6D) INSTANTIATE_ROLL_TEST_SUITE_CACHED(int32_t, getRollParams6D) INSTANTIATE_ROLL_TEST_SUITE_CACHED(int64_t, getRollParams6D) -INSTANTIATE_ROLL_TEST_SUITE_CACHED(FLOAT16, getRollFloatingPointParams) +INSTANTIATE_ROLL_TEST_SUITE_CACHED(float16, getRollFloatingPointParams) INSTANTIATE_ROLL_TEST_SUITE_CACHED(float, getRollFloatingPointParams) -INSTANTIATE_ROLL_TEST_SUITE_CACHED(FLOAT16, getRollFloatingPointAdditionalLogic) +INSTANTIATE_ROLL_TEST_SUITE_CACHED(float16, getRollFloatingPointAdditionalLogic) #endif INSTANTIATE_ROLL_TEST_SUITE_CACHED(float, getRollFloatingPointAdditionalLogic) diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/scatter_elements_update_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/scatter_elements_update_gpu_test.cpp index 961d10d5190cb0..a66202b116652c 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/scatter_elements_update_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/scatter_elements_update_gpu_test.cpp @@ -49,18 +49,18 @@ void test_d2411_axisF(bool is_caching_test) { auto axis = 1; set_values(input1, { - FLOAT16(3.0f), FLOAT16(6.0f), FLOAT16(5.0f), FLOAT16(4.0f), - FLOAT16(1.0f), FLOAT16(7.0f), FLOAT16(2.0f), FLOAT16(9.0f) + ov::float16(3.0f), ov::float16(6.0f), ov::float16(5.0f), ov::float16(4.0f), + ov::float16(1.0f), ov::float16(7.0f), ov::float16(2.0f), ov::float16(9.0f) }); set_values(input2, { - FLOAT16(0.0f), FLOAT16(1.0f), - FLOAT16(2.0f), FLOAT16(3.0f) + ov::float16(0.0f), ov::float16(1.0f), + ov::float16(2.0f), ov::float16(3.0f) }); set_values(input3, { - FLOAT16(10.0f), FLOAT16(11.0f), - FLOAT16(12.0f), FLOAT16(13.0f) + ov::float16(10.0f), ov::float16(11.0f), + ov::float16(12.0f), ov::float16(13.0f) }); topology topology; @@ -228,7 +228,7 @@ float getError() { } template<> -float getError() { +float getError() { return 0.2; } @@ -259,7 +259,7 @@ struct scatter_elements_update_gpu_formats_test : public ::testing::TestWithParam > { public: void test(bool is_caching_test) { - const auto data_type = type_to_data_type::value; + const auto data_type = ov::element::from(); ScatterElementsUpdateParams params; format::type plain_format; format::type target_data_format; @@ -316,7 +316,7 @@ struct scatter_elements_update_gpu_formats_test }; using scatter_elements_update_gpu_formats_test_f32 = scatter_elements_update_gpu_formats_test; -using scatter_elements_update_gpu_formats_test_f16 = scatter_elements_update_gpu_formats_test; +using scatter_elements_update_gpu_formats_test_f16 = scatter_elements_update_gpu_formats_test; using scatter_elements_update_gpu_formats_test_i32 = scatter_elements_update_gpu_formats_test; TEST_P(scatter_elements_update_gpu_formats_test_f32, basic) { @@ -346,7 +346,7 @@ INSTANTIATE_TEST_SUITE_P(scatter_elements_update_gpu_formats_test_f32_2d, 
INSTANTIATE_TEST_SUITE_P(scatter_elements_update_gpu_formats_test_f16_2d, scatter_elements_update_gpu_formats_test_f16, ::testing::Combine( - ::testing::ValuesIn(generateScatterElementsUpdateParams2D()), + ::testing::ValuesIn(generateScatterElementsUpdateParams2D()), ::testing::Values(format::bfyx), ::testing::ValuesIn(formats2D), ::testing::Values(format::any), diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/scatter_nd_update_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/scatter_nd_update_gpu_test.cpp index 115aa700688888..d905755e789a71 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/scatter_nd_update_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/scatter_nd_update_gpu_test.cpp @@ -250,7 +250,7 @@ TEST_P(scatter_nd_update_random_test, random) else if (param.input_type == data_types::i64) this->execute(param, false); else if (param.input_type == data_types::f16) - this->execute_fp16(param, false); + this->execute_fp16(param, false); else if (param.input_type == data_types::f32) this->execute(param, false); else @@ -498,65 +498,65 @@ TEST(scatter_nd_update_gpu_fp16_test15, data5_indice3_update5) { set_values(input1, { // 0 - FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), - FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), - FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), + ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), + ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), + ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), - FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), - FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), - FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), + ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), + ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), + ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), // 1 - FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), - FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), - FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), + ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), + ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), + 
ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), - FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), - FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), - FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), + ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), + ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), + ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), }); set_values(input2, { - FLOAT16(1.0f), - FLOAT16(0.0f), + ov::float16(1.0f), + ov::float16(0.0f), }); set_values(input3, { // 0 - FLOAT16(91.0f), FLOAT16(2.0f), FLOAT16(83.0f), FLOAT16(4.0f), FLOAT16(71.0f), FLOAT16(2.0f), FLOAT16(63.0f), FLOAT16(4.0f), - FLOAT16(95.0f), FLOAT16(6.0f), FLOAT16(87.0f), FLOAT16(8.0f), FLOAT16(75.0f), FLOAT16(6.0f), FLOAT16(67.0f), FLOAT16(8.0f), - FLOAT16(99.0f), FLOAT16(10.0f), FLOAT16(811.0f), FLOAT16(12.0f), FLOAT16(79.0f), FLOAT16(10.0f), FLOAT16(611.0f), FLOAT16(12.0f), + ov::float16(91.0f), ov::float16(2.0f), ov::float16(83.0f), ov::float16(4.0f), ov::float16(71.0f), ov::float16(2.0f), ov::float16(63.0f), ov::float16(4.0f), + ov::float16(95.0f), ov::float16(6.0f), ov::float16(87.0f), ov::float16(8.0f), ov::float16(75.0f), ov::float16(6.0f), ov::float16(67.0f), ov::float16(8.0f), + ov::float16(99.0f), ov::float16(10.0f), ov::float16(811.0f), ov::float16(12.0f), ov::float16(79.0f), ov::float16(10.0f), ov::float16(611.0f), ov::float16(12.0f), - FLOAT16(91.0f), FLOAT16(2.0f), FLOAT16(83.0f), FLOAT16(4.0f), FLOAT16(71.0f), FLOAT16(2.0f), FLOAT16(63.0f), FLOAT16(4.0f), - FLOAT16(95.0f), FLOAT16(6.0f), FLOAT16(87.0f), FLOAT16(8.0f), FLOAT16(75.0f), FLOAT16(6.0f), FLOAT16(67.0f), FLOAT16(8.0f), - FLOAT16(99.0f), FLOAT16(10.0f), FLOAT16(811.0f), FLOAT16(12.0f), FLOAT16(79.0f), FLOAT16(10.0f), FLOAT16(611.0f), FLOAT16(12.0f), + ov::float16(91.0f), ov::float16(2.0f), ov::float16(83.0f), ov::float16(4.0f), ov::float16(71.0f), ov::float16(2.0f), ov::float16(63.0f), ov::float16(4.0f), + ov::float16(95.0f), ov::float16(6.0f), ov::float16(87.0f), ov::float16(8.0f), ov::float16(75.0f), ov::float16(6.0f), ov::float16(67.0f), ov::float16(8.0f), + ov::float16(99.0f), ov::float16(10.0f), ov::float16(811.0f), ov::float16(12.0f), ov::float16(79.0f), ov::float16(10.0f), ov::float16(611.0f), ov::float16(12.0f), // 1 - FLOAT16(91.0f), FLOAT16(2.0f), FLOAT16(83.0f), FLOAT16(4.0f), FLOAT16(71.0f), FLOAT16(2.0f), FLOAT16(63.0f), FLOAT16(4.0f), - FLOAT16(95.0f), FLOAT16(6.0f), FLOAT16(87.0f), FLOAT16(8.0f), FLOAT16(75.0f), FLOAT16(6.0f), FLOAT16(67.0f), FLOAT16(8.0f), - FLOAT16(99.0f), FLOAT16(10.0f), FLOAT16(811.0f), FLOAT16(12.0f), FLOAT16(79.0f), FLOAT16(10.0f), FLOAT16(611.0f), FLOAT16(12.0f), + ov::float16(91.0f), ov::float16(2.0f), ov::float16(83.0f), ov::float16(4.0f), ov::float16(71.0f), ov::float16(2.0f), ov::float16(63.0f), ov::float16(4.0f), + ov::float16(95.0f), ov::float16(6.0f), ov::float16(87.0f), ov::float16(8.0f), ov::float16(75.0f), ov::float16(6.0f), ov::float16(67.0f), 
ov::float16(8.0f), + ov::float16(99.0f), ov::float16(10.0f), ov::float16(811.0f), ov::float16(12.0f), ov::float16(79.0f), ov::float16(10.0f), ov::float16(611.0f), ov::float16(12.0f), - FLOAT16(91.0f), FLOAT16(2.0f), FLOAT16(83.0f), FLOAT16(4.0f), FLOAT16(71.0f), FLOAT16(2.0f), FLOAT16(63.0f), FLOAT16(4.0f), - FLOAT16(95.0f), FLOAT16(6.0f), FLOAT16(87.0f), FLOAT16(8.0f), FLOAT16(75.0f), FLOAT16(6.0f), FLOAT16(67.0f), FLOAT16(8.0f), - FLOAT16(99.0f), FLOAT16(10.0f), FLOAT16(811.0f), FLOAT16(12.0f), FLOAT16(79.0f), FLOAT16(10.0f), FLOAT16(611.0f), FLOAT16(12.0f), + ov::float16(91.0f), ov::float16(2.0f), ov::float16(83.0f), ov::float16(4.0f), ov::float16(71.0f), ov::float16(2.0f), ov::float16(63.0f), ov::float16(4.0f), + ov::float16(95.0f), ov::float16(6.0f), ov::float16(87.0f), ov::float16(8.0f), ov::float16(75.0f), ov::float16(6.0f), ov::float16(67.0f), ov::float16(8.0f), + ov::float16(99.0f), ov::float16(10.0f), ov::float16(811.0f), ov::float16(12.0f), ov::float16(79.0f), ov::float16(10.0f), ov::float16(611.0f), ov::float16(12.0f), }); std::vector expected_results = { // 0 - FLOAT16(91.0f), FLOAT16(2.0f), FLOAT16(83.0f), FLOAT16(4.0f), FLOAT16(71.0f), FLOAT16(2.0f), FLOAT16(63.0f), FLOAT16(4.0f), - FLOAT16(95.0f), FLOAT16(6.0f), FLOAT16(87.0f), FLOAT16(8.0f), FLOAT16(75.0f), FLOAT16(6.0f), FLOAT16(67.0f), FLOAT16(8.0f), - FLOAT16(99.0f), FLOAT16(10.0f), FLOAT16(811.0f), FLOAT16(12.0f), FLOAT16(79.0f), FLOAT16(10.0f), FLOAT16(611.0f), FLOAT16(12.0f), + ov::float16(91.0f), ov::float16(2.0f), ov::float16(83.0f), ov::float16(4.0f), ov::float16(71.0f), ov::float16(2.0f), ov::float16(63.0f), ov::float16(4.0f), + ov::float16(95.0f), ov::float16(6.0f), ov::float16(87.0f), ov::float16(8.0f), ov::float16(75.0f), ov::float16(6.0f), ov::float16(67.0f), ov::float16(8.0f), + ov::float16(99.0f), ov::float16(10.0f), ov::float16(811.0f), ov::float16(12.0f), ov::float16(79.0f), ov::float16(10.0f), ov::float16(611.0f), ov::float16(12.0f), - FLOAT16(91.0f), FLOAT16(2.0f), FLOAT16(83.0f), FLOAT16(4.0f), FLOAT16(71.0f), FLOAT16(2.0f), FLOAT16(63.0f), FLOAT16(4.0f), - FLOAT16(95.0f), FLOAT16(6.0f), FLOAT16(87.0f), FLOAT16(8.0f), FLOAT16(75.0f), FLOAT16(6.0f), FLOAT16(67.0f), FLOAT16(8.0f), - FLOAT16(99.0f), FLOAT16(10.0f), FLOAT16(811.0f), FLOAT16(12.0f), FLOAT16(79.0f), FLOAT16(10.0f), FLOAT16(611.0f), FLOAT16(12.0f), + ov::float16(91.0f), ov::float16(2.0f), ov::float16(83.0f), ov::float16(4.0f), ov::float16(71.0f), ov::float16(2.0f), ov::float16(63.0f), ov::float16(4.0f), + ov::float16(95.0f), ov::float16(6.0f), ov::float16(87.0f), ov::float16(8.0f), ov::float16(75.0f), ov::float16(6.0f), ov::float16(67.0f), ov::float16(8.0f), + ov::float16(99.0f), ov::float16(10.0f), ov::float16(811.0f), ov::float16(12.0f), ov::float16(79.0f), ov::float16(10.0f), ov::float16(611.0f), ov::float16(12.0f), // 1 - FLOAT16(91.0f), FLOAT16(2.0f), FLOAT16(83.0f), FLOAT16(4.0f), FLOAT16(71.0f), FLOAT16(2.0f), FLOAT16(63.0f), FLOAT16(4.0f), - FLOAT16(95.0f), FLOAT16(6.0f), FLOAT16(87.0f), FLOAT16(8.0f), FLOAT16(75.0f), FLOAT16(6.0f), FLOAT16(67.0f), FLOAT16(8.0f), - FLOAT16(99.0f), FLOAT16(10.0f), FLOAT16(811.0f), FLOAT16(12.0f), FLOAT16(79.0f), FLOAT16(10.0f), FLOAT16(611.0f), FLOAT16(12.0f), + ov::float16(91.0f), ov::float16(2.0f), ov::float16(83.0f), ov::float16(4.0f), ov::float16(71.0f), ov::float16(2.0f), ov::float16(63.0f), ov::float16(4.0f), + ov::float16(95.0f), ov::float16(6.0f), ov::float16(87.0f), ov::float16(8.0f), ov::float16(75.0f), ov::float16(6.0f), ov::float16(67.0f), ov::float16(8.0f), + ov::float16(99.0f), 
ov::float16(10.0f), ov::float16(811.0f), ov::float16(12.0f), ov::float16(79.0f), ov::float16(10.0f), ov::float16(611.0f), ov::float16(12.0f), - FLOAT16(91.0f), FLOAT16(2.0f), FLOAT16(83.0f), FLOAT16(4.0f), FLOAT16(71.0f), FLOAT16(2.0f), FLOAT16(63.0f), FLOAT16(4.0f), - FLOAT16(95.0f), FLOAT16(6.0f), FLOAT16(87.0f), FLOAT16(8.0f), FLOAT16(75.0f), FLOAT16(6.0f), FLOAT16(67.0f), FLOAT16(8.0f), - FLOAT16(99.0f), FLOAT16(10.0f), FLOAT16(811.0f), FLOAT16(12.0f), FLOAT16(79.0f), FLOAT16(10.0f), FLOAT16(611.0f), FLOAT16(12.0f), + ov::float16(91.0f), ov::float16(2.0f), ov::float16(83.0f), ov::float16(4.0f), ov::float16(71.0f), ov::float16(2.0f), ov::float16(63.0f), ov::float16(4.0f), + ov::float16(95.0f), ov::float16(6.0f), ov::float16(87.0f), ov::float16(8.0f), ov::float16(75.0f), ov::float16(6.0f), ov::float16(67.0f), ov::float16(8.0f), + ov::float16(99.0f), ov::float16(10.0f), ov::float16(811.0f), ov::float16(12.0f), ov::float16(79.0f), ov::float16(10.0f), ov::float16(611.0f), ov::float16(12.0f), }; topology topology; @@ -593,54 +593,54 @@ TEST(scatter_nd_update_gpu_fp16_test14, data5_indice2_update3) { set_values(input1, { // 0 - FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), - FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), - FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), + ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), + ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), + ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), - FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), - FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), - FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), + ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), + ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), + ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), // 1 - FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), - FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), - FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), + ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), + ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), + ov::float16(9.0f), ov::float16(10.0f), 
ov::float16(11.0f), ov::float16(12.0f), ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), - FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), - FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), - FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), + ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), + ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), + ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), }); set_values(input2, { - FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(2.0f), - FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(0.0f), - FLOAT16(0.0f), FLOAT16(1.0f), FLOAT16(1.0f), + ov::float16(1.0f), ov::float16(1.0f), ov::float16(2.0f), + ov::float16(1.0f), ov::float16(1.0f), ov::float16(0.0f), + ov::float16(0.0f), ov::float16(1.0f), ov::float16(1.0f), }); set_values(input3, { - FLOAT16(51.0f), FLOAT16(52.0f), FLOAT16(53.0f), FLOAT16(54.0f), FLOAT16(55.0f), FLOAT16(56.0f), FLOAT16(57.0f), FLOAT16(58.0f), - FLOAT16(61.0f), FLOAT16(62.0f), FLOAT16(63.0f), FLOAT16(64.0f), FLOAT16(65.0f), FLOAT16(66.0f), FLOAT16(67.0f), FLOAT16(68.0f), - FLOAT16(71.0f), FLOAT16(72.0f), FLOAT16(73.0f), FLOAT16(74.0f), FLOAT16(75.0f), FLOAT16(76.0f), FLOAT16(77.0f), FLOAT16(78.0f), + ov::float16(51.0f), ov::float16(52.0f), ov::float16(53.0f), ov::float16(54.0f), ov::float16(55.0f), ov::float16(56.0f), ov::float16(57.0f), ov::float16(58.0f), + ov::float16(61.0f), ov::float16(62.0f), ov::float16(63.0f), ov::float16(64.0f), ov::float16(65.0f), ov::float16(66.0f), ov::float16(67.0f), ov::float16(68.0f), + ov::float16(71.0f), ov::float16(72.0f), ov::float16(73.0f), ov::float16(74.0f), ov::float16(75.0f), ov::float16(76.0f), ov::float16(77.0f), ov::float16(78.0f), }); std::vector expected_results = { // 0 - FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), - FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), - FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), + ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), + ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), + ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), - FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), - FLOAT16(71.0f), FLOAT16(72.0f), FLOAT16(73.0f), FLOAT16(74.0f), FLOAT16(75.0f), FLOAT16(76.0f), FLOAT16(77.0f), FLOAT16(78.0f), - FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), + ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), 
ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), + ov::float16(71.0f), ov::float16(72.0f), ov::float16(73.0f), ov::float16(74.0f), ov::float16(75.0f), ov::float16(76.0f), ov::float16(77.0f), ov::float16(78.0f), + ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), // 1 - FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), - FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), - FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), + ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), + ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), + ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), - FLOAT16(61.0f), FLOAT16(62.0f), FLOAT16(63.0f), FLOAT16(64.0f), FLOAT16(65.0f), FLOAT16(66.0f), FLOAT16(67.0f), FLOAT16(68.0f), - FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), - FLOAT16(51.0f), FLOAT16(52.0f), FLOAT16(53.0f), FLOAT16(54.0f), FLOAT16(55.0f), FLOAT16(56.0f), FLOAT16(57.0f), FLOAT16(58.0f), + ov::float16(61.0f), ov::float16(62.0f), ov::float16(63.0f), ov::float16(64.0f), ov::float16(65.0f), ov::float16(66.0f), ov::float16(67.0f), ov::float16(68.0f), + ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), + ov::float16(51.0f), ov::float16(52.0f), ov::float16(53.0f), ov::float16(54.0f), ov::float16(55.0f), ov::float16(56.0f), ov::float16(57.0f), ov::float16(58.0f), }; topology topology; @@ -676,35 +676,35 @@ TEST(scatter_nd_update_gpu_fp16_test13, data4_indice2_update2) { auto input3 = engine.allocate_memory({ data_types::f16, format::bfyx, { 3, 4, 1, 1 } }); // updates set_values(input1, { - FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), - FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), - FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), + ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), + ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), + ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), - FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), - FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), - FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), + 
ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), + ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), + ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), }); set_values(input2, { - FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(0.0f), - FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(1.0f), - FLOAT16(0.0f), FLOAT16(2.0f), FLOAT16(1.0f), + ov::float16(1.0f), ov::float16(1.0f), ov::float16(0.0f), + ov::float16(1.0f), ov::float16(2.0f), ov::float16(1.0f), + ov::float16(0.0f), ov::float16(2.0f), ov::float16(1.0f), }); set_values(input3, { - FLOAT16(51.0f), FLOAT16(52.0f), FLOAT16(53.0f), FLOAT16(54.0f), - FLOAT16(61.0f), FLOAT16(62.0f), FLOAT16(63.0f), FLOAT16(64.0f), - FLOAT16(71.0f), FLOAT16(72.0f), FLOAT16(73.0f), FLOAT16(74.0f), + ov::float16(51.0f), ov::float16(52.0f), ov::float16(53.0f), ov::float16(54.0f), + ov::float16(61.0f), ov::float16(62.0f), ov::float16(63.0f), ov::float16(64.0f), + ov::float16(71.0f), ov::float16(72.0f), ov::float16(73.0f), ov::float16(74.0f), }); std::vector expected_results = { - FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), - FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), - FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), FLOAT16(71.0f), FLOAT16(72.0f), FLOAT16(73.0f), FLOAT16(74.0f), + ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), + ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), + ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), ov::float16(71.0f), ov::float16(72.0f), ov::float16(73.0f), ov::float16(74.0f), - FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), - FLOAT16(51.0f), FLOAT16(52.0f), FLOAT16(53.0f), FLOAT16(54.0f), FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), - FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), FLOAT16(61.0f), FLOAT16(62.0f), FLOAT16(63.0f), FLOAT16(64.0f), + ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), + ov::float16(51.0f), ov::float16(52.0f), ov::float16(53.0f), ov::float16(54.0f), ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), + ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), ov::float16(61.0f), ov::float16(62.0f), ov::float16(63.0f), ov::float16(64.0f), }; topology topology; @@ -740,42 +740,42 @@ TEST(scatter_nd_update_gpu_fp16_test12, data3_indice3_update1) { auto input3 = engine.allocate_memory({ data_types::f16, format::bfyx, { 4, 1, 1, 1 } }); // updates set_values(input1, { - FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), - FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), - FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), + ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), + ov::float16(5.0f), ov::float16(6.0f), 
ov::float16(7.0f), ov::float16(8.0f), + ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), - FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), - FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), - FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), + ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), + ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), + ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), - FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), - FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), - FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), + ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), + ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), + ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), }); set_values(input2, { - FLOAT16(2.0f), FLOAT16(0.0f), FLOAT16(0.0f), - FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), - FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), - FLOAT16(0.0f), FLOAT16(1.0f), FLOAT16(0.0f), + ov::float16(2.0f), ov::float16(0.0f), ov::float16(0.0f), + ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), + ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), + ov::float16(0.0f), ov::float16(1.0f), ov::float16(0.0f), }); set_values(input3, { - FLOAT16(51.0f), FLOAT16(52.0f), FLOAT16(53.0f), FLOAT16(54.0f), + ov::float16(51.0f), ov::float16(52.0f), ov::float16(53.0f), ov::float16(54.0f), }); std::vector expected_results = { - FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), - FLOAT16(54.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), - FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), + ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), + ov::float16(54.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), + ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), - FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), - FLOAT16(5.0f), FLOAT16(53.0f), FLOAT16(7.0f), FLOAT16(8.0f), - FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(52.0f), + ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), + ov::float16(5.0f), ov::float16(53.0f), ov::float16(7.0f), ov::float16(8.0f), + ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(52.0f), - FLOAT16(51.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), - FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), - FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), + ov::float16(51.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), + ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), + ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), }; topology topology; @@ -812,100 +812,100 @@ TEST(scatter_nd_update_gpu_fp16_test11, data6_indice1_update6) { set_values(input1, { // 0, 0, 0 - FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), - FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), - FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), + ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), ov::float16(1.0f), ov::float16(2.0f), 
ov::float16(3.0f), ov::float16(4.0f), + ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), + ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), // 0, 0, 1 - FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), - FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), - FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), + ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), + ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), + ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), // 0, 1, 0 - FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), - FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), - FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), + ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), + ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), + ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), - FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), - FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), - FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), + ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), + ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), + ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), // 1, 0 - FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), - FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), - FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), + ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), + ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), ov::float16(5.0f), ov::float16(6.0f), 
ov::float16(7.0f), ov::float16(8.0f), + ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), - FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), - FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), - FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), + ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), + ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), + ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), // 1, 1 - FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), - FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), - FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), + ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), + ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), + ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), - FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), - FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), - FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), + ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), + ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), + ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), }); set_values(input2, { - FLOAT16(1.0f), + ov::float16(1.0f), }); set_values(input3, { // 0 - FLOAT16(51.0f), FLOAT16(52.0f), FLOAT16(53.0f), FLOAT16(54.0f), FLOAT16(55.0f), FLOAT16(56.0f), FLOAT16(57.0f), FLOAT16(58.0f), - FLOAT16(59.0f), FLOAT16(60.0f), FLOAT16(61.0f), FLOAT16(62.0f), FLOAT16(63.0f), FLOAT16(64.0f), FLOAT16(65.0f), FLOAT16(66.0f), - FLOAT16(67.0f), FLOAT16(68.0f), FLOAT16(69.0f), FLOAT16(70.0f), FLOAT16(71.0f), FLOAT16(72.0f), FLOAT16(73.0f), FLOAT16(74.0f), + ov::float16(51.0f), ov::float16(52.0f), ov::float16(53.0f), ov::float16(54.0f), ov::float16(55.0f), ov::float16(56.0f), ov::float16(57.0f), ov::float16(58.0f), + ov::float16(59.0f), ov::float16(60.0f), ov::float16(61.0f), ov::float16(62.0f), ov::float16(63.0f), ov::float16(64.0f), ov::float16(65.0f), ov::float16(66.0f), + ov::float16(67.0f), 
ov::float16(68.0f), ov::float16(69.0f), ov::float16(70.0f), ov::float16(71.0f), ov::float16(72.0f), ov::float16(73.0f), ov::float16(74.0f), - FLOAT16(50.0f), FLOAT16(51.0f), FLOAT16(53.0f), FLOAT16(54.0f), FLOAT16(55.0f), FLOAT16(56.0f), FLOAT16(57.0f), FLOAT16(58.0f), - FLOAT16(59.0f), FLOAT16(60.0f), FLOAT16(61.0f), FLOAT16(62.0f), FLOAT16(63.0f), FLOAT16(64.0f), FLOAT16(65.0f), FLOAT16(66.0f), - FLOAT16(67.0f), FLOAT16(68.0f), FLOAT16(69.0f), FLOAT16(70.0f), FLOAT16(71.0f), FLOAT16(72.0f), FLOAT16(73.0f), FLOAT16(74.0f), + ov::float16(50.0f), ov::float16(51.0f), ov::float16(53.0f), ov::float16(54.0f), ov::float16(55.0f), ov::float16(56.0f), ov::float16(57.0f), ov::float16(58.0f), + ov::float16(59.0f), ov::float16(60.0f), ov::float16(61.0f), ov::float16(62.0f), ov::float16(63.0f), ov::float16(64.0f), ov::float16(65.0f), ov::float16(66.0f), + ov::float16(67.0f), ov::float16(68.0f), ov::float16(69.0f), ov::float16(70.0f), ov::float16(71.0f), ov::float16(72.0f), ov::float16(73.0f), ov::float16(74.0f), - FLOAT16(151.0f), FLOAT16(152.0f), FLOAT16(153.0f), FLOAT16(154.0f), FLOAT16(155.0f), FLOAT16(156.0f), FLOAT16(157.0f), FLOAT16(158.0f), - FLOAT16(159.0f), FLOAT16(160.0f), FLOAT16(161.0f), FLOAT16(162.0f), FLOAT16(163.0f), FLOAT16(164.0f), FLOAT16(165.0f), FLOAT16(166.0f), - FLOAT16(167.0f), FLOAT16(168.0f), FLOAT16(169.0f), FLOAT16(170.0f), FLOAT16(171.0f), FLOAT16(172.0f), FLOAT16(173.0f), FLOAT16(174.0f), + ov::float16(151.0f), ov::float16(152.0f), ov::float16(153.0f), ov::float16(154.0f), ov::float16(155.0f), ov::float16(156.0f), ov::float16(157.0f), ov::float16(158.0f), + ov::float16(159.0f), ov::float16(160.0f), ov::float16(161.0f), ov::float16(162.0f), ov::float16(163.0f), ov::float16(164.0f), ov::float16(165.0f), ov::float16(166.0f), + ov::float16(167.0f), ov::float16(168.0f), ov::float16(169.0f), ov::float16(170.0f), ov::float16(171.0f), ov::float16(172.0f), ov::float16(173.0f), ov::float16(174.0f), - FLOAT16(150.0f), FLOAT16(151.0f), FLOAT16(153.0f), FLOAT16(154.0f), FLOAT16(155.0f), FLOAT16(156.0f), FLOAT16(157.0f), FLOAT16(158.0f), - FLOAT16(159.0f), FLOAT16(160.0f), FLOAT16(161.0f), FLOAT16(162.0f), FLOAT16(163.0f), FLOAT16(164.0f), FLOAT16(165.0f), FLOAT16(166.0f), - FLOAT16(167.0f), FLOAT16(168.0f), FLOAT16(169.0f), FLOAT16(170.0f), FLOAT16(171.0f), FLOAT16(172.0f), FLOAT16(173.0f), FLOAT16(174.0f), + ov::float16(150.0f), ov::float16(151.0f), ov::float16(153.0f), ov::float16(154.0f), ov::float16(155.0f), ov::float16(156.0f), ov::float16(157.0f), ov::float16(158.0f), + ov::float16(159.0f), ov::float16(160.0f), ov::float16(161.0f), ov::float16(162.0f), ov::float16(163.0f), ov::float16(164.0f), ov::float16(165.0f), ov::float16(166.0f), + ov::float16(167.0f), ov::float16(168.0f), ov::float16(169.0f), ov::float16(170.0f), ov::float16(171.0f), ov::float16(172.0f), ov::float16(173.0f), ov::float16(174.0f), }); std::vector expected_results = { // 0 - FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), - FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), - FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), + ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), + ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), ov::float16(5.0f), ov::float16(6.0f), 
ov::float16(7.0f), ov::float16(8.0f), + ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), - FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), - FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), - FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), + ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), + ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), + ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), - FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), - FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), - FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), + ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), + ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), + ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), - FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), - FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), - FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), + ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), + ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), + ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), // 1 - FLOAT16(51.0f), FLOAT16(52.0f), FLOAT16(53.0f), FLOAT16(54.0f), FLOAT16(55.0f), FLOAT16(56.0f), FLOAT16(57.0f), FLOAT16(58.0f), - FLOAT16(59.0f), FLOAT16(60.0f), FLOAT16(61.0f), FLOAT16(62.0f), FLOAT16(63.0f), FLOAT16(64.0f), FLOAT16(65.0f), FLOAT16(66.0f), - FLOAT16(67.0f), FLOAT16(68.0f), FLOAT16(69.0f), FLOAT16(70.0f), FLOAT16(71.0f), FLOAT16(72.0f), FLOAT16(73.0f), FLOAT16(74.0f), + ov::float16(51.0f), ov::float16(52.0f), ov::float16(53.0f), ov::float16(54.0f), ov::float16(55.0f), ov::float16(56.0f), ov::float16(57.0f), ov::float16(58.0f), + ov::float16(59.0f), ov::float16(60.0f), ov::float16(61.0f), ov::float16(62.0f), ov::float16(63.0f), ov::float16(64.0f), ov::float16(65.0f), ov::float16(66.0f), + ov::float16(67.0f), ov::float16(68.0f), ov::float16(69.0f), ov::float16(70.0f), ov::float16(71.0f), 
ov::float16(72.0f), ov::float16(73.0f), ov::float16(74.0f), - FLOAT16(50.0f), FLOAT16(51.0f), FLOAT16(53.0f), FLOAT16(54.0f), FLOAT16(55.0f), FLOAT16(56.0f), FLOAT16(57.0f), FLOAT16(58.0f), - FLOAT16(59.0f), FLOAT16(60.0f), FLOAT16(61.0f), FLOAT16(62.0f), FLOAT16(63.0f), FLOAT16(64.0f), FLOAT16(65.0f), FLOAT16(66.0f), - FLOAT16(67.0f), FLOAT16(68.0f), FLOAT16(69.0f), FLOAT16(70.0f), FLOAT16(71.0f), FLOAT16(72.0f), FLOAT16(73.0f), FLOAT16(74.0f), + ov::float16(50.0f), ov::float16(51.0f), ov::float16(53.0f), ov::float16(54.0f), ov::float16(55.0f), ov::float16(56.0f), ov::float16(57.0f), ov::float16(58.0f), + ov::float16(59.0f), ov::float16(60.0f), ov::float16(61.0f), ov::float16(62.0f), ov::float16(63.0f), ov::float16(64.0f), ov::float16(65.0f), ov::float16(66.0f), + ov::float16(67.0f), ov::float16(68.0f), ov::float16(69.0f), ov::float16(70.0f), ov::float16(71.0f), ov::float16(72.0f), ov::float16(73.0f), ov::float16(74.0f), - FLOAT16(151.0f), FLOAT16(152.0f), FLOAT16(153.0f), FLOAT16(154.0f), FLOAT16(155.0f), FLOAT16(156.0f), FLOAT16(157.0f), FLOAT16(158.0f), - FLOAT16(159.0f), FLOAT16(160.0f), FLOAT16(161.0f), FLOAT16(162.0f), FLOAT16(163.0f), FLOAT16(164.0f), FLOAT16(165.0f), FLOAT16(166.0f), - FLOAT16(167.0f), FLOAT16(168.0f), FLOAT16(169.0f), FLOAT16(170.0f), FLOAT16(171.0f), FLOAT16(172.0f), FLOAT16(173.0f), FLOAT16(174.0f), + ov::float16(151.0f), ov::float16(152.0f), ov::float16(153.0f), ov::float16(154.0f), ov::float16(155.0f), ov::float16(156.0f), ov::float16(157.0f), ov::float16(158.0f), + ov::float16(159.0f), ov::float16(160.0f), ov::float16(161.0f), ov::float16(162.0f), ov::float16(163.0f), ov::float16(164.0f), ov::float16(165.0f), ov::float16(166.0f), + ov::float16(167.0f), ov::float16(168.0f), ov::float16(169.0f), ov::float16(170.0f), ov::float16(171.0f), ov::float16(172.0f), ov::float16(173.0f), ov::float16(174.0f), - FLOAT16(150.0f), FLOAT16(151.0f), FLOAT16(153.0f), FLOAT16(154.0f), FLOAT16(155.0f), FLOAT16(156.0f), FLOAT16(157.0f), FLOAT16(158.0f), - FLOAT16(159.0f), FLOAT16(160.0f), FLOAT16(161.0f), FLOAT16(162.0f), FLOAT16(163.0f), FLOAT16(164.0f), FLOAT16(165.0f), FLOAT16(166.0f), - FLOAT16(167.0f), FLOAT16(168.0f), FLOAT16(169.0f), FLOAT16(170.0f), FLOAT16(171.0f), FLOAT16(172.0f), FLOAT16(173.0f), FLOAT16(174.0f), + ov::float16(150.0f), ov::float16(151.0f), ov::float16(153.0f), ov::float16(154.0f), ov::float16(155.0f), ov::float16(156.0f), ov::float16(157.0f), ov::float16(158.0f), + ov::float16(159.0f), ov::float16(160.0f), ov::float16(161.0f), ov::float16(162.0f), ov::float16(163.0f), ov::float16(164.0f), ov::float16(165.0f), ov::float16(166.0f), + ov::float16(167.0f), ov::float16(168.0f), ov::float16(169.0f), ov::float16(170.0f), ov::float16(171.0f), ov::float16(172.0f), ov::float16(173.0f), ov::float16(174.0f), }; topology topology; @@ -942,66 +942,66 @@ TEST(scatter_nd_update_gpu_fp16_test10, data5_indice1_update5) { set_values(input1, { // 0 - FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), - FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), - FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), + ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), + ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), ov::float16(5.0f), 
ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), + ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), - FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), - FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), - FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), + ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), + ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), + ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), // 1 - FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), - FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), - FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), + ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), + ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), + ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), - FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), - FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), - FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), + ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), + ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), + ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), }); set_values(input2, { - FLOAT16(1.0f), FLOAT16(0.0f), + ov::float16(1.0f), ov::float16(0.0f), }); set_values(input3, { // 0 - FLOAT16(51.0f), FLOAT16(52.0f), FLOAT16(53.0f), FLOAT16(54.0f), FLOAT16(55.0f), FLOAT16(56.0f), FLOAT16(57.0f), FLOAT16(58.0f), - FLOAT16(59.0f), FLOAT16(60.0f), FLOAT16(61.0f), FLOAT16(62.0f), FLOAT16(63.0f), FLOAT16(64.0f), FLOAT16(65.0f), FLOAT16(66.0f), - FLOAT16(67.0f), FLOAT16(68.0f), FLOAT16(69.0f), FLOAT16(70.0f), FLOAT16(71.0f), FLOAT16(72.0f), FLOAT16(73.0f), FLOAT16(74.0f), + ov::float16(51.0f), ov::float16(52.0f), ov::float16(53.0f), ov::float16(54.0f), ov::float16(55.0f), ov::float16(56.0f), ov::float16(57.0f), ov::float16(58.0f), + ov::float16(59.0f), ov::float16(60.0f), ov::float16(61.0f), ov::float16(62.0f), ov::float16(63.0f), ov::float16(64.0f), 
ov::float16(65.0f), ov::float16(66.0f), + ov::float16(67.0f), ov::float16(68.0f), ov::float16(69.0f), ov::float16(70.0f), ov::float16(71.0f), ov::float16(72.0f), ov::float16(73.0f), ov::float16(74.0f), - FLOAT16(50.0f), FLOAT16(51.0f), FLOAT16(53.0f), FLOAT16(54.0f), FLOAT16(55.0f), FLOAT16(56.0f), FLOAT16(57.0f), FLOAT16(58.0f), - FLOAT16(59.0f), FLOAT16(60.0f), FLOAT16(61.0f), FLOAT16(62.0f), FLOAT16(63.0f), FLOAT16(64.0f), FLOAT16(65.0f), FLOAT16(66.0f), - FLOAT16(67.0f), FLOAT16(68.0f), FLOAT16(69.0f), FLOAT16(70.0f), FLOAT16(71.0f), FLOAT16(72.0f), FLOAT16(73.0f), FLOAT16(74.0f), + ov::float16(50.0f), ov::float16(51.0f), ov::float16(53.0f), ov::float16(54.0f), ov::float16(55.0f), ov::float16(56.0f), ov::float16(57.0f), ov::float16(58.0f), + ov::float16(59.0f), ov::float16(60.0f), ov::float16(61.0f), ov::float16(62.0f), ov::float16(63.0f), ov::float16(64.0f), ov::float16(65.0f), ov::float16(66.0f), + ov::float16(67.0f), ov::float16(68.0f), ov::float16(69.0f), ov::float16(70.0f), ov::float16(71.0f), ov::float16(72.0f), ov::float16(73.0f), ov::float16(74.0f), // 1 - FLOAT16(151.0f), FLOAT16(152.0f), FLOAT16(153.0f), FLOAT16(154.0f), FLOAT16(155.0f), FLOAT16(156.0f), FLOAT16(157.0f), FLOAT16(158.0f), - FLOAT16(159.0f), FLOAT16(160.0f), FLOAT16(161.0f), FLOAT16(162.0f), FLOAT16(163.0f), FLOAT16(164.0f), FLOAT16(165.0f), FLOAT16(166.0f), - FLOAT16(167.0f), FLOAT16(168.0f), FLOAT16(169.0f), FLOAT16(170.0f), FLOAT16(171.0f), FLOAT16(172.0f), FLOAT16(173.0f), FLOAT16(174.0f), + ov::float16(151.0f), ov::float16(152.0f), ov::float16(153.0f), ov::float16(154.0f), ov::float16(155.0f), ov::float16(156.0f), ov::float16(157.0f), ov::float16(158.0f), + ov::float16(159.0f), ov::float16(160.0f), ov::float16(161.0f), ov::float16(162.0f), ov::float16(163.0f), ov::float16(164.0f), ov::float16(165.0f), ov::float16(166.0f), + ov::float16(167.0f), ov::float16(168.0f), ov::float16(169.0f), ov::float16(170.0f), ov::float16(171.0f), ov::float16(172.0f), ov::float16(173.0f), ov::float16(174.0f), - FLOAT16(150.0f), FLOAT16(151.0f), FLOAT16(153.0f), FLOAT16(154.0f), FLOAT16(155.0f), FLOAT16(156.0f), FLOAT16(157.0f), FLOAT16(158.0f), - FLOAT16(159.0f), FLOAT16(160.0f), FLOAT16(161.0f), FLOAT16(162.0f), FLOAT16(163.0f), FLOAT16(164.0f), FLOAT16(165.0f), FLOAT16(166.0f), - FLOAT16(167.0f), FLOAT16(168.0f), FLOAT16(169.0f), FLOAT16(170.0f), FLOAT16(171.0f), FLOAT16(172.0f), FLOAT16(173.0f), FLOAT16(174.0f), + ov::float16(150.0f), ov::float16(151.0f), ov::float16(153.0f), ov::float16(154.0f), ov::float16(155.0f), ov::float16(156.0f), ov::float16(157.0f), ov::float16(158.0f), + ov::float16(159.0f), ov::float16(160.0f), ov::float16(161.0f), ov::float16(162.0f), ov::float16(163.0f), ov::float16(164.0f), ov::float16(165.0f), ov::float16(166.0f), + ov::float16(167.0f), ov::float16(168.0f), ov::float16(169.0f), ov::float16(170.0f), ov::float16(171.0f), ov::float16(172.0f), ov::float16(173.0f), ov::float16(174.0f), }); std::vector expected_results = { // 0 - FLOAT16(151.0f), FLOAT16(152.0f), FLOAT16(153.0f), FLOAT16(154.0f), FLOAT16(155.0f), FLOAT16(156.0f), FLOAT16(157.0f), FLOAT16(158.0f), - FLOAT16(159.0f), FLOAT16(160.0f), FLOAT16(161.0f), FLOAT16(162.0f), FLOAT16(163.0f), FLOAT16(164.0f), FLOAT16(165.0f), FLOAT16(166.0f), - FLOAT16(167.0f), FLOAT16(168.0f), FLOAT16(169.0f), FLOAT16(170.0f), FLOAT16(171.0f), FLOAT16(172.0f), FLOAT16(173.0f), FLOAT16(174.0f), + ov::float16(151.0f), ov::float16(152.0f), ov::float16(153.0f), ov::float16(154.0f), ov::float16(155.0f), ov::float16(156.0f), ov::float16(157.0f), 
ov::float16(158.0f), + ov::float16(159.0f), ov::float16(160.0f), ov::float16(161.0f), ov::float16(162.0f), ov::float16(163.0f), ov::float16(164.0f), ov::float16(165.0f), ov::float16(166.0f), + ov::float16(167.0f), ov::float16(168.0f), ov::float16(169.0f), ov::float16(170.0f), ov::float16(171.0f), ov::float16(172.0f), ov::float16(173.0f), ov::float16(174.0f), - FLOAT16(150.0f), FLOAT16(151.0f), FLOAT16(153.0f), FLOAT16(154.0f), FLOAT16(155.0f), FLOAT16(156.0f), FLOAT16(157.0f), FLOAT16(158.0f), - FLOAT16(159.0f), FLOAT16(160.0f), FLOAT16(161.0f), FLOAT16(162.0f), FLOAT16(163.0f), FLOAT16(164.0f), FLOAT16(165.0f), FLOAT16(166.0f), - FLOAT16(167.0f), FLOAT16(168.0f), FLOAT16(169.0f), FLOAT16(170.0f), FLOAT16(171.0f), FLOAT16(172.0f), FLOAT16(173.0f), FLOAT16(174.0f), + ov::float16(150.0f), ov::float16(151.0f), ov::float16(153.0f), ov::float16(154.0f), ov::float16(155.0f), ov::float16(156.0f), ov::float16(157.0f), ov::float16(158.0f), + ov::float16(159.0f), ov::float16(160.0f), ov::float16(161.0f), ov::float16(162.0f), ov::float16(163.0f), ov::float16(164.0f), ov::float16(165.0f), ov::float16(166.0f), + ov::float16(167.0f), ov::float16(168.0f), ov::float16(169.0f), ov::float16(170.0f), ov::float16(171.0f), ov::float16(172.0f), ov::float16(173.0f), ov::float16(174.0f), // 1 - FLOAT16(51.0f), FLOAT16(52.0f), FLOAT16(53.0f), FLOAT16(54.0f), FLOAT16(55.0f), FLOAT16(56.0f), FLOAT16(57.0f), FLOAT16(58.0f), - FLOAT16(59.0f), FLOAT16(60.0f), FLOAT16(61.0f), FLOAT16(62.0f), FLOAT16(63.0f), FLOAT16(64.0f), FLOAT16(65.0f), FLOAT16(66.0f), - FLOAT16(67.0f), FLOAT16(68.0f), FLOAT16(69.0f), FLOAT16(70.0f), FLOAT16(71.0f), FLOAT16(72.0f), FLOAT16(73.0f), FLOAT16(74.0f), + ov::float16(51.0f), ov::float16(52.0f), ov::float16(53.0f), ov::float16(54.0f), ov::float16(55.0f), ov::float16(56.0f), ov::float16(57.0f), ov::float16(58.0f), + ov::float16(59.0f), ov::float16(60.0f), ov::float16(61.0f), ov::float16(62.0f), ov::float16(63.0f), ov::float16(64.0f), ov::float16(65.0f), ov::float16(66.0f), + ov::float16(67.0f), ov::float16(68.0f), ov::float16(69.0f), ov::float16(70.0f), ov::float16(71.0f), ov::float16(72.0f), ov::float16(73.0f), ov::float16(74.0f), - FLOAT16(50.0f), FLOAT16(51.0f), FLOAT16(53.0f), FLOAT16(54.0f), FLOAT16(55.0f), FLOAT16(56.0f), FLOAT16(57.0f), FLOAT16(58.0f), - FLOAT16(59.0f), FLOAT16(60.0f), FLOAT16(61.0f), FLOAT16(62.0f), FLOAT16(63.0f), FLOAT16(64.0f), FLOAT16(65.0f), FLOAT16(66.0f), - FLOAT16(67.0f), FLOAT16(68.0f), FLOAT16(69.0f), FLOAT16(70.0f), FLOAT16(71.0f), FLOAT16(72.0f), FLOAT16(73.0f), FLOAT16(74.0f), + ov::float16(50.0f), ov::float16(51.0f), ov::float16(53.0f), ov::float16(54.0f), ov::float16(55.0f), ov::float16(56.0f), ov::float16(57.0f), ov::float16(58.0f), + ov::float16(59.0f), ov::float16(60.0f), ov::float16(61.0f), ov::float16(62.0f), ov::float16(63.0f), ov::float16(64.0f), ov::float16(65.0f), ov::float16(66.0f), + ov::float16(67.0f), ov::float16(68.0f), ov::float16(69.0f), ov::float16(70.0f), ov::float16(71.0f), ov::float16(72.0f), ov::float16(73.0f), ov::float16(74.0f), }; topology topology; @@ -1038,48 +1038,48 @@ TEST(scatter_nd_update_gpu_fp16_test9, data4_indice1_update4) { set_values(input1, { // 0 - FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), - FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), - FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), + 
ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), + ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), + ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), // 1 - FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), - FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), - FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), + ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), + ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), + ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), // 2 - FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), - FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), - FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), + ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), + ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), + ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), }); set_values(input2, { - FLOAT16(2.0f), FLOAT16(0.0f), + ov::float16(2.0f), ov::float16(0.0f), }); set_values(input3, { // 0 - FLOAT16(51.0f), FLOAT16(52.0f), FLOAT16(53.0f), FLOAT16(54.0f), FLOAT16(55.0f), FLOAT16(56.0f), FLOAT16(57.0f), FLOAT16(58.0f), - FLOAT16(59.0f), FLOAT16(60.0f), FLOAT16(61.0f), FLOAT16(62.0f), FLOAT16(63.0f), FLOAT16(64.0f), FLOAT16(65.0f), FLOAT16(66.0f), - FLOAT16(67.0f), FLOAT16(68.0f), FLOAT16(69.0f), FLOAT16(70.0f), FLOAT16(71.0f), FLOAT16(72.0f), FLOAT16(73.0f), FLOAT16(74.0f), + ov::float16(51.0f), ov::float16(52.0f), ov::float16(53.0f), ov::float16(54.0f), ov::float16(55.0f), ov::float16(56.0f), ov::float16(57.0f), ov::float16(58.0f), + ov::float16(59.0f), ov::float16(60.0f), ov::float16(61.0f), ov::float16(62.0f), ov::float16(63.0f), ov::float16(64.0f), ov::float16(65.0f), ov::float16(66.0f), + ov::float16(67.0f), ov::float16(68.0f), ov::float16(69.0f), ov::float16(70.0f), ov::float16(71.0f), ov::float16(72.0f), ov::float16(73.0f), ov::float16(74.0f), // 1 - FLOAT16(151.0f), FLOAT16(152.0f), FLOAT16(153.0f), FLOAT16(154.0f), FLOAT16(155.0f), FLOAT16(156.0f), FLOAT16(157.0f), FLOAT16(158.0f), - FLOAT16(159.0f), FLOAT16(160.0f), FLOAT16(161.0f), FLOAT16(162.0f), FLOAT16(163.0f), FLOAT16(164.0f), FLOAT16(165.0f), FLOAT16(166.0f), - FLOAT16(167.0f), FLOAT16(168.0f), FLOAT16(169.0f), FLOAT16(170.0f), FLOAT16(171.0f), FLOAT16(172.0f), 
FLOAT16(173.0f), FLOAT16(174.0f), + ov::float16(151.0f), ov::float16(152.0f), ov::float16(153.0f), ov::float16(154.0f), ov::float16(155.0f), ov::float16(156.0f), ov::float16(157.0f), ov::float16(158.0f), + ov::float16(159.0f), ov::float16(160.0f), ov::float16(161.0f), ov::float16(162.0f), ov::float16(163.0f), ov::float16(164.0f), ov::float16(165.0f), ov::float16(166.0f), + ov::float16(167.0f), ov::float16(168.0f), ov::float16(169.0f), ov::float16(170.0f), ov::float16(171.0f), ov::float16(172.0f), ov::float16(173.0f), ov::float16(174.0f), }); std::vector expected_results = { // 0 - FLOAT16(151.0f), FLOAT16(152.0f), FLOAT16(153.0f), FLOAT16(154.0f), FLOAT16(155.0f), FLOAT16(156.0f), FLOAT16(157.0f), FLOAT16(158.0f), - FLOAT16(159.0f), FLOAT16(160.0f), FLOAT16(161.0f), FLOAT16(162.0f), FLOAT16(163.0f), FLOAT16(164.0f), FLOAT16(165.0f), FLOAT16(166.0f), - FLOAT16(167.0f), FLOAT16(168.0f), FLOAT16(169.0f), FLOAT16(170.0f), FLOAT16(171.0f), FLOAT16(172.0f), FLOAT16(173.0f), FLOAT16(174.0f), + ov::float16(151.0f), ov::float16(152.0f), ov::float16(153.0f), ov::float16(154.0f), ov::float16(155.0f), ov::float16(156.0f), ov::float16(157.0f), ov::float16(158.0f), + ov::float16(159.0f), ov::float16(160.0f), ov::float16(161.0f), ov::float16(162.0f), ov::float16(163.0f), ov::float16(164.0f), ov::float16(165.0f), ov::float16(166.0f), + ov::float16(167.0f), ov::float16(168.0f), ov::float16(169.0f), ov::float16(170.0f), ov::float16(171.0f), ov::float16(172.0f), ov::float16(173.0f), ov::float16(174.0f), // 1 - FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), - FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), - FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), + ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), + ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), + ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), // 2 - FLOAT16(51.0f), FLOAT16(52.0f), FLOAT16(53.0f), FLOAT16(54.0f), FLOAT16(55.0f), FLOAT16(56.0f), FLOAT16(57.0f), FLOAT16(58.0f), - FLOAT16(59.0f), FLOAT16(60.0f), FLOAT16(61.0f), FLOAT16(62.0f), FLOAT16(63.0f), FLOAT16(64.0f), FLOAT16(65.0f), FLOAT16(66.0f), - FLOAT16(67.0f), FLOAT16(68.0f), FLOAT16(69.0f), FLOAT16(70.0f), FLOAT16(71.0f), FLOAT16(72.0f), FLOAT16(73.0f), FLOAT16(74.0f), + ov::float16(51.0f), ov::float16(52.0f), ov::float16(53.0f), ov::float16(54.0f), ov::float16(55.0f), ov::float16(56.0f), ov::float16(57.0f), ov::float16(58.0f), + ov::float16(59.0f), ov::float16(60.0f), ov::float16(61.0f), ov::float16(62.0f), ov::float16(63.0f), ov::float16(64.0f), ov::float16(65.0f), ov::float16(66.0f), + ov::float16(67.0f), ov::float16(68.0f), ov::float16(69.0f), ov::float16(70.0f), ov::float16(71.0f), ov::float16(72.0f), ov::float16(73.0f), ov::float16(74.0f), }; topology topology; @@ -1116,68 +1116,68 @@ TEST(scatter_nd_update_gpu_fp16_test8, data6_indice2_update5) { set_values(input1, { //0,0 - FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), - FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), 
FLOAT16(8.0f), FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), - FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), + ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), + ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), + ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), - FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), - FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), - FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), + ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), + ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), + ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), //0,1 - FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), - FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), - FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), + ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), + ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), + ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), - FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), - FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), - FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), + ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), + ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), + ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), }); set_values(input2, { - FLOAT16(0.0f), FLOAT16(1.0f), - FLOAT16(0.0f), FLOAT16(0.0f) + ov::float16(0.0f), ov::float16(1.0f), + ov::float16(0.0f), ov::float16(0.0f) }); set_values(input3, { // 0 - FLOAT16(51.0f), FLOAT16(52.0f), FLOAT16(53.0f), FLOAT16(54.0f), FLOAT16(55.0f), FLOAT16(56.0f), FLOAT16(57.0f), FLOAT16(58.0f), - FLOAT16(59.0f), 
FLOAT16(60.0f), FLOAT16(61.0f), FLOAT16(62.0f), FLOAT16(63.0f), FLOAT16(64.0f), FLOAT16(65.0f), FLOAT16(66.0f), - FLOAT16(67.0f), FLOAT16(68.0f), FLOAT16(69.0f), FLOAT16(70.0f), FLOAT16(71.0f), FLOAT16(72.0f), FLOAT16(73.0f), FLOAT16(74.0f), + ov::float16(51.0f), ov::float16(52.0f), ov::float16(53.0f), ov::float16(54.0f), ov::float16(55.0f), ov::float16(56.0f), ov::float16(57.0f), ov::float16(58.0f), + ov::float16(59.0f), ov::float16(60.0f), ov::float16(61.0f), ov::float16(62.0f), ov::float16(63.0f), ov::float16(64.0f), ov::float16(65.0f), ov::float16(66.0f), + ov::float16(67.0f), ov::float16(68.0f), ov::float16(69.0f), ov::float16(70.0f), ov::float16(71.0f), ov::float16(72.0f), ov::float16(73.0f), ov::float16(74.0f), - FLOAT16(51.0f), FLOAT16(52.0f), FLOAT16(53.0f), FLOAT16(54.0f), FLOAT16(55.0f), FLOAT16(56.0f), FLOAT16(57.0f), FLOAT16(58.0f), - FLOAT16(59.0f), FLOAT16(60.0f), FLOAT16(61.0f), FLOAT16(62.0f), FLOAT16(63.0f), FLOAT16(64.0f), FLOAT16(65.0f), FLOAT16(66.0f), - FLOAT16(67.0f), FLOAT16(68.0f), FLOAT16(69.0f), FLOAT16(70.0f), FLOAT16(71.0f), FLOAT16(72.0f), FLOAT16(73.0f), FLOAT16(74.0f), + ov::float16(51.0f), ov::float16(52.0f), ov::float16(53.0f), ov::float16(54.0f), ov::float16(55.0f), ov::float16(56.0f), ov::float16(57.0f), ov::float16(58.0f), + ov::float16(59.0f), ov::float16(60.0f), ov::float16(61.0f), ov::float16(62.0f), ov::float16(63.0f), ov::float16(64.0f), ov::float16(65.0f), ov::float16(66.0f), + ov::float16(67.0f), ov::float16(68.0f), ov::float16(69.0f), ov::float16(70.0f), ov::float16(71.0f), ov::float16(72.0f), ov::float16(73.0f), ov::float16(74.0f), // 1 - FLOAT16(151.0f), FLOAT16(152.0f), FLOAT16(153.0f), FLOAT16(154.0f), FLOAT16(155.0f), FLOAT16(156.0f), FLOAT16(157.0f), FLOAT16(158.0f), - FLOAT16(159.0f), FLOAT16(160.0f), FLOAT16(161.0f), FLOAT16(162.0f), FLOAT16(163.0f), FLOAT16(164.0f), FLOAT16(165.0f), FLOAT16(166.0f), - FLOAT16(167.0f), FLOAT16(168.0f), FLOAT16(169.0f), FLOAT16(170.0f), FLOAT16(171.0f), FLOAT16(172.0f), FLOAT16(173.0f), FLOAT16(174.0f), + ov::float16(151.0f), ov::float16(152.0f), ov::float16(153.0f), ov::float16(154.0f), ov::float16(155.0f), ov::float16(156.0f), ov::float16(157.0f), ov::float16(158.0f), + ov::float16(159.0f), ov::float16(160.0f), ov::float16(161.0f), ov::float16(162.0f), ov::float16(163.0f), ov::float16(164.0f), ov::float16(165.0f), ov::float16(166.0f), + ov::float16(167.0f), ov::float16(168.0f), ov::float16(169.0f), ov::float16(170.0f), ov::float16(171.0f), ov::float16(172.0f), ov::float16(173.0f), ov::float16(174.0f), - FLOAT16(151.0f), FLOAT16(152.0f), FLOAT16(153.0f), FLOAT16(154.0f), FLOAT16(155.0f), FLOAT16(156.0f), FLOAT16(157.0f), FLOAT16(158.0f), - FLOAT16(159.0f), FLOAT16(160.0f), FLOAT16(161.0f), FLOAT16(162.0f), FLOAT16(163.0f), FLOAT16(164.0f), FLOAT16(165.0f), FLOAT16(166.0f), - FLOAT16(167.0f), FLOAT16(168.0f), FLOAT16(169.0f), FLOAT16(170.0f), FLOAT16(171.0f), FLOAT16(172.0f), FLOAT16(173.0f), FLOAT16(174.0f), + ov::float16(151.0f), ov::float16(152.0f), ov::float16(153.0f), ov::float16(154.0f), ov::float16(155.0f), ov::float16(156.0f), ov::float16(157.0f), ov::float16(158.0f), + ov::float16(159.0f), ov::float16(160.0f), ov::float16(161.0f), ov::float16(162.0f), ov::float16(163.0f), ov::float16(164.0f), ov::float16(165.0f), ov::float16(166.0f), + ov::float16(167.0f), ov::float16(168.0f), ov::float16(169.0f), ov::float16(170.0f), ov::float16(171.0f), ov::float16(172.0f), ov::float16(173.0f), ov::float16(174.0f), }); std::vector expected_results = { // 0,0 - FLOAT16(151.0f), FLOAT16(152.0f), FLOAT16(153.0f), 
FLOAT16(154.0f), FLOAT16(155.0f), FLOAT16(156.0f), FLOAT16(157.0f), FLOAT16(158.0f), - FLOAT16(159.0f), FLOAT16(160.0f), FLOAT16(161.0f), FLOAT16(162.0f), FLOAT16(163.0f), FLOAT16(164.0f), FLOAT16(165.0f), FLOAT16(166.0f), - FLOAT16(167.0f), FLOAT16(168.0f), FLOAT16(169.0f), FLOAT16(170.0f), FLOAT16(171.0f), FLOAT16(172.0f), FLOAT16(173.0f), FLOAT16(174.0f), + ov::float16(151.0f), ov::float16(152.0f), ov::float16(153.0f), ov::float16(154.0f), ov::float16(155.0f), ov::float16(156.0f), ov::float16(157.0f), ov::float16(158.0f), + ov::float16(159.0f), ov::float16(160.0f), ov::float16(161.0f), ov::float16(162.0f), ov::float16(163.0f), ov::float16(164.0f), ov::float16(165.0f), ov::float16(166.0f), + ov::float16(167.0f), ov::float16(168.0f), ov::float16(169.0f), ov::float16(170.0f), ov::float16(171.0f), ov::float16(172.0f), ov::float16(173.0f), ov::float16(174.0f), - FLOAT16(151.0f), FLOAT16(152.0f), FLOAT16(153.0f), FLOAT16(154.0f), FLOAT16(155.0f), FLOAT16(156.0f), FLOAT16(157.0f), FLOAT16(158.0f), - FLOAT16(159.0f), FLOAT16(160.0f), FLOAT16(161.0f), FLOAT16(162.0f), FLOAT16(163.0f), FLOAT16(164.0f), FLOAT16(165.0f), FLOAT16(166.0f), - FLOAT16(167.0f), FLOAT16(168.0f), FLOAT16(169.0f), FLOAT16(170.0f), FLOAT16(171.0f), FLOAT16(172.0f), FLOAT16(173.0f), FLOAT16(174.0f), + ov::float16(151.0f), ov::float16(152.0f), ov::float16(153.0f), ov::float16(154.0f), ov::float16(155.0f), ov::float16(156.0f), ov::float16(157.0f), ov::float16(158.0f), + ov::float16(159.0f), ov::float16(160.0f), ov::float16(161.0f), ov::float16(162.0f), ov::float16(163.0f), ov::float16(164.0f), ov::float16(165.0f), ov::float16(166.0f), + ov::float16(167.0f), ov::float16(168.0f), ov::float16(169.0f), ov::float16(170.0f), ov::float16(171.0f), ov::float16(172.0f), ov::float16(173.0f), ov::float16(174.0f), // 0,1 - FLOAT16(51.0f), FLOAT16(52.0f), FLOAT16(53.0f), FLOAT16(54.0f), FLOAT16(55.0f), FLOAT16(56.0f), FLOAT16(57.0f), FLOAT16(58.0f), - FLOAT16(59.0f), FLOAT16(60.0f), FLOAT16(61.0f), FLOAT16(62.0f), FLOAT16(63.0f), FLOAT16(64.0f), FLOAT16(65.0f), FLOAT16(66.0f), - FLOAT16(67.0f), FLOAT16(68.0f), FLOAT16(69.0f), FLOAT16(70.0f), FLOAT16(71.0f), FLOAT16(72.0f), FLOAT16(73.0f), FLOAT16(74.0f), + ov::float16(51.0f), ov::float16(52.0f), ov::float16(53.0f), ov::float16(54.0f), ov::float16(55.0f), ov::float16(56.0f), ov::float16(57.0f), ov::float16(58.0f), + ov::float16(59.0f), ov::float16(60.0f), ov::float16(61.0f), ov::float16(62.0f), ov::float16(63.0f), ov::float16(64.0f), ov::float16(65.0f), ov::float16(66.0f), + ov::float16(67.0f), ov::float16(68.0f), ov::float16(69.0f), ov::float16(70.0f), ov::float16(71.0f), ov::float16(72.0f), ov::float16(73.0f), ov::float16(74.0f), - FLOAT16(51.0f), FLOAT16(52.0f), FLOAT16(53.0f), FLOAT16(54.0f), FLOAT16(55.0f), FLOAT16(56.0f), FLOAT16(57.0f), FLOAT16(58.0f), - FLOAT16(59.0f), FLOAT16(60.0f), FLOAT16(61.0f), FLOAT16(62.0f), FLOAT16(63.0f), FLOAT16(64.0f), FLOAT16(65.0f), FLOAT16(66.0f), - FLOAT16(67.0f), FLOAT16(68.0f), FLOAT16(69.0f), FLOAT16(70.0f), FLOAT16(71.0f), FLOAT16(72.0f), FLOAT16(73.0f), FLOAT16(74.0f), + ov::float16(51.0f), ov::float16(52.0f), ov::float16(53.0f), ov::float16(54.0f), ov::float16(55.0f), ov::float16(56.0f), ov::float16(57.0f), ov::float16(58.0f), + ov::float16(59.0f), ov::float16(60.0f), ov::float16(61.0f), ov::float16(62.0f), ov::float16(63.0f), ov::float16(64.0f), ov::float16(65.0f), ov::float16(66.0f), + ov::float16(67.0f), ov::float16(68.0f), ov::float16(69.0f), ov::float16(70.0f), ov::float16(71.0f), ov::float16(72.0f), ov::float16(73.0f), ov::float16(74.0f), 
}; topology topology; @@ -1214,38 +1214,38 @@ TEST(scatter_nd_update_gpu_fp16_test7, data5_indice2_update4) { set_values(input1, { - FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), - FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), - FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), + ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), + ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), + ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), - FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), - FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), - FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), + ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), + ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), + ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), }); set_values(input2, { - FLOAT16(0.0f), FLOAT16(1.0f), - FLOAT16(0.0f), FLOAT16(0.0f) + ov::float16(0.0f), ov::float16(1.0f), + ov::float16(0.0f), ov::float16(0.0f) }); set_values(input3, { - FLOAT16(51.0f), FLOAT16(52.0f), FLOAT16(53.0f), FLOAT16(54.0f), FLOAT16(55.0f), FLOAT16(56.0f), FLOAT16(57.0f), FLOAT16(58.0f), - FLOAT16(59.0f), FLOAT16(60.0f), FLOAT16(61.0f), FLOAT16(62.0f), FLOAT16(63.0f), FLOAT16(64.0f), FLOAT16(65.0f), FLOAT16(66.0f), - FLOAT16(67.0f), FLOAT16(68.0f), FLOAT16(69.0f), FLOAT16(70.0f), FLOAT16(71.0f), FLOAT16(72.0f), FLOAT16(73.0f), FLOAT16(74.0f), + ov::float16(51.0f), ov::float16(52.0f), ov::float16(53.0f), ov::float16(54.0f), ov::float16(55.0f), ov::float16(56.0f), ov::float16(57.0f), ov::float16(58.0f), + ov::float16(59.0f), ov::float16(60.0f), ov::float16(61.0f), ov::float16(62.0f), ov::float16(63.0f), ov::float16(64.0f), ov::float16(65.0f), ov::float16(66.0f), + ov::float16(67.0f), ov::float16(68.0f), ov::float16(69.0f), ov::float16(70.0f), ov::float16(71.0f), ov::float16(72.0f), ov::float16(73.0f), ov::float16(74.0f), - FLOAT16(151.0f), FLOAT16(152.0f), FLOAT16(153.0f), FLOAT16(154.0f), FLOAT16(155.0f), FLOAT16(156.0f), FLOAT16(157.0f), FLOAT16(158.0f), - FLOAT16(159.0f), FLOAT16(160.0f), FLOAT16(161.0f), FLOAT16(162.0f), FLOAT16(163.0f), FLOAT16(164.0f), FLOAT16(165.0f), FLOAT16(166.0f), - FLOAT16(167.0f), FLOAT16(168.0f), FLOAT16(169.0f), FLOAT16(170.0f), FLOAT16(171.0f), FLOAT16(172.0f), FLOAT16(173.0f), FLOAT16(174.0f), + ov::float16(151.0f), ov::float16(152.0f), ov::float16(153.0f), ov::float16(154.0f), ov::float16(155.0f), ov::float16(156.0f), ov::float16(157.0f), ov::float16(158.0f), + ov::float16(159.0f), ov::float16(160.0f), ov::float16(161.0f), ov::float16(162.0f), 
ov::float16(163.0f), ov::float16(164.0f), ov::float16(165.0f), ov::float16(166.0f), + ov::float16(167.0f), ov::float16(168.0f), ov::float16(169.0f), ov::float16(170.0f), ov::float16(171.0f), ov::float16(172.0f), ov::float16(173.0f), ov::float16(174.0f), }); std::vector expected_results = { - FLOAT16(151.0f), FLOAT16(152.0f), FLOAT16(153.0f), FLOAT16(154.0f), FLOAT16(155.0f), FLOAT16(156.0f), FLOAT16(157.0f), FLOAT16(158.0f), - FLOAT16(159.0f), FLOAT16(160.0f), FLOAT16(161.0f), FLOAT16(162.0f), FLOAT16(163.0f), FLOAT16(164.0f), FLOAT16(165.0f), FLOAT16(166.0f), - FLOAT16(167.0f), FLOAT16(168.0f), FLOAT16(169.0f), FLOAT16(170.0f), FLOAT16(171.0f), FLOAT16(172.0f), FLOAT16(173.0f), FLOAT16(174.0f), + ov::float16(151.0f), ov::float16(152.0f), ov::float16(153.0f), ov::float16(154.0f), ov::float16(155.0f), ov::float16(156.0f), ov::float16(157.0f), ov::float16(158.0f), + ov::float16(159.0f), ov::float16(160.0f), ov::float16(161.0f), ov::float16(162.0f), ov::float16(163.0f), ov::float16(164.0f), ov::float16(165.0f), ov::float16(166.0f), + ov::float16(167.0f), ov::float16(168.0f), ov::float16(169.0f), ov::float16(170.0f), ov::float16(171.0f), ov::float16(172.0f), ov::float16(173.0f), ov::float16(174.0f), - FLOAT16(51.0f), FLOAT16(52.0f), FLOAT16(53.0f), FLOAT16(54.0f), FLOAT16(55.0f), FLOAT16(56.0f), FLOAT16(57.0f), FLOAT16(58.0f), - FLOAT16(59.0f), FLOAT16(60.0f), FLOAT16(61.0f), FLOAT16(62.0f), FLOAT16(63.0f), FLOAT16(64.0f), FLOAT16(65.0f), FLOAT16(66.0f), - FLOAT16(67.0f), FLOAT16(68.0f), FLOAT16(69.0f), FLOAT16(70.0f), FLOAT16(71.0f), FLOAT16(72.0f), FLOAT16(73.0f), FLOAT16(74.0f), + ov::float16(51.0f), ov::float16(52.0f), ov::float16(53.0f), ov::float16(54.0f), ov::float16(55.0f), ov::float16(56.0f), ov::float16(57.0f), ov::float16(58.0f), + ov::float16(59.0f), ov::float16(60.0f), ov::float16(61.0f), ov::float16(62.0f), ov::float16(63.0f), ov::float16(64.0f), ov::float16(65.0f), ov::float16(66.0f), + ov::float16(67.0f), ov::float16(68.0f), ov::float16(69.0f), ov::float16(70.0f), ov::float16(71.0f), ov::float16(72.0f), ov::float16(73.0f), ov::float16(74.0f), }; topology topology; @@ -1283,35 +1283,35 @@ TEST(scatter_nd_update_gpu_fp16_test6, data4_indice2_update3) { set_values(input1, { - FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), - FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), - FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), + ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), + ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), + ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), - FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), - FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), - FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), + ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), ov::float16(1.0f), ov::float16(2.0f), 
ov::float16(3.0f), ov::float16(4.0f), + ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), + ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), }); set_values(input2, { - FLOAT16(1.0f), FLOAT16(1.0f), - FLOAT16(1.0f), FLOAT16(0.0f), - FLOAT16(0.0f), FLOAT16(2.0f) + ov::float16(1.0f), ov::float16(1.0f), + ov::float16(1.0f), ov::float16(0.0f), + ov::float16(0.0f), ov::float16(2.0f) }); set_values(input3, { - FLOAT16(51.0f), FLOAT16(52.0f), FLOAT16(53.0f), FLOAT16(54.0f), FLOAT16(55.0f), FLOAT16(56.0f), FLOAT16(57.0f), FLOAT16(58.0f), - FLOAT16(59.0f), FLOAT16(60.0f), FLOAT16(61.0f), FLOAT16(62.0f), FLOAT16(63.0f), FLOAT16(64.0f), FLOAT16(65.0f), FLOAT16(66.0f), - FLOAT16(67.0f), FLOAT16(68.0f), FLOAT16(69.0f), FLOAT16(70.0f), FLOAT16(71.0f), FLOAT16(72.0f), FLOAT16(73.0f), FLOAT16(74.0f), + ov::float16(51.0f), ov::float16(52.0f), ov::float16(53.0f), ov::float16(54.0f), ov::float16(55.0f), ov::float16(56.0f), ov::float16(57.0f), ov::float16(58.0f), + ov::float16(59.0f), ov::float16(60.0f), ov::float16(61.0f), ov::float16(62.0f), ov::float16(63.0f), ov::float16(64.0f), ov::float16(65.0f), ov::float16(66.0f), + ov::float16(67.0f), ov::float16(68.0f), ov::float16(69.0f), ov::float16(70.0f), ov::float16(71.0f), ov::float16(72.0f), ov::float16(73.0f), ov::float16(74.0f), }); std::vector expected_results = { - FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), - FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), - FLOAT16(67.0f), FLOAT16(68.0f), FLOAT16(69.0f), FLOAT16(70.0f), FLOAT16(71.0f), FLOAT16(72.0f), FLOAT16(73.0f), FLOAT16(74.0f), + ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), + ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), + ov::float16(67.0f), ov::float16(68.0f), ov::float16(69.0f), ov::float16(70.0f), ov::float16(71.0f), ov::float16(72.0f), ov::float16(73.0f), ov::float16(74.0f), - FLOAT16(59.0f), FLOAT16(60.0f), FLOAT16(61.0f), FLOAT16(62.0f), FLOAT16(63.0f), FLOAT16(64.0f), FLOAT16(65.0f), FLOAT16(66.0f), - FLOAT16(51.0f), FLOAT16(52.0f), FLOAT16(53.0f), FLOAT16(54.0f), FLOAT16(55.0f), FLOAT16(56.0f), FLOAT16(57.0f), FLOAT16(58.0f), - FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), + ov::float16(59.0f), ov::float16(60.0f), ov::float16(61.0f), ov::float16(62.0f), ov::float16(63.0f), ov::float16(64.0f), ov::float16(65.0f), ov::float16(66.0f), + ov::float16(51.0f), ov::float16(52.0f), ov::float16(53.0f), ov::float16(54.0f), ov::float16(55.0f), ov::float16(56.0f), ov::float16(57.0f), ov::float16(58.0f), + ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), }; topology topology; @@ -1348,35 +1348,35 @@ TEST(scatter_nd_update_gpu_fp16_test5, data3_indice2_update2) { set_values(input1, { - FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), - FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), - FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), 
FLOAT16(12.0f), + ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), + ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), + ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), - FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), - FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), - FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), + ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), + ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), + ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), }); set_values(input2, { - FLOAT16(1.0f), FLOAT16(1.0f), - FLOAT16(1.0f), FLOAT16(0.0f), - FLOAT16(0.0f), FLOAT16(2.0f) + ov::float16(1.0f), ov::float16(1.0f), + ov::float16(1.0f), ov::float16(0.0f), + ov::float16(0.0f), ov::float16(2.0f) }); set_values(input3, { - FLOAT16(51.0f), FLOAT16(52.0f), FLOAT16(53.0f), FLOAT16(54.0f), - FLOAT16(61.0f), FLOAT16(62.0f), FLOAT16(63.0f), FLOAT16(64.0f), - FLOAT16(71.0f), FLOAT16(72.0f), FLOAT16(73.0f), FLOAT16(74.0f), + ov::float16(51.0f), ov::float16(52.0f), ov::float16(53.0f), ov::float16(54.0f), + ov::float16(61.0f), ov::float16(62.0f), ov::float16(63.0f), ov::float16(64.0f), + ov::float16(71.0f), ov::float16(72.0f), ov::float16(73.0f), ov::float16(74.0f), }); std::vector expected_results = { - FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), - FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), - FLOAT16(71.0f), FLOAT16(72.0f), FLOAT16(73.0f), FLOAT16(74.0f), + ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), + ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), + ov::float16(71.0f), ov::float16(72.0f), ov::float16(73.0f), ov::float16(74.0f), - FLOAT16(61.0f), FLOAT16(62.0f), FLOAT16(63.0f), FLOAT16(64.0f), - FLOAT16(51.0f), FLOAT16(52.0f), FLOAT16(53.0f), FLOAT16(54.0f), - FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), + ov::float16(61.0f), ov::float16(62.0f), ov::float16(63.0f), ov::float16(64.0f), + ov::float16(51.0f), ov::float16(52.0f), ov::float16(53.0f), ov::float16(54.0f), + ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), }; topology topology; @@ -1413,25 +1413,25 @@ TEST(scatter_nd_update_gpu_fp16_test4, data2_indice2_update1) { set_values(input1, { - FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), - FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), - FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), + ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), + ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), + ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), }); set_values(input2, { - FLOAT16(2.0f), FLOAT16(1.0f), - FLOAT16(0.0f), FLOAT16(3.0f), - FLOAT16(0.0f), FLOAT16(2.0f) + ov::float16(2.0f), ov::float16(1.0f), + ov::float16(0.0f), ov::float16(3.0f), + ov::float16(0.0f), ov::float16(2.0f) }); set_values(input3, { - FLOAT16(21.0f), FLOAT16(22.0f), FLOAT16(23.0f) + ov::float16(21.0f), ov::float16(22.0f), ov::float16(23.0f) }); std::vector expected_results = { - FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(23.0f), FLOAT16(22.0f), - FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), - FLOAT16(9.0f), FLOAT16(21.0f), FLOAT16(11.0f), FLOAT16(12.0f), + ov::float16(1.0f), ov::float16(2.0f), ov::float16(23.0f), ov::float16(22.0f), + ov::float16(5.0f), 
ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), + ov::float16(9.0f), ov::float16(21.0f), ov::float16(11.0f), ov::float16(12.0f), }; topology topology; @@ -1468,45 +1468,45 @@ TEST(scatter_nd_update_gpu_fp16_test3, data3_indice1_update3) { set_values(input1, { - FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), - FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), - FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), + ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), + ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), + ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), - FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), - FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), - FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), + ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), + ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), + ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), - FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), - FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), - FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), + ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), + ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), + ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), }); set_values(input2, { - FLOAT16(2.0f), FLOAT16(0.0f) + ov::float16(2.0f), ov::float16(0.0f) }); set_values(input3, { - FLOAT16(21.0f), FLOAT16(22.0f), FLOAT16(23.0f), FLOAT16(24.0f), - FLOAT16(25.0f), FLOAT16(26.0f), FLOAT16(27.0f), FLOAT16(28.0f), - FLOAT16(29.0f), FLOAT16(30.0f), FLOAT16(31.0f), FLOAT16(32.0f), + ov::float16(21.0f), ov::float16(22.0f), ov::float16(23.0f), ov::float16(24.0f), + ov::float16(25.0f), ov::float16(26.0f), ov::float16(27.0f), ov::float16(28.0f), + ov::float16(29.0f), ov::float16(30.0f), ov::float16(31.0f), ov::float16(32.0f), - FLOAT16(41.0f), FLOAT16(42.0f), FLOAT16(43.0f), FLOAT16(44.0f), - FLOAT16(45.0f), FLOAT16(46.0f), FLOAT16(47.0f), FLOAT16(48.0f), - FLOAT16(49.0f), FLOAT16(50.0f), FLOAT16(51.0f), FLOAT16(52.0f), + ov::float16(41.0f), ov::float16(42.0f), ov::float16(43.0f), ov::float16(44.0f), + ov::float16(45.0f), ov::float16(46.0f), ov::float16(47.0f), ov::float16(48.0f), + ov::float16(49.0f), ov::float16(50.0f), ov::float16(51.0f), ov::float16(52.0f), }); std::vector expected_results = { - FLOAT16(41.0f), FLOAT16(42.0f), FLOAT16(43.0f), FLOAT16(44.0f), - FLOAT16(45.0f), FLOAT16(46.0f), FLOAT16(47.0f), FLOAT16(48.0f), - FLOAT16(49.0f), FLOAT16(50.0f), FLOAT16(51.0f), FLOAT16(52.0f), + ov::float16(41.0f), ov::float16(42.0f), ov::float16(43.0f), ov::float16(44.0f), + ov::float16(45.0f), ov::float16(46.0f), ov::float16(47.0f), ov::float16(48.0f), + ov::float16(49.0f), ov::float16(50.0f), ov::float16(51.0f), ov::float16(52.0f), - FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), - FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), - FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), + ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), + ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), + ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), - FLOAT16(21.0f), FLOAT16(22.0f), FLOAT16(23.0f), FLOAT16(24.0f), - FLOAT16(25.0f), 
FLOAT16(26.0f), FLOAT16(27.0f), FLOAT16(28.0f), - FLOAT16(29.0f), FLOAT16(30.0f), FLOAT16(31.0f), FLOAT16(32.0f), + ov::float16(21.0f), ov::float16(22.0f), ov::float16(23.0f), ov::float16(24.0f), + ov::float16(25.0f), ov::float16(26.0f), ov::float16(27.0f), ov::float16(28.0f), + ov::float16(29.0f), ov::float16(30.0f), ov::float16(31.0f), ov::float16(32.0f), }; topology topology; @@ -1544,24 +1544,24 @@ TEST(scatter_nd_update_gpu_fp16_test2, data2_indice1_update2) { set_values(input1, { - FLOAT16(13.0f), FLOAT16(12.0f), FLOAT16(11.0f), FLOAT16(10.0f), - FLOAT16(9.0f), FLOAT16(8.0f), FLOAT16(7.0f), FLOAT16(6.0f), - FLOAT16(5.0f), FLOAT16(4.0f), FLOAT16(3.0f), FLOAT16(2.0f) + ov::float16(13.0f), ov::float16(12.0f), ov::float16(11.0f), ov::float16(10.0f), + ov::float16(9.0f), ov::float16(8.0f), ov::float16(7.0f), ov::float16(6.0f), + ov::float16(5.0f), ov::float16(4.0f), ov::float16(3.0f), ov::float16(2.0f) }); set_values(input2, { - FLOAT16(2.0f), FLOAT16(0.0f) + ov::float16(2.0f), ov::float16(0.0f) }); set_values(input3, { - FLOAT16(20.0f), FLOAT16(21.0f), FLOAT16(22.0f), FLOAT16(23.0f), - FLOAT16(24.0f), FLOAT16(25.0f), FLOAT16(26.0f), FLOAT16(27.0f) + ov::float16(20.0f), ov::float16(21.0f), ov::float16(22.0f), ov::float16(23.0f), + ov::float16(24.0f), ov::float16(25.0f), ov::float16(26.0f), ov::float16(27.0f) }); std::vector expected_results = { - FLOAT16(24.0f), FLOAT16(25.0f), FLOAT16(26.0f), FLOAT16(27.0f), - FLOAT16(9.0f), FLOAT16(8.0f), FLOAT16(7.0f), FLOAT16(6.0f), - FLOAT16(20.0f), FLOAT16(21.0f), FLOAT16(22.0f), FLOAT16(23.0f), + ov::float16(24.0f), ov::float16(25.0f), ov::float16(26.0f), ov::float16(27.0f), + ov::float16(9.0f), ov::float16(8.0f), ov::float16(7.0f), ov::float16(6.0f), + ov::float16(20.0f), ov::float16(21.0f), ov::float16(22.0f), ov::float16(23.0f), }; topology topology; @@ -1598,15 +1598,15 @@ TEST(scatter_nd_update_gpu_fp16_test1, data1_indice1_update1) { set_values(input1, { - FLOAT16(9.0f), FLOAT16(8.0f), FLOAT16(7.0f), FLOAT16(6.0f), FLOAT16(5.0f), FLOAT16(4.0f), FLOAT16(3.0f), FLOAT16(2.0f) + ov::float16(9.0f), ov::float16(8.0f), ov::float16(7.0f), ov::float16(6.0f), ov::float16(5.0f), ov::float16(4.0f), ov::float16(3.0f), ov::float16(2.0f) }); set_values(input2, { - FLOAT16(2.0f), FLOAT16(4.0f), FLOAT16(5.0f), FLOAT16(7.0f) + ov::float16(2.0f), ov::float16(4.0f), ov::float16(5.0f), ov::float16(7.0f) }); set_values(input3, { - FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), FLOAT16(13.0f) + ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), ov::float16(13.0f) }); std::vector expected_results = { @@ -1655,56 +1655,56 @@ TEST(scatter_nd_update_gpu_fp16, d6661_i2311) { auto input3 = engine.allocate_memory({ data_types::f16, format::bfyx, { 2, 1, 1, 1 } }); // Updates set_values(input1, { - FLOAT16(100.f), FLOAT16(101.f), FLOAT16(102.f), FLOAT16(103.f), FLOAT16(104.f), FLOAT16(105.f), - FLOAT16(106.f), FLOAT16(107.f), FLOAT16(108.f), FLOAT16(109.f), FLOAT16(110.f), FLOAT16(111.f), - FLOAT16(112.f), FLOAT16(113.f), FLOAT16(114.f), FLOAT16(115.f), FLOAT16(116.f), FLOAT16(117.f), - FLOAT16(118.f), FLOAT16(119.f), FLOAT16(120.f), FLOAT16(121.f), FLOAT16(122.f), FLOAT16(123.f), - FLOAT16(124.f), FLOAT16(125.f), FLOAT16(126.f), FLOAT16(127.f), FLOAT16(128.f), FLOAT16(129.f), - FLOAT16(130.f), FLOAT16(131.f), FLOAT16(132.f), FLOAT16(133.f), FLOAT16(134.f), FLOAT16(135.f), - - FLOAT16(136.f), FLOAT16(137.f), FLOAT16(138.f), FLOAT16(139.f), FLOAT16(140.f), FLOAT16(141.f), - FLOAT16(142.f), FLOAT16(143.f), FLOAT16(144.f), FLOAT16(145.f), FLOAT16(146.f), 
FLOAT16(147.f), - FLOAT16(148.f), FLOAT16(149.f), FLOAT16(150.f), FLOAT16(151.f), FLOAT16(152.f), FLOAT16(153.f), - FLOAT16(154.f), FLOAT16(155.f), FLOAT16(156.f), FLOAT16(157.f), FLOAT16(158.f), FLOAT16(159.f), - FLOAT16(160.f), FLOAT16(161.f), FLOAT16(162.f), FLOAT16(163.f), FLOAT16(164.f), FLOAT16(165.f), - FLOAT16(166.f), FLOAT16(167.f), FLOAT16(168.f), FLOAT16(169.f), FLOAT16(170.f), FLOAT16(171.f), - - FLOAT16(172.f), FLOAT16(173.f), FLOAT16(174.f), FLOAT16(175.f), FLOAT16(176.f), FLOAT16(177.f), - FLOAT16(178.f), FLOAT16(179.f), FLOAT16(180.f), FLOAT16(181.f), FLOAT16(182.f), FLOAT16(183.f), - FLOAT16(184.f), FLOAT16(185.f), FLOAT16(186.f), FLOAT16(187.f), FLOAT16(188.f), FLOAT16(189.f), - FLOAT16(190.f), FLOAT16(191.f), FLOAT16(192.f), FLOAT16(193.f), FLOAT16(194.f), FLOAT16(195.f), - FLOAT16(196.f), FLOAT16(197.f), FLOAT16(198.f), FLOAT16(199.f), FLOAT16(200.f), FLOAT16(201.f), - FLOAT16(202.f), FLOAT16(203.f), FLOAT16(204.f), FLOAT16(205.f), FLOAT16(206.f), FLOAT16(207.f), - - FLOAT16(208.f), FLOAT16(209.f), FLOAT16(210.f), FLOAT16(211.f), FLOAT16(212.f), FLOAT16(213.f), - FLOAT16(214.f), FLOAT16(215.f), FLOAT16(216.f), FLOAT16(217.f), FLOAT16(218.f), FLOAT16(219.f), - FLOAT16(220.f), FLOAT16(221.f), FLOAT16(222.f), FLOAT16(223.f), FLOAT16(224.f), FLOAT16(225.f), - FLOAT16(226.f), FLOAT16(227.f), FLOAT16(228.f), FLOAT16(229.f), FLOAT16(230.f), FLOAT16(231.f), - FLOAT16(232.f), FLOAT16(233.f), FLOAT16(234.f), FLOAT16(235.f), FLOAT16(236.f), FLOAT16(237.f), - FLOAT16(238.f), FLOAT16(239.f), FLOAT16(240.f), FLOAT16(241.f), FLOAT16(242.f), FLOAT16(243.f), - - FLOAT16(244.f), FLOAT16(245.f), FLOAT16(246.f), FLOAT16(247.f), FLOAT16(248.f), FLOAT16(249.f), - FLOAT16(250.f), FLOAT16(251.f), FLOAT16(252.f), FLOAT16(253.f), FLOAT16(254.f), FLOAT16(255.f), - FLOAT16(256.f), FLOAT16(257.f), FLOAT16(258.f), FLOAT16(259.f), FLOAT16(260.f), FLOAT16(261.f), - FLOAT16(262.f), FLOAT16(263.f), FLOAT16(264.f), FLOAT16(265.f), FLOAT16(266.f), FLOAT16(267.f), - FLOAT16(268.f), FLOAT16(269.f), FLOAT16(270.f), FLOAT16(271.f), FLOAT16(272.f), FLOAT16(273.f), - FLOAT16(274.f), FLOAT16(275.f), FLOAT16(276.f), FLOAT16(277.f), FLOAT16(278.f), FLOAT16(279.f), - - FLOAT16(280.f), FLOAT16(281.f), FLOAT16(282.f), FLOAT16(283.f), FLOAT16(284.f), FLOAT16(285.f), - FLOAT16(286.f), FLOAT16(287.f), FLOAT16(288.f), FLOAT16(289.f), FLOAT16(290.f), FLOAT16(291.f), - FLOAT16(292.f), FLOAT16(293.f), FLOAT16(294.f), FLOAT16(295.f), FLOAT16(296.f), FLOAT16(297.f), - FLOAT16(298.f), FLOAT16(299.f), FLOAT16(300.f), FLOAT16(301.f), FLOAT16(302.f), FLOAT16(303.f), - FLOAT16(304.f), FLOAT16(305.f), FLOAT16(306.f), FLOAT16(307.f), FLOAT16(308.f), FLOAT16(309.f), - FLOAT16(310.f), FLOAT16(311.f), FLOAT16(312.f), FLOAT16(313.f), FLOAT16(314.f), FLOAT16(315.f), + ov::float16(100.f), ov::float16(101.f), ov::float16(102.f), ov::float16(103.f), ov::float16(104.f), ov::float16(105.f), + ov::float16(106.f), ov::float16(107.f), ov::float16(108.f), ov::float16(109.f), ov::float16(110.f), ov::float16(111.f), + ov::float16(112.f), ov::float16(113.f), ov::float16(114.f), ov::float16(115.f), ov::float16(116.f), ov::float16(117.f), + ov::float16(118.f), ov::float16(119.f), ov::float16(120.f), ov::float16(121.f), ov::float16(122.f), ov::float16(123.f), + ov::float16(124.f), ov::float16(125.f), ov::float16(126.f), ov::float16(127.f), ov::float16(128.f), ov::float16(129.f), + ov::float16(130.f), ov::float16(131.f), ov::float16(132.f), ov::float16(133.f), ov::float16(134.f), ov::float16(135.f), + + ov::float16(136.f), ov::float16(137.f), 
ov::float16(138.f), ov::float16(139.f), ov::float16(140.f), ov::float16(141.f), + ov::float16(142.f), ov::float16(143.f), ov::float16(144.f), ov::float16(145.f), ov::float16(146.f), ov::float16(147.f), + ov::float16(148.f), ov::float16(149.f), ov::float16(150.f), ov::float16(151.f), ov::float16(152.f), ov::float16(153.f), + ov::float16(154.f), ov::float16(155.f), ov::float16(156.f), ov::float16(157.f), ov::float16(158.f), ov::float16(159.f), + ov::float16(160.f), ov::float16(161.f), ov::float16(162.f), ov::float16(163.f), ov::float16(164.f), ov::float16(165.f), + ov::float16(166.f), ov::float16(167.f), ov::float16(168.f), ov::float16(169.f), ov::float16(170.f), ov::float16(171.f), + + ov::float16(172.f), ov::float16(173.f), ov::float16(174.f), ov::float16(175.f), ov::float16(176.f), ov::float16(177.f), + ov::float16(178.f), ov::float16(179.f), ov::float16(180.f), ov::float16(181.f), ov::float16(182.f), ov::float16(183.f), + ov::float16(184.f), ov::float16(185.f), ov::float16(186.f), ov::float16(187.f), ov::float16(188.f), ov::float16(189.f), + ov::float16(190.f), ov::float16(191.f), ov::float16(192.f), ov::float16(193.f), ov::float16(194.f), ov::float16(195.f), + ov::float16(196.f), ov::float16(197.f), ov::float16(198.f), ov::float16(199.f), ov::float16(200.f), ov::float16(201.f), + ov::float16(202.f), ov::float16(203.f), ov::float16(204.f), ov::float16(205.f), ov::float16(206.f), ov::float16(207.f), + + ov::float16(208.f), ov::float16(209.f), ov::float16(210.f), ov::float16(211.f), ov::float16(212.f), ov::float16(213.f), + ov::float16(214.f), ov::float16(215.f), ov::float16(216.f), ov::float16(217.f), ov::float16(218.f), ov::float16(219.f), + ov::float16(220.f), ov::float16(221.f), ov::float16(222.f), ov::float16(223.f), ov::float16(224.f), ov::float16(225.f), + ov::float16(226.f), ov::float16(227.f), ov::float16(228.f), ov::float16(229.f), ov::float16(230.f), ov::float16(231.f), + ov::float16(232.f), ov::float16(233.f), ov::float16(234.f), ov::float16(235.f), ov::float16(236.f), ov::float16(237.f), + ov::float16(238.f), ov::float16(239.f), ov::float16(240.f), ov::float16(241.f), ov::float16(242.f), ov::float16(243.f), + + ov::float16(244.f), ov::float16(245.f), ov::float16(246.f), ov::float16(247.f), ov::float16(248.f), ov::float16(249.f), + ov::float16(250.f), ov::float16(251.f), ov::float16(252.f), ov::float16(253.f), ov::float16(254.f), ov::float16(255.f), + ov::float16(256.f), ov::float16(257.f), ov::float16(258.f), ov::float16(259.f), ov::float16(260.f), ov::float16(261.f), + ov::float16(262.f), ov::float16(263.f), ov::float16(264.f), ov::float16(265.f), ov::float16(266.f), ov::float16(267.f), + ov::float16(268.f), ov::float16(269.f), ov::float16(270.f), ov::float16(271.f), ov::float16(272.f), ov::float16(273.f), + ov::float16(274.f), ov::float16(275.f), ov::float16(276.f), ov::float16(277.f), ov::float16(278.f), ov::float16(279.f), + + ov::float16(280.f), ov::float16(281.f), ov::float16(282.f), ov::float16(283.f), ov::float16(284.f), ov::float16(285.f), + ov::float16(286.f), ov::float16(287.f), ov::float16(288.f), ov::float16(289.f), ov::float16(290.f), ov::float16(291.f), + ov::float16(292.f), ov::float16(293.f), ov::float16(294.f), ov::float16(295.f), ov::float16(296.f), ov::float16(297.f), + ov::float16(298.f), ov::float16(299.f), ov::float16(300.f), ov::float16(301.f), ov::float16(302.f), ov::float16(303.f), + ov::float16(304.f), ov::float16(305.f), ov::float16(306.f), ov::float16(307.f), ov::float16(308.f), ov::float16(309.f), + ov::float16(310.f), ov::float16(311.f), 
ov::float16(312.f), ov::float16(313.f), ov::float16(314.f), ov::float16(315.f), }); set_values(input2, { - FLOAT16(0.0f), FLOAT16(1.0f), FLOAT16(2.0f), - FLOAT16(3.0f), FLOAT16(4.0f), FLOAT16(5.0f) + ov::float16(0.0f), ov::float16(1.0f), ov::float16(2.0f), + ov::float16(3.0f), ov::float16(4.0f), ov::float16(5.0f) }); set_values(input3, { - FLOAT16(999.0f), FLOAT16(888.0f) + ov::float16(999.0f), ov::float16(888.0f) }); @@ -1794,57 +1794,57 @@ TEST(scatter_nd_update_gpu_fp16, d6661_i2211) { set_values(input1, { - FLOAT16(100.f), FLOAT16(101.f), FLOAT16(102.f), FLOAT16(103.f), FLOAT16(104.f), FLOAT16(105.f), - FLOAT16(106.f), FLOAT16(107.f), FLOAT16(108.f), FLOAT16(109.f), FLOAT16(110.f), FLOAT16(111.f), - FLOAT16(112.f), FLOAT16(113.f), FLOAT16(114.f), FLOAT16(115.f), FLOAT16(116.f), FLOAT16(117.f), - FLOAT16(118.f), FLOAT16(119.f), FLOAT16(120.f), FLOAT16(121.f), FLOAT16(122.f), FLOAT16(123.f), - FLOAT16(124.f), FLOAT16(125.f), FLOAT16(126.f), FLOAT16(127.f), FLOAT16(128.f), FLOAT16(129.f), - FLOAT16(130.f), FLOAT16(131.f), FLOAT16(132.f), FLOAT16(133.f), FLOAT16(134.f), FLOAT16(135.f), - - FLOAT16(136.f), FLOAT16(137.f), FLOAT16(138.f), FLOAT16(139.f), FLOAT16(140.f), FLOAT16(141.f), - FLOAT16(142.f), FLOAT16(143.f), FLOAT16(144.f), FLOAT16(145.f), FLOAT16(146.f), FLOAT16(147.f), - FLOAT16(148.f), FLOAT16(149.f), FLOAT16(150.f), FLOAT16(151.f), FLOAT16(152.f), FLOAT16(153.f), - FLOAT16(154.f), FLOAT16(155.f), FLOAT16(156.f), FLOAT16(157.f), FLOAT16(158.f), FLOAT16(159.f), - FLOAT16(160.f), FLOAT16(161.f), FLOAT16(162.f), FLOAT16(163.f), FLOAT16(164.f), FLOAT16(165.f), - FLOAT16(166.f), FLOAT16(167.f), FLOAT16(168.f), FLOAT16(169.f), FLOAT16(170.f), FLOAT16(171.f), - - FLOAT16(172.f), FLOAT16(173.f), FLOAT16(174.f), FLOAT16(175.f), FLOAT16(176.f), FLOAT16(177.f), - FLOAT16(178.f), FLOAT16(179.f), FLOAT16(180.f), FLOAT16(181.f), FLOAT16(182.f), FLOAT16(183.f), - FLOAT16(184.f), FLOAT16(185.f), FLOAT16(186.f), FLOAT16(187.f), FLOAT16(188.f), FLOAT16(189.f), - FLOAT16(190.f), FLOAT16(191.f), FLOAT16(192.f), FLOAT16(193.f), FLOAT16(194.f), FLOAT16(195.f), - FLOAT16(196.f), FLOAT16(197.f), FLOAT16(198.f), FLOAT16(199.f), FLOAT16(200.f), FLOAT16(201.f), - FLOAT16(202.f), FLOAT16(203.f), FLOAT16(204.f), FLOAT16(205.f), FLOAT16(206.f), FLOAT16(207.f), - - FLOAT16(208.f), FLOAT16(209.f), FLOAT16(210.f), FLOAT16(211.f), FLOAT16(212.f), FLOAT16(213.f), - FLOAT16(214.f), FLOAT16(215.f), FLOAT16(216.f), FLOAT16(217.f), FLOAT16(218.f), FLOAT16(219.f), - FLOAT16(220.f), FLOAT16(221.f), FLOAT16(222.f), FLOAT16(223.f), FLOAT16(224.f), FLOAT16(225.f), - FLOAT16(226.f), FLOAT16(227.f), FLOAT16(228.f), FLOAT16(229.f), FLOAT16(230.f), FLOAT16(231.f), - FLOAT16(232.f), FLOAT16(233.f), FLOAT16(234.f), FLOAT16(235.f), FLOAT16(236.f), FLOAT16(237.f), - FLOAT16(238.f), FLOAT16(239.f), FLOAT16(240.f), FLOAT16(241.f), FLOAT16(242.f), FLOAT16(243.f), - - FLOAT16(244.f), FLOAT16(245.f), FLOAT16(246.f), FLOAT16(247.f), FLOAT16(248.f), FLOAT16(249.f), - FLOAT16(250.f), FLOAT16(251.f), FLOAT16(252.f), FLOAT16(253.f), FLOAT16(254.f), FLOAT16(255.f), - FLOAT16(256.f), FLOAT16(257.f), FLOAT16(258.f), FLOAT16(259.f), FLOAT16(260.f), FLOAT16(261.f), - FLOAT16(262.f), FLOAT16(263.f), FLOAT16(264.f), FLOAT16(265.f), FLOAT16(266.f), FLOAT16(267.f), - FLOAT16(268.f), FLOAT16(269.f), FLOAT16(270.f), FLOAT16(271.f), FLOAT16(272.f), FLOAT16(273.f), - FLOAT16(274.f), FLOAT16(275.f), FLOAT16(276.f), FLOAT16(277.f), FLOAT16(278.f), FLOAT16(279.f), - - FLOAT16(280.f), FLOAT16(281.f), FLOAT16(282.f), FLOAT16(283.f), FLOAT16(284.f), 
FLOAT16(285.f), - FLOAT16(286.f), FLOAT16(287.f), FLOAT16(288.f), FLOAT16(289.f), FLOAT16(290.f), FLOAT16(291.f), - FLOAT16(292.f), FLOAT16(293.f), FLOAT16(294.f), FLOAT16(295.f), FLOAT16(296.f), FLOAT16(297.f), - FLOAT16(298.f), FLOAT16(299.f), FLOAT16(300.f), FLOAT16(301.f), FLOAT16(302.f), FLOAT16(303.f), - FLOAT16(304.f), FLOAT16(305.f), FLOAT16(306.f), FLOAT16(307.f), FLOAT16(308.f), FLOAT16(309.f), - FLOAT16(310.f), FLOAT16(311.f), FLOAT16(312.f), FLOAT16(313.f), FLOAT16(314.f), FLOAT16(315.f), + ov::float16(100.f), ov::float16(101.f), ov::float16(102.f), ov::float16(103.f), ov::float16(104.f), ov::float16(105.f), + ov::float16(106.f), ov::float16(107.f), ov::float16(108.f), ov::float16(109.f), ov::float16(110.f), ov::float16(111.f), + ov::float16(112.f), ov::float16(113.f), ov::float16(114.f), ov::float16(115.f), ov::float16(116.f), ov::float16(117.f), + ov::float16(118.f), ov::float16(119.f), ov::float16(120.f), ov::float16(121.f), ov::float16(122.f), ov::float16(123.f), + ov::float16(124.f), ov::float16(125.f), ov::float16(126.f), ov::float16(127.f), ov::float16(128.f), ov::float16(129.f), + ov::float16(130.f), ov::float16(131.f), ov::float16(132.f), ov::float16(133.f), ov::float16(134.f), ov::float16(135.f), + + ov::float16(136.f), ov::float16(137.f), ov::float16(138.f), ov::float16(139.f), ov::float16(140.f), ov::float16(141.f), + ov::float16(142.f), ov::float16(143.f), ov::float16(144.f), ov::float16(145.f), ov::float16(146.f), ov::float16(147.f), + ov::float16(148.f), ov::float16(149.f), ov::float16(150.f), ov::float16(151.f), ov::float16(152.f), ov::float16(153.f), + ov::float16(154.f), ov::float16(155.f), ov::float16(156.f), ov::float16(157.f), ov::float16(158.f), ov::float16(159.f), + ov::float16(160.f), ov::float16(161.f), ov::float16(162.f), ov::float16(163.f), ov::float16(164.f), ov::float16(165.f), + ov::float16(166.f), ov::float16(167.f), ov::float16(168.f), ov::float16(169.f), ov::float16(170.f), ov::float16(171.f), + + ov::float16(172.f), ov::float16(173.f), ov::float16(174.f), ov::float16(175.f), ov::float16(176.f), ov::float16(177.f), + ov::float16(178.f), ov::float16(179.f), ov::float16(180.f), ov::float16(181.f), ov::float16(182.f), ov::float16(183.f), + ov::float16(184.f), ov::float16(185.f), ov::float16(186.f), ov::float16(187.f), ov::float16(188.f), ov::float16(189.f), + ov::float16(190.f), ov::float16(191.f), ov::float16(192.f), ov::float16(193.f), ov::float16(194.f), ov::float16(195.f), + ov::float16(196.f), ov::float16(197.f), ov::float16(198.f), ov::float16(199.f), ov::float16(200.f), ov::float16(201.f), + ov::float16(202.f), ov::float16(203.f), ov::float16(204.f), ov::float16(205.f), ov::float16(206.f), ov::float16(207.f), + + ov::float16(208.f), ov::float16(209.f), ov::float16(210.f), ov::float16(211.f), ov::float16(212.f), ov::float16(213.f), + ov::float16(214.f), ov::float16(215.f), ov::float16(216.f), ov::float16(217.f), ov::float16(218.f), ov::float16(219.f), + ov::float16(220.f), ov::float16(221.f), ov::float16(222.f), ov::float16(223.f), ov::float16(224.f), ov::float16(225.f), + ov::float16(226.f), ov::float16(227.f), ov::float16(228.f), ov::float16(229.f), ov::float16(230.f), ov::float16(231.f), + ov::float16(232.f), ov::float16(233.f), ov::float16(234.f), ov::float16(235.f), ov::float16(236.f), ov::float16(237.f), + ov::float16(238.f), ov::float16(239.f), ov::float16(240.f), ov::float16(241.f), ov::float16(242.f), ov::float16(243.f), + + ov::float16(244.f), ov::float16(245.f), ov::float16(246.f), ov::float16(247.f), ov::float16(248.f), 
ov::float16(249.f), + ov::float16(250.f), ov::float16(251.f), ov::float16(252.f), ov::float16(253.f), ov::float16(254.f), ov::float16(255.f), + ov::float16(256.f), ov::float16(257.f), ov::float16(258.f), ov::float16(259.f), ov::float16(260.f), ov::float16(261.f), + ov::float16(262.f), ov::float16(263.f), ov::float16(264.f), ov::float16(265.f), ov::float16(266.f), ov::float16(267.f), + ov::float16(268.f), ov::float16(269.f), ov::float16(270.f), ov::float16(271.f), ov::float16(272.f), ov::float16(273.f), + ov::float16(274.f), ov::float16(275.f), ov::float16(276.f), ov::float16(277.f), ov::float16(278.f), ov::float16(279.f), + + ov::float16(280.f), ov::float16(281.f), ov::float16(282.f), ov::float16(283.f), ov::float16(284.f), ov::float16(285.f), + ov::float16(286.f), ov::float16(287.f), ov::float16(288.f), ov::float16(289.f), ov::float16(290.f), ov::float16(291.f), + ov::float16(292.f), ov::float16(293.f), ov::float16(294.f), ov::float16(295.f), ov::float16(296.f), ov::float16(297.f), + ov::float16(298.f), ov::float16(299.f), ov::float16(300.f), ov::float16(301.f), ov::float16(302.f), ov::float16(303.f), + ov::float16(304.f), ov::float16(305.f), ov::float16(306.f), ov::float16(307.f), ov::float16(308.f), ov::float16(309.f), + ov::float16(310.f), ov::float16(311.f), ov::float16(312.f), ov::float16(313.f), ov::float16(314.f), ov::float16(315.f), }); set_values(input2, { - FLOAT16(0.0f), FLOAT16(1.0f), - FLOAT16(3.0f), FLOAT16(4.0f), + ov::float16(0.0f), ov::float16(1.0f), + ov::float16(3.0f), ov::float16(4.0f), }); set_values(input3, { - FLOAT16(999.0f), FLOAT16(999.0f), FLOAT16(999.0f), FLOAT16(999.0f), FLOAT16(999.0f), FLOAT16(999.0f), - FLOAT16(888.0f), FLOAT16(888.0f), FLOAT16(888.0f), FLOAT16(888.0f), FLOAT16(888.0f), FLOAT16(888.0f) + ov::float16(999.0f), ov::float16(999.0f), ov::float16(999.0f), ov::float16(999.0f), ov::float16(999.0f), ov::float16(999.0f), + ov::float16(888.0f), ov::float16(888.0f), ov::float16(888.0f), ov::float16(888.0f), ov::float16(888.0f), ov::float16(888.0f) }); topology topology; @@ -1933,68 +1933,68 @@ TEST(scatter_nd_update_gpu_fp16, d6661_i2111) { set_values(input1, { - FLOAT16(100.f), FLOAT16(101.f), FLOAT16(102.f), FLOAT16(103.f), FLOAT16(104.f), FLOAT16(105.f), - FLOAT16(106.f), FLOAT16(107.f), FLOAT16(108.f), FLOAT16(109.f), FLOAT16(110.f), FLOAT16(111.f), - FLOAT16(112.f), FLOAT16(113.f), FLOAT16(114.f), FLOAT16(115.f), FLOAT16(116.f), FLOAT16(117.f), - FLOAT16(118.f), FLOAT16(119.f), FLOAT16(120.f), FLOAT16(121.f), FLOAT16(122.f), FLOAT16(123.f), - FLOAT16(124.f), FLOAT16(125.f), FLOAT16(126.f), FLOAT16(127.f), FLOAT16(128.f), FLOAT16(129.f), - FLOAT16(130.f), FLOAT16(131.f), FLOAT16(132.f), FLOAT16(133.f), FLOAT16(134.f), FLOAT16(135.f), - - FLOAT16(136.f), FLOAT16(137.f), FLOAT16(138.f), FLOAT16(139.f), FLOAT16(140.f), FLOAT16(141.f), - FLOAT16(142.f), FLOAT16(143.f), FLOAT16(144.f), FLOAT16(145.f), FLOAT16(146.f), FLOAT16(147.f), - FLOAT16(148.f), FLOAT16(149.f), FLOAT16(150.f), FLOAT16(151.f), FLOAT16(152.f), FLOAT16(153.f), - FLOAT16(154.f), FLOAT16(155.f), FLOAT16(156.f), FLOAT16(157.f), FLOAT16(158.f), FLOAT16(159.f), - FLOAT16(160.f), FLOAT16(161.f), FLOAT16(162.f), FLOAT16(163.f), FLOAT16(164.f), FLOAT16(165.f), - FLOAT16(166.f), FLOAT16(167.f), FLOAT16(168.f), FLOAT16(169.f), FLOAT16(170.f), FLOAT16(171.f), - - FLOAT16(172.f), FLOAT16(173.f), FLOAT16(174.f), FLOAT16(175.f), FLOAT16(176.f), FLOAT16(177.f), - FLOAT16(178.f), FLOAT16(179.f), FLOAT16(180.f), FLOAT16(181.f), FLOAT16(182.f), FLOAT16(183.f), - FLOAT16(184.f), FLOAT16(185.f), 
FLOAT16(186.f), FLOAT16(187.f), FLOAT16(188.f), FLOAT16(189.f), - FLOAT16(190.f), FLOAT16(191.f), FLOAT16(192.f), FLOAT16(193.f), FLOAT16(194.f), FLOAT16(195.f), - FLOAT16(196.f), FLOAT16(197.f), FLOAT16(198.f), FLOAT16(199.f), FLOAT16(200.f), FLOAT16(201.f), - FLOAT16(202.f), FLOAT16(203.f), FLOAT16(204.f), FLOAT16(205.f), FLOAT16(206.f), FLOAT16(207.f), - - FLOAT16(208.f), FLOAT16(209.f), FLOAT16(210.f), FLOAT16(211.f), FLOAT16(212.f), FLOAT16(213.f), - FLOAT16(214.f), FLOAT16(215.f), FLOAT16(216.f), FLOAT16(217.f), FLOAT16(218.f), FLOAT16(219.f), - FLOAT16(220.f), FLOAT16(221.f), FLOAT16(222.f), FLOAT16(223.f), FLOAT16(224.f), FLOAT16(225.f), - FLOAT16(226.f), FLOAT16(227.f), FLOAT16(228.f), FLOAT16(229.f), FLOAT16(230.f), FLOAT16(231.f), - FLOAT16(232.f), FLOAT16(233.f), FLOAT16(234.f), FLOAT16(235.f), FLOAT16(236.f), FLOAT16(237.f), - FLOAT16(238.f), FLOAT16(239.f), FLOAT16(240.f), FLOAT16(241.f), FLOAT16(242.f), FLOAT16(243.f), - - FLOAT16(244.f), FLOAT16(245.f), FLOAT16(246.f), FLOAT16(247.f), FLOAT16(248.f), FLOAT16(249.f), - FLOAT16(250.f), FLOAT16(251.f), FLOAT16(252.f), FLOAT16(253.f), FLOAT16(254.f), FLOAT16(255.f), - FLOAT16(256.f), FLOAT16(257.f), FLOAT16(258.f), FLOAT16(259.f), FLOAT16(260.f), FLOAT16(261.f), - FLOAT16(262.f), FLOAT16(263.f), FLOAT16(264.f), FLOAT16(265.f), FLOAT16(266.f), FLOAT16(267.f), - FLOAT16(268.f), FLOAT16(269.f), FLOAT16(270.f), FLOAT16(271.f), FLOAT16(272.f), FLOAT16(273.f), - FLOAT16(274.f), FLOAT16(275.f), FLOAT16(276.f), FLOAT16(277.f), FLOAT16(278.f), FLOAT16(279.f), - - FLOAT16(280.f), FLOAT16(281.f), FLOAT16(282.f), FLOAT16(283.f), FLOAT16(284.f), FLOAT16(285.f), - FLOAT16(286.f), FLOAT16(287.f), FLOAT16(288.f), FLOAT16(289.f), FLOAT16(290.f), FLOAT16(291.f), - FLOAT16(292.f), FLOAT16(293.f), FLOAT16(294.f), FLOAT16(295.f), FLOAT16(296.f), FLOAT16(297.f), - FLOAT16(298.f), FLOAT16(299.f), FLOAT16(300.f), FLOAT16(301.f), FLOAT16(302.f), FLOAT16(303.f), - FLOAT16(304.f), FLOAT16(305.f), FLOAT16(306.f), FLOAT16(307.f), FLOAT16(308.f), FLOAT16(309.f), - FLOAT16(310.f), FLOAT16(311.f), FLOAT16(312.f), FLOAT16(313.f), FLOAT16(314.f), FLOAT16(315.f), + ov::float16(100.f), ov::float16(101.f), ov::float16(102.f), ov::float16(103.f), ov::float16(104.f), ov::float16(105.f), + ov::float16(106.f), ov::float16(107.f), ov::float16(108.f), ov::float16(109.f), ov::float16(110.f), ov::float16(111.f), + ov::float16(112.f), ov::float16(113.f), ov::float16(114.f), ov::float16(115.f), ov::float16(116.f), ov::float16(117.f), + ov::float16(118.f), ov::float16(119.f), ov::float16(120.f), ov::float16(121.f), ov::float16(122.f), ov::float16(123.f), + ov::float16(124.f), ov::float16(125.f), ov::float16(126.f), ov::float16(127.f), ov::float16(128.f), ov::float16(129.f), + ov::float16(130.f), ov::float16(131.f), ov::float16(132.f), ov::float16(133.f), ov::float16(134.f), ov::float16(135.f), + + ov::float16(136.f), ov::float16(137.f), ov::float16(138.f), ov::float16(139.f), ov::float16(140.f), ov::float16(141.f), + ov::float16(142.f), ov::float16(143.f), ov::float16(144.f), ov::float16(145.f), ov::float16(146.f), ov::float16(147.f), + ov::float16(148.f), ov::float16(149.f), ov::float16(150.f), ov::float16(151.f), ov::float16(152.f), ov::float16(153.f), + ov::float16(154.f), ov::float16(155.f), ov::float16(156.f), ov::float16(157.f), ov::float16(158.f), ov::float16(159.f), + ov::float16(160.f), ov::float16(161.f), ov::float16(162.f), ov::float16(163.f), ov::float16(164.f), ov::float16(165.f), + ov::float16(166.f), ov::float16(167.f), ov::float16(168.f), ov::float16(169.f), 
ov::float16(170.f), ov::float16(171.f), + + ov::float16(172.f), ov::float16(173.f), ov::float16(174.f), ov::float16(175.f), ov::float16(176.f), ov::float16(177.f), + ov::float16(178.f), ov::float16(179.f), ov::float16(180.f), ov::float16(181.f), ov::float16(182.f), ov::float16(183.f), + ov::float16(184.f), ov::float16(185.f), ov::float16(186.f), ov::float16(187.f), ov::float16(188.f), ov::float16(189.f), + ov::float16(190.f), ov::float16(191.f), ov::float16(192.f), ov::float16(193.f), ov::float16(194.f), ov::float16(195.f), + ov::float16(196.f), ov::float16(197.f), ov::float16(198.f), ov::float16(199.f), ov::float16(200.f), ov::float16(201.f), + ov::float16(202.f), ov::float16(203.f), ov::float16(204.f), ov::float16(205.f), ov::float16(206.f), ov::float16(207.f), + + ov::float16(208.f), ov::float16(209.f), ov::float16(210.f), ov::float16(211.f), ov::float16(212.f), ov::float16(213.f), + ov::float16(214.f), ov::float16(215.f), ov::float16(216.f), ov::float16(217.f), ov::float16(218.f), ov::float16(219.f), + ov::float16(220.f), ov::float16(221.f), ov::float16(222.f), ov::float16(223.f), ov::float16(224.f), ov::float16(225.f), + ov::float16(226.f), ov::float16(227.f), ov::float16(228.f), ov::float16(229.f), ov::float16(230.f), ov::float16(231.f), + ov::float16(232.f), ov::float16(233.f), ov::float16(234.f), ov::float16(235.f), ov::float16(236.f), ov::float16(237.f), + ov::float16(238.f), ov::float16(239.f), ov::float16(240.f), ov::float16(241.f), ov::float16(242.f), ov::float16(243.f), + + ov::float16(244.f), ov::float16(245.f), ov::float16(246.f), ov::float16(247.f), ov::float16(248.f), ov::float16(249.f), + ov::float16(250.f), ov::float16(251.f), ov::float16(252.f), ov::float16(253.f), ov::float16(254.f), ov::float16(255.f), + ov::float16(256.f), ov::float16(257.f), ov::float16(258.f), ov::float16(259.f), ov::float16(260.f), ov::float16(261.f), + ov::float16(262.f), ov::float16(263.f), ov::float16(264.f), ov::float16(265.f), ov::float16(266.f), ov::float16(267.f), + ov::float16(268.f), ov::float16(269.f), ov::float16(270.f), ov::float16(271.f), ov::float16(272.f), ov::float16(273.f), + ov::float16(274.f), ov::float16(275.f), ov::float16(276.f), ov::float16(277.f), ov::float16(278.f), ov::float16(279.f), + + ov::float16(280.f), ov::float16(281.f), ov::float16(282.f), ov::float16(283.f), ov::float16(284.f), ov::float16(285.f), + ov::float16(286.f), ov::float16(287.f), ov::float16(288.f), ov::float16(289.f), ov::float16(290.f), ov::float16(291.f), + ov::float16(292.f), ov::float16(293.f), ov::float16(294.f), ov::float16(295.f), ov::float16(296.f), ov::float16(297.f), + ov::float16(298.f), ov::float16(299.f), ov::float16(300.f), ov::float16(301.f), ov::float16(302.f), ov::float16(303.f), + ov::float16(304.f), ov::float16(305.f), ov::float16(306.f), ov::float16(307.f), ov::float16(308.f), ov::float16(309.f), + ov::float16(310.f), ov::float16(311.f), ov::float16(312.f), ov::float16(313.f), ov::float16(314.f), ov::float16(315.f), }); set_values(input2, { - FLOAT16(0.0f), - FLOAT16(3.0f) + ov::float16(0.0f), + ov::float16(3.0f) }); set_values(input3, { - FLOAT16(777.0f), FLOAT16(999.0f), FLOAT16(999.0f), FLOAT16(999.0f), FLOAT16(999.0f), FLOAT16(999.0f), - FLOAT16(999.0f), FLOAT16(777.0f), FLOAT16(999.0f), FLOAT16(999.0f), FLOAT16(999.0f), FLOAT16(999.0f), - FLOAT16(999.0f), FLOAT16(999.0f), FLOAT16(777.0f), FLOAT16(999.0f), FLOAT16(999.0f), FLOAT16(999.0f), - FLOAT16(999.0f), FLOAT16(999.0f), FLOAT16(999.0f), FLOAT16(777.0f), FLOAT16(999.0f), FLOAT16(999.0f), - FLOAT16(999.0f), FLOAT16(999.0f), 
FLOAT16(999.0f), FLOAT16(999.0f), FLOAT16(777.0f), FLOAT16(999.0f), - FLOAT16(999.0f), FLOAT16(999.0f), FLOAT16(999.0f), FLOAT16(999.0f), FLOAT16(999.0f), FLOAT16(777.0f), - - FLOAT16(666.0f), FLOAT16(888.0f), FLOAT16(888.0f), FLOAT16(888.0f), FLOAT16(888.0f), FLOAT16(888.0f), - FLOAT16(888.0f), FLOAT16(666.0f), FLOAT16(888.0f), FLOAT16(888.0f), FLOAT16(888.0f), FLOAT16(888.0f), - FLOAT16(888.0f), FLOAT16(888.0f), FLOAT16(666.0f), FLOAT16(888.0f), FLOAT16(888.0f), FLOAT16(888.0f), - FLOAT16(888.0f), FLOAT16(888.0f), FLOAT16(888.0f), FLOAT16(666.0f), FLOAT16(888.0f), FLOAT16(888.0f), - FLOAT16(888.0f), FLOAT16(888.0f), FLOAT16(888.0f), FLOAT16(888.0f), FLOAT16(666.0f), FLOAT16(888.0f), - FLOAT16(888.0f), FLOAT16(888.0f), FLOAT16(888.0f), FLOAT16(888.0f), FLOAT16(888.0f), FLOAT16(666.0f), + ov::float16(777.0f), ov::float16(999.0f), ov::float16(999.0f), ov::float16(999.0f), ov::float16(999.0f), ov::float16(999.0f), + ov::float16(999.0f), ov::float16(777.0f), ov::float16(999.0f), ov::float16(999.0f), ov::float16(999.0f), ov::float16(999.0f), + ov::float16(999.0f), ov::float16(999.0f), ov::float16(777.0f), ov::float16(999.0f), ov::float16(999.0f), ov::float16(999.0f), + ov::float16(999.0f), ov::float16(999.0f), ov::float16(999.0f), ov::float16(777.0f), ov::float16(999.0f), ov::float16(999.0f), + ov::float16(999.0f), ov::float16(999.0f), ov::float16(999.0f), ov::float16(999.0f), ov::float16(777.0f), ov::float16(999.0f), + ov::float16(999.0f), ov::float16(999.0f), ov::float16(999.0f), ov::float16(999.0f), ov::float16(999.0f), ov::float16(777.0f), + + ov::float16(666.0f), ov::float16(888.0f), ov::float16(888.0f), ov::float16(888.0f), ov::float16(888.0f), ov::float16(888.0f), + ov::float16(888.0f), ov::float16(666.0f), ov::float16(888.0f), ov::float16(888.0f), ov::float16(888.0f), ov::float16(888.0f), + ov::float16(888.0f), ov::float16(888.0f), ov::float16(666.0f), ov::float16(888.0f), ov::float16(888.0f), ov::float16(888.0f), + ov::float16(888.0f), ov::float16(888.0f), ov::float16(888.0f), ov::float16(666.0f), ov::float16(888.0f), ov::float16(888.0f), + ov::float16(888.0f), ov::float16(888.0f), ov::float16(888.0f), ov::float16(888.0f), ov::float16(666.0f), ov::float16(888.0f), + ov::float16(888.0f), ov::float16(888.0f), ov::float16(888.0f), ov::float16(888.0f), ov::float16(888.0f), ov::float16(666.0f), }); topology topology; @@ -2084,38 +2084,38 @@ TEST(scatter_nd_update_gpu_fp16, d3232_i2411) { set_values(input1, { - FLOAT16(100.f), FLOAT16(101.f), - FLOAT16(102.f), FLOAT16(103.f), - FLOAT16(104.f), FLOAT16(105.f), + ov::float16(100.f), ov::float16(101.f), + ov::float16(102.f), ov::float16(103.f), + ov::float16(104.f), ov::float16(105.f), - FLOAT16(106.f), FLOAT16(107.f), - FLOAT16(108.f), FLOAT16(109.f), - FLOAT16(110.f), FLOAT16(111.f), + ov::float16(106.f), ov::float16(107.f), + ov::float16(108.f), ov::float16(109.f), + ov::float16(110.f), ov::float16(111.f), - FLOAT16(112.f), FLOAT16(113.f), - FLOAT16(114.f), FLOAT16(115.f), - FLOAT16(116.f), FLOAT16(117.f), + ov::float16(112.f), ov::float16(113.f), + ov::float16(114.f), ov::float16(115.f), + ov::float16(116.f), ov::float16(117.f), - FLOAT16(118.f), FLOAT16(119.f), - FLOAT16(120.f), FLOAT16(121.f), - FLOAT16(122.f), FLOAT16(123.f), + ov::float16(118.f), ov::float16(119.f), + ov::float16(120.f), ov::float16(121.f), + ov::float16(122.f), ov::float16(123.f), - FLOAT16(124.f), FLOAT16(125.f), - FLOAT16(126.f), FLOAT16(127.f), - FLOAT16(128.f), FLOAT16(129.f), + ov::float16(124.f), ov::float16(125.f), + ov::float16(126.f), ov::float16(127.f), + 
ov::float16(128.f), ov::float16(129.f), - FLOAT16(130.f), FLOAT16(131.f), - FLOAT16(132.f), FLOAT16(133.f), - FLOAT16(134.f), FLOAT16(135.f) + ov::float16(130.f), ov::float16(131.f), + ov::float16(132.f), ov::float16(133.f), + ov::float16(134.f), ov::float16(135.f) }); set_values(input2, { - FLOAT16(0.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(1.0f), - FLOAT16(2.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(1.0f) + ov::float16(0.0f), ov::float16(1.0f), ov::float16(2.0f), ov::float16(1.0f), + ov::float16(2.0f), ov::float16(1.0f), ov::float16(2.0f), ov::float16(1.0f) }); set_values(input3, { - FLOAT16(777.0f), FLOAT16(999.0f) + ov::float16(777.0f), ov::float16(999.0f) }); topology topology; @@ -2187,38 +2187,38 @@ TEST(scatter_nd_update_gpu_fp16, d3232_i2311) { set_values(input1, { - FLOAT16(100.f), FLOAT16(101.f), - FLOAT16(102.f), FLOAT16(103.f), - FLOAT16(104.f), FLOAT16(105.f), + ov::float16(100.f), ov::float16(101.f), + ov::float16(102.f), ov::float16(103.f), + ov::float16(104.f), ov::float16(105.f), - FLOAT16(106.f), FLOAT16(107.f), - FLOAT16(108.f), FLOAT16(109.f), - FLOAT16(110.f), FLOAT16(111.f), + ov::float16(106.f), ov::float16(107.f), + ov::float16(108.f), ov::float16(109.f), + ov::float16(110.f), ov::float16(111.f), - FLOAT16(112.f), FLOAT16(113.f), - FLOAT16(114.f), FLOAT16(115.f), - FLOAT16(116.f), FLOAT16(117.f), + ov::float16(112.f), ov::float16(113.f), + ov::float16(114.f), ov::float16(115.f), + ov::float16(116.f), ov::float16(117.f), - FLOAT16(118.f), FLOAT16(119.f), - FLOAT16(120.f), FLOAT16(121.f), - FLOAT16(122.f), FLOAT16(123.f), + ov::float16(118.f), ov::float16(119.f), + ov::float16(120.f), ov::float16(121.f), + ov::float16(122.f), ov::float16(123.f), - FLOAT16(124.f), FLOAT16(125.f), - FLOAT16(126.f), FLOAT16(127.f), - FLOAT16(128.f), FLOAT16(129.f), + ov::float16(124.f), ov::float16(125.f), + ov::float16(126.f), ov::float16(127.f), + ov::float16(128.f), ov::float16(129.f), - FLOAT16(130.f), FLOAT16(131.f), - FLOAT16(132.f), FLOAT16(133.f), - FLOAT16(134.f), FLOAT16(135.f) + ov::float16(130.f), ov::float16(131.f), + ov::float16(132.f), ov::float16(133.f), + ov::float16(134.f), ov::float16(135.f) }); set_values(input2, { - FLOAT16(0.0f), FLOAT16(1.0f), FLOAT16(2.0f), - FLOAT16(2.0f), FLOAT16(1.0f), FLOAT16(2.0f) + ov::float16(0.0f), ov::float16(1.0f), ov::float16(2.0f), + ov::float16(2.0f), ov::float16(1.0f), ov::float16(2.0f) }); set_values(input3, { - FLOAT16(777.0f), FLOAT16(777.0f), FLOAT16(999.0f), FLOAT16(999.0f) + ov::float16(777.0f), ov::float16(777.0f), ov::float16(999.0f), ov::float16(999.0f) }); topology topology; @@ -2290,44 +2290,44 @@ TEST(scatter_nd_update_gpu_fp16, d3232_i2211) { set_values(input1, { - FLOAT16(100.f), FLOAT16(101.f), - FLOAT16(102.f), FLOAT16(103.f), - FLOAT16(104.f), FLOAT16(105.f), + ov::float16(100.f), ov::float16(101.f), + ov::float16(102.f), ov::float16(103.f), + ov::float16(104.f), ov::float16(105.f), - FLOAT16(106.f), FLOAT16(107.f), - FLOAT16(108.f), FLOAT16(109.f), - FLOAT16(110.f), FLOAT16(111.f), + ov::float16(106.f), ov::float16(107.f), + ov::float16(108.f), ov::float16(109.f), + ov::float16(110.f), ov::float16(111.f), - FLOAT16(112.f), FLOAT16(113.f), - FLOAT16(114.f), FLOAT16(115.f), - FLOAT16(116.f), FLOAT16(117.f), + ov::float16(112.f), ov::float16(113.f), + ov::float16(114.f), ov::float16(115.f), + ov::float16(116.f), ov::float16(117.f), - FLOAT16(118.f), FLOAT16(119.f), - FLOAT16(120.f), FLOAT16(121.f), - FLOAT16(122.f), FLOAT16(123.f), + ov::float16(118.f), ov::float16(119.f), + ov::float16(120.f), ov::float16(121.f), 
+ ov::float16(122.f), ov::float16(123.f), - FLOAT16(124.f), FLOAT16(125.f), - FLOAT16(126.f), FLOAT16(127.f), - FLOAT16(128.f), FLOAT16(129.f), + ov::float16(124.f), ov::float16(125.f), + ov::float16(126.f), ov::float16(127.f), + ov::float16(128.f), ov::float16(129.f), - FLOAT16(130.f), FLOAT16(131.f), - FLOAT16(132.f), FLOAT16(133.f), - FLOAT16(134.f), FLOAT16(135.f) + ov::float16(130.f), ov::float16(131.f), + ov::float16(132.f), ov::float16(133.f), + ov::float16(134.f), ov::float16(135.f) }); set_values(input2, { - FLOAT16(0.0f), FLOAT16(1.0f), - FLOAT16(2.0f), FLOAT16(1.0f) + ov::float16(0.0f), ov::float16(1.0f), + ov::float16(2.0f), ov::float16(1.0f) }); set_values(input3, { - FLOAT16(777.0f), FLOAT16(777.0f), - FLOAT16(777.0f), FLOAT16(777.0f), - FLOAT16(777.0f), FLOAT16(777.0f), + ov::float16(777.0f), ov::float16(777.0f), + ov::float16(777.0f), ov::float16(777.0f), + ov::float16(777.0f), ov::float16(777.0f), - FLOAT16(999.0f), FLOAT16(999.0f), - FLOAT16(999.0f), FLOAT16(999.0f), - FLOAT16(999.0f), FLOAT16(999.0f) + ov::float16(999.0f), ov::float16(999.0f), + ov::float16(999.0f), ov::float16(999.0f), + ov::float16(999.0f), ov::float16(999.0f) }); topology topology; @@ -2399,52 +2399,52 @@ TEST(scatter_nd_update_gpu_fp16, d3232_i2111) { set_values(input1, { - FLOAT16(100.f), FLOAT16(101.f), - FLOAT16(102.f), FLOAT16(103.f), - FLOAT16(104.f), FLOAT16(105.f), + ov::float16(100.f), ov::float16(101.f), + ov::float16(102.f), ov::float16(103.f), + ov::float16(104.f), ov::float16(105.f), - FLOAT16(106.f), FLOAT16(107.f), - FLOAT16(108.f), FLOAT16(109.f), - FLOAT16(110.f), FLOAT16(111.f), + ov::float16(106.f), ov::float16(107.f), + ov::float16(108.f), ov::float16(109.f), + ov::float16(110.f), ov::float16(111.f), - FLOAT16(112.f), FLOAT16(113.f), - FLOAT16(114.f), FLOAT16(115.f), - FLOAT16(116.f), FLOAT16(117.f), + ov::float16(112.f), ov::float16(113.f), + ov::float16(114.f), ov::float16(115.f), + ov::float16(116.f), ov::float16(117.f), - FLOAT16(118.f), FLOAT16(119.f), - FLOAT16(120.f), FLOAT16(121.f), - FLOAT16(122.f), FLOAT16(123.f), + ov::float16(118.f), ov::float16(119.f), + ov::float16(120.f), ov::float16(121.f), + ov::float16(122.f), ov::float16(123.f), - FLOAT16(124.f), FLOAT16(125.f), - FLOAT16(126.f), FLOAT16(127.f), - FLOAT16(128.f), FLOAT16(129.f), + ov::float16(124.f), ov::float16(125.f), + ov::float16(126.f), ov::float16(127.f), + ov::float16(128.f), ov::float16(129.f), - FLOAT16(130.f), FLOAT16(131.f), - FLOAT16(132.f), FLOAT16(133.f), - FLOAT16(134.f), FLOAT16(135.f) + ov::float16(130.f), ov::float16(131.f), + ov::float16(132.f), ov::float16(133.f), + ov::float16(134.f), ov::float16(135.f) }); set_values(input2, { - FLOAT16(0.0f), - FLOAT16(2.0f) + ov::float16(0.0f), + ov::float16(2.0f) }); set_values(input3, { - FLOAT16(666.0f), FLOAT16(666.0f), - FLOAT16(666.0f), FLOAT16(666.0f), - FLOAT16(666.0f), FLOAT16(666.0f), + ov::float16(666.0f), ov::float16(666.0f), + ov::float16(666.0f), ov::float16(666.0f), + ov::float16(666.0f), ov::float16(666.0f), - FLOAT16(777.0f), FLOAT16(777.0f), - FLOAT16(777.0f), FLOAT16(777.0f), - FLOAT16(777.0f), FLOAT16(777.0f), + ov::float16(777.0f), ov::float16(777.0f), + ov::float16(777.0f), ov::float16(777.0f), + ov::float16(777.0f), ov::float16(777.0f), - FLOAT16(888.0f), FLOAT16(888.0f), - FLOAT16(888.0f), FLOAT16(888.0f), - FLOAT16(888.0f), FLOAT16(888.0f), + ov::float16(888.0f), ov::float16(888.0f), + ov::float16(888.0f), ov::float16(888.0f), + ov::float16(888.0f), ov::float16(888.0f), - FLOAT16(999.0f), FLOAT16(999.0f), - FLOAT16(999.0f), 
FLOAT16(999.0f), - FLOAT16(999.0f), FLOAT16(999.0f) + ov::float16(999.0f), ov::float16(999.0f), + ov::float16(999.0f), ov::float16(999.0f), + ov::float16(999.0f), ov::float16(999.0f) }); topology topology; @@ -2515,70 +2515,70 @@ TEST(scatter_nd_update_gpu_fp16, d32323_i25111) { set_values(input1, { - FLOAT16(100.f), FLOAT16(101.f), FLOAT16(102.f), - FLOAT16(103.f), FLOAT16(104.f), FLOAT16(105.f), + ov::float16(100.f), ov::float16(101.f), ov::float16(102.f), + ov::float16(103.f), ov::float16(104.f), ov::float16(105.f), - FLOAT16(106.f), FLOAT16(107.f), FLOAT16(108.f), - FLOAT16(109.f), FLOAT16(110.f), FLOAT16(111.f), + ov::float16(106.f), ov::float16(107.f), ov::float16(108.f), + ov::float16(109.f), ov::float16(110.f), ov::float16(111.f), - FLOAT16(112.f), FLOAT16(113.f), FLOAT16(114.f), - FLOAT16(115.f), FLOAT16(116.f), FLOAT16(117.f), + ov::float16(112.f), ov::float16(113.f), ov::float16(114.f), + ov::float16(115.f), ov::float16(116.f), ov::float16(117.f), - FLOAT16(118.f), FLOAT16(119.f), FLOAT16(120.f), - FLOAT16(121.f), FLOAT16(122.f), FLOAT16(123.f), + ov::float16(118.f), ov::float16(119.f), ov::float16(120.f), + ov::float16(121.f), ov::float16(122.f), ov::float16(123.f), - FLOAT16(124.f), FLOAT16(125.f), FLOAT16(126.f), - FLOAT16(127.f), FLOAT16(128.f), FLOAT16(129.f), + ov::float16(124.f), ov::float16(125.f), ov::float16(126.f), + ov::float16(127.f), ov::float16(128.f), ov::float16(129.f), - FLOAT16(130.f), FLOAT16(131.f), FLOAT16(132.f), - FLOAT16(133.f), FLOAT16(134.f), FLOAT16(135.f), + ov::float16(130.f), ov::float16(131.f), ov::float16(132.f), + ov::float16(133.f), ov::float16(134.f), ov::float16(135.f), // 2 - FLOAT16(100.f), FLOAT16(101.f), FLOAT16(102.f), - FLOAT16(103.f), FLOAT16(104.f), FLOAT16(105.f), + ov::float16(100.f), ov::float16(101.f), ov::float16(102.f), + ov::float16(103.f), ov::float16(104.f), ov::float16(105.f), - FLOAT16(106.f), FLOAT16(107.f), FLOAT16(108.f), - FLOAT16(109.f), FLOAT16(110.f), FLOAT16(111.f), + ov::float16(106.f), ov::float16(107.f), ov::float16(108.f), + ov::float16(109.f), ov::float16(110.f), ov::float16(111.f), - FLOAT16(112.f), FLOAT16(113.f), FLOAT16(114.f), - FLOAT16(115.f), FLOAT16(116.f), FLOAT16(117.f), + ov::float16(112.f), ov::float16(113.f), ov::float16(114.f), + ov::float16(115.f), ov::float16(116.f), ov::float16(117.f), - FLOAT16(118.f), FLOAT16(119.f), FLOAT16(120.f), - FLOAT16(121.f), FLOAT16(122.f), FLOAT16(123.f), + ov::float16(118.f), ov::float16(119.f), ov::float16(120.f), + ov::float16(121.f), ov::float16(122.f), ov::float16(123.f), - FLOAT16(124.f), FLOAT16(125.f), FLOAT16(126.f), - FLOAT16(127.f), FLOAT16(128.f), FLOAT16(129.f), + ov::float16(124.f), ov::float16(125.f), ov::float16(126.f), + ov::float16(127.f), ov::float16(128.f), ov::float16(129.f), - FLOAT16(130.f), FLOAT16(131.f), FLOAT16(132.f), - FLOAT16(133.f), FLOAT16(134.f), FLOAT16(135.f), + ov::float16(130.f), ov::float16(131.f), ov::float16(132.f), + ov::float16(133.f), ov::float16(134.f), ov::float16(135.f), // 3 - FLOAT16(100.f), FLOAT16(101.f), FLOAT16(102.f), - FLOAT16(103.f), FLOAT16(104.f), FLOAT16(105.f), + ov::float16(100.f), ov::float16(101.f), ov::float16(102.f), + ov::float16(103.f), ov::float16(104.f), ov::float16(105.f), - FLOAT16(106.f), FLOAT16(107.f), FLOAT16(108.f), - FLOAT16(109.f), FLOAT16(110.f), FLOAT16(111.f), + ov::float16(106.f), ov::float16(107.f), ov::float16(108.f), + ov::float16(109.f), ov::float16(110.f), ov::float16(111.f), - FLOAT16(112.f), FLOAT16(113.f), FLOAT16(114.f), - FLOAT16(115.f), FLOAT16(116.f), FLOAT16(117.f), + 
ov::float16(112.f), ov::float16(113.f), ov::float16(114.f), + ov::float16(115.f), ov::float16(116.f), ov::float16(117.f), - FLOAT16(118.f), FLOAT16(119.f), FLOAT16(120.f), - FLOAT16(121.f), FLOAT16(122.f), FLOAT16(123.f), + ov::float16(118.f), ov::float16(119.f), ov::float16(120.f), + ov::float16(121.f), ov::float16(122.f), ov::float16(123.f), - FLOAT16(124.f), FLOAT16(125.f), FLOAT16(126.f), - FLOAT16(127.f), FLOAT16(128.f), FLOAT16(129.f), + ov::float16(124.f), ov::float16(125.f), ov::float16(126.f), + ov::float16(127.f), ov::float16(128.f), ov::float16(129.f), - FLOAT16(130.f), FLOAT16(131.f), FLOAT16(132.f), - FLOAT16(133.f), FLOAT16(134.f), FLOAT16(135.f) + ov::float16(130.f), ov::float16(131.f), ov::float16(132.f), + ov::float16(133.f), ov::float16(134.f), ov::float16(135.f) }); set_values(input2, { - FLOAT16(0.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(2.0f), - FLOAT16(2.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f) + ov::float16(0.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(2.0f), + ov::float16(2.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f) }); set_values(input3, { - FLOAT16(777.0f), FLOAT16(999.0f) + ov::float16(777.0f), ov::float16(999.0f) }); topology topology; @@ -2682,71 +2682,71 @@ TEST(scatter_nd_update_gpu_fp16, d32323_i24111) { set_values(input1, { - FLOAT16(100.f), FLOAT16(101.f), FLOAT16(102.f), - FLOAT16(103.f), FLOAT16(104.f), FLOAT16(105.f), + ov::float16(100.f), ov::float16(101.f), ov::float16(102.f), + ov::float16(103.f), ov::float16(104.f), ov::float16(105.f), - FLOAT16(106.f), FLOAT16(107.f), FLOAT16(108.f), - FLOAT16(109.f), FLOAT16(110.f), FLOAT16(111.f), + ov::float16(106.f), ov::float16(107.f), ov::float16(108.f), + ov::float16(109.f), ov::float16(110.f), ov::float16(111.f), - FLOAT16(112.f), FLOAT16(113.f), FLOAT16(114.f), - FLOAT16(115.f), FLOAT16(116.f), FLOAT16(117.f), + ov::float16(112.f), ov::float16(113.f), ov::float16(114.f), + ov::float16(115.f), ov::float16(116.f), ov::float16(117.f), - FLOAT16(118.f), FLOAT16(119.f), FLOAT16(120.f), - FLOAT16(121.f), FLOAT16(122.f), FLOAT16(123.f), + ov::float16(118.f), ov::float16(119.f), ov::float16(120.f), + ov::float16(121.f), ov::float16(122.f), ov::float16(123.f), - FLOAT16(124.f), FLOAT16(125.f), FLOAT16(126.f), - FLOAT16(127.f), FLOAT16(128.f), FLOAT16(129.f), + ov::float16(124.f), ov::float16(125.f), ov::float16(126.f), + ov::float16(127.f), ov::float16(128.f), ov::float16(129.f), - FLOAT16(130.f), FLOAT16(131.f), FLOAT16(132.f), - FLOAT16(133.f), FLOAT16(134.f), FLOAT16(135.f), + ov::float16(130.f), ov::float16(131.f), ov::float16(132.f), + ov::float16(133.f), ov::float16(134.f), ov::float16(135.f), // 2 - FLOAT16(100.f), FLOAT16(101.f), FLOAT16(102.f), - FLOAT16(103.f), FLOAT16(104.f), FLOAT16(105.f), + ov::float16(100.f), ov::float16(101.f), ov::float16(102.f), + ov::float16(103.f), ov::float16(104.f), ov::float16(105.f), - FLOAT16(106.f), FLOAT16(107.f), FLOAT16(108.f), - FLOAT16(109.f), FLOAT16(110.f), FLOAT16(111.f), + ov::float16(106.f), ov::float16(107.f), ov::float16(108.f), + ov::float16(109.f), ov::float16(110.f), ov::float16(111.f), - FLOAT16(112.f), FLOAT16(113.f), FLOAT16(114.f), - FLOAT16(115.f), FLOAT16(116.f), FLOAT16(117.f), + ov::float16(112.f), ov::float16(113.f), ov::float16(114.f), + ov::float16(115.f), ov::float16(116.f), ov::float16(117.f), - FLOAT16(118.f), FLOAT16(119.f), FLOAT16(120.f), - FLOAT16(121.f), FLOAT16(122.f), FLOAT16(123.f), + ov::float16(118.f), ov::float16(119.f), 
ov::float16(120.f), + ov::float16(121.f), ov::float16(122.f), ov::float16(123.f), - FLOAT16(124.f), FLOAT16(125.f), FLOAT16(126.f), - FLOAT16(127.f), FLOAT16(128.f), FLOAT16(129.f), + ov::float16(124.f), ov::float16(125.f), ov::float16(126.f), + ov::float16(127.f), ov::float16(128.f), ov::float16(129.f), - FLOAT16(130.f), FLOAT16(131.f), FLOAT16(132.f), - FLOAT16(133.f), FLOAT16(134.f), FLOAT16(135.f), + ov::float16(130.f), ov::float16(131.f), ov::float16(132.f), + ov::float16(133.f), ov::float16(134.f), ov::float16(135.f), // 3 - FLOAT16(100.f), FLOAT16(101.f), FLOAT16(102.f), - FLOAT16(103.f), FLOAT16(104.f), FLOAT16(105.f), + ov::float16(100.f), ov::float16(101.f), ov::float16(102.f), + ov::float16(103.f), ov::float16(104.f), ov::float16(105.f), - FLOAT16(106.f), FLOAT16(107.f), FLOAT16(108.f), - FLOAT16(109.f), FLOAT16(110.f), FLOAT16(111.f), + ov::float16(106.f), ov::float16(107.f), ov::float16(108.f), + ov::float16(109.f), ov::float16(110.f), ov::float16(111.f), - FLOAT16(112.f), FLOAT16(113.f), FLOAT16(114.f), - FLOAT16(115.f), FLOAT16(116.f), FLOAT16(117.f), + ov::float16(112.f), ov::float16(113.f), ov::float16(114.f), + ov::float16(115.f), ov::float16(116.f), ov::float16(117.f), - FLOAT16(118.f), FLOAT16(119.f), FLOAT16(120.f), - FLOAT16(121.f), FLOAT16(122.f), FLOAT16(123.f), + ov::float16(118.f), ov::float16(119.f), ov::float16(120.f), + ov::float16(121.f), ov::float16(122.f), ov::float16(123.f), - FLOAT16(124.f), FLOAT16(125.f), FLOAT16(126.f), - FLOAT16(127.f), FLOAT16(128.f), FLOAT16(129.f), + ov::float16(124.f), ov::float16(125.f), ov::float16(126.f), + ov::float16(127.f), ov::float16(128.f), ov::float16(129.f), - FLOAT16(130.f), FLOAT16(131.f), FLOAT16(132.f), - FLOAT16(133.f), FLOAT16(134.f), FLOAT16(135.f) + ov::float16(130.f), ov::float16(131.f), ov::float16(132.f), + ov::float16(133.f), ov::float16(134.f), ov::float16(135.f) }); set_values(input2, { - FLOAT16(0.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), - FLOAT16(2.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f) + ov::float16(0.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), + ov::float16(2.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f) }); set_values(input3, { - FLOAT16(777.0f), FLOAT16(777.0f), FLOAT16(777.0f), - FLOAT16(999.0f), FLOAT16(999.0f), FLOAT16(999.0f) + ov::float16(777.0f), ov::float16(777.0f), ov::float16(777.0f), + ov::float16(999.0f), ov::float16(999.0f), ov::float16(999.0f) }); topology topology; @@ -2850,74 +2850,74 @@ TEST(scatter_nd_update_gpu_fp16, d32323_i23111) { set_values(input1, { - FLOAT16(100.f), FLOAT16(101.f), FLOAT16(102.f), - FLOAT16(103.f), FLOAT16(104.f), FLOAT16(105.f), + ov::float16(100.f), ov::float16(101.f), ov::float16(102.f), + ov::float16(103.f), ov::float16(104.f), ov::float16(105.f), - FLOAT16(106.f), FLOAT16(107.f), FLOAT16(108.f), - FLOAT16(109.f), FLOAT16(110.f), FLOAT16(111.f), + ov::float16(106.f), ov::float16(107.f), ov::float16(108.f), + ov::float16(109.f), ov::float16(110.f), ov::float16(111.f), - FLOAT16(112.f), FLOAT16(113.f), FLOAT16(114.f), - FLOAT16(115.f), FLOAT16(116.f), FLOAT16(117.f), + ov::float16(112.f), ov::float16(113.f), ov::float16(114.f), + ov::float16(115.f), ov::float16(116.f), ov::float16(117.f), - FLOAT16(118.f), FLOAT16(119.f), FLOAT16(120.f), - FLOAT16(121.f), FLOAT16(122.f), FLOAT16(123.f), + ov::float16(118.f), ov::float16(119.f), ov::float16(120.f), + ov::float16(121.f), ov::float16(122.f), ov::float16(123.f), - FLOAT16(124.f), FLOAT16(125.f), FLOAT16(126.f), - FLOAT16(127.f), FLOAT16(128.f), FLOAT16(129.f), + 
ov::float16(124.f), ov::float16(125.f), ov::float16(126.f), + ov::float16(127.f), ov::float16(128.f), ov::float16(129.f), - FLOAT16(130.f), FLOAT16(131.f), FLOAT16(132.f), - FLOAT16(133.f), FLOAT16(134.f), FLOAT16(135.f), + ov::float16(130.f), ov::float16(131.f), ov::float16(132.f), + ov::float16(133.f), ov::float16(134.f), ov::float16(135.f), // 2 - FLOAT16(100.f), FLOAT16(101.f), FLOAT16(102.f), - FLOAT16(103.f), FLOAT16(104.f), FLOAT16(105.f), + ov::float16(100.f), ov::float16(101.f), ov::float16(102.f), + ov::float16(103.f), ov::float16(104.f), ov::float16(105.f), - FLOAT16(106.f), FLOAT16(107.f), FLOAT16(108.f), - FLOAT16(109.f), FLOAT16(110.f), FLOAT16(111.f), + ov::float16(106.f), ov::float16(107.f), ov::float16(108.f), + ov::float16(109.f), ov::float16(110.f), ov::float16(111.f), - FLOAT16(112.f), FLOAT16(113.f), FLOAT16(114.f), - FLOAT16(115.f), FLOAT16(116.f), FLOAT16(117.f), + ov::float16(112.f), ov::float16(113.f), ov::float16(114.f), + ov::float16(115.f), ov::float16(116.f), ov::float16(117.f), - FLOAT16(118.f), FLOAT16(119.f), FLOAT16(120.f), - FLOAT16(121.f), FLOAT16(122.f), FLOAT16(123.f), + ov::float16(118.f), ov::float16(119.f), ov::float16(120.f), + ov::float16(121.f), ov::float16(122.f), ov::float16(123.f), - FLOAT16(124.f), FLOAT16(125.f), FLOAT16(126.f), - FLOAT16(127.f), FLOAT16(128.f), FLOAT16(129.f), + ov::float16(124.f), ov::float16(125.f), ov::float16(126.f), + ov::float16(127.f), ov::float16(128.f), ov::float16(129.f), - FLOAT16(130.f), FLOAT16(131.f), FLOAT16(132.f), - FLOAT16(133.f), FLOAT16(134.f), FLOAT16(135.f), + ov::float16(130.f), ov::float16(131.f), ov::float16(132.f), + ov::float16(133.f), ov::float16(134.f), ov::float16(135.f), // 3 - FLOAT16(100.f), FLOAT16(101.f), FLOAT16(102.f), - FLOAT16(103.f), FLOAT16(104.f), FLOAT16(105.f), + ov::float16(100.f), ov::float16(101.f), ov::float16(102.f), + ov::float16(103.f), ov::float16(104.f), ov::float16(105.f), - FLOAT16(106.f), FLOAT16(107.f), FLOAT16(108.f), - FLOAT16(109.f), FLOAT16(110.f), FLOAT16(111.f), + ov::float16(106.f), ov::float16(107.f), ov::float16(108.f), + ov::float16(109.f), ov::float16(110.f), ov::float16(111.f), - FLOAT16(112.f), FLOAT16(113.f), FLOAT16(114.f), - FLOAT16(115.f), FLOAT16(116.f), FLOAT16(117.f), + ov::float16(112.f), ov::float16(113.f), ov::float16(114.f), + ov::float16(115.f), ov::float16(116.f), ov::float16(117.f), - FLOAT16(118.f), FLOAT16(119.f), FLOAT16(120.f), - FLOAT16(121.f), FLOAT16(122.f), FLOAT16(123.f), + ov::float16(118.f), ov::float16(119.f), ov::float16(120.f), + ov::float16(121.f), ov::float16(122.f), ov::float16(123.f), - FLOAT16(124.f), FLOAT16(125.f), FLOAT16(126.f), - FLOAT16(127.f), FLOAT16(128.f), FLOAT16(129.f), + ov::float16(124.f), ov::float16(125.f), ov::float16(126.f), + ov::float16(127.f), ov::float16(128.f), ov::float16(129.f), - FLOAT16(130.f), FLOAT16(131.f), FLOAT16(132.f), - FLOAT16(133.f), FLOAT16(134.f), FLOAT16(135.f) + ov::float16(130.f), ov::float16(131.f), ov::float16(132.f), + ov::float16(133.f), ov::float16(134.f), ov::float16(135.f) }); set_values(input2, { - FLOAT16(0.0f), FLOAT16(1.0f), FLOAT16(1.0f), - FLOAT16(2.0f), FLOAT16(1.0f), FLOAT16(1.0f) + ov::float16(0.0f), ov::float16(1.0f), ov::float16(1.0f), + ov::float16(2.0f), ov::float16(1.0f), ov::float16(1.0f) }); set_values(input3, { - FLOAT16(777.0f), FLOAT16(777.0f), FLOAT16(777.0f), - FLOAT16(777.0f), FLOAT16(777.0f), FLOAT16(777.0f), + ov::float16(777.0f), ov::float16(777.0f), ov::float16(777.0f), + ov::float16(777.0f), ov::float16(777.0f), ov::float16(777.0f), - 
FLOAT16(999.0f), FLOAT16(999.0f), FLOAT16(999.0f), - FLOAT16(999.0f), FLOAT16(999.0f), FLOAT16(999.0f) + ov::float16(999.0f), ov::float16(999.0f), ov::float16(999.0f), + ov::float16(999.0f), ov::float16(999.0f), ov::float16(999.0f) }); topology topology; @@ -3021,86 +3021,86 @@ TEST(scatter_nd_update_gpu_fp16, d32323_i22111) { set_values(input1, { - FLOAT16(100.f), FLOAT16(101.f), FLOAT16(102.f), - FLOAT16(103.f), FLOAT16(104.f), FLOAT16(105.f), + ov::float16(100.f), ov::float16(101.f), ov::float16(102.f), + ov::float16(103.f), ov::float16(104.f), ov::float16(105.f), - FLOAT16(106.f), FLOAT16(107.f), FLOAT16(108.f), - FLOAT16(109.f), FLOAT16(110.f), FLOAT16(111.f), + ov::float16(106.f), ov::float16(107.f), ov::float16(108.f), + ov::float16(109.f), ov::float16(110.f), ov::float16(111.f), - FLOAT16(112.f), FLOAT16(113.f), FLOAT16(114.f), - FLOAT16(115.f), FLOAT16(116.f), FLOAT16(117.f), + ov::float16(112.f), ov::float16(113.f), ov::float16(114.f), + ov::float16(115.f), ov::float16(116.f), ov::float16(117.f), - FLOAT16(118.f), FLOAT16(119.f), FLOAT16(120.f), - FLOAT16(121.f), FLOAT16(122.f), FLOAT16(123.f), + ov::float16(118.f), ov::float16(119.f), ov::float16(120.f), + ov::float16(121.f), ov::float16(122.f), ov::float16(123.f), - FLOAT16(124.f), FLOAT16(125.f), FLOAT16(126.f), - FLOAT16(127.f), FLOAT16(128.f), FLOAT16(129.f), + ov::float16(124.f), ov::float16(125.f), ov::float16(126.f), + ov::float16(127.f), ov::float16(128.f), ov::float16(129.f), - FLOAT16(130.f), FLOAT16(131.f), FLOAT16(132.f), - FLOAT16(133.f), FLOAT16(134.f), FLOAT16(135.f), + ov::float16(130.f), ov::float16(131.f), ov::float16(132.f), + ov::float16(133.f), ov::float16(134.f), ov::float16(135.f), // 2 - FLOAT16(100.f), FLOAT16(101.f), FLOAT16(102.f), - FLOAT16(103.f), FLOAT16(104.f), FLOAT16(105.f), + ov::float16(100.f), ov::float16(101.f), ov::float16(102.f), + ov::float16(103.f), ov::float16(104.f), ov::float16(105.f), - FLOAT16(106.f), FLOAT16(107.f), FLOAT16(108.f), - FLOAT16(109.f), FLOAT16(110.f), FLOAT16(111.f), + ov::float16(106.f), ov::float16(107.f), ov::float16(108.f), + ov::float16(109.f), ov::float16(110.f), ov::float16(111.f), - FLOAT16(112.f), FLOAT16(113.f), FLOAT16(114.f), - FLOAT16(115.f), FLOAT16(116.f), FLOAT16(117.f), + ov::float16(112.f), ov::float16(113.f), ov::float16(114.f), + ov::float16(115.f), ov::float16(116.f), ov::float16(117.f), - FLOAT16(118.f), FLOAT16(119.f), FLOAT16(120.f), - FLOAT16(121.f), FLOAT16(122.f), FLOAT16(123.f), + ov::float16(118.f), ov::float16(119.f), ov::float16(120.f), + ov::float16(121.f), ov::float16(122.f), ov::float16(123.f), - FLOAT16(124.f), FLOAT16(125.f), FLOAT16(126.f), - FLOAT16(127.f), FLOAT16(128.f), FLOAT16(129.f), + ov::float16(124.f), ov::float16(125.f), ov::float16(126.f), + ov::float16(127.f), ov::float16(128.f), ov::float16(129.f), - FLOAT16(130.f), FLOAT16(131.f), FLOAT16(132.f), - FLOAT16(133.f), FLOAT16(134.f), FLOAT16(135.f), + ov::float16(130.f), ov::float16(131.f), ov::float16(132.f), + ov::float16(133.f), ov::float16(134.f), ov::float16(135.f), // 3 - FLOAT16(100.f), FLOAT16(101.f), FLOAT16(102.f), - FLOAT16(103.f), FLOAT16(104.f), FLOAT16(105.f), + ov::float16(100.f), ov::float16(101.f), ov::float16(102.f), + ov::float16(103.f), ov::float16(104.f), ov::float16(105.f), - FLOAT16(106.f), FLOAT16(107.f), FLOAT16(108.f), - FLOAT16(109.f), FLOAT16(110.f), FLOAT16(111.f), + ov::float16(106.f), ov::float16(107.f), ov::float16(108.f), + ov::float16(109.f), ov::float16(110.f), ov::float16(111.f), - FLOAT16(112.f), FLOAT16(113.f), FLOAT16(114.f), - 
FLOAT16(115.f), FLOAT16(116.f), FLOAT16(117.f), + ov::float16(112.f), ov::float16(113.f), ov::float16(114.f), + ov::float16(115.f), ov::float16(116.f), ov::float16(117.f), - FLOAT16(118.f), FLOAT16(119.f), FLOAT16(120.f), - FLOAT16(121.f), FLOAT16(122.f), FLOAT16(123.f), + ov::float16(118.f), ov::float16(119.f), ov::float16(120.f), + ov::float16(121.f), ov::float16(122.f), ov::float16(123.f), - FLOAT16(124.f), FLOAT16(125.f), FLOAT16(126.f), - FLOAT16(127.f), FLOAT16(128.f), FLOAT16(129.f), + ov::float16(124.f), ov::float16(125.f), ov::float16(126.f), + ov::float16(127.f), ov::float16(128.f), ov::float16(129.f), - FLOAT16(130.f), FLOAT16(131.f), FLOAT16(132.f), - FLOAT16(133.f), FLOAT16(134.f), FLOAT16(135.f) + ov::float16(130.f), ov::float16(131.f), ov::float16(132.f), + ov::float16(133.f), ov::float16(134.f), ov::float16(135.f) }); set_values(input2, { - FLOAT16(0.0f), FLOAT16(1.0f), - FLOAT16(2.0f), FLOAT16(1.0f) + ov::float16(0.0f), ov::float16(1.0f), + ov::float16(2.0f), ov::float16(1.0f) }); set_values(input3, { - FLOAT16(555.0f), FLOAT16(555.0f), FLOAT16(555.0f), - FLOAT16(555.0f), FLOAT16(555.0f), FLOAT16(555.0f), + ov::float16(555.0f), ov::float16(555.0f), ov::float16(555.0f), + ov::float16(555.0f), ov::float16(555.0f), ov::float16(555.0f), - FLOAT16(666.0f), FLOAT16(666.0f), FLOAT16(666.0f), - FLOAT16(666.0f), FLOAT16(666.0f), FLOAT16(666.0f), + ov::float16(666.0f), ov::float16(666.0f), ov::float16(666.0f), + ov::float16(666.0f), ov::float16(666.0f), ov::float16(666.0f), - FLOAT16(444.0f), FLOAT16(444.0f), FLOAT16(444.0f), - FLOAT16(444.0f), FLOAT16(444.0f), FLOAT16(444.0f), + ov::float16(444.0f), ov::float16(444.0f), ov::float16(444.0f), + ov::float16(444.0f), ov::float16(444.0f), ov::float16(444.0f), - FLOAT16(777.0f), FLOAT16(777.0f), FLOAT16(777.0f), - FLOAT16(777.0f), FLOAT16(777.0f), FLOAT16(777.0f), + ov::float16(777.0f), ov::float16(777.0f), ov::float16(777.0f), + ov::float16(777.0f), ov::float16(777.0f), ov::float16(777.0f), - FLOAT16(888.0f), FLOAT16(888.0f), FLOAT16(888.0f), - FLOAT16(888.0f), FLOAT16(888.0f), FLOAT16(888.0f), + ov::float16(888.0f), ov::float16(888.0f), ov::float16(888.0f), + ov::float16(888.0f), ov::float16(888.0f), ov::float16(888.0f), - FLOAT16(999.0f), FLOAT16(999.0f), FLOAT16(999.0f), - FLOAT16(999.0f), FLOAT16(999.0f), FLOAT16(999.0f) + ov::float16(999.0f), ov::float16(999.0f), ov::float16(999.0f), + ov::float16(999.0f), ov::float16(999.0f), ov::float16(999.0f) }); topology topology; @@ -3204,104 +3204,104 @@ TEST(scatter_nd_update_gpu_fp16, d32323_i21111) { set_values(input1, { - FLOAT16(100.f), FLOAT16(101.f), FLOAT16(102.f), - FLOAT16(103.f), FLOAT16(104.f), FLOAT16(105.f), + ov::float16(100.f), ov::float16(101.f), ov::float16(102.f), + ov::float16(103.f), ov::float16(104.f), ov::float16(105.f), - FLOAT16(106.f), FLOAT16(107.f), FLOAT16(108.f), - FLOAT16(109.f), FLOAT16(110.f), FLOAT16(111.f), + ov::float16(106.f), ov::float16(107.f), ov::float16(108.f), + ov::float16(109.f), ov::float16(110.f), ov::float16(111.f), - FLOAT16(112.f), FLOAT16(113.f), FLOAT16(114.f), - FLOAT16(115.f), FLOAT16(116.f), FLOAT16(117.f), + ov::float16(112.f), ov::float16(113.f), ov::float16(114.f), + ov::float16(115.f), ov::float16(116.f), ov::float16(117.f), - FLOAT16(118.f), FLOAT16(119.f), FLOAT16(120.f), - FLOAT16(121.f), FLOAT16(122.f), FLOAT16(123.f), + ov::float16(118.f), ov::float16(119.f), ov::float16(120.f), + ov::float16(121.f), ov::float16(122.f), ov::float16(123.f), - FLOAT16(124.f), FLOAT16(125.f), FLOAT16(126.f), - FLOAT16(127.f), FLOAT16(128.f), 
FLOAT16(129.f), + ov::float16(124.f), ov::float16(125.f), ov::float16(126.f), + ov::float16(127.f), ov::float16(128.f), ov::float16(129.f), - FLOAT16(130.f), FLOAT16(131.f), FLOAT16(132.f), - FLOAT16(133.f), FLOAT16(134.f), FLOAT16(135.f), + ov::float16(130.f), ov::float16(131.f), ov::float16(132.f), + ov::float16(133.f), ov::float16(134.f), ov::float16(135.f), // 2 - FLOAT16(100.f), FLOAT16(101.f), FLOAT16(102.f), - FLOAT16(103.f), FLOAT16(104.f), FLOAT16(105.f), + ov::float16(100.f), ov::float16(101.f), ov::float16(102.f), + ov::float16(103.f), ov::float16(104.f), ov::float16(105.f), - FLOAT16(106.f), FLOAT16(107.f), FLOAT16(108.f), - FLOAT16(109.f), FLOAT16(110.f), FLOAT16(111.f), + ov::float16(106.f), ov::float16(107.f), ov::float16(108.f), + ov::float16(109.f), ov::float16(110.f), ov::float16(111.f), - FLOAT16(112.f), FLOAT16(113.f), FLOAT16(114.f), - FLOAT16(115.f), FLOAT16(116.f), FLOAT16(117.f), + ov::float16(112.f), ov::float16(113.f), ov::float16(114.f), + ov::float16(115.f), ov::float16(116.f), ov::float16(117.f), - FLOAT16(118.f), FLOAT16(119.f), FLOAT16(120.f), - FLOAT16(121.f), FLOAT16(122.f), FLOAT16(123.f), + ov::float16(118.f), ov::float16(119.f), ov::float16(120.f), + ov::float16(121.f), ov::float16(122.f), ov::float16(123.f), - FLOAT16(124.f), FLOAT16(125.f), FLOAT16(126.f), - FLOAT16(127.f), FLOAT16(128.f), FLOAT16(129.f), + ov::float16(124.f), ov::float16(125.f), ov::float16(126.f), + ov::float16(127.f), ov::float16(128.f), ov::float16(129.f), - FLOAT16(130.f), FLOAT16(131.f), FLOAT16(132.f), - FLOAT16(133.f), FLOAT16(134.f), FLOAT16(135.f), + ov::float16(130.f), ov::float16(131.f), ov::float16(132.f), + ov::float16(133.f), ov::float16(134.f), ov::float16(135.f), // 3 - FLOAT16(100.f), FLOAT16(101.f), FLOAT16(102.f), - FLOAT16(103.f), FLOAT16(104.f), FLOAT16(105.f), + ov::float16(100.f), ov::float16(101.f), ov::float16(102.f), + ov::float16(103.f), ov::float16(104.f), ov::float16(105.f), - FLOAT16(106.f), FLOAT16(107.f), FLOAT16(108.f), - FLOAT16(109.f), FLOAT16(110.f), FLOAT16(111.f), + ov::float16(106.f), ov::float16(107.f), ov::float16(108.f), + ov::float16(109.f), ov::float16(110.f), ov::float16(111.f), - FLOAT16(112.f), FLOAT16(113.f), FLOAT16(114.f), - FLOAT16(115.f), FLOAT16(116.f), FLOAT16(117.f), + ov::float16(112.f), ov::float16(113.f), ov::float16(114.f), + ov::float16(115.f), ov::float16(116.f), ov::float16(117.f), - FLOAT16(118.f), FLOAT16(119.f), FLOAT16(120.f), - FLOAT16(121.f), FLOAT16(122.f), FLOAT16(123.f), + ov::float16(118.f), ov::float16(119.f), ov::float16(120.f), + ov::float16(121.f), ov::float16(122.f), ov::float16(123.f), - FLOAT16(124.f), FLOAT16(125.f), FLOAT16(126.f), - FLOAT16(127.f), FLOAT16(128.f), FLOAT16(129.f), + ov::float16(124.f), ov::float16(125.f), ov::float16(126.f), + ov::float16(127.f), ov::float16(128.f), ov::float16(129.f), - FLOAT16(130.f), FLOAT16(131.f), FLOAT16(132.f), - FLOAT16(133.f), FLOAT16(134.f), FLOAT16(135.f) + ov::float16(130.f), ov::float16(131.f), ov::float16(132.f), + ov::float16(133.f), ov::float16(134.f), ov::float16(135.f) }); set_values(input2, { - FLOAT16(0.0f), - FLOAT16(2.0f) + ov::float16(0.0f), + ov::float16(2.0f) }); set_values(input3, { - FLOAT16(555.0f), FLOAT16(555.0f), FLOAT16(555.0f), - FLOAT16(555.0f), FLOAT16(555.0f), FLOAT16(555.0f), + ov::float16(555.0f), ov::float16(555.0f), ov::float16(555.0f), + ov::float16(555.0f), ov::float16(555.0f), ov::float16(555.0f), - FLOAT16(666.0f), FLOAT16(666.0f), FLOAT16(666.0f), - FLOAT16(666.0f), FLOAT16(666.0f), FLOAT16(666.0f), + ov::float16(666.0f), 
ov::float16(666.0f), ov::float16(666.0f), + ov::float16(666.0f), ov::float16(666.0f), ov::float16(666.0f), - FLOAT16(444.0f), FLOAT16(444.0f), FLOAT16(444.0f), - FLOAT16(444.0f), FLOAT16(444.0f), FLOAT16(444.0f), + ov::float16(444.0f), ov::float16(444.0f), ov::float16(444.0f), + ov::float16(444.0f), ov::float16(444.0f), ov::float16(444.0f), - FLOAT16(555.0f), FLOAT16(555.0f), FLOAT16(555.0f), - FLOAT16(555.0f), FLOAT16(555.0f), FLOAT16(555.0f), + ov::float16(555.0f), ov::float16(555.0f), ov::float16(555.0f), + ov::float16(555.0f), ov::float16(555.0f), ov::float16(555.0f), - FLOAT16(666.0f), FLOAT16(666.0f), FLOAT16(666.0f), - FLOAT16(666.0f), FLOAT16(666.0f), FLOAT16(666.0f), + ov::float16(666.0f), ov::float16(666.0f), ov::float16(666.0f), + ov::float16(666.0f), ov::float16(666.0f), ov::float16(666.0f), - FLOAT16(444.0f), FLOAT16(444.0f), FLOAT16(444.0f), - FLOAT16(444.0f), FLOAT16(444.0f), FLOAT16(444.0f), + ov::float16(444.0f), ov::float16(444.0f), ov::float16(444.0f), + ov::float16(444.0f), ov::float16(444.0f), ov::float16(444.0f), - FLOAT16(777.0f), FLOAT16(777.0f), FLOAT16(777.0f), - FLOAT16(777.0f), FLOAT16(777.0f), FLOAT16(777.0f), + ov::float16(777.0f), ov::float16(777.0f), ov::float16(777.0f), + ov::float16(777.0f), ov::float16(777.0f), ov::float16(777.0f), - FLOAT16(888.0f), FLOAT16(888.0f), FLOAT16(888.0f), - FLOAT16(888.0f), FLOAT16(888.0f), FLOAT16(888.0f), + ov::float16(888.0f), ov::float16(888.0f), ov::float16(888.0f), + ov::float16(888.0f), ov::float16(888.0f), ov::float16(888.0f), - FLOAT16(999.0f), FLOAT16(999.0f), FLOAT16(999.0f), - FLOAT16(999.0f), FLOAT16(999.0f), FLOAT16(999.0f), + ov::float16(999.0f), ov::float16(999.0f), ov::float16(999.0f), + ov::float16(999.0f), ov::float16(999.0f), ov::float16(999.0f), - FLOAT16(777.0f), FLOAT16(777.0f), FLOAT16(777.0f), - FLOAT16(777.0f), FLOAT16(777.0f), FLOAT16(777.0f), + ov::float16(777.0f), ov::float16(777.0f), ov::float16(777.0f), + ov::float16(777.0f), ov::float16(777.0f), ov::float16(777.0f), - FLOAT16(888.0f), FLOAT16(888.0f), FLOAT16(888.0f), - FLOAT16(888.0f), FLOAT16(888.0f), FLOAT16(888.0f), + ov::float16(888.0f), ov::float16(888.0f), ov::float16(888.0f), + ov::float16(888.0f), ov::float16(888.0f), ov::float16(888.0f), - FLOAT16(999.0f), FLOAT16(999.0f), FLOAT16(999.0f), - FLOAT16(999.0f), FLOAT16(999.0f), FLOAT16(999.0f) + ov::float16(999.0f), ov::float16(999.0f), ov::float16(999.0f), + ov::float16(999.0f), ov::float16(999.0f), ov::float16(999.0f) }); topology topology; @@ -3406,62 +3406,62 @@ TEST(scatter_nd_update_gpu_fp16, d222222_i261111) { set_values(input1, { - FLOAT16(100.f), FLOAT16(101.f), - FLOAT16(102.f), FLOAT16(103.f), + ov::float16(100.f), ov::float16(101.f), + ov::float16(102.f), ov::float16(103.f), - FLOAT16(104.f), FLOAT16(105.f), - FLOAT16(106.f), FLOAT16(107.f),//1 + ov::float16(104.f), ov::float16(105.f), + ov::float16(106.f), ov::float16(107.f),//1 - FLOAT16(108.f), FLOAT16(109.f), - FLOAT16(110.f), FLOAT16(111.f), + ov::float16(108.f), ov::float16(109.f), + ov::float16(110.f), ov::float16(111.f), - FLOAT16(112.f), FLOAT16(113.f), - FLOAT16(114.f), FLOAT16(115.f),//2 + ov::float16(112.f), ov::float16(113.f), + ov::float16(114.f), ov::float16(115.f),//2 - FLOAT16(116.f), FLOAT16(117.f), - FLOAT16(118.f), FLOAT16(119.f), + ov::float16(116.f), ov::float16(117.f), + ov::float16(118.f), ov::float16(119.f), - FLOAT16(120.f), FLOAT16(121.f), - FLOAT16(122.f), FLOAT16(123.f),//3 + ov::float16(120.f), ov::float16(121.f), + ov::float16(122.f), ov::float16(123.f),//3 - FLOAT16(124.f), FLOAT16(125.f), - 
FLOAT16(126.f), FLOAT16(127.f), + ov::float16(124.f), ov::float16(125.f), + ov::float16(126.f), ov::float16(127.f), - FLOAT16(128.f), FLOAT16(129.f), - FLOAT16(130.f), FLOAT16(131.f),//4 + ov::float16(128.f), ov::float16(129.f), + ov::float16(130.f), ov::float16(131.f),//4 - FLOAT16(132.f), FLOAT16(133.f), - FLOAT16(134.f), FLOAT16(135.f), + ov::float16(132.f), ov::float16(133.f), + ov::float16(134.f), ov::float16(135.f), - FLOAT16(100.f), FLOAT16(101.f), - FLOAT16(102.f), FLOAT16(103.f),//5 + ov::float16(100.f), ov::float16(101.f), + ov::float16(102.f), ov::float16(103.f),//5 - FLOAT16(104.f), FLOAT16(105.f), - FLOAT16(106.f), FLOAT16(107.f), + ov::float16(104.f), ov::float16(105.f), + ov::float16(106.f), ov::float16(107.f), - FLOAT16(108.f), FLOAT16(109.f), - FLOAT16(110.f), FLOAT16(111.f),//6 + ov::float16(108.f), ov::float16(109.f), + ov::float16(110.f), ov::float16(111.f),//6 - FLOAT16(112.f), FLOAT16(113.f), - FLOAT16(114.f), FLOAT16(115.f), + ov::float16(112.f), ov::float16(113.f), + ov::float16(114.f), ov::float16(115.f), - FLOAT16(116.f), FLOAT16(117.f), - FLOAT16(118.f), FLOAT16(119.f),//7 + ov::float16(116.f), ov::float16(117.f), + ov::float16(118.f), ov::float16(119.f),//7 - FLOAT16(120.f), FLOAT16(121.f), - FLOAT16(122.f), FLOAT16(123.f), + ov::float16(120.f), ov::float16(121.f), + ov::float16(122.f), ov::float16(123.f), - FLOAT16(124.f), FLOAT16(125.f), - FLOAT16(126.f), FLOAT16(127.f),//8 + ov::float16(124.f), ov::float16(125.f), + ov::float16(126.f), ov::float16(127.f),//8 }); set_values(input2, { - FLOAT16(0.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(0.0f), - FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(0.0f) + ov::float16(0.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(0.0f), + ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(0.0f) }); set_values(input3, { - FLOAT16(777.0f), FLOAT16(999.0f) + ov::float16(777.0f), ov::float16(999.0f) }); topology topology; @@ -3558,63 +3558,63 @@ TEST(scatter_nd_update_gpu_fp16, d222222_i251111) { set_values(input1, { - FLOAT16(100.f), FLOAT16(101.f), - FLOAT16(102.f), FLOAT16(103.f), + ov::float16(100.f), ov::float16(101.f), + ov::float16(102.f), ov::float16(103.f), - FLOAT16(104.f), FLOAT16(105.f), - FLOAT16(106.f), FLOAT16(107.f),//1 + ov::float16(104.f), ov::float16(105.f), + ov::float16(106.f), ov::float16(107.f),//1 - FLOAT16(108.f), FLOAT16(109.f), - FLOAT16(110.f), FLOAT16(111.f), + ov::float16(108.f), ov::float16(109.f), + ov::float16(110.f), ov::float16(111.f), - FLOAT16(112.f), FLOAT16(113.f), - FLOAT16(114.f), FLOAT16(115.f),//2 + ov::float16(112.f), ov::float16(113.f), + ov::float16(114.f), ov::float16(115.f),//2 - FLOAT16(116.f), FLOAT16(117.f), - FLOAT16(118.f), FLOAT16(119.f), + ov::float16(116.f), ov::float16(117.f), + ov::float16(118.f), ov::float16(119.f), - FLOAT16(120.f), FLOAT16(121.f), - FLOAT16(122.f), FLOAT16(123.f),//3 + ov::float16(120.f), ov::float16(121.f), + ov::float16(122.f), ov::float16(123.f),//3 - FLOAT16(124.f), FLOAT16(125.f), - FLOAT16(126.f), FLOAT16(127.f), + ov::float16(124.f), ov::float16(125.f), + ov::float16(126.f), ov::float16(127.f), - FLOAT16(128.f), FLOAT16(129.f), - FLOAT16(130.f), FLOAT16(131.f),//4 + ov::float16(128.f), ov::float16(129.f), + ov::float16(130.f), ov::float16(131.f),//4 - FLOAT16(132.f), FLOAT16(133.f), - FLOAT16(134.f), FLOAT16(135.f), + ov::float16(132.f), ov::float16(133.f), + ov::float16(134.f), 
ov::float16(135.f), - FLOAT16(100.f), FLOAT16(101.f), - FLOAT16(102.f), FLOAT16(103.f),//5 + ov::float16(100.f), ov::float16(101.f), + ov::float16(102.f), ov::float16(103.f),//5 - FLOAT16(104.f), FLOAT16(105.f), - FLOAT16(106.f), FLOAT16(107.f), + ov::float16(104.f), ov::float16(105.f), + ov::float16(106.f), ov::float16(107.f), - FLOAT16(108.f), FLOAT16(109.f), - FLOAT16(110.f), FLOAT16(111.f),//6 + ov::float16(108.f), ov::float16(109.f), + ov::float16(110.f), ov::float16(111.f),//6 - FLOAT16(112.f), FLOAT16(113.f), - FLOAT16(114.f), FLOAT16(115.f), + ov::float16(112.f), ov::float16(113.f), + ov::float16(114.f), ov::float16(115.f), - FLOAT16(116.f), FLOAT16(117.f), - FLOAT16(118.f), FLOAT16(119.f),//7 + ov::float16(116.f), ov::float16(117.f), + ov::float16(118.f), ov::float16(119.f),//7 - FLOAT16(120.f), FLOAT16(121.f), - FLOAT16(122.f), FLOAT16(123.f), + ov::float16(120.f), ov::float16(121.f), + ov::float16(122.f), ov::float16(123.f), - FLOAT16(124.f), FLOAT16(125.f), - FLOAT16(126.f), FLOAT16(127.f),//8 + ov::float16(124.f), ov::float16(125.f), + ov::float16(126.f), ov::float16(127.f),//8 }); set_values(input2, { - FLOAT16(0.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), - FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f) + ov::float16(0.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), + ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f) }); set_values(input3, { - FLOAT16(777.0f), FLOAT16(777.0f), - FLOAT16(999.0f), FLOAT16(999.0f) + ov::float16(777.0f), ov::float16(777.0f), + ov::float16(999.0f), ov::float16(999.0f) }); topology topology; @@ -3711,66 +3711,66 @@ TEST(scatter_nd_update_gpu_fp16, d222222_i241111) { set_values(input1, { - FLOAT16(100.f), FLOAT16(101.f), - FLOAT16(102.f), FLOAT16(103.f), + ov::float16(100.f), ov::float16(101.f), + ov::float16(102.f), ov::float16(103.f), - FLOAT16(104.f), FLOAT16(105.f), - FLOAT16(106.f), FLOAT16(107.f),//1 + ov::float16(104.f), ov::float16(105.f), + ov::float16(106.f), ov::float16(107.f),//1 - FLOAT16(108.f), FLOAT16(109.f), - FLOAT16(110.f), FLOAT16(111.f), + ov::float16(108.f), ov::float16(109.f), + ov::float16(110.f), ov::float16(111.f), - FLOAT16(112.f), FLOAT16(113.f), - FLOAT16(114.f), FLOAT16(115.f),//2 + ov::float16(112.f), ov::float16(113.f), + ov::float16(114.f), ov::float16(115.f),//2 - FLOAT16(116.f), FLOAT16(117.f), - FLOAT16(118.f), FLOAT16(119.f), + ov::float16(116.f), ov::float16(117.f), + ov::float16(118.f), ov::float16(119.f), - FLOAT16(120.f), FLOAT16(121.f), - FLOAT16(122.f), FLOAT16(123.f),//3 + ov::float16(120.f), ov::float16(121.f), + ov::float16(122.f), ov::float16(123.f),//3 - FLOAT16(124.f), FLOAT16(125.f), - FLOAT16(126.f), FLOAT16(127.f), + ov::float16(124.f), ov::float16(125.f), + ov::float16(126.f), ov::float16(127.f), - FLOAT16(128.f), FLOAT16(129.f), - FLOAT16(130.f), FLOAT16(131.f),//4 + ov::float16(128.f), ov::float16(129.f), + ov::float16(130.f), ov::float16(131.f),//4 - FLOAT16(132.f), FLOAT16(133.f), - FLOAT16(134.f), FLOAT16(135.f), + ov::float16(132.f), ov::float16(133.f), + ov::float16(134.f), ov::float16(135.f), - FLOAT16(100.f), FLOAT16(101.f), - FLOAT16(102.f), FLOAT16(103.f),//5 + ov::float16(100.f), ov::float16(101.f), + ov::float16(102.f), ov::float16(103.f),//5 - FLOAT16(104.f), FLOAT16(105.f), - FLOAT16(106.f), FLOAT16(107.f), + ov::float16(104.f), ov::float16(105.f), + ov::float16(106.f), ov::float16(107.f), - FLOAT16(108.f), FLOAT16(109.f), - FLOAT16(110.f), FLOAT16(111.f),//6 
+ ov::float16(108.f), ov::float16(109.f), + ov::float16(110.f), ov::float16(111.f),//6 - FLOAT16(112.f), FLOAT16(113.f), - FLOAT16(114.f), FLOAT16(115.f), + ov::float16(112.f), ov::float16(113.f), + ov::float16(114.f), ov::float16(115.f), - FLOAT16(116.f), FLOAT16(117.f), - FLOAT16(118.f), FLOAT16(119.f),//7 + ov::float16(116.f), ov::float16(117.f), + ov::float16(118.f), ov::float16(119.f),//7 - FLOAT16(120.f), FLOAT16(121.f), - FLOAT16(122.f), FLOAT16(123.f), + ov::float16(120.f), ov::float16(121.f), + ov::float16(122.f), ov::float16(123.f), - FLOAT16(124.f), FLOAT16(125.f), - FLOAT16(126.f), FLOAT16(127.f),//8 + ov::float16(124.f), ov::float16(125.f), + ov::float16(126.f), ov::float16(127.f),//8 }); set_values(input2, { - FLOAT16(0.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), - FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f) + ov::float16(0.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), + ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f) }); set_values(input3, { - FLOAT16(777.0f), FLOAT16(777.0f), - FLOAT16(777.0f), FLOAT16(777.0f), + ov::float16(777.0f), ov::float16(777.0f), + ov::float16(777.0f), ov::float16(777.0f), - FLOAT16(999.0f), FLOAT16(999.0f), - FLOAT16(999.0f), FLOAT16(999.0f) + ov::float16(999.0f), ov::float16(999.0f), + ov::float16(999.0f), ov::float16(999.0f) }); topology topology; @@ -3868,72 +3868,72 @@ TEST(scatter_nd_update_gpu_fp16, d222222_i231111) { set_values(input1, { - FLOAT16(100.f), FLOAT16(101.f), - FLOAT16(102.f), FLOAT16(103.f), + ov::float16(100.f), ov::float16(101.f), + ov::float16(102.f), ov::float16(103.f), - FLOAT16(104.f), FLOAT16(105.f), - FLOAT16(106.f), FLOAT16(107.f),//1 + ov::float16(104.f), ov::float16(105.f), + ov::float16(106.f), ov::float16(107.f),//1 - FLOAT16(108.f), FLOAT16(109.f), - FLOAT16(110.f), FLOAT16(111.f), + ov::float16(108.f), ov::float16(109.f), + ov::float16(110.f), ov::float16(111.f), - FLOAT16(112.f), FLOAT16(113.f), - FLOAT16(114.f), FLOAT16(115.f),//2 + ov::float16(112.f), ov::float16(113.f), + ov::float16(114.f), ov::float16(115.f),//2 - FLOAT16(116.f), FLOAT16(117.f), - FLOAT16(118.f), FLOAT16(119.f), + ov::float16(116.f), ov::float16(117.f), + ov::float16(118.f), ov::float16(119.f), - FLOAT16(120.f), FLOAT16(121.f), - FLOAT16(122.f), FLOAT16(123.f),//3 + ov::float16(120.f), ov::float16(121.f), + ov::float16(122.f), ov::float16(123.f),//3 - FLOAT16(124.f), FLOAT16(125.f), - FLOAT16(126.f), FLOAT16(127.f), + ov::float16(124.f), ov::float16(125.f), + ov::float16(126.f), ov::float16(127.f), - FLOAT16(128.f), FLOAT16(129.f), - FLOAT16(130.f), FLOAT16(131.f),//4 + ov::float16(128.f), ov::float16(129.f), + ov::float16(130.f), ov::float16(131.f),//4 - FLOAT16(132.f), FLOAT16(133.f), - FLOAT16(134.f), FLOAT16(135.f), + ov::float16(132.f), ov::float16(133.f), + ov::float16(134.f), ov::float16(135.f), - FLOAT16(100.f), FLOAT16(101.f), - FLOAT16(102.f), FLOAT16(103.f),//5 + ov::float16(100.f), ov::float16(101.f), + ov::float16(102.f), ov::float16(103.f),//5 - FLOAT16(104.f), FLOAT16(105.f), - FLOAT16(106.f), FLOAT16(107.f), + ov::float16(104.f), ov::float16(105.f), + ov::float16(106.f), ov::float16(107.f), - FLOAT16(108.f), FLOAT16(109.f), - FLOAT16(110.f), FLOAT16(111.f),//6 + ov::float16(108.f), ov::float16(109.f), + ov::float16(110.f), ov::float16(111.f),//6 - FLOAT16(112.f), FLOAT16(113.f), - FLOAT16(114.f), FLOAT16(115.f), + ov::float16(112.f), ov::float16(113.f), + ov::float16(114.f), ov::float16(115.f), - FLOAT16(116.f), FLOAT16(117.f), - FLOAT16(118.f), 
FLOAT16(119.f),//7 + ov::float16(116.f), ov::float16(117.f), + ov::float16(118.f), ov::float16(119.f),//7 - FLOAT16(120.f), FLOAT16(121.f), - FLOAT16(122.f), FLOAT16(123.f), + ov::float16(120.f), ov::float16(121.f), + ov::float16(122.f), ov::float16(123.f), - FLOAT16(124.f), FLOAT16(125.f), - FLOAT16(126.f), FLOAT16(127.f),//8 + ov::float16(124.f), ov::float16(125.f), + ov::float16(126.f), ov::float16(127.f),//8 }); set_values(input2, { - FLOAT16(0.0f), FLOAT16(1.0f), FLOAT16(1.0f), - FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f) + ov::float16(0.0f), ov::float16(1.0f), ov::float16(1.0f), + ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f) }); set_values(input3, { - FLOAT16(777.0f), FLOAT16(777.0f), - FLOAT16(777.0f), FLOAT16(777.0f), + ov::float16(777.0f), ov::float16(777.0f), + ov::float16(777.0f), ov::float16(777.0f), - FLOAT16(777.0f), FLOAT16(777.0f), - FLOAT16(777.0f), FLOAT16(777.0f), + ov::float16(777.0f), ov::float16(777.0f), + ov::float16(777.0f), ov::float16(777.0f), - FLOAT16(999.0f), FLOAT16(999.0f), - FLOAT16(999.0f), FLOAT16(999.0f), + ov::float16(999.0f), ov::float16(999.0f), + ov::float16(999.0f), ov::float16(999.0f), - FLOAT16(999.0f), FLOAT16(999.0f), - FLOAT16(999.0f), FLOAT16(999.0f) + ov::float16(999.0f), ov::float16(999.0f), + ov::float16(999.0f), ov::float16(999.0f) }); topology topology; @@ -4030,84 +4030,84 @@ TEST(scatter_nd_update_gpu_fp16, d222222_i221111) { set_values(input1, { - FLOAT16(100.f), FLOAT16(101.f), - FLOAT16(102.f), FLOAT16(103.f), + ov::float16(100.f), ov::float16(101.f), + ov::float16(102.f), ov::float16(103.f), - FLOAT16(104.f), FLOAT16(105.f), - FLOAT16(106.f), FLOAT16(107.f),//1 + ov::float16(104.f), ov::float16(105.f), + ov::float16(106.f), ov::float16(107.f),//1 - FLOAT16(108.f), FLOAT16(109.f), - FLOAT16(110.f), FLOAT16(111.f), + ov::float16(108.f), ov::float16(109.f), + ov::float16(110.f), ov::float16(111.f), - FLOAT16(112.f), FLOAT16(113.f), - FLOAT16(114.f), FLOAT16(115.f),//2 + ov::float16(112.f), ov::float16(113.f), + ov::float16(114.f), ov::float16(115.f),//2 - FLOAT16(116.f), FLOAT16(117.f), - FLOAT16(118.f), FLOAT16(119.f), + ov::float16(116.f), ov::float16(117.f), + ov::float16(118.f), ov::float16(119.f), - FLOAT16(120.f), FLOAT16(121.f), - FLOAT16(122.f), FLOAT16(123.f),//3 + ov::float16(120.f), ov::float16(121.f), + ov::float16(122.f), ov::float16(123.f),//3 - FLOAT16(124.f), FLOAT16(125.f), - FLOAT16(126.f), FLOAT16(127.f), + ov::float16(124.f), ov::float16(125.f), + ov::float16(126.f), ov::float16(127.f), - FLOAT16(128.f), FLOAT16(129.f), - FLOAT16(130.f), FLOAT16(131.f),//4 + ov::float16(128.f), ov::float16(129.f), + ov::float16(130.f), ov::float16(131.f),//4 - FLOAT16(132.f), FLOAT16(133.f), - FLOAT16(134.f), FLOAT16(135.f), + ov::float16(132.f), ov::float16(133.f), + ov::float16(134.f), ov::float16(135.f), - FLOAT16(100.f), FLOAT16(101.f), - FLOAT16(102.f), FLOAT16(103.f),//5 + ov::float16(100.f), ov::float16(101.f), + ov::float16(102.f), ov::float16(103.f),//5 - FLOAT16(104.f), FLOAT16(105.f), - FLOAT16(106.f), FLOAT16(107.f), + ov::float16(104.f), ov::float16(105.f), + ov::float16(106.f), ov::float16(107.f), - FLOAT16(108.f), FLOAT16(109.f), - FLOAT16(110.f), FLOAT16(111.f),//6 + ov::float16(108.f), ov::float16(109.f), + ov::float16(110.f), ov::float16(111.f),//6 - FLOAT16(112.f), FLOAT16(113.f), - FLOAT16(114.f), FLOAT16(115.f), + ov::float16(112.f), ov::float16(113.f), + ov::float16(114.f), ov::float16(115.f), - FLOAT16(116.f), FLOAT16(117.f), - FLOAT16(118.f), FLOAT16(119.f),//7 + ov::float16(116.f), 
ov::float16(117.f), + ov::float16(118.f), ov::float16(119.f),//7 - FLOAT16(120.f), FLOAT16(121.f), - FLOAT16(122.f), FLOAT16(123.f), + ov::float16(120.f), ov::float16(121.f), + ov::float16(122.f), ov::float16(123.f), - FLOAT16(124.f), FLOAT16(125.f), - FLOAT16(126.f), FLOAT16(127.f),//8 + ov::float16(124.f), ov::float16(125.f), + ov::float16(126.f), ov::float16(127.f),//8 }); set_values(input2, { - FLOAT16(0.0f), FLOAT16(1.0f), - FLOAT16(1.0f), FLOAT16(1.0f) + ov::float16(0.0f), ov::float16(1.0f), + ov::float16(1.0f), ov::float16(1.0f) }); set_values(input3, { - FLOAT16(777.0f), FLOAT16(777.0f), - FLOAT16(777.0f), FLOAT16(777.0f), + ov::float16(777.0f), ov::float16(777.0f), + ov::float16(777.0f), ov::float16(777.0f), - FLOAT16(777.0f), FLOAT16(777.0f), - FLOAT16(777.0f), FLOAT16(777.0f), + ov::float16(777.0f), ov::float16(777.0f), + ov::float16(777.0f), ov::float16(777.0f), - FLOAT16(777.0f), FLOAT16(777.0f), - FLOAT16(777.0f), FLOAT16(777.0f), + ov::float16(777.0f), ov::float16(777.0f), + ov::float16(777.0f), ov::float16(777.0f), - FLOAT16(777.0f), FLOAT16(777.0f), - FLOAT16(777.0f), FLOAT16(777.0f), + ov::float16(777.0f), ov::float16(777.0f), + ov::float16(777.0f), ov::float16(777.0f), - FLOAT16(999.0f), FLOAT16(999.0f), - FLOAT16(999.0f), FLOAT16(999.0f), + ov::float16(999.0f), ov::float16(999.0f), + ov::float16(999.0f), ov::float16(999.0f), - FLOAT16(999.0f), FLOAT16(999.0f), - FLOAT16(999.0f), FLOAT16(999.0f), + ov::float16(999.0f), ov::float16(999.0f), + ov::float16(999.0f), ov::float16(999.0f), - FLOAT16(999.0f), FLOAT16(999.0f), - FLOAT16(999.0f), FLOAT16(999.0f), + ov::float16(999.0f), ov::float16(999.0f), + ov::float16(999.0f), ov::float16(999.0f), - FLOAT16(999.0f), FLOAT16(999.0f), - FLOAT16(999.0f), FLOAT16(999.0f) + ov::float16(999.0f), ov::float16(999.0f), + ov::float16(999.0f), ov::float16(999.0f) }); topology topology; @@ -4384,7 +4384,7 @@ void test_d222222_i211111(bool is_caching_test) { } TEST(scatter_nd_update_gpu_fp16, d222222_i211111) { - test_d222222_i211111(false); + test_d222222_i211111(false); } TEST(scatter_nd_update_gpu, dynamic) { @@ -4584,7 +4584,7 @@ TEST_P(scatter_nd_update_random_test, random_cached) else if (param.input_type == data_types::i64) this->execute(param, true); else if (param.input_type == data_types::f16) - this->execute_fp16(param, true); + this->execute_fp16(param, true); else if (param.input_type == data_types::f32) this->execute(param, true); else @@ -4592,5 +4592,5 @@ TEST_P(scatter_nd_update_random_test, random_cached) } #endif TEST(scatter_nd_update_gpu_fp16, d222222_i211111_cached) { - test_d222222_i211111(true); + test_d222222_i211111(true); } diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/scatter_update_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/scatter_update_gpu_test.cpp index 4c6c90ddbb66f2..facc98497043aa 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/scatter_update_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/scatter_update_gpu_test.cpp @@ -120,7 +120,7 @@ void test_d2411_axisB(bool is_caching_test) { } TEST(scatter_update_gpu_fp16, d2411_axisB) { - test_d2411_axisB(false); + test_d2411_axisB(false); } TEST(scatter_update_gpu_fp32, d8111_axisB) { @@ -239,10 +239,10 @@ TEST(scatter_update_gpu_fp16, d4311_axisB) { auto axis = 0; set_values(input1, { - FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), - FLOAT16(2.0f), FLOAT16(2.0f), FLOAT16(2.0f), - FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), - FLOAT16(3.0f), FLOAT16(3.0f), FLOAT16(3.0f) + ov::float16(1.0f), ov::float16(1.0f), 
ov::float16(1.0f), + ov::float16(2.0f), ov::float16(2.0f), ov::float16(2.0f), + ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), + ov::float16(3.0f), ov::float16(3.0f), ov::float16(3.0f) }); set_values(input2, { @@ -251,11 +251,11 @@ TEST(scatter_update_gpu_fp16, d4311_axisB) { }); set_values(input3, { - FLOAT16(7.0f), FLOAT16(7.0f), FLOAT16(7.0f), - FLOAT16(8.0f), FLOAT16(8.0f), FLOAT16(8.0f), + ov::float16(7.0f), ov::float16(7.0f), ov::float16(7.0f), + ov::float16(8.0f), ov::float16(8.0f), ov::float16(8.0f), - FLOAT16(6.0f), FLOAT16(6.0f), FLOAT16(6.0f), - FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f) + ov::float16(6.0f), ov::float16(6.0f), ov::float16(6.0f), + ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f) }); topology topology; @@ -355,17 +355,17 @@ TEST(scatter_update_gpu_fp16, d2521_axisF) { auto axis = 1; set_values(input1, { - FLOAT16(0.0f), FLOAT16(1.0f), - FLOAT16(2.0f), FLOAT16(3.0f), - FLOAT16(4.0f), FLOAT16(5.0f), - FLOAT16(6.0f), FLOAT16(7.0f), - FLOAT16(8.0f), FLOAT16(9.0f), - - FLOAT16(10.0f), FLOAT16(11.0f), - FLOAT16(12.0f), FLOAT16(13.0f), - FLOAT16(14.0f), FLOAT16(15.0f), - FLOAT16(16.0f), FLOAT16(17.0f), - FLOAT16(18.0f), FLOAT16(19.0f) + ov::float16(0.0f), ov::float16(1.0f), + ov::float16(2.0f), ov::float16(3.0f), + ov::float16(4.0f), ov::float16(5.0f), + ov::float16(6.0f), ov::float16(7.0f), + ov::float16(8.0f), ov::float16(9.0f), + + ov::float16(10.0f), ov::float16(11.0f), + ov::float16(12.0f), ov::float16(13.0f), + ov::float16(14.0f), ov::float16(15.0f), + ov::float16(16.0f), ov::float16(17.0f), + ov::float16(18.0f), ov::float16(19.0f) }); set_values(input2, { @@ -374,15 +374,15 @@ TEST(scatter_update_gpu_fp16, d2521_axisF) { }); set_values(input3, { - FLOAT16(21.0f), FLOAT16(31.0f), - FLOAT16(41.0f), FLOAT16(51.0f), - FLOAT16(61.0f), FLOAT16(71.0f), - FLOAT16(81.0f), FLOAT16(91.0f), - - FLOAT16(101.0f), FLOAT16(111.0f), - FLOAT16(121.0f), FLOAT16(131.0f), - FLOAT16(141.0f), FLOAT16(151.0f), - FLOAT16(161.0f), FLOAT16(171.0f) + ov::float16(21.0f), ov::float16(31.0f), + ov::float16(41.0f), ov::float16(51.0f), + ov::float16(61.0f), ov::float16(71.0f), + ov::float16(81.0f), ov::float16(91.0f), + + ov::float16(101.0f), ov::float16(111.0f), + ov::float16(121.0f), ov::float16(131.0f), + ov::float16(141.0f), ov::float16(151.0f), + ov::float16(161.0f), ov::float16(171.0f) }); topology topology; @@ -475,10 +475,10 @@ TEST(scatter_update_gpu_fp16, d2241_axisY) { auto axis = 2; set_values(input1, { - FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), - FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), - FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), - FLOAT16(13.0f), FLOAT16(14.0f), FLOAT16(15.0f), FLOAT16(16.0f) + ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), + ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), + ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), + ov::float16(13.0f), ov::float16(14.0f), ov::float16(15.0f), ov::float16(16.0f) }); set_values(input2, { @@ -487,15 +487,15 @@ TEST(scatter_update_gpu_fp16, d2241_axisY) { }); set_values(input3, { - FLOAT16(0.0f), FLOAT16(20.0f), - FLOAT16(30.0f), FLOAT16(40.0f), - FLOAT16(50.0f), FLOAT16(60.0f), - FLOAT16(70.0f), FLOAT16(80.0f), - - FLOAT16(90.0f), FLOAT16(100.0f), - FLOAT16(110.0f), FLOAT16(120.0f), - FLOAT16(130.0f), FLOAT16(140.0f), - FLOAT16(150.0f), FLOAT16(160.0f) + ov::float16(0.0f), ov::float16(20.0f), + ov::float16(30.0f), ov::float16(40.0f), + ov::float16(50.0f), 
ov::float16(60.0f), + ov::float16(70.0f), ov::float16(80.0f), + + ov::float16(90.0f), ov::float16(100.0f), + ov::float16(110.0f), ov::float16(120.0f), + ov::float16(130.0f), ov::float16(140.0f), + ov::float16(150.0f), ov::float16(160.0f) }); topology topology; @@ -553,61 +553,61 @@ TEST(scatter_update_gpu_fp16, d8x2x20x1_axisB) { auto axis = 0; set_values(input1, { - FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), - FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), - FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), - FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), - FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), - FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), - - FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), - FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), - FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), - FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), - FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), - FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), - - FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), - FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), - FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), - FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), - FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), - FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), - - FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), - FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), - FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), - FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), - FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), - FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), - - FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), - FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), - FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), - FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), - FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), - FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), - - FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), - 
FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), - FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), - FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), - FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), - FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), - - FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), - FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), - FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), - FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), - FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), - FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), - - FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), - FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), - FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), FLOAT16(0.0f), - FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), - FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), - FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f), FLOAT16(1.0f) + ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), + ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), + ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), + ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), + ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), + ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), + + ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), + ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), + ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), + ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), + ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), + ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), + + ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), + ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), + 
ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), + ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), + ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), + ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), + + ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), + ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), + ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), + ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), + ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), + ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), + + ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), + ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), + ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), + ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), + ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), + ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), + + ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), + ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), + ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), + ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), + ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), + ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), + + ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), + ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), + ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), + ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), + ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), + ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), 
ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), + + ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), + ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), + ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), ov::float16(0.0f), + ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), + ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), + ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f), ov::float16(1.0f) }); set_values(input2, { @@ -616,47 +616,47 @@ TEST(scatter_update_gpu_fp16, d8x2x20x1_axisB) { }); set_values(input3, { - FLOAT16(0), FLOAT16(1), FLOAT16(2), FLOAT16(3), FLOAT16(4), FLOAT16(5), FLOAT16(6), FLOAT16(7), - FLOAT16(8), FLOAT16(9), FLOAT16(10), FLOAT16(11), FLOAT16(12), FLOAT16(13), FLOAT16(14), FLOAT16(15), - FLOAT16(16), FLOAT16(17), FLOAT16(18), FLOAT16(19), - FLOAT16(20), FLOAT16(21), FLOAT16(22), FLOAT16(23), FLOAT16(24), FLOAT16(25), FLOAT16(26), FLOAT16(27), - FLOAT16(28), FLOAT16(29), FLOAT16(30), FLOAT16(31), FLOAT16(32), FLOAT16(33), FLOAT16(34), FLOAT16(35), - FLOAT16(36), FLOAT16(37), FLOAT16(38), FLOAT16(39), - - FLOAT16(40), FLOAT16(41), FLOAT16(42), FLOAT16(43), FLOAT16(44), FLOAT16(45), FLOAT16(46), FLOAT16(47), - FLOAT16(48), FLOAT16(49), FLOAT16(50), FLOAT16(51), FLOAT16(52), FLOAT16(53), FLOAT16(54), FLOAT16(55), - FLOAT16(56), FLOAT16(57), FLOAT16(58), FLOAT16(59), - FLOAT16(60), FLOAT16(61), FLOAT16(62), FLOAT16(63), FLOAT16(64), FLOAT16(65), FLOAT16(66), FLOAT16(67), - FLOAT16(68), FLOAT16(69), FLOAT16(70), FLOAT16(71), FLOAT16(72), FLOAT16(73), FLOAT16(74), FLOAT16(75), - FLOAT16(76), FLOAT16(77), FLOAT16(78), FLOAT16(79), - - FLOAT16(80), FLOAT16(81), FLOAT16(82), FLOAT16(83), FLOAT16(84), FLOAT16(85), FLOAT16(86), FLOAT16(87), - FLOAT16(88), FLOAT16(89), FLOAT16(90), FLOAT16(91), FLOAT16(92), FLOAT16(93), FLOAT16(94), FLOAT16(95), - FLOAT16(96), FLOAT16(97), FLOAT16(98), FLOAT16(99), - FLOAT16(100), FLOAT16(101), FLOAT16(102), FLOAT16(103), FLOAT16(104), FLOAT16(105), FLOAT16(106), - FLOAT16(107), FLOAT16(108), FLOAT16(109), FLOAT16(110), FLOAT16(111), FLOAT16(112), FLOAT16(113), - FLOAT16(114), FLOAT16(115), FLOAT16(116), FLOAT16(117), FLOAT16(118), FLOAT16(119), - - FLOAT16(120), FLOAT16(121), FLOAT16(122), FLOAT16(123), FLOAT16(124), FLOAT16(125), FLOAT16(126), - FLOAT16(127), FLOAT16(128), FLOAT16(129), FLOAT16(130), FLOAT16(131), FLOAT16(132), FLOAT16(133), - FLOAT16(134), FLOAT16(135), FLOAT16(136), FLOAT16(137), FLOAT16(138), FLOAT16(139), - FLOAT16(140), FLOAT16(141), FLOAT16(142), FLOAT16(143), FLOAT16(144), FLOAT16(145), FLOAT16(146), - FLOAT16(147), FLOAT16(148), FLOAT16(149), FLOAT16(150), FLOAT16(151), FLOAT16(152), FLOAT16(153), - FLOAT16(154), FLOAT16(155), FLOAT16(156), FLOAT16(157), FLOAT16(158), FLOAT16(159), - - FLOAT16(160), FLOAT16(161), FLOAT16(162), FLOAT16(163), FLOAT16(164), FLOAT16(165), FLOAT16(166), - FLOAT16(167), FLOAT16(168), FLOAT16(169), FLOAT16(170), FLOAT16(171), FLOAT16(172), FLOAT16(173), - FLOAT16(174), FLOAT16(175), FLOAT16(176), FLOAT16(177), FLOAT16(178), FLOAT16(179), - FLOAT16(180), FLOAT16(181), FLOAT16(182), FLOAT16(183), FLOAT16(184), FLOAT16(185), FLOAT16(186), - FLOAT16(187), FLOAT16(188), 
FLOAT16(189), FLOAT16(190), FLOAT16(191), FLOAT16(192), FLOAT16(193), - FLOAT16(194), FLOAT16(195), FLOAT16(196), FLOAT16(197), FLOAT16(198), FLOAT16(199), - - FLOAT16(200), FLOAT16(201), FLOAT16(202), FLOAT16(203), FLOAT16(204), FLOAT16(205), FLOAT16(206), - FLOAT16(207), FLOAT16(208), FLOAT16(209), FLOAT16(210), FLOAT16(211), FLOAT16(212), FLOAT16(213), - FLOAT16(214), FLOAT16(215), FLOAT16(216), FLOAT16(217), FLOAT16(218), FLOAT16(219), - FLOAT16(220), FLOAT16(221), FLOAT16(222), FLOAT16(223), FLOAT16(224), FLOAT16(225), FLOAT16(226), - FLOAT16(227), FLOAT16(228), FLOAT16(229), FLOAT16(230), FLOAT16(231), FLOAT16(232), FLOAT16(233), - FLOAT16(234), FLOAT16(235), FLOAT16(236), FLOAT16(237), FLOAT16(238), FLOAT16(239) + ov::float16(0), ov::float16(1), ov::float16(2), ov::float16(3), ov::float16(4), ov::float16(5), ov::float16(6), ov::float16(7), + ov::float16(8), ov::float16(9), ov::float16(10), ov::float16(11), ov::float16(12), ov::float16(13), ov::float16(14), ov::float16(15), + ov::float16(16), ov::float16(17), ov::float16(18), ov::float16(19), + ov::float16(20), ov::float16(21), ov::float16(22), ov::float16(23), ov::float16(24), ov::float16(25), ov::float16(26), ov::float16(27), + ov::float16(28), ov::float16(29), ov::float16(30), ov::float16(31), ov::float16(32), ov::float16(33), ov::float16(34), ov::float16(35), + ov::float16(36), ov::float16(37), ov::float16(38), ov::float16(39), + + ov::float16(40), ov::float16(41), ov::float16(42), ov::float16(43), ov::float16(44), ov::float16(45), ov::float16(46), ov::float16(47), + ov::float16(48), ov::float16(49), ov::float16(50), ov::float16(51), ov::float16(52), ov::float16(53), ov::float16(54), ov::float16(55), + ov::float16(56), ov::float16(57), ov::float16(58), ov::float16(59), + ov::float16(60), ov::float16(61), ov::float16(62), ov::float16(63), ov::float16(64), ov::float16(65), ov::float16(66), ov::float16(67), + ov::float16(68), ov::float16(69), ov::float16(70), ov::float16(71), ov::float16(72), ov::float16(73), ov::float16(74), ov::float16(75), + ov::float16(76), ov::float16(77), ov::float16(78), ov::float16(79), + + ov::float16(80), ov::float16(81), ov::float16(82), ov::float16(83), ov::float16(84), ov::float16(85), ov::float16(86), ov::float16(87), + ov::float16(88), ov::float16(89), ov::float16(90), ov::float16(91), ov::float16(92), ov::float16(93), ov::float16(94), ov::float16(95), + ov::float16(96), ov::float16(97), ov::float16(98), ov::float16(99), + ov::float16(100), ov::float16(101), ov::float16(102), ov::float16(103), ov::float16(104), ov::float16(105), ov::float16(106), + ov::float16(107), ov::float16(108), ov::float16(109), ov::float16(110), ov::float16(111), ov::float16(112), ov::float16(113), + ov::float16(114), ov::float16(115), ov::float16(116), ov::float16(117), ov::float16(118), ov::float16(119), + + ov::float16(120), ov::float16(121), ov::float16(122), ov::float16(123), ov::float16(124), ov::float16(125), ov::float16(126), + ov::float16(127), ov::float16(128), ov::float16(129), ov::float16(130), ov::float16(131), ov::float16(132), ov::float16(133), + ov::float16(134), ov::float16(135), ov::float16(136), ov::float16(137), ov::float16(138), ov::float16(139), + ov::float16(140), ov::float16(141), ov::float16(142), ov::float16(143), ov::float16(144), ov::float16(145), ov::float16(146), + ov::float16(147), ov::float16(148), ov::float16(149), ov::float16(150), ov::float16(151), ov::float16(152), ov::float16(153), + ov::float16(154), ov::float16(155), ov::float16(156), ov::float16(157), ov::float16(158), ov::float16(159), + 
+ ov::float16(160), ov::float16(161), ov::float16(162), ov::float16(163), ov::float16(164), ov::float16(165), ov::float16(166), + ov::float16(167), ov::float16(168), ov::float16(169), ov::float16(170), ov::float16(171), ov::float16(172), ov::float16(173), + ov::float16(174), ov::float16(175), ov::float16(176), ov::float16(177), ov::float16(178), ov::float16(179), + ov::float16(180), ov::float16(181), ov::float16(182), ov::float16(183), ov::float16(184), ov::float16(185), ov::float16(186), + ov::float16(187), ov::float16(188), ov::float16(189), ov::float16(190), ov::float16(191), ov::float16(192), ov::float16(193), + ov::float16(194), ov::float16(195), ov::float16(196), ov::float16(197), ov::float16(198), ov::float16(199), + + ov::float16(200), ov::float16(201), ov::float16(202), ov::float16(203), ov::float16(204), ov::float16(205), ov::float16(206), + ov::float16(207), ov::float16(208), ov::float16(209), ov::float16(210), ov::float16(211), ov::float16(212), ov::float16(213), + ov::float16(214), ov::float16(215), ov::float16(216), ov::float16(217), ov::float16(218), ov::float16(219), + ov::float16(220), ov::float16(221), ov::float16(222), ov::float16(223), ov::float16(224), ov::float16(225), ov::float16(226), + ov::float16(227), ov::float16(228), ov::float16(229), ov::float16(230), ov::float16(231), ov::float16(232), ov::float16(233), + ov::float16(234), ov::float16(235), ov::float16(236), ov::float16(237), ov::float16(238), ov::float16(239) }); topology topology; @@ -1598,7 +1598,7 @@ void test_d21214_bfzyx_axisX_bfwzyx(bool is_caching_test) { } TEST(scatter_update_gpu_fp16, d21214_bfzyx_axisX_bfwzyx) { - test_d21214_bfzyx_axisX_bfwzyx(false); + test_d21214_bfzyx_axisX_bfwzyx(false); } TEST(scatter_update_gpu_fp32, dynamic) { @@ -1768,11 +1768,11 @@ TEST(scatter_update_cpu_impl_fp32, dynamic) { #ifdef RUN_ALL_MODEL_CACHING_TESTS TEST(scatter_update_gpu_fp16, d21214_bfzyx_axisX_bfwzyx_cached) { - test_d21214_bfzyx_axisX_bfwzyx(true); + test_d21214_bfzyx_axisX_bfwzyx(true); } #endif TEST(scatter_update_gpu_fp16, d2411_axisB_cached) { - test_d2411_axisB(true); + test_d2411_axisB(true); } TEST(scatter_update_gpu_fp32, output_padding) { diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/softmax_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/softmax_gpu_test.cpp index e45969be2f041d..19dead60da48e9 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/softmax_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/softmax_gpu_test.cpp @@ -647,7 +647,7 @@ class softmax_test : public tests::generic_test if (generic_params->data_type == data_types::f32) { return generate_reference_typed(inputs); } else { - return generate_reference_typed(inputs); + return generate_reference_typed(inputs); } } @@ -900,7 +900,7 @@ float getError() { } template<> -float getError() { +float getError() { return 0.2; } @@ -928,7 +928,7 @@ struct softmax_gpu_formats_test : public ::testing::TestWithParam > { public: void test(bool is_caching_test) { - const auto data_type = type_to_data_type::value; + const auto data_type = ov::element::from(); SoftmaxParams params; format::type plain_format; format::type target_format; @@ -963,7 +963,7 @@ struct softmax_gpu_formats_test }; using softmax_gpu_formats_test_f32 = softmax_gpu_formats_test; -using softmax_gpu_formats_test_f16 = softmax_gpu_formats_test; +using softmax_gpu_formats_test_f16 = softmax_gpu_formats_test; TEST_P(softmax_gpu_formats_test_f32, softmax_gpu_formats_test_f32) { ASSERT_NO_FATAL_FAILURE(test(false)); @@ -985,7 +985,7 @@ 
INSTANTIATE_TEST_SUITE_P(softmax_gpu_formats_test_f32_2d, INSTANTIATE_TEST_SUITE_P(softmax_gpu_formats_test_f16_2d, softmax_gpu_formats_test_f16, ::testing::Combine( - ::testing::ValuesIn(generateSoftmaxParams2D()), + ::testing::ValuesIn(generateSoftmaxParams2D()), ::testing::Values(format::bfyx), ::testing::ValuesIn(formats2D) ), @@ -1003,7 +1003,7 @@ INSTANTIATE_TEST_SUITE_P(softmax_gpu_formats_test_f32_3d, INSTANTIATE_TEST_SUITE_P(softmax_gpu_formats_test_f16_3d, softmax_gpu_formats_test_f16, ::testing::Combine( - ::testing::ValuesIn(generateSoftmaxParams3D()), + ::testing::ValuesIn(generateSoftmaxParams3D()), ::testing::Values(format::bfzyx), ::testing::ValuesIn(formats3D) ), diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/space_to_batch_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/space_to_batch_gpu_test.cpp index ddec5b3c84a4c3..72860cbc1ee660 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/space_to_batch_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/space_to_batch_gpu_test.cpp @@ -27,10 +27,10 @@ class space_to_batch_fp16_gpu: public ::testing::Test { auto input = engine.allocate_memory({ data_types::f16, format::bfyx, {1,2,2,2} }); set_values(input, { - FLOAT16(0.0f), FLOAT16(1.0f), - FLOAT16(2.0f), FLOAT16(3.0f), - FLOAT16(4.0f), FLOAT16(5.0f), - FLOAT16(6.0f), FLOAT16(7.0f) + ov::float16(0.0f), ov::float16(1.0f), + ov::float16(2.0f), ov::float16(3.0f), + ov::float16(4.0f), ov::float16(5.0f), + ov::float16(6.0f), ov::float16(7.0f) }); topology topology; @@ -72,10 +72,10 @@ class space_to_batch_fp16_gpu: public ::testing::Test { auto input = engine.allocate_memory({ data_types::f16, format::bfyx, {1,2,2,4} }); set_values(input, { - FLOAT16(0.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), - FLOAT16(4.0f), FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), - FLOAT16(8.0f), FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), - FLOAT16(12.0f), FLOAT16(13.0f), FLOAT16(14.0f), FLOAT16(15.0f) + ov::float16(0.0f), ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), + ov::float16(4.0f), ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), + ov::float16(8.0f), ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), + ov::float16(12.0f), ov::float16(13.0f), ov::float16(14.0f), ov::float16(15.0f) }); topology topology; @@ -119,9 +119,9 @@ class space_to_batch_fp16_gpu: public ::testing::Test { auto input = engine.allocate_memory({ data_types::f16, format::bfyx, {2,1,2,3} }); set_values(input, { - FLOAT16(0.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), - FLOAT16(4.0f), FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), - FLOAT16(8.0f), FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f) + ov::float16(0.0f), ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), + ov::float16(4.0f), ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), + ov::float16(8.0f), ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f) }); topology topology; @@ -165,9 +165,9 @@ class space_to_batch_fp16_gpu: public ::testing::Test { auto input = engine.allocate_memory({ data_types::f16, format::bfzyx, {1,2,2,3,1} }); set_values(input, { - FLOAT16(0.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), - FLOAT16(4.0f), FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), - FLOAT16(8.0f), FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f) + ov::float16(0.0f), ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), + ov::float16(4.0f), ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), + ov::float16(8.0f), ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f) }); topology 
topology; @@ -210,12 +210,12 @@ class space_to_batch_fp16_gpu: public ::testing::Test { auto input = engine.allocate_memory({ data_types::f16, format::bfwzyx, input_shape }); set_values(input, { - FLOAT16(0.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), - FLOAT16(4.0f), FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), - FLOAT16(8.0f), FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), - FLOAT16(12.0f), FLOAT16(13.0f), FLOAT16(14.0f), FLOAT16(15.0f), - FLOAT16(16.0f), FLOAT16(17.0f), FLOAT16(18.0f), FLOAT16(19.0f), - FLOAT16(20.0f), FLOAT16(21.0f), FLOAT16(22.0f), FLOAT16(23.0f) + ov::float16(0.0f), ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), + ov::float16(4.0f), ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), + ov::float16(8.0f), ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), + ov::float16(12.0f), ov::float16(13.0f), ov::float16(14.0f), ov::float16(15.0f), + ov::float16(16.0f), ov::float16(17.0f), ov::float16(18.0f), ov::float16(19.0f), + ov::float16(20.0f), ov::float16(21.0f), ov::float16(22.0f), ov::float16(23.0f) }); topology topology; @@ -263,10 +263,10 @@ class space_to_batch_fp16_gpu: public ::testing::Test { auto input = engine.allocate_memory({ data_types::f16, format::bfyx, {1,16,1,1} }); set_values(input, { - FLOAT16(0.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), - FLOAT16(4.0f), FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), - FLOAT16(8.0f), FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), - FLOAT16(12.0f), FLOAT16(13.0f), FLOAT16(14.0f), FLOAT16(15.0f) + ov::float16(0.0f), ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), + ov::float16(4.0f), ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), + ov::float16(8.0f), ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), + ov::float16(12.0f), ov::float16(13.0f), ov::float16(14.0f), ov::float16(15.0f) }); topology topology; @@ -316,10 +316,10 @@ class space_to_batch_fp16_gpu: public ::testing::Test { auto input = engine.allocate_memory({ data_types::f16, format::bfyx, {1,8,2,1} }); set_values(input, { - FLOAT16(0.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), - FLOAT16(4.0f), FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), - FLOAT16(8.0f), FLOAT16(9.0f), FLOAT16(10.0f), FLOAT16(11.0f), - FLOAT16(12.0f), FLOAT16(13.0f), FLOAT16(14.0f), FLOAT16(15.0f) + ov::float16(0.0f), ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), + ov::float16(4.0f), ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), + ov::float16(8.0f), ov::float16(9.0f), ov::float16(10.0f), ov::float16(11.0f), + ov::float16(12.0f), ov::float16(13.0f), ov::float16(14.0f), ov::float16(15.0f) }); topology topology; diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/space_to_depth_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/space_to_depth_gpu_test.cpp index ad4c415829b324..5847642b660b79 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/space_to_depth_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/space_to_depth_gpu_test.cpp @@ -26,8 +26,8 @@ class space_to_depth_fp16_gpu: public ::testing::Test { size_t block_size = 2; set_values(input1, { - FLOAT16(0.0f), FLOAT16(1.0f), - FLOAT16(2.0f), FLOAT16(3.0f) + ov::float16(0.0f), ov::float16(1.0f), + ov::float16(2.0f), ov::float16(3.0f) }); topology topology; @@ -66,10 +66,10 @@ class space_to_depth_fp16_gpu: public ::testing::Test { size_t block_size = 2; set_values(input1, { - FLOAT16(0.0f), FLOAT16(1.0f), - FLOAT16(2.0f), FLOAT16(3.0f), - FLOAT16(4.0f), FLOAT16(5.0f), - FLOAT16(6.0f), FLOAT16(7.0f) + ov::float16(0.0f), 
ov::float16(1.0f), + ov::float16(2.0f), ov::float16(3.0f), + ov::float16(4.0f), ov::float16(5.0f), + ov::float16(6.0f), ov::float16(7.0f) }); topology topology; @@ -108,16 +108,16 @@ class space_to_depth_fp16_gpu: public ::testing::Test { size_t block_size = 2; set_values(input1, { - FLOAT16(0.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), - FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), FLOAT16(9.0f), - FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), FLOAT16(13.0f), FLOAT16(14.0f), - FLOAT16(15.0f), FLOAT16(16.0f), FLOAT16(17.0f), FLOAT16(18.0f), FLOAT16(19.0f), - FLOAT16(20.0f), FLOAT16(21.0f), FLOAT16(22.0f), FLOAT16(23.0f), FLOAT16(24.0f), - FLOAT16(25.0f), FLOAT16(26.0f), FLOAT16(27.0f), FLOAT16(28.0f), FLOAT16(29.0f), - FLOAT16(30.0f), FLOAT16(31.0f), FLOAT16(32.0f), FLOAT16(33.0f), FLOAT16(34.0f), - FLOAT16(35.0f), FLOAT16(36.0f), FLOAT16(37.0f), FLOAT16(38.0f), FLOAT16(39.0f), - FLOAT16(40.0f), FLOAT16(41.0f), FLOAT16(42.0f), FLOAT16(43.0f), FLOAT16(44.0f), - FLOAT16(45.0f), FLOAT16(46.0f), FLOAT16(47.0f) + ov::float16(0.0f), ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), + ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), ov::float16(9.0f), + ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), ov::float16(13.0f), ov::float16(14.0f), + ov::float16(15.0f), ov::float16(16.0f), ov::float16(17.0f), ov::float16(18.0f), ov::float16(19.0f), + ov::float16(20.0f), ov::float16(21.0f), ov::float16(22.0f), ov::float16(23.0f), ov::float16(24.0f), + ov::float16(25.0f), ov::float16(26.0f), ov::float16(27.0f), ov::float16(28.0f), ov::float16(29.0f), + ov::float16(30.0f), ov::float16(31.0f), ov::float16(32.0f), ov::float16(33.0f), ov::float16(34.0f), + ov::float16(35.0f), ov::float16(36.0f), ov::float16(37.0f), ov::float16(38.0f), ov::float16(39.0f), + ov::float16(40.0f), ov::float16(41.0f), ov::float16(42.0f), ov::float16(43.0f), ov::float16(44.0f), + ov::float16(45.0f), ov::float16(46.0f), ov::float16(47.0f) }); topology topology; @@ -163,23 +163,23 @@ class space_to_depth_fp16_gpu: public ::testing::Test { size_t block_size = 3; set_values(input1, { - FLOAT16(0.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), - FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), FLOAT16(9.0f), - FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), FLOAT16(13.0f), FLOAT16(14.0f), - FLOAT16(15.0f), FLOAT16(16.0f), FLOAT16(17.0f), FLOAT16(18.0f), FLOAT16(19.0f), - FLOAT16(20.0f), FLOAT16(21.0f), FLOAT16(22.0f), FLOAT16(23.0f), FLOAT16(24.0f), - FLOAT16(25.0f), FLOAT16(26.0f), FLOAT16(27.0f), FLOAT16(28.0f), FLOAT16(29.0f), - FLOAT16(30.0f), FLOAT16(31.0f), FLOAT16(32.0f), FLOAT16(33.0f), FLOAT16(34.0f), - FLOAT16(35.0f), FLOAT16(36.0f), FLOAT16(37.0f), FLOAT16(38.0f), FLOAT16(39.0f), - FLOAT16(40.0f), FLOAT16(41.0f), FLOAT16(42.0f), FLOAT16(43.0f), FLOAT16(44.0f), - FLOAT16(45.0f), FLOAT16(46.0f), FLOAT16(47.0f), FLOAT16(48.0f), FLOAT16(49.0f), - FLOAT16(50.0f), FLOAT16(51.0f), FLOAT16(52.0f), FLOAT16(53.0f), FLOAT16(54.0f), - FLOAT16(55.0f), FLOAT16(56.0f), FLOAT16(57.0f), FLOAT16(58.0f), FLOAT16(59.0f), - FLOAT16(60.0f), FLOAT16(61.0f), FLOAT16(62.0f), FLOAT16(63.0f), FLOAT16(64.0f), - FLOAT16(65.0f), FLOAT16(66.0f), FLOAT16(67.0f), FLOAT16(68.0f), FLOAT16(69.0f), - FLOAT16(70.0f), FLOAT16(71.0f), FLOAT16(72.0f), FLOAT16(73.0f), FLOAT16(74.0f), - FLOAT16(75.0f), FLOAT16(76.0f), FLOAT16(77.0f), FLOAT16(78.0f), FLOAT16(79.0f), - FLOAT16(80.0f) + ov::float16(0.0f), ov::float16(1.0f), ov::float16(2.0f), 
ov::float16(3.0f), ov::float16(4.0f), + ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), ov::float16(9.0f), + ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), ov::float16(13.0f), ov::float16(14.0f), + ov::float16(15.0f), ov::float16(16.0f), ov::float16(17.0f), ov::float16(18.0f), ov::float16(19.0f), + ov::float16(20.0f), ov::float16(21.0f), ov::float16(22.0f), ov::float16(23.0f), ov::float16(24.0f), + ov::float16(25.0f), ov::float16(26.0f), ov::float16(27.0f), ov::float16(28.0f), ov::float16(29.0f), + ov::float16(30.0f), ov::float16(31.0f), ov::float16(32.0f), ov::float16(33.0f), ov::float16(34.0f), + ov::float16(35.0f), ov::float16(36.0f), ov::float16(37.0f), ov::float16(38.0f), ov::float16(39.0f), + ov::float16(40.0f), ov::float16(41.0f), ov::float16(42.0f), ov::float16(43.0f), ov::float16(44.0f), + ov::float16(45.0f), ov::float16(46.0f), ov::float16(47.0f), ov::float16(48.0f), ov::float16(49.0f), + ov::float16(50.0f), ov::float16(51.0f), ov::float16(52.0f), ov::float16(53.0f), ov::float16(54.0f), + ov::float16(55.0f), ov::float16(56.0f), ov::float16(57.0f), ov::float16(58.0f), ov::float16(59.0f), + ov::float16(60.0f), ov::float16(61.0f), ov::float16(62.0f), ov::float16(63.0f), ov::float16(64.0f), + ov::float16(65.0f), ov::float16(66.0f), ov::float16(67.0f), ov::float16(68.0f), ov::float16(69.0f), + ov::float16(70.0f), ov::float16(71.0f), ov::float16(72.0f), ov::float16(73.0f), ov::float16(74.0f), + ov::float16(75.0f), ov::float16(76.0f), ov::float16(77.0f), ov::float16(78.0f), ov::float16(79.0f), + ov::float16(80.0f) }); topology topology; @@ -226,8 +226,8 @@ class space_to_depth_fp16_gpu: public ::testing::Test { size_t block_size = 2; set_values(input1, { - FLOAT16(0.0f), FLOAT16(1.0f), - FLOAT16(2.0f), FLOAT16(3.0f) + ov::float16(0.0f), ov::float16(1.0f), + ov::float16(2.0f), ov::float16(3.0f) }); topology topology; @@ -266,10 +266,10 @@ class space_to_depth_fp16_gpu: public ::testing::Test { size_t block_size = 2; set_values(input1, { - FLOAT16(0.0f), FLOAT16(1.0f), - FLOAT16(2.0f), FLOAT16(3.0f), - FLOAT16(4.0f), FLOAT16(5.0f), - FLOAT16(6.0f), FLOAT16(7.0f) + ov::float16(0.0f), ov::float16(1.0f), + ov::float16(2.0f), ov::float16(3.0f), + ov::float16(4.0f), ov::float16(5.0f), + ov::float16(6.0f), ov::float16(7.0f) }); topology topology; @@ -308,16 +308,16 @@ class space_to_depth_fp16_gpu: public ::testing::Test { size_t block_size = 2; set_values(input1, { - FLOAT16(0.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), - FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), FLOAT16(9.0f), - FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), FLOAT16(13.0f), FLOAT16(14.0f), - FLOAT16(15.0f), FLOAT16(16.0f), FLOAT16(17.0f), FLOAT16(18.0f), FLOAT16(19.0f), - FLOAT16(20.0f), FLOAT16(21.0f), FLOAT16(22.0f), FLOAT16(23.0f), FLOAT16(24.0f), - FLOAT16(25.0f), FLOAT16(26.0f), FLOAT16(27.0f), FLOAT16(28.0f), FLOAT16(29.0f), - FLOAT16(30.0f), FLOAT16(31.0f), FLOAT16(32.0f), FLOAT16(33.0f), FLOAT16(34.0f), - FLOAT16(35.0f), FLOAT16(36.0f), FLOAT16(37.0f), FLOAT16(38.0f), FLOAT16(39.0f), - FLOAT16(40.0f), FLOAT16(41.0f), FLOAT16(42.0f), FLOAT16(43.0f), FLOAT16(44.0f), - FLOAT16(45.0f), FLOAT16(46.0f), FLOAT16(47.0f) + ov::float16(0.0f), ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), + ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), ov::float16(9.0f), + ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), ov::float16(13.0f), ov::float16(14.0f), + ov::float16(15.0f), 
ov::float16(16.0f), ov::float16(17.0f), ov::float16(18.0f), ov::float16(19.0f), + ov::float16(20.0f), ov::float16(21.0f), ov::float16(22.0f), ov::float16(23.0f), ov::float16(24.0f), + ov::float16(25.0f), ov::float16(26.0f), ov::float16(27.0f), ov::float16(28.0f), ov::float16(29.0f), + ov::float16(30.0f), ov::float16(31.0f), ov::float16(32.0f), ov::float16(33.0f), ov::float16(34.0f), + ov::float16(35.0f), ov::float16(36.0f), ov::float16(37.0f), ov::float16(38.0f), ov::float16(39.0f), + ov::float16(40.0f), ov::float16(41.0f), ov::float16(42.0f), ov::float16(43.0f), ov::float16(44.0f), + ov::float16(45.0f), ov::float16(46.0f), ov::float16(47.0f) }); topology topology; @@ -363,23 +363,23 @@ class space_to_depth_fp16_gpu: public ::testing::Test { size_t block_size = 3; set_values(input1, { - FLOAT16(0.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f), FLOAT16(4.0f), - FLOAT16(5.0f), FLOAT16(6.0f), FLOAT16(7.0f), FLOAT16(8.0f), FLOAT16(9.0f), - FLOAT16(10.0f), FLOAT16(11.0f), FLOAT16(12.0f), FLOAT16(13.0f), FLOAT16(14.0f), - FLOAT16(15.0f), FLOAT16(16.0f), FLOAT16(17.0f), FLOAT16(18.0f), FLOAT16(19.0f), - FLOAT16(20.0f), FLOAT16(21.0f), FLOAT16(22.0f), FLOAT16(23.0f), FLOAT16(24.0f), - FLOAT16(25.0f), FLOAT16(26.0f), FLOAT16(27.0f), FLOAT16(28.0f), FLOAT16(29.0f), - FLOAT16(30.0f), FLOAT16(31.0f), FLOAT16(32.0f), FLOAT16(33.0f), FLOAT16(34.0f), - FLOAT16(35.0f), FLOAT16(36.0f), FLOAT16(37.0f), FLOAT16(38.0f), FLOAT16(39.0f), - FLOAT16(40.0f), FLOAT16(41.0f), FLOAT16(42.0f), FLOAT16(43.0f), FLOAT16(44.0f), - FLOAT16(45.0f), FLOAT16(46.0f), FLOAT16(47.0f), FLOAT16(48.0f), FLOAT16(49.0f), - FLOAT16(50.0f), FLOAT16(51.0f), FLOAT16(52.0f), FLOAT16(53.0f), FLOAT16(54.0f), - FLOAT16(55.0f), FLOAT16(56.0f), FLOAT16(57.0f), FLOAT16(58.0f), FLOAT16(59.0f), - FLOAT16(60.0f), FLOAT16(61.0f), FLOAT16(62.0f), FLOAT16(63.0f), FLOAT16(64.0f), - FLOAT16(65.0f), FLOAT16(66.0f), FLOAT16(67.0f), FLOAT16(68.0f), FLOAT16(69.0f), - FLOAT16(70.0f), FLOAT16(71.0f), FLOAT16(72.0f), FLOAT16(73.0f), FLOAT16(74.0f), - FLOAT16(75.0f), FLOAT16(76.0f), FLOAT16(77.0f), FLOAT16(78.0f), FLOAT16(79.0f), - FLOAT16(80.0f) + ov::float16(0.0f), ov::float16(1.0f), ov::float16(2.0f), ov::float16(3.0f), ov::float16(4.0f), + ov::float16(5.0f), ov::float16(6.0f), ov::float16(7.0f), ov::float16(8.0f), ov::float16(9.0f), + ov::float16(10.0f), ov::float16(11.0f), ov::float16(12.0f), ov::float16(13.0f), ov::float16(14.0f), + ov::float16(15.0f), ov::float16(16.0f), ov::float16(17.0f), ov::float16(18.0f), ov::float16(19.0f), + ov::float16(20.0f), ov::float16(21.0f), ov::float16(22.0f), ov::float16(23.0f), ov::float16(24.0f), + ov::float16(25.0f), ov::float16(26.0f), ov::float16(27.0f), ov::float16(28.0f), ov::float16(29.0f), + ov::float16(30.0f), ov::float16(31.0f), ov::float16(32.0f), ov::float16(33.0f), ov::float16(34.0f), + ov::float16(35.0f), ov::float16(36.0f), ov::float16(37.0f), ov::float16(38.0f), ov::float16(39.0f), + ov::float16(40.0f), ov::float16(41.0f), ov::float16(42.0f), ov::float16(43.0f), ov::float16(44.0f), + ov::float16(45.0f), ov::float16(46.0f), ov::float16(47.0f), ov::float16(48.0f), ov::float16(49.0f), + ov::float16(50.0f), ov::float16(51.0f), ov::float16(52.0f), ov::float16(53.0f), ov::float16(54.0f), + ov::float16(55.0f), ov::float16(56.0f), ov::float16(57.0f), ov::float16(58.0f), ov::float16(59.0f), + ov::float16(60.0f), ov::float16(61.0f), ov::float16(62.0f), ov::float16(63.0f), ov::float16(64.0f), + ov::float16(65.0f), ov::float16(66.0f), ov::float16(67.0f), ov::float16(68.0f), ov::float16(69.0f), + ov::float16(70.0f), 
ov::float16(71.0f), ov::float16(72.0f), ov::float16(73.0f), ov::float16(74.0f), + ov::float16(75.0f), ov::float16(76.0f), ov::float16(77.0f), ov::float16(78.0f), ov::float16(79.0f), + ov::float16(80.0f) }); topology topology; diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/split_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/split_gpu_test.cpp index 88f3418f4d756c..6aea709c0fe496 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/split_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/split_gpu_test.cpp @@ -37,7 +37,7 @@ void split_test(int batch_num, int feature_num, int x_size, int y_size, std::vec auto& engine = get_test_engine(); cldnn::tensor reference_input_size = { batch_num, feature_num, x_size, y_size }; - cldnn::memory::ptr input = engine.allocate_memory({ type_to_data_type::value, format::bfyx, reference_input_size }); + cldnn::memory::ptr input = engine.allocate_memory({ ov::element::from(), format::bfyx, reference_input_size }); std::vector > input_ids_offsets; topology topology; diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/tile_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/tile_gpu_test.cpp index 2ca69f746d036e..20f529dde04a8c 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/tile_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/tile_gpu_test.cpp @@ -696,7 +696,7 @@ struct tile_test : public ::testing::TestWithParam > { public: void test(bool is_caching_test) { - const auto data_type = type_to_data_type::value; + const auto data_type = ov::element::from(); Params params; format::type plain_layout; format::type target_layout; @@ -750,7 +750,7 @@ struct tile_test }; using tile_test_f32 = tile_test; -using tile_test_f16 = tile_test; +using tile_test_f16 = tile_test; TEST_P(tile_test_f32, test_case) { ASSERT_NO_FATAL_FAILURE(test(false)); @@ -771,7 +771,7 @@ INSTANTIATE_TEST_SUITE_P(tile_gpu_2D, INSTANTIATE_TEST_SUITE_P(tile_gpu_2D, tile_test_f16, ::testing::Combine( - ::testing::ValuesIn(generateTileParams2D()), + ::testing::ValuesIn(generateTileParams2D()), ::testing::Values(format::bfyx), ::testing::ValuesIn(layouts_2d)), PrintToStringParamName()); @@ -787,7 +787,7 @@ INSTANTIATE_TEST_SUITE_P(tile_gpu_3D, INSTANTIATE_TEST_SUITE_P(tile_gpu_3D, tile_test_f16, ::testing::Combine( - ::testing::ValuesIn(generateTileParams3D()), + ::testing::ValuesIn(generateTileParams3D()), ::testing::Values(format::bfzyx), ::testing::ValuesIn(layouts_3d)), PrintToStringParamName()); diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/unique_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/unique_gpu_test.cpp index 9f943b0817a858..5bfee149d6f587 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/unique_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/unique_gpu_test.cpp @@ -49,9 +49,9 @@ struct unique_gpu_test : public testing::TestWithParam>::GetParam(); auto& engine = get_test_engine(); - const auto elem_data_type = type_to_data_type::value; - const auto index_data_type = type_to_data_type::value; - const auto count_data_type = type_to_data_type::value; + const auto elem_data_type = ov::element::from(); + const auto index_data_type = ov::element::from(); + const auto count_data_type = ov::element::from(); const auto plain_format = format::bfyx; const layout in_layout(p.data_shape, elem_data_type, plain_format); @@ -120,9 +120,9 @@ struct unique_gpu_test : public testing::TestWithParam(const FLOAT16 &v1, const FLOAT16 &v2); - friend bool operator>=(const FLOAT16 &v1, const 
FLOAT16 &v2); - friend bool operator<(const FLOAT16 &v1, const FLOAT16 &v2); - friend bool operator>(const FLOAT16 &v1, const float &v2); - friend bool operator<(const FLOAT16 &v1, const float &v2); - friend bool operator==(const FLOAT16 &v1, const FLOAT16 &v2); - friend bool operator!=(const FLOAT16 &v1, const FLOAT16 &v2); - - FLOAT16() { v = 0; } - - FLOAT16 &operator+=(const FLOAT16 &v1) { - *this = (float)*this + (float)v1; - return *this; - } - - FLOAT16 &operator/=(const FLOAT16 &v1) { - *this = (float)*this / (float)v1; - return *this; - } - - FLOAT16 &operator*=(const FLOAT16 &v1) { - *this = (float)*this * (float)v1; - return *this; - } -}; - -inline FLOAT16 operator+(const FLOAT16 &v1, const FLOAT16 &v2) { return (float)v1 + (float)v2; } - -inline FLOAT16 operator-(const FLOAT16 &v1, const FLOAT16 &v2) { return (float)v1 - (float)v2; } - -inline FLOAT16 operator*(const FLOAT16 &v1, const FLOAT16 &v2) { return (float)v1 * (float)v2; } - -inline FLOAT16 operator/(const FLOAT16 &v1, const FLOAT16 &v2) { return (float)v1 / (float)v2; } - -inline bool operator>(const FLOAT16 &v1, const FLOAT16 &v2) { return (float)v1 > (float)v2; } - -inline bool operator>=(const FLOAT16 &v1, const FLOAT16 &v2) { return (float)v1 >= (float)v2; } - -inline bool operator<(const FLOAT16 &v1, const FLOAT16 &v2) { return (float)v1 < (float)v2; } - -inline bool operator>(const FLOAT16 &v1, const float &v2) { return (float)v1 > v2; } - -inline bool operator<(const FLOAT16 &v1, const float &v2) { return (float)v1 < v2; } - -inline bool operator==(const FLOAT16 &v1, const FLOAT16 &v2) { return v1.v == v2.v; } - -inline bool operator!=(const FLOAT16 &v1, const FLOAT16 &v2) { return v1.v != v2.v; } - -namespace std { - -template <> -struct numeric_limits { - static constexpr FLOAT16 lowest() { return FLOAT16::lowest_val(); } -}; - -} // namespace std diff --git a/src/plugins/intel_gpu/tests/unit/test_utils/network_test.h b/src/plugins/intel_gpu/tests/unit/test_utils/network_test.h index e052776dfecf64..d7958a3ffc2004 100644 --- a/src/plugins/intel_gpu/tests/unit/test_utils/network_test.h +++ b/src/plugins/intel_gpu/tests/unit/test_utils/network_test.h @@ -43,8 +43,8 @@ struct typed_comparator { }; template <> -struct typed_comparator { - static ::testing::AssertionResult compare(const char* lhs_expr, const char* rhs_expr, FLOAT16 ref, FLOAT16 val) { +struct typed_comparator { + static ::testing::AssertionResult compare(const char* lhs_expr, const char* rhs_expr, ov::float16 ref, ov::float16 val) { double abs_error = std::abs(0.05 * (double)ref); return ::testing::internal::DoubleNearPredFormat(lhs_expr, rhs_expr, "5 percent", (double)ref, (double)val, abs_error); } @@ -287,7 +287,7 @@ class network_test { typename reference_tensor_typed::vector_type data) { auto output = reference_tensor_typed(std::move(data)); auto shape = output.get_shape(); - auto lt = cldnn::layout(cldnn::type_to_data_type::value, fmt, shape); + auto lt = cldnn::layout(ov::element::from(), fmt, shape); topo.add(cldnn::input_layout(id, lt)); auto mem = eng.allocate_memory(lt); output.fill_memory(mem); @@ -301,7 +301,7 @@ class network_test { typename reference_tensor_typed::vector_type data) { auto output = reference_tensor_typed(std::move(data)); auto shape = output.get_shape(); - auto lt = cldnn::layout(cldnn::type_to_data_type::value, fmt, shape); + auto lt = cldnn::layout(ov::element::from(), fmt, shape); auto mem = eng.allocate_memory(lt); output.fill_memory(mem); topo.add(cldnn::data(id, mem)); @@ -314,7 +314,7 @@ class network_test { 
std::shared_ptr> weights, std::shared_ptr> bias, ov::intel_gpu::ImplementationDesc force = ov::intel_gpu::ImplementationDesc{ cldnn::format::any, "" }) { - topo.add(cldnn::fully_connected(id, input_info(input->id), weights->id, bias->id, cldnn::type_to_data_type::value)); + topo.add(cldnn::fully_connected(id, input_info(input->id), weights->id, bias->id, ov::element::from())); if (force.output_format != cldnn::format::any || force.kernel_name != "") forced_impls[id] = force; VVF output_data = fully_connected_reference_typed(input->reference.reference, @@ -330,7 +330,7 @@ class network_test { std::shared_ptr> bias, ov::intel_gpu::ImplementationDesc force = ov::intel_gpu::ImplementationDesc{cldnn::format::any, ""}, size_t input_dim_size = 3) { - topo.add(cldnn::fully_connected(id, input_info(input->id), weights->id, bias->id, cldnn::type_to_data_type::value, cldnn::padding(), input_dim_size)); + topo.add(cldnn::fully_connected(id, input_info(input->id), weights->id, bias->id, ov::element::from(), cldnn::padding(), input_dim_size)); if (force.output_format != cldnn::format::any || force.kernel_name != "") forced_impls[id] = force; VVVVF output_data = fully_connected_reference_typed_3d(input->reference.reference, diff --git a/src/plugins/intel_gpu/tests/unit/test_utils/random_gen.h b/src/plugins/intel_gpu/tests/unit/test_utils/random_gen.h index 7d34c2db21403c..8e8620f9412b6c 100644 --- a/src/plugins/intel_gpu/tests/unit/test_utils/random_gen.h +++ b/src/plugins/intel_gpu/tests/unit/test_utils/random_gen.h @@ -5,7 +5,7 @@ #include #include #include -#include "float16.h" +#include "openvino/core/type/float16.hpp" // NOTE: Needed only for possibly imported type (always_false). #include @@ -85,15 +85,15 @@ namespace rnd_generators static_assert(number_caps::inv_exp2(8) == 0.00390625, "1/exp2(8)"); template <> - struct number_caps : number_caps + struct number_caps : number_caps { - using output_type = FLOAT16; // NOTE: Exchange with actual half_t. + using output_type = ov::float16; // NOTE: Exchange with actual ov::float16. static constexpr unsigned significand_bits = 10; // Number of stored bits of significand part of FP. 
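The hunks above and below migrate the GPU plugin test utilities from the local FLOAT16 helper to the core ov::float16 type, which already provides construction from float, implicit conversion back to float, and raw-bit access. A minimal, self-contained sketch of those conversions, assuming only the public ov::float16 interface visible in this patch (constructor from float, operator float, from_bits); it is illustrative and not part of the patch itself:

    #include "openvino/core/type/float16.hpp"

    #include <cstdint>
    #include <iostream>

    int main() {
        // Construct from float; rounding to half precision happens in the constructor,
        // which is what the updated number_caps<>::convert() relies on.
        ov::float16 h(3.14159f);

        // Implicit conversion back to float for arithmetic, as the updated tests use.
        float back = static_cast<float>(h);

        // Reinterpret a raw 16-bit pattern, mirroring the new half_to_float() test helper.
        ov::float16 one = ov::float16::from_bits(0x3C00);  // 1.0 in IEEE-754 binary16

        std::cout << back << " " << static_cast<float>(one) << std::endl;
        return 0;
    }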
static output_type convert(const calc_type value) { - return FLOAT16(cldnn::float_to_half(value)); + return ov::float16(value); } }; diff --git a/src/plugins/intel_gpu/tests/unit/test_utils/test_utils.cpp b/src/plugins/intel_gpu/tests/unit/test_utils/test_utils.cpp index 3f359b46654a89..895e727e07e735 100644 --- a/src/plugins/intel_gpu/tests/unit/test_utils/test_utils.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_utils/test_utils.cpp @@ -3,14 +3,9 @@ // #include "test_utils.h" -#include "float16.h" #include -namespace cldnn { -const cldnn::data_types type_to_data_type::value; -} // namespace cldnn - using namespace cldnn; namespace tests { @@ -37,7 +32,7 @@ void generic_test::run_single_test(bool is_caching_test) { if (generic_params->data_type == data_types::f32) { tests::set_random_values(input_mems[i], true, 7, 10); } else { - tests::set_random_values(input_mems[i], true, 5, 10); + tests::set_random_values(input_mems[i], true, 5, 10); } } else { size_t size = generic_params->input_layouts[i].batch() * generic_params->input_layouts[i].feature(); @@ -50,11 +45,11 @@ void generic_test::run_single_test(bool is_caching_test) { tests::set_values_per_batch_and_feature(input_mems[i], values); multipler = values.size(); } else { - std::vector values; + std::vector values; for (size_t j = 1; j <= size; j++) { - values.push_back(FLOAT16(static_cast(multipler + j))); + values.push_back(ov::float16(static_cast(multipler + j))); } - tests::set_values_per_batch_and_feature(input_mems[i], values); + tests::set_values_per_batch_and_feature(input_mems[i], values); multipler = values.size(); } } @@ -107,7 +102,7 @@ void generic_test::run_single_test(bool is_caching_test) { if (output->get_layout().data_type == data_types::f32) { compare_buffers(output, output_ref); } else { - compare_buffers(output, output_ref); + compare_buffers(output, output_ref); } } @@ -369,7 +364,7 @@ std::string test_params::print_tensor(cldnn::tensor t) { std::string test_params::print() { std::stringstream str; - str << "Data type: " << data_type_traits::name(data_type) << std::endl; + str << "Data type: " << ov::element::Type(data_type) << std::endl; for (int j = 0 ; j < (int)input_layouts.size(); j++) { const cldnn::tensor& t = input_layouts[j].get_tensor(); diff --git a/src/plugins/intel_gpu/tests/unit/test_utils/test_utils.h b/src/plugins/intel_gpu/tests/unit/test_utils/test_utils.h index dcc1e256c7ef00..08d49918a0de10 100644 --- a/src/plugins/intel_gpu/tests/unit/test_utils/test_utils.h +++ b/src/plugins/intel_gpu/tests/unit/test_utils/test_utils.h @@ -6,6 +6,8 @@ #pragma once +#include "openvino/core/type/float16.hpp" + #include #include #include @@ -30,7 +32,6 @@ #include #include -#include "float16.h" #include "random_gen.h" #include "uniform_quantized_real_distribution.hpp" #include "to_string_utils.h" @@ -45,13 +46,6 @@ #define ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0])) -namespace cldnn { -template <> -struct type_to_data_type { - static constexpr data_types value = data_types::f16; -}; -} // namespace cldnn - namespace tests { std::shared_ptr create_test_engine(); @@ -139,6 +133,11 @@ inline VF flatten_2d(cldnn::format input_format, VVF &data) { return vec; } +inline float half_to_float(uint16_t val) { + auto half = ov::float16::from_bits(val); + return static_cast(half); +} + template inline VF flatten_4d(cldnn::format input_format, VVVVF &data) { size_t a = data.size(); @@ -280,7 +279,7 @@ void set_values_per_batch_and_feature(cldnn::memory::ptr mem, std::vector arg } template::value || - std::is_same::value>::type* 
= nullptr> + std::is_same::value>::type* = nullptr> void set_random_values(cldnn::memory::ptr mem, bool sign = false, unsigned significand_bit = 8, unsigned scale = 1) { cldnn::mem_lock ptr(mem, get_test_stream()); @@ -349,10 +348,10 @@ inline bool are_equal( return true; } -inline bool floating_point_equal(FLOAT16 x, FLOAT16 y, int max_ulps_diff = 4) { +inline bool floating_point_equal(ov::float16 x, ov::float16 y, int max_ulps_diff = 4) { int16_t sign_bit_mask = 1; sign_bit_mask <<= 15; - int16_t a = x.v, b = y.v; + int16_t a = reinterpret_cast(x), b = reinterpret_cast(y);; if ((a & sign_bit_mask) != (b & sign_bit_mask)) { a &= ~sign_bit_mask; b &= ~sign_bit_mask; @@ -512,7 +511,7 @@ inline void PrintTupleTo(const std::tuple, std::sha (void)sm; } else if (primitive->type == cldnn::reorder::type_id()) { auto reorder = std::static_pointer_cast(primitive); - str << "Output data type: " << cldnn::data_type_traits::name(*reorder->output_data_types[0]) << " Mean: " << reorder->mean << "Subtract per feature: " << "TODO" /*std::vector subtract_per_feature*/; + str << "Output data type: " << ov::element::Type(*reorder->output_data_types[0]) << " Mean: " << reorder->mean << "Subtract per feature: " << "TODO" /*std::vector subtract_per_feature*/; } else if (primitive->type == cldnn::normalize::type_id()) { auto normalize = std::static_pointer_cast(primitive); std::string norm_region = normalize->across_spatial ? "across_spatial" : "within_spatial"; @@ -551,9 +550,9 @@ std::vector get_output_values_to_float(cldnn::network& net, const cldnn:: std::vector ret; auto ptr = output.get_memory(); cldnn::mem_lock mem(ptr, net.get_stream()); - if (ptr->get_layout().data_type != cldnn::type_to_data_type::value) - OPENVINO_THROW("target type ", cldnn::data_type_traits::name(cldnn::type_to_data_type::value), - " mismatched with actual type ", cldnn::data_type_traits::name(ptr->get_layout().data_type)); + if (ptr->get_layout().data_type != ov::element::from()) + OPENVINO_THROW("target type ", ov::element::from().get_type_name(), + " mismatched with actual type ", ov::element::Type(ptr->get_layout().data_type).get_type_name()); for (size_t i = 0; i < std::min(max_cnt, ptr->get_layout().count()); i++) ret.push_back(mem[i]); return ret; @@ -562,7 +561,7 @@ std::vector get_output_values_to_float(cldnn::network& net, const cldnn:: inline std::vector get_output_values_to_float(cldnn::network& net, const cldnn::network_output& output, size_t max_cnt = std::numeric_limits::max()) { switch(output.get_layout().data_type){ case cldnn::data_types::f16: - return get_output_values_to_float(net, output, max_cnt); + return get_output_values_to_float(net, output, max_cnt); case cldnn::data_types::f32: return get_output_values_to_float(net, output, max_cnt); case cldnn::data_types::i8: From e072dc33b092d59f389e1725e736ed45faa2c9f3 Mon Sep 17 00:00:00 2001 From: Maciej Smyk Date: Tue, 3 Oct 2023 15:14:03 +0200 Subject: [PATCH 044/257] Device Plugins Restructure (#20219) --- .../openvino_workflow/openvino_intro/Device_Plugins}/CPU.md | 0 .../openvino_workflow/openvino_intro/Device_Plugins}/GNA.md | 0 .../openvino_workflow/openvino_intro/Device_Plugins}/GPU.md | 0 .../openvino_intro/Device_Plugins/GPU}/GPU_RemoteTensor_API.md | 0 .../openvino_workflow/openvino_intro/Device_Plugins}/NPU.md | 0 .../openvino_intro/Device_Plugins}/config_properties.md | 0 6 files changed, 0 insertions(+), 0 deletions(-) rename docs/{OV_Runtime_UG/supported_plugins => articles_en/openvino_workflow/openvino_intro/Device_Plugins}/CPU.md (100%) rename 
docs/{OV_Runtime_UG/supported_plugins => articles_en/openvino_workflow/openvino_intro/Device_Plugins}/GNA.md (100%) rename docs/{OV_Runtime_UG/supported_plugins => articles_en/openvino_workflow/openvino_intro/Device_Plugins}/GPU.md (100%) rename docs/{OV_Runtime_UG/supported_plugins => articles_en/openvino_workflow/openvino_intro/Device_Plugins/GPU}/GPU_RemoteTensor_API.md (100%) rename docs/{OV_Runtime_UG/supported_plugins => articles_en/openvino_workflow/openvino_intro/Device_Plugins}/NPU.md (100%) rename docs/{OV_Runtime_UG/supported_plugins => articles_en/openvino_workflow/openvino_intro/Device_Plugins}/config_properties.md (100%) diff --git a/docs/OV_Runtime_UG/supported_plugins/CPU.md b/docs/articles_en/openvino_workflow/openvino_intro/Device_Plugins/CPU.md similarity index 100% rename from docs/OV_Runtime_UG/supported_plugins/CPU.md rename to docs/articles_en/openvino_workflow/openvino_intro/Device_Plugins/CPU.md diff --git a/docs/OV_Runtime_UG/supported_plugins/GNA.md b/docs/articles_en/openvino_workflow/openvino_intro/Device_Plugins/GNA.md similarity index 100% rename from docs/OV_Runtime_UG/supported_plugins/GNA.md rename to docs/articles_en/openvino_workflow/openvino_intro/Device_Plugins/GNA.md diff --git a/docs/OV_Runtime_UG/supported_plugins/GPU.md b/docs/articles_en/openvino_workflow/openvino_intro/Device_Plugins/GPU.md similarity index 100% rename from docs/OV_Runtime_UG/supported_plugins/GPU.md rename to docs/articles_en/openvino_workflow/openvino_intro/Device_Plugins/GPU.md diff --git a/docs/OV_Runtime_UG/supported_plugins/GPU_RemoteTensor_API.md b/docs/articles_en/openvino_workflow/openvino_intro/Device_Plugins/GPU/GPU_RemoteTensor_API.md similarity index 100% rename from docs/OV_Runtime_UG/supported_plugins/GPU_RemoteTensor_API.md rename to docs/articles_en/openvino_workflow/openvino_intro/Device_Plugins/GPU/GPU_RemoteTensor_API.md diff --git a/docs/OV_Runtime_UG/supported_plugins/NPU.md b/docs/articles_en/openvino_workflow/openvino_intro/Device_Plugins/NPU.md similarity index 100% rename from docs/OV_Runtime_UG/supported_plugins/NPU.md rename to docs/articles_en/openvino_workflow/openvino_intro/Device_Plugins/NPU.md diff --git a/docs/OV_Runtime_UG/supported_plugins/config_properties.md b/docs/articles_en/openvino_workflow/openvino_intro/Device_Plugins/config_properties.md similarity index 100% rename from docs/OV_Runtime_UG/supported_plugins/config_properties.md rename to docs/articles_en/openvino_workflow/openvino_intro/Device_Plugins/config_properties.md From d5f0c6f4ef785a5384f5384ef44b7a846e9b5abf Mon Sep 17 00:00:00 2001 From: Sebastian Golebiewski Date: Tue, 3 Oct 2023 15:32:47 +0200 Subject: [PATCH 045/257] Changing file structure of Operation Sets section (#20214) --- docs/{resources => articles_en/documentation}/openvino_ir.md | 0 .../documentation/openvino_ir/operation_sets.md} | 0 .../documentation/openvino_ir/operation_sets/available_opsets.md} | 0 .../openvino_ir/operation_sets/available_opsets}/opset1.md | 0 .../openvino_ir/operation_sets/available_opsets}/opset10.md | 0 .../openvino_ir/operation_sets/available_opsets}/opset11.md | 0 .../openvino_ir/operation_sets/available_opsets}/opset12.md | 0 .../openvino_ir/operation_sets/available_opsets}/opset13.md | 0 .../openvino_ir/operation_sets/available_opsets}/opset2.md | 0 .../openvino_ir/operation_sets/available_opsets}/opset3.md | 0 .../openvino_ir/operation_sets/available_opsets}/opset4.md | 0 .../openvino_ir/operation_sets/available_opsets}/opset5.md | 0 
.../openvino_ir/operation_sets/available_opsets}/opset6.md | 0 .../openvino_ir/operation_sets/available_opsets}/opset7.md | 0 .../openvino_ir/operation_sets/available_opsets}/opset8.md | 0 .../openvino_ir/operation_sets/available_opsets}/opset9.md | 0 16 files changed, 0 insertions(+), 0 deletions(-) rename docs/{resources => articles_en/documentation}/openvino_ir.md (100%) rename docs/{MO_DG/IR_and_opsets.md => articles_en/documentation/openvino_ir/operation_sets.md} (100%) rename docs/{ops/opset.md => articles_en/documentation/openvino_ir/operation_sets/available_opsets.md} (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/available_opsets}/opset1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/available_opsets}/opset10.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/available_opsets}/opset11.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/available_opsets}/opset12.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/available_opsets}/opset13.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/available_opsets}/opset2.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/available_opsets}/opset3.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/available_opsets}/opset4.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/available_opsets}/opset5.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/available_opsets}/opset6.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/available_opsets}/opset7.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/available_opsets}/opset8.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/available_opsets}/opset9.md (100%) diff --git a/docs/resources/openvino_ir.md b/docs/articles_en/documentation/openvino_ir.md similarity index 100% rename from docs/resources/openvino_ir.md rename to docs/articles_en/documentation/openvino_ir.md diff --git a/docs/MO_DG/IR_and_opsets.md b/docs/articles_en/documentation/openvino_ir/operation_sets.md similarity index 100% rename from docs/MO_DG/IR_and_opsets.md rename to docs/articles_en/documentation/openvino_ir/operation_sets.md diff --git a/docs/ops/opset.md b/docs/articles_en/documentation/openvino_ir/operation_sets/available_opsets.md similarity index 100% rename from docs/ops/opset.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/available_opsets.md diff --git a/docs/ops/opset1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/available_opsets/opset1.md similarity index 100% rename from docs/ops/opset1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/available_opsets/opset1.md diff --git a/docs/ops/opset10.md b/docs/articles_en/documentation/openvino_ir/operation_sets/available_opsets/opset10.md similarity index 100% rename from docs/ops/opset10.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/available_opsets/opset10.md diff --git a/docs/ops/opset11.md b/docs/articles_en/documentation/openvino_ir/operation_sets/available_opsets/opset11.md similarity index 100% rename from docs/ops/opset11.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/available_opsets/opset11.md diff --git a/docs/ops/opset12.md 
b/docs/articles_en/documentation/openvino_ir/operation_sets/available_opsets/opset12.md similarity index 100% rename from docs/ops/opset12.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/available_opsets/opset12.md diff --git a/docs/ops/opset13.md b/docs/articles_en/documentation/openvino_ir/operation_sets/available_opsets/opset13.md similarity index 100% rename from docs/ops/opset13.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/available_opsets/opset13.md diff --git a/docs/ops/opset2.md b/docs/articles_en/documentation/openvino_ir/operation_sets/available_opsets/opset2.md similarity index 100% rename from docs/ops/opset2.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/available_opsets/opset2.md diff --git a/docs/ops/opset3.md b/docs/articles_en/documentation/openvino_ir/operation_sets/available_opsets/opset3.md similarity index 100% rename from docs/ops/opset3.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/available_opsets/opset3.md diff --git a/docs/ops/opset4.md b/docs/articles_en/documentation/openvino_ir/operation_sets/available_opsets/opset4.md similarity index 100% rename from docs/ops/opset4.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/available_opsets/opset4.md diff --git a/docs/ops/opset5.md b/docs/articles_en/documentation/openvino_ir/operation_sets/available_opsets/opset5.md similarity index 100% rename from docs/ops/opset5.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/available_opsets/opset5.md diff --git a/docs/ops/opset6.md b/docs/articles_en/documentation/openvino_ir/operation_sets/available_opsets/opset6.md similarity index 100% rename from docs/ops/opset6.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/available_opsets/opset6.md diff --git a/docs/ops/opset7.md b/docs/articles_en/documentation/openvino_ir/operation_sets/available_opsets/opset7.md similarity index 100% rename from docs/ops/opset7.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/available_opsets/opset7.md diff --git a/docs/ops/opset8.md b/docs/articles_en/documentation/openvino_ir/operation_sets/available_opsets/opset8.md similarity index 100% rename from docs/ops/opset8.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/available_opsets/opset8.md diff --git a/docs/ops/opset9.md b/docs/articles_en/documentation/openvino_ir/operation_sets/available_opsets/opset9.md similarity index 100% rename from docs/ops/opset9.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/available_opsets/opset9.md From 2b07576e2b4fa2746fd9382e3d09b37c3656ba17 Mon Sep 17 00:00:00 2001 From: Maciej Smyk Date: Tue, 3 Oct 2023 16:29:54 +0200 Subject: [PATCH 046/257] [DOCS] Adding Conan distribution article to docs for master (#20205) --- .../installing-openvino-linux-header.md | 2 + .../installing-openvino-macos-header.md | 2 + .../installing-openvino-conan.md | 96 +++++++++++++++++++ .../installing-openvino-windows-header.md | 2 + 4 files changed, 102 insertions(+) create mode 100644 docs/articles_en/get started/installing-openvino-overview/installing-openvino-shared/installing-openvino-conan.md diff --git a/docs/articles_en/get started/installing-openvino-overview/installing-openvino-linux-header.md b/docs/articles_en/get started/installing-openvino-overview/installing-openvino-linux-header.md index a45b11d20e2f5e..ee958af006480a 100644 --- a/docs/articles_en/get 
started/installing-openvino-overview/installing-openvino-linux-header.md +++ b/docs/articles_en/get started/installing-openvino-overview/installing-openvino-linux-header.md @@ -20,6 +20,7 @@ Use vcpkg Use Homebrew Use Docker + Use Conan If you want to install OpenVINO™ Runtime on Linux, you have the following options: @@ -32,6 +33,7 @@ If you want to install OpenVINO™ Runtime on Linux, you have the following opti * :doc:`Install OpenVINO using vcpkg ` * :doc:`Install OpenVINO using Homebrew ` * :doc:`Install OpenVINO using Docker ` +* :doc:`Install OpenVINO using Conan Package Manager ` diff --git a/docs/articles_en/get started/installing-openvino-overview/installing-openvino-macos-header.md b/docs/articles_en/get started/installing-openvino-overview/installing-openvino-macos-header.md index 2e0d70b61d04be..aa9697f1a34ec4 100644 --- a/docs/articles_en/get started/installing-openvino-overview/installing-openvino-macos-header.md +++ b/docs/articles_en/get started/installing-openvino-overview/installing-openvino-macos-header.md @@ -17,6 +17,7 @@ Use PyPI Use Conda Forge Use vcpkg + Use Conan If you want to install OpenVINO™ Runtime on macOS, you have the following options: @@ -27,6 +28,7 @@ If you want to install OpenVINO™ Runtime on macOS, you have the following opti * :doc:`Install OpenVINO using Conda Forge ` * :doc:`Install OpenVINO using Homebrew ` * :doc:`Install OpenVINO using vcpkg ` +* :doc:`Install OpenVINO using Conan Package Manager ` diff --git a/docs/articles_en/get started/installing-openvino-overview/installing-openvino-shared/installing-openvino-conan.md b/docs/articles_en/get started/installing-openvino-overview/installing-openvino-shared/installing-openvino-conan.md new file mode 100644 index 00000000000000..fcba7f0df4a5d5 --- /dev/null +++ b/docs/articles_en/get started/installing-openvino-overview/installing-openvino-shared/installing-openvino-conan.md @@ -0,0 +1,96 @@ +# Install OpenVINO™ Runtime from Conan Package Manager {#openvino_docs_install_guides_installing_openvino_conan} + +@sphinxdirective + +.. meta:: + :description: Learn how to install OpenVINO™ Runtime on Windows, Linux, and + macOS operating systems, using Conan Package Manager. + +.. note:: + + Note that the Conan Package Manager distribution: + + * is dedicated to users of all major OSs: Windows, Linux, macOS + + + +.. tab-set:: + + .. tab-item:: System Requirements + :sync: system-requirements + + Full requirement listing is available in: + `System Requirements Page `__ + + + .. tab-item:: Processor Notes + :sync: processor-notes + + To see if your processor includes the integrated graphics technology and supports iGPU inference, refer to: + `Product Specifications `__ + + .. tab-item:: Software + :sync: software + + There are many ways to work with Conan Package Manager. Before you proceed, learn more about it on the + `Conan distribution page `__ + +Installing OpenVINO Runtime with Conan Package Manager +############################################################ + +1. Install Conan 2.0 or higher: + + .. code-block:: + + python3 -m pip install conan + +2. Create a ``conanfile.txt`` file for your OpenVINO project and add "*openvino*" dependency in there: + + .. code-block:: console + + [requires] + openvino/2023.1.0 + [generators] + CMakeDeps + CMakeToolchain + [layout] + cmake_layout + + Run the command below to create ``conan_toolchain.cmake`` file, which will be used to compile your project with OpenVINO: + + .. code-block:: + + conan install conanfile.txt --build=missing + + .. 
note:: + + By default, OpenVINO is statically compiled. All available plugins and frontends are compiled as well. You can build a tailored OpenVINO by using the command below: + + .. code-block:: + + conan install conanfile.txt --build=missing -o:h openvino/*:enable_intel_gpu=False -o:h openvino/*:enable_onnx_frontend=False' -o:h openvino/*:shared=True. + + For more details on available options, see the `Conan Package Manager page on OpenVINO `__ + +3. Configure and compile your project with OpenVINO: + + .. code-block:: + + cmake -DCMAKE_TOOLCHAIN_FILE= -DCMAKE_BUILD_TYPE=Release -S -B + cmake --build --parallel + + .. note:: + + OpenVINO can be used with any build interface, as long as it is supported by Conan 2.0. + +Additional Resources +######################## + +* `Conan Package Manager `__ +* Learn more about :doc:`OpenVINO Workflow `. +* To prepare your models for working with OpenVINO, see :doc:`Model Preparation `. +* Learn more about :doc:`Inference with OpenVINO Runtime `. +* See sample applications in :doc:`OpenVINO toolkit Samples Overview `. +* Check out the OpenVINO product `home page `__ + +@endsphinxdirective diff --git a/docs/articles_en/get started/installing-openvino-overview/installing-openvino-windows-header.md b/docs/articles_en/get started/installing-openvino-overview/installing-openvino-windows-header.md index 65b1803ec711ff..73b11591046804 100644 --- a/docs/articles_en/get started/installing-openvino-overview/installing-openvino-windows-header.md +++ b/docs/articles_en/get started/installing-openvino-overview/installing-openvino-windows-header.md @@ -17,6 +17,7 @@ Use Conda Forge Use vcpkg Use Docker + Use Conan @@ -27,6 +28,7 @@ If you want to install OpenVINO™ Runtime on Windows, you have the following op * :doc:`Install OpenVINO using Conda Forge ` * :doc:`Install OpenVINO using vcpkg ` * :doc:`Install OpenVINO using Docker ` +* :doc:`Install OpenVINO using Conan Package Manager ` @endsphinxdirective From ae3b19d034075eae43ec28302d2d4827a1a5608b Mon Sep 17 00:00:00 2001 From: Edward Shogulin Date: Tue, 3 Oct 2023 15:31:33 +0100 Subject: [PATCH 047/257] [LPT] [NPU] Multiply support (#19859) * [LPT] [NPU] Multiply support * [LPT] [NPU] Multiply support documentation * 1) FakeQuantize support 2) refactoring * [LPT] DisableCleanup attribute + cleanup transformations extension * [LPT] DisableCleanup usage * [LPT] Tests infrastructure support * [LPT] infrastructure quick fix * [LPT] Recurrent Cell Transformation fix * refactoring & comment fixes --- .../low_precision_transformations.md | 2 +- .../step3_main.md | 6 +- .../step3_main/arithmetic/multiply_partial.md | 3 + .../low_precision/cleanup_transformation.hpp | 30 + .../common/precisions_restriction.hpp | 2 +- .../low_precision/eliminate_fake_quantize.hpp | 4 +- .../include/low_precision/fold_convert.hpp | 4 +- .../include/low_precision/fuse_convert.hpp | 6 +- .../fuse_elementwise_to_fake_quantize.hpp | 29 + .../fuse_multiply_to_fake_quantize.hpp | 5 +- .../fuse_subtract_to_fake_quantize.hpp | 5 +- .../low_precision/layer_transformation.hpp | 2 +- .../include/low_precision/low_precision.hpp | 15 +- .../include/low_precision/multiply.hpp | 8 +- .../low_precision/multiply_partial.hpp | 32 + .../multiply_to_group_convolution.hpp | 4 +- .../rt_info/disable_cleanup_attribute.hpp | 27 + .../rt_info/skip_cleanup_attribute.hpp | 17 - .../weightable_layer_transformation.hpp | 27 +- .../src/cleanup_transformation.cpp | 26 + .../src/convolution.cpp | 6 + .../src/convolution_backprop_data.cpp | 6 + 
.../src/eliminate_fake_quantize.cpp | 6 +- .../src/fake_quantize.cpp | 5 + .../src/fold_convert.cpp | 7 +- .../src/fuse_convert.cpp | 7 +- .../src/fuse_elementwise_to_fake_quantize.cpp | 52 + .../src/fuse_multiply_to_fake_quantize.cpp | 37 +- .../src/fuse_subtract_to_fake_quantize.cpp | 48 +- .../src/layer_transformation.cpp | 6 +- .../src/low_precision.cpp | 8 +- .../src/multiply.cpp | 186 ++- .../src/multiply_partial.cpp | 174 +++ .../src/multiply_to_group_convolution.cpp | 6 +- .../src/recurrent_cell.cpp | 12 +- .../src/rt_info/skip_cleanup_attribute.cpp | 20 - .../src/weightable_layer_transformation.cpp | 56 +- .../lpt_avoid_shapeof_propagation_test.cpp | 4 +- .../tests/multiply_partial_transformation.cpp | 1007 +++++++++++++++++ .../tests/multiply_transformation.cpp | 979 +++++++--------- .../simple_low_precision_transformer.cpp | 27 +- .../simple_low_precision_transformer.hpp | 3 +- .../multiply_transformation.cpp | 4 +- .../multiply_function.hpp | 43 +- .../multiply_partial_function.hpp | 60 + .../src/multiply_function.cpp | 123 +- .../src/multiply_partial_function.cpp | 154 +++ 47 files changed, 2310 insertions(+), 990 deletions(-) create mode 100644 docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/arithmetic/multiply_partial.md create mode 100644 src/common/low_precision_transformations/include/low_precision/cleanup_transformation.hpp create mode 100644 src/common/low_precision_transformations/include/low_precision/fuse_elementwise_to_fake_quantize.hpp create mode 100644 src/common/low_precision_transformations/include/low_precision/multiply_partial.hpp create mode 100644 src/common/low_precision_transformations/include/low_precision/rt_info/disable_cleanup_attribute.hpp delete mode 100644 src/common/low_precision_transformations/include/low_precision/rt_info/skip_cleanup_attribute.hpp create mode 100644 src/common/low_precision_transformations/src/cleanup_transformation.cpp create mode 100644 src/common/low_precision_transformations/src/fuse_elementwise_to_fake_quantize.cpp create mode 100644 src/common/low_precision_transformations/src/multiply_partial.cpp delete mode 100644 src/common/low_precision_transformations/src/rt_info/skip_cleanup_attribute.cpp create mode 100644 src/common/low_precision_transformations/tests/multiply_partial_transformation.cpp create mode 100644 src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/multiply_partial_function.hpp create mode 100644 src/tests/ngraph_helpers/lpt_ngraph_functions/src/multiply_partial_function.cpp diff --git a/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations.md b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations.md index 4b8dfd6fd5e933..af9ffffed4c17e 100644 --- a/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations.md +++ b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations.md @@ -200,7 +200,7 @@ Transformations: * :doc:`GatherTransformation ` * :doc:`MatMulTransformation ` * :doc:`MaxPoolTransformation ` -* :doc:`MultiplyTransformation ` +* :doc:`MultiplyPartialTransformation ` * :doc:`MVNTransformation ` * :doc:`NormalizeL2Transformation ` * :doc:`PReluTransformation ` diff --git 
a/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main.md b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main.md index 162ba3ebfce1df..8bc0c5a0a509de 100644 --- a/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main.md +++ b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main.md @@ -26,7 +26,7 @@ GatherTransformation MatMulTransformation MaxPoolTransformation - MultiplyTransformation + MultiplyPartialTransformation MVNTransformation NormalizeL2Transformation PadTransformation @@ -45,7 +45,7 @@ TransposeTransformation UnsqueezeTransformation VariadicSplitTransformation - + Main transformations are the majority of low precision transformations. Transformations operate with dequantization operations. Main transformations include: @@ -64,7 +64,7 @@ Main transformations are the majority of low precision transformations. Transfor * :doc:`GatherTransformation ` * :doc:`MatMulTransformation ` * :doc:`MaxPoolTransformation ` -* :doc:`MultiplyTransformation ` +* :doc:`MultiplyPartialTransformation ` * :doc:`MVNTransformation ` * :doc:`NormalizeL2Transformation ` * :doc:`PadTransformation` diff --git a/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/arithmetic/multiply_partial.md b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/arithmetic/multiply_partial.md new file mode 100644 index 00000000000000..1d4b348100ffa7 --- /dev/null +++ b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations/step3_main/arithmetic/multiply_partial.md @@ -0,0 +1,3 @@ +# MultiplyTransformation transformation {#openvino_docs_OV_UG_lpt_MultiplyPartialTransformation} + +ov::pass::low_precision::MultiplyPartialTransformation class represents the `MultiplyPartial` operation transformation. \ No newline at end of file diff --git a/src/common/low_precision_transformations/include/low_precision/cleanup_transformation.hpp b/src/common/low_precision_transformations/include/low_precision/cleanup_transformation.hpp new file mode 100644 index 00000000000000..80e045e386b6d5 --- /dev/null +++ b/src/common/low_precision_transformations/include/low_precision/cleanup_transformation.hpp @@ -0,0 +1,30 @@ +// Copyright (C) 2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "layer_transformation.hpp" + +namespace ov { +namespace pass { +namespace low_precision { + +/** + * @ingroup ie_transformation_common_api + * @brief Base class for cleanup low precision transformation. 
+ */ +class LP_TRANSFORMATIONS_API CleanupTransformation : public LayerTransformation { +public: + CleanupTransformation(const Params& params); + virtual ~CleanupTransformation() = default; + + bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; + static bool canBeTransformedStatic( + const std::shared_ptr& layer, + const std::vector& defaultPrecisions = precision_set::get_int8_support()); +}; + +} // namespace low_precision +} // namespace pass +} // namespace ov diff --git a/src/common/low_precision_transformations/include/low_precision/common/precisions_restriction.hpp b/src/common/low_precision_transformations/include/low_precision/common/precisions_restriction.hpp index 31d820d1eb4aef..7301d13b27b3d2 100644 --- a/src/common/low_precision_transformations/include/low_precision/common/precisions_restriction.hpp +++ b/src/common/low_precision_transformations/include/low_precision/common/precisions_restriction.hpp @@ -74,7 +74,7 @@ class PrecisionsRestriction { } template - static PrecisionsByPorts getPrecisionsByOperationType(std::vector& restrictions) { + static PrecisionsByPorts getPrecisionsByOperationType(const std::vector& restrictions) { for (const auto& restriction : restrictions) { if (restriction.operationType == T::get_type_info_static()) { return restriction.precisionsByPorts; diff --git a/src/common/low_precision_transformations/include/low_precision/eliminate_fake_quantize.hpp b/src/common/low_precision_transformations/include/low_precision/eliminate_fake_quantize.hpp index 2741d6b15cc026..9b3d1f9e0fe0b7 100644 --- a/src/common/low_precision_transformations/include/low_precision/eliminate_fake_quantize.hpp +++ b/src/common/low_precision_transformations/include/low_precision/eliminate_fake_quantize.hpp @@ -6,7 +6,7 @@ #include -#include "low_precision/layer_transformation.hpp" +#include "low_precision/cleanup_transformation.hpp" namespace ov { namespace pass { @@ -20,7 +20,7 @@ namespace low_precision { * [EliminateFakeQuantizeTransformation](@ref openvino_docs_OV_UG_lpt_EliminateFakeQuantizeTransformation) page * in the Inference Engine Developer Guide. */ -class LP_TRANSFORMATIONS_API EliminateFakeQuantizeTransformation : public LayerTransformation { +class LP_TRANSFORMATIONS_API EliminateFakeQuantizeTransformation : public CleanupTransformation { public: OPENVINO_RTTI("EliminateFakeQuantizeTransformation", "0"); EliminateFakeQuantizeTransformation(const Params& params = Params()); diff --git a/src/common/low_precision_transformations/include/low_precision/fold_convert.hpp b/src/common/low_precision_transformations/include/low_precision/fold_convert.hpp index e5fcfd639f7dee..640cdda59e6947 100644 --- a/src/common/low_precision_transformations/include/low_precision/fold_convert.hpp +++ b/src/common/low_precision_transformations/include/low_precision/fold_convert.hpp @@ -6,7 +6,7 @@ #include -#include "low_precision/layer_transformation.hpp" +#include "low_precision/cleanup_transformation.hpp" namespace ov { namespace pass { @@ -20,7 +20,7 @@ namespace low_precision { * [FoldConvertTransformation](@ref openvino_docs_OV_UG_lpt_FoldConvertTransformation) page * in the Inference Engine Developer Guide. 
*/ -class LP_TRANSFORMATIONS_API FoldConvertTransformation : public LayerTransformation { +class LP_TRANSFORMATIONS_API FoldConvertTransformation : public CleanupTransformation { public: OPENVINO_RTTI("FoldConvertTransformation", "0"); FoldConvertTransformation(const Params& params = Params()); diff --git a/src/common/low_precision_transformations/include/low_precision/fuse_convert.hpp b/src/common/low_precision_transformations/include/low_precision/fuse_convert.hpp index 76e5a8e419558f..09c4692198d4ae 100644 --- a/src/common/low_precision_transformations/include/low_precision/fuse_convert.hpp +++ b/src/common/low_precision_transformations/include/low_precision/fuse_convert.hpp @@ -4,9 +4,7 @@ #pragma once - -#include "low_precision/layer_transformation.hpp" -#include "low_precision/eltwise_base_transformation.hpp" +#include "low_precision/cleanup_transformation.hpp" namespace ov { namespace pass { @@ -20,7 +18,7 @@ namespace low_precision { * [FuseConvertTransformation](@ref openvino_docs_OV_UG_lpt_FuseConvertTransformation) page * in the Inference Engine Developer Guide. */ -class LP_TRANSFORMATIONS_API FuseConvertTransformation : public LayerTransformation { +class LP_TRANSFORMATIONS_API FuseConvertTransformation : public CleanupTransformation { public: OPENVINO_RTTI("FuseConvertTransformation", "0"); FuseConvertTransformation(const Params& params = Params()); diff --git a/src/common/low_precision_transformations/include/low_precision/fuse_elementwise_to_fake_quantize.hpp b/src/common/low_precision_transformations/include/low_precision/fuse_elementwise_to_fake_quantize.hpp new file mode 100644 index 00000000000000..d615d0f13bbb24 --- /dev/null +++ b/src/common/low_precision_transformations/include/low_precision/fuse_elementwise_to_fake_quantize.hpp @@ -0,0 +1,29 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +#include "low_precision/cleanup_transformation.hpp" + +namespace ov { +namespace pass { +namespace low_precision { + +/** + * @ingroup ie_transformation_common_api + * @brief Base class for fuse elementwise to FakeQuantize low precision transformation. + */ +class LP_TRANSFORMATIONS_API FuseElementwiseToFakeQuantizeTransformation : public CleanupTransformation { +public: + FuseElementwiseToFakeQuantizeTransformation(const Params& params); + virtual ~FuseElementwiseToFakeQuantizeTransformation() = default; + + bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; +}; + +} // namespace low_precision +} // namespace pass +} // namespace ov diff --git a/src/common/low_precision_transformations/include/low_precision/fuse_multiply_to_fake_quantize.hpp b/src/common/low_precision_transformations/include/low_precision/fuse_multiply_to_fake_quantize.hpp index 34259bb87c9336..af0e152db0b6cb 100644 --- a/src/common/low_precision_transformations/include/low_precision/fuse_multiply_to_fake_quantize.hpp +++ b/src/common/low_precision_transformations/include/low_precision/fuse_multiply_to_fake_quantize.hpp @@ -6,7 +6,7 @@ #include -#include "low_precision/layer_transformation.hpp" +#include "low_precision/fuse_elementwise_to_fake_quantize.hpp" namespace ov { namespace pass { @@ -20,12 +20,11 @@ namespace low_precision { * [FuseMultiplyToFakeQuantizeTransformation](@ref openvino_docs_OV_UG_lpt_FuseMultiplyToFakeQuantizeTransformation) page * in the Inference Engine Developer Guide. 
*/ -class LP_TRANSFORMATIONS_API FuseMultiplyToFakeQuantizeTransformation : public LayerTransformation { +class LP_TRANSFORMATIONS_API FuseMultiplyToFakeQuantizeTransformation : public FuseElementwiseToFakeQuantizeTransformation { public: OPENVINO_RTTI("FuseMultiplyToFakeQuantizeTransformation", "0"); FuseMultiplyToFakeQuantizeTransformation(const Params& params = Params()); bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; }; diff --git a/src/common/low_precision_transformations/include/low_precision/fuse_subtract_to_fake_quantize.hpp b/src/common/low_precision_transformations/include/low_precision/fuse_subtract_to_fake_quantize.hpp index 98527defe13c5c..4b06f6cc7deedb 100644 --- a/src/common/low_precision_transformations/include/low_precision/fuse_subtract_to_fake_quantize.hpp +++ b/src/common/low_precision_transformations/include/low_precision/fuse_subtract_to_fake_quantize.hpp @@ -6,7 +6,7 @@ #include -#include "low_precision/layer_transformation.hpp" +#include "low_precision/fuse_elementwise_to_fake_quantize.hpp" namespace ov { namespace pass { @@ -20,12 +20,11 @@ namespace low_precision { * [FuseSubtractToFakeQuantizeTransformation](@ref openvino_docs_OV_UG_lpt_FuseSubtractToFakeQuantizeTransformation) page * in the Inference Engine Developer Guide. */ -class LP_TRANSFORMATIONS_API FuseSubtractToFakeQuantizeTransformation : public LayerTransformation { +class LP_TRANSFORMATIONS_API FuseSubtractToFakeQuantizeTransformation : public FuseElementwiseToFakeQuantizeTransformation { public: OPENVINO_RTTI("FuseSubtractToFakeQuantizeTransformation", "0"); FuseSubtractToFakeQuantizeTransformation(const Params& params = Params()); bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; }; diff --git a/src/common/low_precision_transformations/include/low_precision/layer_transformation.hpp b/src/common/low_precision_transformations/include/low_precision/layer_transformation.hpp index 80096cdf7c18f6..e68f395049ca8d 100644 --- a/src/common/low_precision_transformations/include/low_precision/layer_transformation.hpp +++ b/src/common/low_precision_transformations/include/low_precision/layer_transformation.hpp @@ -371,7 +371,7 @@ class LP_TRANSFORMATIONS_API LayerTransformation : public ov::pass::MatcherPass const bool updatePrecision, const bool moveSubtract = true) const; - void updateOutput( + bool updateOutput( TransformationContext &context, std::shared_ptr lastNode, std::shared_ptr originalNode) const; diff --git a/src/common/low_precision_transformations/include/low_precision/low_precision.hpp b/src/common/low_precision_transformations/include/low_precision/low_precision.hpp index 9236113c731052..f40c92605d6cf9 100644 --- a/src/common/low_precision_transformations/include/low_precision/low_precision.hpp +++ b/src/common/low_precision_transformations/include/low_precision/low_precision.hpp @@ -48,9 +48,9 @@ class ov::pass::low_precision::MarkupOptimizations : public ov::pass::ModelPass const AttributeParameters& params); bool run_on_model(const std::shared_ptr& m) override; private: - const std::vector& precisionRestrictions; - const std::vector& quantizationRestrictions; - const 
AttributeParameters& params; + const std::vector precisionRestrictions; + const std::vector quantizationRestrictions; + const AttributeParameters params; }; class ov::pass::low_precision::TypeRelaxedReplacer : public ov::pass::GraphRewrite { @@ -71,9 +71,18 @@ class ov::pass::low_precision::LowPrecision : public ov::pass::ModelPass { static bool isFunctionQuantized(const std::shared_ptr& model); static bool isFQLevelsPresent(const std::shared_ptr& model, const std::set& levels); + template + std::shared_ptr add_main(Args&&... args) { + const auto tr = std::make_shared(std::forward(args)...); + additional_main_passes.push_back(tr); + return tr; + } + protected: std::vector precisionRestrictions; std::vector quantizationRestrictions; // remove LayerTransformation::Params params; + + std::vector> additional_main_passes; }; diff --git a/src/common/low_precision_transformations/include/low_precision/multiply.hpp b/src/common/low_precision_transformations/include/low_precision/multiply.hpp index 3dc4a26d0569aa..55484b041d6069 100644 --- a/src/common/low_precision_transformations/include/low_precision/multiply.hpp +++ b/src/common/low_precision_transformations/include/low_precision/multiply.hpp @@ -5,7 +5,7 @@ #pragma once -#include "low_precision/eltwise_base_transformation.hpp" +#include "low_precision/weightable_layer_transformation.hpp" namespace ov { namespace pass { @@ -19,12 +19,14 @@ namespace low_precision { * [MultiplyTransformation](@ref openvino_docs_OV_UG_lpt_MultiplyTransformation) page * in the Inference Engine Developer Guide. */ -class LP_TRANSFORMATIONS_API MultiplyTransformation : public EltwiseBaseTransformation { +class LP_TRANSFORMATIONS_API MultiplyTransformation : public WeightableLayerTransformation { public: OPENVINO_RTTI("MultiplyTransformation", "0"); MultiplyTransformation(const Params& params = Params()); bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; + +protected: + size_t getInputChannels(const std::shared_ptr op) const override; }; } // namespace low_precision diff --git a/src/common/low_precision_transformations/include/low_precision/multiply_partial.hpp b/src/common/low_precision_transformations/include/low_precision/multiply_partial.hpp new file mode 100644 index 00000000000000..c3db52ce5d9e9d --- /dev/null +++ b/src/common/low_precision_transformations/include/low_precision/multiply_partial.hpp @@ -0,0 +1,32 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include "low_precision/eltwise_base_transformation.hpp" + +namespace ov { +namespace pass { +namespace low_precision { + +/** + * @ingroup ie_transformation_common_api + * @brief MultiplyPartialTransformation propagates dequantization operations through Multiply operation. + * + * For more details about the transformation, refer to + * [MultiplyPartialTransformation](@ref openvino_docs_OV_UG_lpt_MultiplyPartialTransformation) page + * in the Inference Engine Developer Guide. 
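The add_main() entry point added to ov::pass::low_precision::LowPrecision above lets a plugin append extra main-stage passes to the LPT pipeline before it runs. A hypothetical usage sketch, assuming the constructor arguments shown in this header (precision restrictions, quantization-granularity restrictions, LayerTransformation::Params) and using MultiplyTransformation purely as an example of an appended pass; the restriction values and helper signatures are illustrative, following the existing PrecisionsRestriction::create<T>() pattern:

    #include <memory>
    #include <vector>

    #include "low_precision/low_precision.hpp"
    #include "low_precision/multiply.hpp"
    #include "openvino/opsets/opset1.hpp"
    #include "openvino/pass/manager.hpp"

    void run_lpt(const std::shared_ptr<ov::Model>& model) {
        using namespace ov::pass::low_precision;

        // Illustrative per-port precision restriction for Multiply.
        const auto precisions = std::vector<PrecisionsRestriction>({
            PrecisionsRestriction::create<ov::opset1::Multiply>({
                {{0}, {ov::element::u8}},
                {{1}, {ov::element::i8}},
            }),
        });
        const auto quantizations = std::vector<QuantizationGranularityRestriction>();

        ov::pass::Manager manager;
        auto lpt = manager.register_pass<LowPrecision>(precisions, quantizations, LayerTransformation::Params());
        // New in this patch: register an additional main transformation on the LowPrecision pass.
        lpt->add_main<MultiplyTransformation>(LayerTransformation::Params());
        manager.run_passes(model);
    }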
+ */ +class LP_TRANSFORMATIONS_API MultiplyPartialTransformation : public EltwiseBaseTransformation { +public: + OPENVINO_RTTI("MultiplyPartialTransformation", "0"); + MultiplyPartialTransformation(const Params& params = Params()); + bool transform(TransformationContext& context, ngraph::pattern::Matcher &m) override; + bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; +}; + +} // namespace low_precision +} // namespace pass +} // namespace ov diff --git a/src/common/low_precision_transformations/include/low_precision/multiply_to_group_convolution.hpp b/src/common/low_precision_transformations/include/low_precision/multiply_to_group_convolution.hpp index b107b7de0418f9..d76f2ef108853d 100644 --- a/src/common/low_precision_transformations/include/low_precision/multiply_to_group_convolution.hpp +++ b/src/common/low_precision_transformations/include/low_precision/multiply_to_group_convolution.hpp @@ -5,7 +5,7 @@ #pragma once #include -#include "low_precision/layer_transformation.hpp" +#include "low_precision/cleanup_transformation.hpp" #include "common/precisions_restriction.hpp" namespace ov { @@ -20,7 +20,7 @@ namespace low_precision { * [MultiplyToGroupConvolutionTransformation](@ref openvino_docs_OV_UG_lpt_MultiplyToGroupConvolutionTransformation) page * in the Inference Engine Developer Guide. */ -class LP_TRANSFORMATIONS_API MultiplyToGroupConvolutionTransformation : public LayerTransformation { +class LP_TRANSFORMATIONS_API MultiplyToGroupConvolutionTransformation : public CleanupTransformation { public: OPENVINO_RTTI("MultiplyToGroupConvolutionTransformation", "0"); MultiplyToGroupConvolutionTransformation( diff --git a/src/common/low_precision_transformations/include/low_precision/rt_info/disable_cleanup_attribute.hpp b/src/common/low_precision_transformations/include/low_precision/rt_info/disable_cleanup_attribute.hpp new file mode 100644 index 00000000000000..71df996fe15aa4 --- /dev/null +++ b/src/common/low_precision_transformations/include/low_precision/rt_info/disable_cleanup_attribute.hpp @@ -0,0 +1,27 @@ +// Copyright (C) 2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/core/node.hpp" +#include "openvino/core/runtime_attribute.hpp" +#include "low_precision/lpt_visibility.hpp" + +namespace ov { + +class LP_TRANSFORMATIONS_API DisableCleanupAttribute : public ov::RuntimeAttribute { +public: + OPENVINO_RTTI("LowPrecision::DisableCleanup", "", ov::RuntimeAttribute); + DisableCleanupAttribute() = default; + + static ov::Any create(const std::shared_ptr& node) { + auto& rt = node->get_rt_info(); + return (rt[DisableCleanupAttribute::get_type_info_static()] = DisableCleanupAttribute()); + } + + bool is_copyable() const override { + return false; + } +}; +} // namespace ov diff --git a/src/common/low_precision_transformations/include/low_precision/rt_info/skip_cleanup_attribute.hpp b/src/common/low_precision_transformations/include/low_precision/rt_info/skip_cleanup_attribute.hpp deleted file mode 100644 index 39e0bf46e3ab3a..00000000000000 --- a/src/common/low_precision_transformations/include/low_precision/rt_info/skip_cleanup_attribute.hpp +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "openvino/core/node.hpp" - -#include "low_precision/rt_info/attribute_parameters.hpp" - -namespace ov { -class LP_TRANSFORMATIONS_API SkipCleanupAttribute : public ov::RuntimeAttribute { -public: 
- OPENVINO_RTTI("LowPrecision::SkipCleanup", "", ov::RuntimeAttribute); - static ov::Any create(const std::shared_ptr& node); -}; -} // namespace ov diff --git a/src/common/low_precision_transformations/include/low_precision/weightable_layer_transformation.hpp b/src/common/low_precision_transformations/include/low_precision/weightable_layer_transformation.hpp index 8abad779628de8..4655a940120322 100644 --- a/src/common/low_precision_transformations/include/low_precision/weightable_layer_transformation.hpp +++ b/src/common/low_precision_transformations/include/low_precision/weightable_layer_transformation.hpp @@ -19,7 +19,29 @@ namespace low_precision { */ class LP_TRANSFORMATIONS_API WeightableLayerTransformation : public LayerTransformation { public: - WeightableLayerTransformation(const Params& params); + struct LP_TRANSFORMATIONS_API CanBeTransformedParams { + CanBeTransformedParams( + const bool constantWeight = true, + const bool perTensorQuantizationOnData = true, + const bool limitWeightsDataPrecision = true, + const bool dynamicWeights = false) : + constantWeight(constantWeight), + perTensorQuantizationOnData(perTensorQuantizationOnData), + limitWeightsDataPrecision(limitWeightsDataPrecision), + dynamicWeights(dynamicWeights) { + } + + // weights on constant path only + const bool constantWeight; + // data with per-tensor quantization only + const bool perTensorQuantizationOnData; + // limit weights by expected precisions + const bool limitWeightsDataPrecision; + const bool dynamicWeights; + }; + + WeightableLayerTransformation(const Params& params, const CanBeTransformedParams& canBeTransformedParams = {}); + bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; bool canConvolutionBeTransformed(const TransformationContext& context, std::shared_ptr layer, const std::vector& defaultPrecisions) const; @@ -48,6 +70,9 @@ class LP_TRANSFORMATIONS_API WeightableLayerTransformation : public LayerTransfo static DataPrecision getDataPrecisionOnWeights(const std::shared_ptr& node, const std::vector& defaultPrecisions); static bool isAsymmetricOnWeights(const std::shared_ptr& node, const std::vector& defaultPrecisions = precision_set::get_int8_support()); + +private: + const CanBeTransformedParams canBeTransformedParams; }; } // namespace low_precision diff --git a/src/common/low_precision_transformations/src/cleanup_transformation.cpp b/src/common/low_precision_transformations/src/cleanup_transformation.cpp new file mode 100644 index 00000000000000..3a7cb0da5d5c36 --- /dev/null +++ b/src/common/low_precision_transformations/src/cleanup_transformation.cpp @@ -0,0 +1,26 @@ +// Copyright (C) 2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "low_precision/cleanup_transformation.hpp" +#include "low_precision/network_helper.hpp" +#include "low_precision/rt_info/disable_cleanup_attribute.hpp" + +namespace ov { +namespace pass { +namespace low_precision { + +CleanupTransformation::CleanupTransformation(const Params& params) : LayerTransformation(params) { +} + +bool CleanupTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const { + return canBeTransformedStatic(layer); +} + +bool CleanupTransformation::canBeTransformedStatic(const std::shared_ptr& layer, const std::vector& defaultPrecisions) { + return getAttribute(layer).empty(); +} + +} // namespace low_precision +} // namespace pass +} // namespace ov diff --git a/src/common/low_precision_transformations/src/convolution.cpp 
b/src/common/low_precision_transformations/src/convolution.cpp index e6044b6d9edeea..2c80e75e156546 100644 --- a/src/common/low_precision_transformations/src/convolution.cpp +++ b/src/common/low_precision_transformations/src/convolution.cpp @@ -13,6 +13,7 @@ #include "openvino/pass/pattern/op/wrap_type.hpp" #include "openvino/pass/pattern/op/or.hpp" #include "low_precision/network_helper.hpp" +#include "low_precision/rt_info/disable_cleanup_attribute.hpp" #include "transformations/rt_info/disable_constant_folding.hpp" #include "itt.hpp" @@ -333,6 +334,11 @@ bool ConvolutionTransformation::transform(TransformationContext &context, ov::pa ov::copy_runtime_info({ convolution, finalDequantization }, finalDequantization); updateOutput(context, finalDequantization, convolution); + const auto onActiviation = convolution->get_input_node_shared_ptr(0); + if (ov::is_type(onActiviation)) { + DisableCleanupAttribute::create(onActiviation); + } + auto onWeights = convolution->get_input_node_shared_ptr(1); if (ov::is_type(onWeights)) { onWeights = onWeights->get_input_node_shared_ptr(0); diff --git a/src/common/low_precision_transformations/src/convolution_backprop_data.cpp b/src/common/low_precision_transformations/src/convolution_backprop_data.cpp index 890bff9d231639..3e232b5c840575 100644 --- a/src/common/low_precision_transformations/src/convolution_backprop_data.cpp +++ b/src/common/low_precision_transformations/src/convolution_backprop_data.cpp @@ -13,6 +13,7 @@ #include "openvino/pass/pattern/op/wrap_type.hpp" #include "openvino/pass/pattern/op/or.hpp" #include "low_precision/network_helper.hpp" +#include "low_precision/rt_info/disable_cleanup_attribute.hpp" #include "transformations/rt_info/disable_constant_folding.hpp" #include "itt.hpp" @@ -220,6 +221,11 @@ bool ConvolutionBackpropDataTransformation::transform(TransformationContext &con ov::copy_runtime_info({ convolutionBackpropData, finalDequantization }, finalDequantization); updateOutput(context, finalDequantization, convolutionBackpropData); + const auto onActiviation = convolutionBackpropData->get_input_node_shared_ptr(0); + if (ov::is_type(onActiviation)) { + DisableCleanupAttribute::create(onActiviation); + } + auto onWeights = convolutionBackpropData->get_input_node_shared_ptr(1); if (ov::is_type(onWeights)) { onWeights = onWeights->get_input_node_shared_ptr(0); diff --git a/src/common/low_precision_transformations/src/eliminate_fake_quantize.cpp b/src/common/low_precision_transformations/src/eliminate_fake_quantize.cpp index bfa83bb0f44b5b..3010ea213d04f2 100644 --- a/src/common/low_precision_transformations/src/eliminate_fake_quantize.cpp +++ b/src/common/low_precision_transformations/src/eliminate_fake_quantize.cpp @@ -15,7 +15,7 @@ namespace ov { namespace pass { namespace low_precision { -EliminateFakeQuantizeTransformation::EliminateFakeQuantizeTransformation(const Params& params) : LayerTransformation(params) { +EliminateFakeQuantizeTransformation::EliminateFakeQuantizeTransformation(const Params& params) : CleanupTransformation(params) { MATCHER_SCOPE(FuseMultiplyToFakeQuantizeTransformation); const auto matcher = pattern::wrap_type({ pattern::any_input(), @@ -112,6 +112,10 @@ bool check_intervals(const std::shared_ptr& fakeQuanti } // namespace bool EliminateFakeQuantizeTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr operation) const { + if (!CleanupTransformation::canBeTransformed(context, operation)) { + return false; + } + const auto fakeQuantize = ov::as_type_ptr(operation); 
OPENVINO_ASSERT(fakeQuantize != nullptr, "unexpected operation type"); diff --git a/src/common/low_precision_transformations/src/fake_quantize.cpp b/src/common/low_precision_transformations/src/fake_quantize.cpp index 28e32076181dbb..a60c3bfcd938f5 100644 --- a/src/common/low_precision_transformations/src/fake_quantize.cpp +++ b/src/common/low_precision_transformations/src/fake_quantize.cpp @@ -11,6 +11,7 @@ #include "low_precision/network_helper.hpp" #include "low_precision/rt_info/bias_attribute.hpp" +#include "low_precision/rt_info/disable_cleanup_attribute.hpp" #include "itt.hpp" namespace ov { @@ -167,6 +168,10 @@ std::shared_ptr FakeQuantizeTransformation::fuseElementwis return nullptr; } + if (!getAttribute(eltwise).empty()) { + return nullptr; + } + std::shared_ptr inputLowConst_f32 = foldConvert(fakeQuantize->input_value(1), element::f32); std::shared_ptr inputHighConst_f32 = foldConvert(fakeQuantize->input_value(2), element::f32); diff --git a/src/common/low_precision_transformations/src/fold_convert.cpp b/src/common/low_precision_transformations/src/fold_convert.cpp index 35b3385e2ebf4e..4054b0fad4e6b2 100644 --- a/src/common/low_precision_transformations/src/fold_convert.cpp +++ b/src/common/low_precision_transformations/src/fold_convert.cpp @@ -14,7 +14,7 @@ namespace ov { namespace pass { namespace low_precision { -FoldConvertTransformation::FoldConvertTransformation(const Params& params) : LayerTransformation(params) { +FoldConvertTransformation::FoldConvertTransformation(const Params& params) : CleanupTransformation(params) { MATCHER_SCOPE(FoldConvertTransformation); auto subtract = pattern::wrap_type(); auto matcher = std::make_shared(subtract, matcher_name); @@ -57,10 +57,11 @@ bool FoldConvertTransformation::transform(TransformationContext& context, ov::pa bool FoldConvertTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr operation) const { return - (ov::is_type(operation->get_input_node_ptr(1)) && + CleanupTransformation::canBeTransformed(context, operation) && + ((ov::is_type(operation->get_input_node_ptr(1)) && ov::is_type(operation->get_input_node_ptr(1)->get_input_node_ptr(0))) || (ov::is_type(operation->get_input_node_ptr(0)) && - ov::is_type(operation->get_input_node_ptr(0)->get_input_node_ptr(0))); + ov::is_type(operation->get_input_node_ptr(0)->get_input_node_ptr(0)))); } bool FoldConvertTransformation::isPrecisionPreserved(std::shared_ptr layer) const noexcept { diff --git a/src/common/low_precision_transformations/src/fuse_convert.cpp b/src/common/low_precision_transformations/src/fuse_convert.cpp index 9c17f38074e678..372476aeabe737 100644 --- a/src/common/low_precision_transformations/src/fuse_convert.cpp +++ b/src/common/low_precision_transformations/src/fuse_convert.cpp @@ -12,14 +12,15 @@ #include "low_precision/common/ie_lpt_exception.hpp" #include "low_precision/network_helper.hpp" +#include "low_precision/rt_info/disable_cleanup_attribute.hpp" + #include "itt.hpp" -#include "low_precision/rt_info/skip_cleanup_attribute.hpp" namespace ov { namespace pass { namespace low_precision { -FuseConvertTransformation::FuseConvertTransformation(const Params& params) : LayerTransformation(params) { +FuseConvertTransformation::FuseConvertTransformation(const Params& params) : CleanupTransformation(params) { MATCHER_SCOPE(FuseConvertTransformation); auto multiply = pattern::wrap_type({ pattern::wrap_type(), pattern::wrap_type() }); auto subtract = pattern::wrap_type({ pattern::wrap_type(), pattern::wrap_type() }); @@ -114,7 +115,7 
@@ bool FuseConvertTransformation::transform(TransformationContext& context, ov::pa } bool FuseConvertTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr op) const { - if (!getAttribute(op).empty()) { + if (!CleanupTransformation::canBeTransformed(context, op)) { return false; } diff --git a/src/common/low_precision_transformations/src/fuse_elementwise_to_fake_quantize.cpp b/src/common/low_precision_transformations/src/fuse_elementwise_to_fake_quantize.cpp new file mode 100644 index 00000000000000..c641824bf53084 --- /dev/null +++ b/src/common/low_precision_transformations/src/fuse_elementwise_to_fake_quantize.cpp @@ -0,0 +1,52 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "low_precision/fuse_elementwise_to_fake_quantize.hpp" + +#include +#include "low_precision/fake_quantize.hpp" +#include "low_precision/network_helper.hpp" + +namespace ov { +namespace pass { +namespace low_precision { + +FuseElementwiseToFakeQuantizeTransformation::FuseElementwiseToFakeQuantizeTransformation(const Params& params) : CleanupTransformation(params) { +} + +bool FuseElementwiseToFakeQuantizeTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr operation) const { + if (!CleanupTransformation::canBeTransformed(context, operation)) { + return false; + } + + if (!ov::is_type(operation->get_input_node_shared_ptr(1))) { + return false; + } + + if (!FakeQuantizeTransformation::checkElementwise(operation)) { + return false; + } + + const auto parent = operation->get_input_node_shared_ptr(0); + auto fq = ov::as_type_ptr(parent); + const auto convert = ov::as_type_ptr(parent); + + if (convert) { + fq = ov::as_type_ptr(convert->get_input_node_shared_ptr(0)); + } + + if (!fq) { + return false; + } + + if (fq->get_output_target_inputs(0).size() != 1) { + return false; + } + + return true; +} + +} // namespace low_precision +} // namespace pass +} // namespace ov diff --git a/src/common/low_precision_transformations/src/fuse_multiply_to_fake_quantize.cpp b/src/common/low_precision_transformations/src/fuse_multiply_to_fake_quantize.cpp index fd316f0068d62a..ccc2164900981d 100644 --- a/src/common/low_precision_transformations/src/fuse_multiply_to_fake_quantize.cpp +++ b/src/common/low_precision_transformations/src/fuse_multiply_to_fake_quantize.cpp @@ -9,13 +9,14 @@ #include "low_precision/fake_quantize.hpp" #include "low_precision/network_helper.hpp" #include "itt.hpp" -#include "low_precision/rt_info/skip_cleanup_attribute.hpp" +#include "low_precision/rt_info/disable_cleanup_attribute.hpp" namespace ov { namespace pass { namespace low_precision { -FuseMultiplyToFakeQuantizeTransformation::FuseMultiplyToFakeQuantizeTransformation(const Params& params) : LayerTransformation(params) { +FuseMultiplyToFakeQuantizeTransformation::FuseMultiplyToFakeQuantizeTransformation(const Params& params) + : FuseElementwiseToFakeQuantizeTransformation(params) { MATCHER_SCOPE(FuseMultiplyToFakeQuantizeTransformation); auto matcher = pattern::wrap_type(); @@ -89,38 +90,6 @@ bool FuseMultiplyToFakeQuantizeTransformation::transform(TransformationContext& return true; } -bool FuseMultiplyToFakeQuantizeTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr operation) const { - if (!ov::is_type(operation->get_input_node_shared_ptr(1))) { - return false; - } - - if (!FakeQuantizeTransformation::checkElementwise(operation)) { - return false; - } - - if (!getAttribute(operation).empty()) { - 
return false; - } - - const auto parent = operation->get_input_node_shared_ptr(0); - auto fq = ov::as_type_ptr(parent); - const auto convert = ov::as_type_ptr(parent); - - if (convert) { - fq = ov::as_type_ptr(convert->get_input_node_shared_ptr(0)); - } - - if (!fq) { - return false; - } - - if (fq->get_output_target_inputs(0).size() != 1) { - return false; - } - - return true; -} - bool FuseMultiplyToFakeQuantizeTransformation::isPrecisionPreserved(std::shared_ptr layer) const noexcept { return false; } diff --git a/src/common/low_precision_transformations/src/fuse_subtract_to_fake_quantize.cpp b/src/common/low_precision_transformations/src/fuse_subtract_to_fake_quantize.cpp index d75fde32ee18fa..56ed774ba36b83 100644 --- a/src/common/low_precision_transformations/src/fuse_subtract_to_fake_quantize.cpp +++ b/src/common/low_precision_transformations/src/fuse_subtract_to_fake_quantize.cpp @@ -9,13 +9,14 @@ #include "low_precision/fake_quantize.hpp" #include "low_precision/network_helper.hpp" #include "itt.hpp" -#include "low_precision/rt_info/skip_cleanup_attribute.hpp" +#include "low_precision/rt_info/disable_cleanup_attribute.hpp" namespace ov { namespace pass { namespace low_precision { -FuseSubtractToFakeQuantizeTransformation::FuseSubtractToFakeQuantizeTransformation(const Params& params) : LayerTransformation(params) { +FuseSubtractToFakeQuantizeTransformation::FuseSubtractToFakeQuantizeTransformation(const Params& params) + : FuseElementwiseToFakeQuantizeTransformation(params) { MATCHER_SCOPE(FuseSubtractToFakeQuantizeTransformation); auto matcher = pattern::wrap_type(); @@ -84,49 +85,6 @@ bool FuseSubtractToFakeQuantizeTransformation::transform(TransformationContext& return true; } -bool FuseSubtractToFakeQuantizeTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr operation) const { - if (!ov::is_type(operation->get_input_node_shared_ptr(1))) { - return false; - } - - if (!FakeQuantizeTransformation::checkElementwise(operation)) { - return false; - } - - if (!getAttribute(operation).empty()) { - return false; - } - - const auto children = operation->get_output_target_inputs(0); - - for (const auto& target : children) { - const auto convolution = ov::is_type(target.get_node()); - const auto groupConvolution = ov::is_type(target.get_node()); - const auto convolutionBackpropData = ov::is_type(target.get_node()); - if (convolution || groupConvolution || convolutionBackpropData) { - return false; - } - } - - const auto parent = operation->get_input_node_shared_ptr(0); - auto fq = ov::as_type_ptr(parent); - const auto convert = ov::as_type_ptr(parent); - - if (convert) { - fq = ov::as_type_ptr(convert->get_input_node_shared_ptr(0)); - } - - if (!fq) { - return false; - } - - if (fq->get_output_target_inputs(0).size() != 1) { - return false; - } - - return true; -} - bool FuseSubtractToFakeQuantizeTransformation::isPrecisionPreserved(std::shared_ptr layer) const noexcept { return false; } diff --git a/src/common/low_precision_transformations/src/layer_transformation.cpp b/src/common/low_precision_transformations/src/layer_transformation.cpp index 60ee21c1b34c84..86c2ba9e7df65c 100644 --- a/src/common/low_precision_transformations/src/layer_transformation.cpp +++ b/src/common/low_precision_transformations/src/layer_transformation.cpp @@ -422,21 +422,23 @@ std::shared_ptr LayerTransformation::moveDequantizationBefore( return result.newOperation; } -void LayerTransformation::updateOutput( +bool LayerTransformation::updateOutput( TransformationContext 
&context, std::shared_ptr lastNode, std::shared_ptr originalNode) const { - // TODO: not tested!!! + bool was_updated = false; for (auto output : lastNode->outputs()) { for (auto input : output.get_target_inputs()) { if (ov::is_type(input.get_node())) { const std::string originalName = originalNode->get_friendly_name(); originalNode->set_friendly_name(originalName + LayerTransformation::originalLayerPostfix); lastNode->set_friendly_name(originalName); + was_updated = true; break; } } } + return was_updated; } void LayerTransformation::updateOutput( diff --git a/src/common/low_precision_transformations/src/low_precision.cpp b/src/common/low_precision_transformations/src/low_precision.cpp index 9b84bb15dae482..0f46c41c81768e 100644 --- a/src/common/low_precision_transformations/src/low_precision.cpp +++ b/src/common/low_precision_transformations/src/low_precision.cpp @@ -53,7 +53,7 @@ #include "low_precision/interpolate.hpp" #include "low_precision/mat_mul.hpp" #include "low_precision/max_pool.hpp" -#include "low_precision/multiply.hpp" +#include "low_precision/multiply_partial.hpp" #include "low_precision/mvn.hpp" #include "low_precision/normalize_l2.hpp" #include "low_precision/pad.hpp" @@ -251,7 +251,7 @@ bool ov::pass::low_precision::LowPrecision::run_on_model(const std::shared_ptradd_matcher(tr); + } + std::shared_ptr cleanup = manager.register_pass(); ADD_MATCHER(cleanup, EliminateFakeQuantizeTransformation, params) ADD_MATCHER(cleanup, FoldConvertTransformation, params) diff --git a/src/common/low_precision_transformations/src/multiply.cpp b/src/common/low_precision_transformations/src/multiply.cpp index 6d336e659e7186..cc654d3deff706 100644 --- a/src/common/low_precision_transformations/src/multiply.cpp +++ b/src/common/low_precision_transformations/src/multiply.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2023 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // @@ -15,6 +15,7 @@ #include "openvino/pass/pattern/op/wrap_type.hpp" #include "low_precision/common/ie_lpt_exception.hpp" +#include "low_precision/rt_info/disable_cleanup_attribute.hpp" #include "low_precision/network_helper.hpp" #include "itt.hpp" @@ -22,7 +23,8 @@ namespace ov { namespace pass { namespace low_precision { -MultiplyTransformation::MultiplyTransformation(const Params& params) : EltwiseBaseTransformation(params) { +MultiplyTransformation::MultiplyTransformation(const Params& params) : + WeightableLayerTransformation(params, CanBeTransformedParams(false, false, false, true)) { MATCHER_SCOPE(MultiplyTransformation); auto matcher = pattern::wrap_type(); @@ -38,135 +40,107 @@ MultiplyTransformation::MultiplyTransformation(const Params& params) : EltwiseBa this->register_matcher(m, callback); } -bool MultiplyTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher &m) { +bool MultiplyTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher& m) { auto multiply = m.get_match_root(); if (!canBeTransformed(context, multiply)) { return false; } + multiply = NetworkHelper::separateInStandaloneBranch(multiply, defaultPrecisions); + decomposeFakeQuantizeForWeightsPath(multiply); + NetworkHelper::normalizeDequantization(NetworkHelper::getDequantization(multiply, defaultPrecisions, 0)); NetworkHelper::normalizeDequantization(NetworkHelper::getDequantization(multiply, defaultPrecisions, 1)); - multiply = NetworkHelper::separateInStandaloneBranch(multiply, defaultPrecisions); - auto newMultiply = multiply; - - auto 
fold_fake_quantizes = [](std::shared_ptr& multiply, const size_t index) { - auto fakeQuantizeOnWeights = ov::as_type_ptr(multiply->get_input_node_shared_ptr(index)); - if (fakeQuantizeOnWeights != nullptr) { - auto result = NetworkHelper::fold_fake_quantize(fakeQuantizeOnWeights); - if (ov::is_type(result)) { - replace_node(fakeQuantizeOnWeights, result); - } - } - }; + const auto dequantization1 = NetworkHelper::getDequantization(multiply, defaultPrecisions, 0); + const auto dequantization2 = NetworkHelper::getDequantization(multiply, defaultPrecisions, 1); + + if ((dequantization1.multiplyConstant == nullptr) && (dequantization2.multiplyConstant == nullptr)) { + return false; + } - fold_fake_quantizes(multiply, 0ul); - fold_fake_quantizes(multiply, 1ul); + // before: y = (deq_scales1 * (x1 - zero_point1)) * (deq_scales2 * (x2 - zero_point2)) + // after : y = deq_scales1 * deq_scales2 * (x1 - zero_point1) * (x2 - zero_point2) - const int fullPathIndex = getNotEmpty(multiply); - if (fullPathIndex == -1) { - const auto multiplyBranch = getMultiplyConstBranch(multiply); - if (multiplyBranch.first != -1) { - NetworkHelper::foldDequantization(multiply, multiplyBranch.first == 0 ? 1 : 0, defaultPrecisions); - } + auto new_scales_values = fold( + dequantization1.empty() ? dequantization1.data : dequantization1.multiplyConstant, + dequantization2.empty() ? dequantization2.data : dequantization2.multiplyConstant); - if (multiplyBranch.first == -1 || multiplyBranch.second == -1) { - // constant folding on dequantization ops (for example: Convert on Subtract) - NetworkHelper::foldDequantization(multiply, 0, defaultPrecisions); - NetworkHelper::foldDequantization(multiply, 1, defaultPrecisions); - return false; - } + if (!ov::is_type(new_scales_values)) { + return false; + } - auto multiplyParent = multiply->input_value(multiplyBranch.first); - auto constParent = multiply->input_value(multiplyBranch.first == 0 ? 1 : 0); - auto multiplyParentParent = multiplyParent.get_node_shared_ptr()->input_value(multiplyBranch.second); - auto multiplyParentConst = multiplyParent.get_node_shared_ptr()->input_value(multiplyBranch.second == 0 ? 1 : 0); - - newMultiply = std::make_shared>( - std::vector{ element::f32, element::f32 }, - std::vector{ multiply->get_output_element_type(0) }, - ov::op::TemporaryReplaceOutputType(multiplyParentParent, element::f32).get(), - ov::op::TemporaryReplaceOutputType( - fold( - foldConvert(multiplyParentConst, element::f32), - foldConvert(constParent, element::f32)), - element::f32).get()); - - NetworkHelper::copyInfo(multiplyParent.get_node_shared_ptr(), newMultiply); - NetworkHelper::copyInfo(multiply, newMultiply); - } else { - const int emptyPathIndex = fullPathIndex == 0 ? 
1 : 0; - - if (updatePrecisions) { - const FakeQuantizeDequantization dequantizationEmptyPath = NetworkHelper::getDequantization(multiply, defaultPrecisions, emptyPathIndex); - if (!dequantizationEmptyPath.empty() && !dequantizationEmptyPath.isLowPrecision()) { - return false; - } + const auto init_input = [&new_scales_values](const FakeQuantizeDequantization& dequantization) -> Output { + if (dequantization.empty()) { + return new_scales_values; } - FakeQuantizeDequantization dequantizationEmptyPath = NetworkHelper::foldDequantization(multiply, emptyPathIndex, defaultPrecisions); - std::shared_ptr subtractValuesEmptyPath; - std::shared_ptr multiplyValuesEmptyPath; - std::tie(subtractValuesEmptyPath, multiplyValuesEmptyPath) = NetworkHelper::createEmptyValues(dequantizationEmptyPath, deqPrecision); - - // check if empty path shifts are not zero - if (!NetworkHelper::isZeroConst(subtractValuesEmptyPath)) { - return false; + if (dequantization.subtract == nullptr) { + return dequantization.data; } - FakeQuantizeDequantization dequantizationFullPath = NetworkHelper::foldDequantization(multiply, fullPathIndex, defaultPrecisions); - std::shared_ptr subtractValuesFullPath; - std::shared_ptr multiplyValuesFullPath; - std::tie(subtractValuesFullPath, multiplyValuesFullPath) = NetworkHelper::createEmptyValues(dequantizationFullPath, deqPrecision); - - - // before: Y = (SC1 * (X1 - SH1)) * (SC2 * X2) - // after : Y = (SC1' * (X1 - SH1)) * (X2) , where : - // SC1' = SC1 * SC2 - auto newMultiplyValuesFullPath = fold(multiplyValuesEmptyPath, multiplyValuesFullPath); - OutputVector inputs{ {}, {} }; - inputs[emptyPathIndex] = dequantizationEmptyPath.data; - inputs[fullPathIndex] = std::make_shared( - dequantizationFullPath.subtract == nullptr ? - (dequantizationFullPath.convert == nullptr ? - dequantizationFullPath.data : dequantizationFullPath.convert) : - dequantizationFullPath.subtract, - newMultiplyValuesFullPath); - - newMultiply = std::make_shared>( - std::vector{element::f32, element::f32}, - std::vector{ multiply->get_output_element_type(0) }, - ov::op::TemporaryReplaceOutputType(inputs[0], element::f32).get(), - ov::op::TemporaryReplaceOutputType(inputs[1], element::f32).get()); - NetworkHelper::copyInfo(multiply, newMultiply); - } + const auto subtract = NetworkHelper::optimizeSubtract(dequantization.subtract); + if (subtract != nullptr) { + DisableCleanupAttribute::create(subtract); + } - replace_node(multiply, newMultiply); - updateOutput(context, newMultiply, multiply); + return subtract == nullptr ? dequantization.data : subtract; + }; - if (fullPathIndex != -1) { - NetworkHelper::foldDequantization(newMultiply, fullPathIndex, defaultPrecisions); + if ((dequantization1.empty() && (ov::is_type(dequantization1.data.get_node()))) || + (dequantization2.empty() && (ov::is_type(dequantization2.data.get_node())))) { + // one input is constant + const Output in1 = init_input(dequantization1); + const Output in2 = init_input(dequantization2); + + const auto new_multiply = (in1.get_element_type() == multiply->get_output_element_type(0)) && + (in2.get_element_type() == multiply->get_output_element_type(0)) ? 
+ std::make_shared(in1, in2) : + std::make_shared>( + std::vector{ deqPrecision, deqPrecision }, + std::vector{ multiply->get_output_element_type(0) }, + ov::op::TemporaryReplaceOutputType(in1, deqPrecision).get(), + ov::op::TemporaryReplaceOutputType(in2, deqPrecision).get()); + + replace_node(multiply, new_multiply); + updateOutput(context, new_multiply, multiply); + + return true; } - return true; -} + Output in1 = init_input(dequantization1); + Output in2 = init_input(dequantization2); -bool MultiplyTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const { - FakeQuantizeDequantization dequantization1 = pass::low_precision::NetworkHelper::getDequantization(layer, defaultPrecisions, 0ul); - FakeQuantizeDequantization dequantization2 = pass::low_precision::NetworkHelper::getDequantization(layer, defaultPrecisions, 1ul); + // in1 & in2 can have different input types + const auto new_multiply = (in1.get_element_type() == deqPrecision) && + (in2.get_element_type() == deqPrecision) ? + std::make_shared(in1, in2) : + std::make_shared>( + std::vector{ deqPrecision, deqPrecision }, + std::vector{ deqPrecision }, + ov::op::TemporaryReplaceOutputType(in1, deqPrecision).get(), + ov::op::TemporaryReplaceOutputType(in2, deqPrecision).get()); - if (dequantization1.data.get_node() == nullptr || dequantization2.data.get_node() == nullptr) { - return false; - } + DisableCleanupAttribute::create(new_multiply); - const bool nonConstantData = !ov::is_type(dequantization1.data.get_node_shared_ptr()) && - !ov::is_type(dequantization2.data.get_node_shared_ptr()); + auto new_scales = (new_multiply->get_output_element_type(0) == multiply->get_output_element_type(0)) && + (new_scales_values->get_output_element_type(0) == multiply->get_output_element_type(0)) ? 
+ std::make_shared(new_multiply, new_scales_values) : + std::make_shared>( + ov::opset1::Multiply(new_multiply, new_scales_values), + multiply->get_output_element_type(0)); - if (((dequantization1.empty() || dequantization2.empty()) && nonConstantData)) { - return false; - } + replace_node(multiply, new_scales); + const auto was_updated = updateOutput(context, new_scales, multiply); + NetworkHelper::copyInfo(multiply, new_multiply, !was_updated); + + return true; +} - return EltwiseBaseTransformation::canBeTransformed(context, layer); +size_t MultiplyTransformation::getInputChannels(const std::shared_ptr op) const { + const auto channels = op->get_input_partial_shape(1)[1]; + assert(channels.is_static()); + return channels.get_length(); } } // namespace low_precision diff --git a/src/common/low_precision_transformations/src/multiply_partial.cpp b/src/common/low_precision_transformations/src/multiply_partial.cpp new file mode 100644 index 00000000000000..cbe6627392944c --- /dev/null +++ b/src/common/low_precision_transformations/src/multiply_partial.cpp @@ -0,0 +1,174 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "low_precision/multiply_partial.hpp" + +#include +#include +#include +#include +#include +#include +#include + +#include "openvino/pass/pattern/op/wrap_type.hpp" + +#include "low_precision/common/ie_lpt_exception.hpp" +#include "low_precision/network_helper.hpp" +#include "itt.hpp" + +namespace ov { +namespace pass { +namespace low_precision { + +MultiplyPartialTransformation::MultiplyPartialTransformation(const Params& params) : EltwiseBaseTransformation(params) { + MATCHER_SCOPE(MultiplyPartialTransformation); + auto matcher = pattern::wrap_type(); + + ov::graph_rewrite_callback callback = [this](pattern::Matcher& m) { + auto op = m.get_match_root(); + if (transformation_callback(op)) { + return false; + } + return transform(*context, m); + }; + + auto m = std::make_shared(matcher, matcher_name); + this->register_matcher(m, callback); +} + +bool MultiplyPartialTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher& m) { + auto multiply = m.get_match_root(); + if (!canBeTransformed(context, multiply)) { + return false; + } + + NetworkHelper::normalizeDequantization(NetworkHelper::getDequantization(multiply, defaultPrecisions, 0)); + NetworkHelper::normalizeDequantization(NetworkHelper::getDequantization(multiply, defaultPrecisions, 1)); + + multiply = NetworkHelper::separateInStandaloneBranch(multiply, defaultPrecisions); + auto newMultiply = multiply; + + auto fold_fake_quantizes = [](std::shared_ptr& multiply, const size_t index) { + auto fakeQuantizeOnWeights = ov::as_type_ptr(multiply->get_input_node_shared_ptr(index)); + if (fakeQuantizeOnWeights != nullptr) { + auto result = NetworkHelper::fold_fake_quantize(fakeQuantizeOnWeights); + if (ov::is_type(result)) { + replace_node(fakeQuantizeOnWeights, result); + } + } + }; + + fold_fake_quantizes(multiply, 0ul); + fold_fake_quantizes(multiply, 1ul); + + const int fullPathIndex = getNotEmpty(multiply); + if (fullPathIndex == -1) { + const auto multiplyBranch = getMultiplyConstBranch(multiply); + if (multiplyBranch.first != -1) { + NetworkHelper::foldDequantization(multiply, multiplyBranch.first == 0 ? 
1 : 0, defaultPrecisions); + } + + if (multiplyBranch.first == -1 || multiplyBranch.second == -1) { + // constant folding on dequantization ops (for example: Convert on Subtract) + NetworkHelper::foldDequantization(multiply, 0, defaultPrecisions); + NetworkHelper::foldDequantization(multiply, 1, defaultPrecisions); + return false; + } + + auto multiplyParent = multiply->input_value(multiplyBranch.first); + auto constParent = multiply->input_value(multiplyBranch.first == 0 ? 1 : 0); + auto multiplyParentParent = multiplyParent.get_node_shared_ptr()->input_value(multiplyBranch.second); + auto multiplyParentConst = multiplyParent.get_node_shared_ptr()->input_value(multiplyBranch.second == 0 ? 1 : 0); + + newMultiply = std::make_shared>( + std::vector{ element::f32, element::f32 }, + std::vector{ multiply->get_output_element_type(0) }, + ov::op::TemporaryReplaceOutputType(multiplyParentParent, element::f32).get(), + ov::op::TemporaryReplaceOutputType( + fold( + foldConvert(multiplyParentConst, element::f32), + foldConvert(constParent, element::f32)), + element::f32).get()); + + NetworkHelper::copyInfo(multiplyParent.get_node_shared_ptr(), newMultiply); + NetworkHelper::copyInfo(multiply, newMultiply); + } else { + const int emptyPathIndex = fullPathIndex == 0 ? 1 : 0; + + if (updatePrecisions) { + const FakeQuantizeDequantization dequantizationEmptyPath = NetworkHelper::getDequantization(multiply, defaultPrecisions, emptyPathIndex); + if (!dequantizationEmptyPath.empty() && !dequantizationEmptyPath.isLowPrecision()) { + return false; + } + } + + FakeQuantizeDequantization dequantizationEmptyPath = NetworkHelper::foldDequantization(multiply, emptyPathIndex, defaultPrecisions); + std::shared_ptr subtractValuesEmptyPath; + std::shared_ptr multiplyValuesEmptyPath; + std::tie(subtractValuesEmptyPath, multiplyValuesEmptyPath) = NetworkHelper::createEmptyValues(dequantizationEmptyPath, deqPrecision); + + // check if empty path shifts are not zero + if (!NetworkHelper::isZeroConst(subtractValuesEmptyPath)) { + return false; + } + + FakeQuantizeDequantization dequantizationFullPath = NetworkHelper::foldDequantization(multiply, fullPathIndex, defaultPrecisions); + std::shared_ptr subtractValuesFullPath; + std::shared_ptr multiplyValuesFullPath; + std::tie(subtractValuesFullPath, multiplyValuesFullPath) = NetworkHelper::createEmptyValues(dequantizationFullPath, deqPrecision); + + + // before: Y = (SC1 * (X1 - SH1)) * (SC2 * X2) + // after : Y = (SC1' * (X1 - SH1)) * (X2) , where : + // SC1' = SC1 * SC2 + auto newMultiplyValuesFullPath = fold(multiplyValuesEmptyPath, multiplyValuesFullPath); + OutputVector inputs{ {}, {} }; + inputs[emptyPathIndex] = dequantizationEmptyPath.data; + inputs[fullPathIndex] = std::make_shared( + dequantizationFullPath.subtract == nullptr ? + (dequantizationFullPath.convert == nullptr ? 
+ dequantizationFullPath.data : dequantizationFullPath.convert) : + dequantizationFullPath.subtract, + newMultiplyValuesFullPath); + + newMultiply = std::make_shared>( + std::vector{element::f32, element::f32}, + std::vector{ multiply->get_output_element_type(0) }, + ov::op::TemporaryReplaceOutputType(inputs[0], element::f32).get(), + ov::op::TemporaryReplaceOutputType(inputs[1], element::f32).get()); + NetworkHelper::copyInfo(multiply, newMultiply); + } + + replace_node(multiply, newMultiply); + updateOutput(context, newMultiply, multiply); + + if (fullPathIndex != -1) { + NetworkHelper::foldDequantization(newMultiply, fullPathIndex, defaultPrecisions); + } + + return true; +} + +bool MultiplyPartialTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const { + FakeQuantizeDequantization dequantization1 = pass::low_precision::NetworkHelper::getDequantization(layer, defaultPrecisions, 0ul); + FakeQuantizeDequantization dequantization2 = pass::low_precision::NetworkHelper::getDequantization(layer, defaultPrecisions, 1ul); + + if (dequantization1.data.get_node() == nullptr || dequantization2.data.get_node() == nullptr) { + return false; + } + + const bool nonConstantData = !ov::is_type(dequantization1.data.get_node_shared_ptr()) && + !ov::is_type(dequantization2.data.get_node_shared_ptr()); + + if (((dequantization1.empty() || dequantization2.empty()) && nonConstantData)) { + return false; + } + + return EltwiseBaseTransformation::canBeTransformed(context, layer); +} + +} // namespace low_precision +} // namespace pass +} // namespace ov diff --git a/src/common/low_precision_transformations/src/multiply_to_group_convolution.cpp b/src/common/low_precision_transformations/src/multiply_to_group_convolution.cpp index 62a51ef6193d56..a8999aeff8eec6 100644 --- a/src/common/low_precision_transformations/src/multiply_to_group_convolution.cpp +++ b/src/common/low_precision_transformations/src/multiply_to_group_convolution.cpp @@ -15,7 +15,7 @@ namespace low_precision { MultiplyToGroupConvolutionTransformation::MultiplyToGroupConvolutionTransformation( const Params& params, - const PrecisionsRestriction::PrecisionsByPorts& restrictions) : LayerTransformation(params), restrictions(restrictions), groupSize(1ul) { + const PrecisionsRestriction::PrecisionsByPorts& restrictions) : CleanupTransformation(params), restrictions(restrictions), groupSize(1ul) { MATCHER_SCOPE(MultiplyToGroupConvolutionTransformation); auto matcher = pattern::wrap_type(); @@ -143,6 +143,10 @@ bool MultiplyToGroupConvolutionTransformation::transform(TransformationContext& } bool MultiplyToGroupConvolutionTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr operation) const { + if (!CleanupTransformation::canBeTransformed(context, operation)) { + return false; + } + const PartialShape outPShape = operation->get_output_partial_shape(0); const auto rank = outPShape.rank(); if (rank.is_dynamic()) { diff --git a/src/common/low_precision_transformations/src/recurrent_cell.cpp b/src/common/low_precision_transformations/src/recurrent_cell.cpp index cb961a3de409bd..7fd40cf2071a0f 100644 --- a/src/common/low_precision_transformations/src/recurrent_cell.cpp +++ b/src/common/low_precision_transformations/src/recurrent_cell.cpp @@ -14,7 +14,7 @@ #include "openvino/pass/pattern/op/or.hpp" #include "low_precision/network_helper.hpp" -#include "low_precision/rt_info/skip_cleanup_attribute.hpp" +#include "low_precision/rt_info/disable_cleanup_attribute.hpp" namespace ov { 
namespace pass { @@ -96,6 +96,7 @@ bool RecurrentCellTransformation::transform(TransformationContext& context, ov:: if (!canBeTransformed(context, lstm)) { return false; } + for (size_t parentIndex = 0ul; parentIndex < lstm->get_input_size(); parentIndex++) { auto lstm_parent = lstm->get_input_node_shared_ptr(parentIndex); if (is_type(lstm_parent)) { @@ -108,7 +109,7 @@ bool RecurrentCellTransformation::transform(TransformationContext& context, ov:: ? defaultPrecisions : precisionsAttribute.as().value(); const DataPrecision dataPrecision = getDataPrecision(lstm_parent, quantizationDetails, precisions); - if (dataPrecision.empty()) { + if (dataPrecision.empty() || dataPrecision.hasZeroPoint) { return false; } @@ -148,6 +149,7 @@ bool RecurrentCellTransformation::transform(TransformationContext& context, ov:: continue; } } + return true; } @@ -172,12 +174,12 @@ bool RecurrentCellTransformation::isPrecisionPreserved(std::shared_ptr) co } void RecurrentCellTransformation::propagateSkipCleanupAttribute(std::shared_ptr multiply) { - SkipCleanupAttribute::create(multiply); + DisableCleanupAttribute::create(multiply); auto multiply_parent = multiply->get_input_node_shared_ptr(0); - SkipCleanupAttribute::create(multiply_parent); + DisableCleanupAttribute::create(multiply_parent); if (is_type(multiply_parent)) { auto subtract_parent = multiply_parent->get_input_node_shared_ptr(0); - SkipCleanupAttribute::create(subtract_parent); + DisableCleanupAttribute::create(subtract_parent); } } diff --git a/src/common/low_precision_transformations/src/rt_info/skip_cleanup_attribute.cpp b/src/common/low_precision_transformations/src/rt_info/skip_cleanup_attribute.cpp deleted file mode 100644 index 1d7d4a1549a6aa..00000000000000 --- a/src/common/low_precision_transformations/src/rt_info/skip_cleanup_attribute.cpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "low_precision/rt_info/skip_cleanup_attribute.hpp" - -#include -#include -#include -#include -#include - -using namespace ov; -using namespace ov; - -ov::Any SkipCleanupAttribute::create( - const std::shared_ptr& node) { - auto& rt = node->get_rt_info(); - return (rt[SkipCleanupAttribute::get_type_info_static()] = SkipCleanupAttribute()); -} diff --git a/src/common/low_precision_transformations/src/weightable_layer_transformation.cpp b/src/common/low_precision_transformations/src/weightable_layer_transformation.cpp index ac945339755f9e..d3dd47d2107737 100644 --- a/src/common/low_precision_transformations/src/weightable_layer_transformation.cpp +++ b/src/common/low_precision_transformations/src/weightable_layer_transformation.cpp @@ -15,6 +15,7 @@ namespace pass { namespace low_precision { namespace { +// used in isQuantizedStatic static method, can not be virtual method std::vector getWeightsDequantizationIdces(const std::shared_ptr weightableLayer) { if (ov::is_type(weightableLayer)) { return std::vector{0}; @@ -22,7 +23,9 @@ std::vector getWeightsDequantizationIdces(const std::shared_ptr{1}; } else if (ov::is_type(weightableLayer)) { return ov::is_type(weightableLayer->get_input_node_shared_ptr(1)) ? 
std::vector{0} - : std::vector{0, 1}; + : std::vector{0, 1}; + } else if (ov::is_type(weightableLayer)) { + return std::vector{}; } else { THROW_IE_LPT_EXCEPTION(*weightableLayer) << "getWeightsDequantizationIdces is called for unexpected layer"; } @@ -41,7 +44,10 @@ bool checkConstShape(const std::vector& idcesToCheck, const std::shared_ } } // namespace -WeightableLayerTransformation::WeightableLayerTransformation(const Params& params) : LayerTransformation(params) {} +WeightableLayerTransformation::WeightableLayerTransformation(const Params& params, const CanBeTransformedParams& canBeTransformedParams) : + LayerTransformation(params), + canBeTransformedParams(canBeTransformedParams) { +} bool WeightableLayerTransformation::canConvolutionBeTransformed(const TransformationContext& context, std::shared_ptr layer, const std::vector& defaultPrecisions) const { @@ -88,7 +94,7 @@ bool WeightableLayerTransformation::canBeTransformed(const TransformationContext } // dynamic activations rank and dynamic weights aren't supported - if (layer->get_input_partial_shape(0).rank().is_dynamic() || layer->get_input_partial_shape(1).is_dynamic()) { + if (!canBeTransformedParams.dynamicWeights && (layer->get_input_partial_shape(0).rank().is_dynamic() || layer->get_input_partial_shape(1).is_dynamic())) { return false; } @@ -138,14 +144,16 @@ bool WeightableLayerTransformation::canBeTransformed(const TransformationContext return false; } - // exactly cast vector as original code has a conversion; - // optimize cast: - // two branches depending on real type of the constant? - const auto scalesBuffer = dequantization.multiplyConstant->cast_vector(); - size_t scalesBufferSize = shape_size(dequantization.multiplyConstant->get_shape()); - for (size_t i = 1ul; i < scalesBufferSize; ++i) { - if (scalesBuffer[i - 1] != scalesBuffer[i]) { - return false; + if (canBeTransformedParams.perTensorQuantizationOnData) { + // exactly cast vector as original code has a conversion; + // optimize cast: + // two branches depending on real type of the constant? + const auto scalesBuffer = dequantization.multiplyConstant->cast_vector(); + size_t scalesBufferSize = shape_size(dequantization.multiplyConstant->get_shape()); + for (size_t i = 1ul; i < scalesBufferSize; ++i) { + if (scalesBuffer[i - 1] != scalesBuffer[i]) { + return false; + } } } } @@ -213,8 +221,11 @@ bool WeightableLayerTransformation::canBeTransformed(const TransformationContext dqVolume *= constChannels; } } - if (shape_size(constShape) != 1 && shape_size(constShape) != dqVolume) { - return false; + + if (!dqIdces.empty()) { + if (shape_size(constShape) != 1 && shape_size(constShape) != dqVolume) { + return false; + } } } else { // TODO: LPT: is it possible to share with isQuantized? 
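// Illustrative note (not from this commit): the relaxations applied in this function are driven by
// the CanBeTransformedParams flags introduced in weightable_layer_transformation.hpp. For example,
// MultiplyTransformation (see multiply.cpp earlier in this patch) constructs its base class with
// CanBeTransformedParams(false, false, false, true); spelled out against the constructor's declared
// argument order, that means:
//
//     WeightableLayerTransformation::CanBeTransformedParams(
//         /* constantWeight */              false,   // weights need not sit on a Constant path
//         /* perTensorQuantizationOnData */ false,   // per-channel data dequantization scales allowed
//         /* limitWeightsDataPrecision */   false,   // do not restrict weights to the expected precisions
//         /* dynamicWeights */              true);   // dynamic activation rank / weight shapes tolerated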
@@ -225,13 +236,16 @@ bool WeightableLayerTransformation::canBeTransformed(const TransformationContext return false; } - const auto weightsData = ov::as_type_ptr(dequantizationOnWeights.data.get_node_shared_ptr()); - if (weightsData == nullptr) { - return false; + const auto weightsData = dequantizationOnWeights.data.get_node_shared_ptr(); + if (canBeTransformedParams.constantWeight) { + const auto constantWeightsData = ov::as_type_ptr(weightsData); + if (constantWeightsData == nullptr) { + return false; + } } const auto weightsDataPrecision = weightsData->get_element_type(); - if (!DataPrecision::isSupported(weightsDataPrecision)) { + if (canBeTransformedParams.limitWeightsDataPrecision && !DataPrecision::isSupported(weightsDataPrecision)) { return false; } @@ -243,9 +257,11 @@ bool WeightableLayerTransformation::canBeTransformed(const TransformationContext } const auto dqIdces = getWeightsDequantizationIdces(layer); - if ((dequantizationOnWeights.subtract && !checkConstShape(dqIdces, dequantizationOnWeights.subtractConstant)) || - (dequantizationOnWeights.multiply && !checkConstShape(dqIdces, dequantizationOnWeights.multiplyConstant))) { - return false; + if (!dqIdces.empty()) { + if ((dequantizationOnWeights.subtract && !checkConstShape(dqIdces, dequantizationOnWeights.subtractConstant)) || + (dequantizationOnWeights.multiply && !checkConstShape(dqIdces, dequantizationOnWeights.multiplyConstant))) { + return false; + } } } diff --git a/src/common/low_precision_transformations/tests/lpt_avoid_shapeof_propagation_test.cpp b/src/common/low_precision_transformations/tests/lpt_avoid_shapeof_propagation_test.cpp index 431c4459a4c57b..f2459620019351 100644 --- a/src/common/low_precision_transformations/tests/lpt_avoid_shapeof_propagation_test.cpp +++ b/src/common/low_precision_transformations/tests/lpt_avoid_shapeof_propagation_test.cpp @@ -22,7 +22,7 @@ #include "low_precision/interpolate.hpp" #include "low_precision/mat_mul.hpp" #include "low_precision/max_pool.hpp" -#include "low_precision/multiply.hpp" +#include "low_precision/multiply_partial.hpp" #include "low_precision/mvn.hpp" #include "low_precision/network_helper.hpp" #include "low_precision/normalize_l2.hpp" @@ -361,7 +361,7 @@ TEST(LPT, AvoidDequantizationToShapeOfPropagationMultiplyTransformation) { auto f = std::make_shared(ResultVector{result1, result2}, ParameterVector{input1, input2}); pass::Manager m; - m.register_pass(); + m.register_pass(); m.run_passes(f); auto dqBeforeShapeOf = ov::pass::low_precision::NetworkHelper::getDequantization(result2->get_input_node_shared_ptr(0)); diff --git a/src/common/low_precision_transformations/tests/multiply_partial_transformation.cpp b/src/common/low_precision_transformations/tests/multiply_partial_transformation.cpp new file mode 100644 index 00000000000000..1e556df70bc31b --- /dev/null +++ b/src/common/low_precision_transformations/tests/multiply_partial_transformation.cpp @@ -0,0 +1,1007 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "layer_transformation.hpp" + +#include +#include +#include + +#include + +#include +#include +#include +#include "low_precision/multiply_partial.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" + +#include "common_test_utils/ov_test_utils.hpp" +#include "simple_low_precision_transformer.hpp" +#include "lpt_ngraph_functions/multiply_partial_function.hpp" + +namespace { +using namespace testing; +using namespace ov; +using namespace ov::pass; +using namespace 
ngraph::builder::subgraph; + +class MultiplyPartialTransformationTestValues { +public: + TestTransformationParams transformationParams; + MultiplyPartialValues actual; + MultiplyPartialValues expected; + + MultiplyPartialTransformationTestValues() = default; + + MultiplyPartialTransformationTestValues( + TestTransformationParams transformationParams, + MultiplyPartialValues actual, + MultiplyPartialValues expected): + transformationParams(std::move(transformationParams)), + actual(std::move(actual)), + expected(std::move(expected)) {} +}; + +typedef std::tuple< + ov::element::Type, + MultiplyPartialTransformationTestValues> MultiplyPartialTransformationParams; + +class MultiplyPartialTransformation : public LayerTransformation, public testing::WithParamInterface { +public: + void SetUp() override { + const ov::element::Type precision = std::get<0>(GetParam()); + const MultiplyPartialTransformationTestValues testParams = std::get<1>(GetParam()); + + actualFunction = MultiplyPartialFunction::get(precision, testParams.actual); + + SimpleLowPrecisionTransformer transform; + transform.add(testParams.transformationParams); + transform.transform(actualFunction); + + referenceFunction = MultiplyPartialFunction::get(precision, testParams.expected); + } + + static std::string getTestCaseName(testing::TestParamInfo obj) { + const ov::element::Type precision = std::get<0>(obj.param); + const MultiplyPartialTransformationTestValues testParams = std::get<1>(obj.param); + + std::ostringstream result; + result << + LayerTransformation::getTestCaseNameByParams(precision, testParams.expected.branch1.inputShape, testParams.transformationParams) << + testParams.actual << + testParams.expected; + return result.str(); + } +}; + +TEST_P(MultiplyPartialTransformation, CompareFunctions) { + actualFunction->validate_nodes_and_infer_types(); + auto res = compare_functions(actualFunction, referenceFunction, true, true, false); + ASSERT_TRUE(res.first) << res.second; + + ASSERT_TRUE(LayerTransformation::allNamesAreUnique(actualFunction)) << "Not all names are unique"; +} + +const std::vector precisions = { + ov::element::f32, + ov::element::f16 +}; + +const std::vector multiplyTransformationTestValues = { + // U8 + { + LayerTransformation::createParamsU8I8(), + { + { + { 1, 3, 8, 16 }, + {}, + ov::element::u8, + {ov::element::f32, { 2.f }, { 10.f }} + }, + { + { 1, 3, 8, 16 }, + {}, + ov::element::u8, + {ov::element::f32, { 3.f }, { 7.f }} + }, + false + }, + { + { + { 1, 3, 8, 16 }, + {}, + ov::element::u8, + {ov::element::f32, { 2.f }, { 10.f }} + }, + { + { 1, 3, 8, 16 }, + {}, + ov::element::u8, + {ov::element::f32, { 3.f }, { 7.f }} + }, + false + } + }, + + { + LayerTransformation::createParamsU8I8(), + { + { + { Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic() }, + {}, + ov::element::u8, + {ov::element::f32, { 2.f }, { 10.f }} + }, + { + { Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic() }, + {}, + ov::element::u8, + {ov::element::f32, { 3.f }, { 7.f }} + }, + false + }, + { + { + { Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic() }, + {}, + ov::element::u8, + {ov::element::f32, { 2.f }, { 10.f }} + }, + { + { Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic() }, + {}, + ov::element::u8, + {ov::element::f32, { 3.f }, { 7.f }} + }, + false + } + }, + + { + LayerTransformation::createParamsU8I8(), + { + { + { 1, 3, 8, 16 }, + {}, + ov::element::u8, + 
{ov::element::f32, { 2.f }, { 10.f }} + }, + { + { 1, 3, 8, 16 }, + {}, + ov::element::u8, + {ov::element::f32, { }, { 7.f }} + }, + false + }, + { + { + { 1, 3, 8, 16 }, + {}, + ov::element::u8, + {ov::element::f32, { 2.f }, { 70.f }} + }, + { + { 1, 3, 8, 16 }, + {}, + ov::element::u8, + {} + }, + false + } + }, + + { + LayerTransformation::createParamsU8I8(), + { + { + { Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic() }, + {}, + ov::element::u8, + {ov::element::f32, { 2.f }, { 10.f }} + }, + { + { Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic() }, + {}, + ov::element::u8, + {ov::element::f32, { }, { 7.f }} + }, + false + }, + { + { + { Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic() }, + {}, + ov::element::u8, + {ov::element::f32, { 2.f }, { 70.f }} + }, + { + { Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic() }, + {}, + ov::element::u8, + {} + }, + false + } + }, + + { + LayerTransformation::createParamsU8I8(), + { + { + { 1, 3, 8, 16 }, + {}, + ov::element::u8, + { ov::element::f32, { }, { 10.f }} + }, + { + { 1, 3, 8, 16 }, + {}, + ov::element::u8, + { ov::element::f32, { }, { 7.f } } + }, + false + }, + { + { + { 1, 3, 8, 16 }, + {}, + ov::element::u8, + {ov::element::f32, { }, { 70.f }} + }, + { + { 1, 3, 8, 16 }, + {}, + ov::element::u8, + {} + }, + false + } + }, + + { + LayerTransformation::createParamsU8I8(), + { + { + { 1, 3, 8, 16 }, + {}, + ov::element::u8, + {ov::element::f32, { 2.f }, { }} + }, + { + { 1, 3, 8, 16 }, + {}, + ov::element::u8, + {ov::element::f32, { }, { 7.f } } + }, + false + }, + { + { + { 1, 3, 8, 16 }, + {}, + ov::element::u8, + {ov::element::f32, { 2.f }, { 7.f }} + }, + { + { 1, 3, 8, 16 }, + {}, + ov::element::u8, + {} + }, + false + } + }, + { + LayerTransformation::createParamsU8I8(), + { + { + { Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic() }, + {}, + ov::element::u8, + {ov::element::f32, { 2.f }, { }} + }, + { + { Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic() }, + {}, + ov::element::u8, + {ov::element::f32, { }, { 7.f } } + }, + false + }, + { + { + { Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic() }, + {}, + ov::element::u8, + {ov::element::f32, { 2.f }, { 7.f }} + }, + { + { Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic() }, + {}, + ov::element::u8, + {} + }, + false + } + }, + { + LayerTransformation::createParamsU8I8(), + { + { + PartialShape::dynamic(), + {}, + ov::element::u8, + {ov::element::f32, { 2.f }, { }} + }, + { + PartialShape::dynamic(), + {}, + ov::element::u8, + {ov::element::f32, { }, { 7.f } } + }, + false + }, + { + { + PartialShape::dynamic(), + {}, + ov::element::u8, + {ov::element::f32, { 2.f }, { }} + }, + { + PartialShape::dynamic(), + {}, + ov::element::u8, + {ov::element::f32, { }, { 7.f } } + }, + false + } + }, + + // I8 + { + LayerTransformation::createParamsI8I8(), + { + { + { 1, 3, 8, 16 }, + {}, + ov::element::i8, + {ov::element::f32, { 2.f }, { 10.f }} + }, + { + { 1, 3, 8, 16 }, + {}, + ov::element::i8, + {ov::element::f32, { 3.f }, { 7.f }} + }, + false + }, + { + { + { 1, 3, 8, 16 }, + {}, + ov::element::i8, + {ov::element::f32, { 2.f }, { 10.f }} + }, + { + { 1, 3, 8, 16 }, + {}, + ov::element::i8, + {ov::element::f32, { 3.f }, { 7.f } } + }, + false + } + }, + + // Actual: + // + // Parameter + // 
|I8 + // | + // Convert Constant Parameter + // \FP32 /FP32 |I8 + // \ / | + // Subtract Constant Convert Constant + // \FP32 /FP32 \FP32 /FP32 + // \ / \ / + // Multiply Multiply + // \FP32 /FP32 + // \ / + // Multiply + // Transformed: + // + // Parameter + // |I8 + // | + // Convert Constant + // \FP32 /FP32 + // \ / + // Subtract Constant + // \FP32 /FP32 + // \ / + // Multiply Parameter + // \FP32 /I8 + // \ / + // Multiply + { + LayerTransformation::createParamsI8I8(), + { + { + { 1, 3, 8, 16 }, + {}, + ov::element::i8, + {ov::element::f32, { 2.f }, { 10.f }} + }, + { + { 1, 3, 8, 16 }, + {}, + ov::element::i8, + {ov::element::f32, { }, { 7.f }} + }, + false + }, + { + { + { 1, 3, 8, 16 }, + {}, + ov::element::i8, + {ov::element::f32, { 2.f }, { 70.f }}, + }, + { + { 1, 3, 8, 16 }, + {}, + ov::element::i8, + {} + }, + false + } + }, + + // Actual: + // + // Parameter Constant + // |I8 |I8 + // | | + // Convert Convert Parameter + // \FP32 /FP32 |I8 + // \ / | + // Subtract Constant Convert Constant + // \FP32 /FP32 \FP32 /FP32 + // \ / \ / + // Multiply Multiply + // \FP32 /FP32 + // \ / + // Multiply + // Transformed: + // + // Parameter + // |I8 + // | + // Convert Constant + // \FP32 /FP32 + // \ / + // Subtract Constant + // \FP32 /FP32 + // \ / + // Multiply Parameter + // \FP32 /I8 + // \ / + // Multiply + { + LayerTransformation::createParamsI8I8(), + { + { + { 1, 3, 8, 16 }, + {}, + ov::element::i8, + { + ov::element::f32, + { {2.f}, ov::element::f32, {}, true, 1ul, ov::element::i8, true }, + { 10.f } + } + }, + { + { 1, 3, 8, 16 }, + {}, + ov::element::i8, + {ov::element::f32, { }, { 7.f }} + }, + false + }, + { + { + { 1, 3, 8, 16 }, + {}, + ov::element::i8, + {ov::element::f32, { 2.f }, { 70.f }}, + }, + { + { 1, 3, 8, 16 }, + {}, + ov::element::i8, + {} + }, + false + } + }, + + { + LayerTransformation::createParamsI8I8(), + { + { + { 1, 3, 8, 16 }, + {}, + ov::element::i8, + {ov::element::f32, { }, { 10.f }} + }, + { + { 1, 3, 8, 16 }, + {}, + ov::element::i8, + {ov::element::f32, { }, { 7.f } } + }, + false + }, + { + { + { 1, 3, 8, 16 }, + {}, + ov::element::i8, + { ov::element::f32, { }, { 70.f }} + }, + { + { 1, 3, 8, 16 }, + {}, + ov::element::i8, + { } + }, + false + } + }, + + { + LayerTransformation::createParamsI8I8(), + { + { + { 1, 3, 8, 16 }, + {}, + ov::element::i8, + {ov::element::f32, { 2.f }, { }}, + }, + { + { 1, 3, 8, 16 }, + {}, + ov::element::i8, + {ov::element::f32, { }, { 7.f } }, + }, + false + }, + { + { + { 1, 3, 8, 16 }, + {}, + ov::element::i8, + {ov::element::f32, { 2.f }, { 7.f }}, + }, + { + { 1, 3, 8, 16 }, + {}, + ov::element::i8, + {} + }, + false + } + }, + + // Constant as input + { + LayerTransformation::createParamsU8I8(), + { + { + { 1, 3, 8, 16 }, + {}, + ov::element::i8, + {ov::element::f32, { }, { 10.f }}, + }, + { + {}, + {{ 7.f }, ov::element::f32}, // Constant as input + ov::element::f32, + {} + }, + false + }, + { + { + { 1, 3, 8, 16 }, + {}, + ov::element::i8, + {ov::element::f32, {}, {}}, + }, + { + {}, + {{ 70.f }, ov::element::f32}, + ov::element::f32, + {} + }, + true + } + }, + + { + LayerTransformation::createParamsU8I8(), + { + { + { Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic() }, + {}, + ov::element::i8, + {ov::element::f32, { }, { 10.f }}, + }, + { + {}, + {{ 7.f }, ov::element::f32}, // Constant as input + ov::element::f32, + {} + }, + false + }, + { + { + { Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic() }, + {}, + ov::element::i8, + 
{ov::element::f32, {}, {}}, + }, + { + {}, + {{ 70.f }, ov::element::f32}, + ov::element::f32, + {} + }, + true + } + }, + + { + LayerTransformation::createParamsU8I8(), + { + { + { 1, 3, 8, 16 }, + {}, + ov::element::i8, + {ov::element::f32, { 18.f }, { 10.f }}, + }, + { + {}, + {{ 7.f }, ov::element::f32}, + ov::element::f32, + {} + }, + false + }, + { + { + { 1, 3, 8, 16 }, + {}, + ov::element::i8, + {ov::element::f32, { 18.f }, { }}, + }, + { + {}, + {{ 70.f }, ov::element::f32}, + ov::element::f32, + {} + }, + true + } + }, + + // Constant as input with empty shape + { + LayerTransformation::createParamsU8I8(), + { + { + { 1, 3, 8, 16 }, + {}, + ov::element::i8, + {ov::element::f32, { }, { 0.2f }}, + }, + { + {}, + {{ 7.f }, ov::element::i8}, // Constant as input + ov::element::i8, + {ov::element::f32, { }, { 0.5f }}, + }, + false + }, + { + { + { 1, 3, 8, 16 }, + {}, + ov::element::i8, + {ov::element::f32, {}, {}}, + }, + { + {}, + {{ 0.7f }, ov::element::f32}, + ov::element::f32, + {} + }, + true + } + }, + + // Constant as input with 1 dimension shape + { + LayerTransformation::createParamsU8I8(), + { + { + {}, + {{ 7.f, 8.f, 9.f }, ov::element::i8, ov::Shape{3}}, // Constant as input + ov::element::i8, + {ov::element::f32, { }, { {0.1f, 0.2f, 0.3f}, element::f32, ov::Shape{3} }}, + }, + { + { 1, 2, 3 }, + {}, + ov::element::f32, + {{}, {}, {{0.2f, 0.3f, 0.4f}, element::f32, ov::Shape{3}}}, + }, + false + }, + { + { + { 1, 2, 3 }, + {}, + ov::element::f32, + {}, + }, + { + {}, + { {0.14f, 0.48f, 1.08f}, ov::element::f32, ov::Shape{3}}, // Constant as input + {}, + {}, + }, + true + } + }, + + // Parameter as input with, Constant with 1 dimension shape + { + LayerTransformation::createParamsU8I8(), + { + { + { 1, 2, 3 }, + {}, + ov::element::f32, + {{}, {}, {{0.2f, 0.3f, 0.4f}, element::f32, ov::Shape{3}}}, + }, + { + {}, + {{ 7.f, 8.f, 9.f }, ov::element::i8, ov::Shape{3}}, // Constant as input + ov::element::i8, + {ov::element::f32, { }, { {0.1f, 0.2f, 0.3f}, element::f32, ov::Shape{3} }}, + }, + false + }, + { + { + { 1, 2, 3 }, + {}, + ov::element::f32, + {}, + }, + { + {}, + { {0.14f, 0.48f, 1.08f}, ov::element::f32, ov::Shape{3}}, // Constant as input + {}, + {}, + }, + true + } + }, + + // Actual: + // + // Parameter Constant Constant Constant + // |I8 |I8 |I8 |I8 + // | | | | + // Convert Convert Convert Convert + // \FP32 /FP32 |I8 /FP32 + // \ / | / + // Subtract Constant Subtract Constant + // \FP32 /FP32 \FP32 /FP32 + // \ / \ / + // Multiply Multiply + // \FP32 /FP32 + // \ / + // Multiply + // Transformed: + // + // Parameter Constant + // |I8 |I8 + // | | + // Convert Convert + // \FP32 /FP32 + // \ / + // Subtract Constant + // \FP32 /FP32 + // \ / + // Multiply + // + { + LayerTransformation::createParamsU8I8(), + { + { + { 1, 3, 8, 16 }, + {}, + ov::element::i8, + { + ov::element::f32, + { {127.f}, ov::element::f32, {}, false, 1, ov::element::i8, true }, + { 0.2f } + }, + }, + { + {}, + {{ 7.f }, ov::element::i8}, // Constant as input + ov::element::i8, + { + ov::element::f32, + { {127.f}, ov::element::f32, {}, false, 1, ov::element::i8, true }, + { 0.5f } + }, + }, + false + }, + { + { + { 1, 3, 8, 16 }, + {}, + ov::element::i8, + { + ov::element::f32, + { {127.f}, ov::element::f32, {}, false, 1, ov::element::i8, true }, + {} + }, + }, + { + {}, + {{ -12.f }, ov::element::f32}, + ov::element::f32, + {} + }, + true + } + }, + + // Actual: + // + // Constant Constant Parameter Constant + // |I8 |I8 |I8 |I8 + // | | | | + // Convert Convert Convert Convert + // \FP32 
/FP32 |I8 /FP32 + // \ / | / + // Subtract Constant Subtract Constant + // \FP32 /FP32 \FP32 /FP32 + // \ / \ / + // Multiply Multiply + // \FP32 /FP32 + // \ / + // Multiply + // Transformed: + // + // Parameter Constant + // |I8 |I8 + // | | + // Convert Convert + // \FP32 /FP32 + // \ / + // Subtract Constant + // \FP32 /FP32 + // \ / + // Multiply + // + { + LayerTransformation::createParamsU8I8(), + { + { + {}, + {{ 7.f }, ov::element::i8}, // Constant as input + ov::element::i8, + { + ov::element::f32, + { {127.f}, ov::element::f32, {}, false, 1, ov::element::i8, true }, + { 0.5f } + }, + }, + { + { 1, 3, 8, 16 }, + {}, + ov::element::i8, + { + ov::element::f32, + { {127.f}, ov::element::f32, {}, false, 1, ov::element::i8, true }, + { 0.2f } + }, + }, + false + }, + { + { + { 1, 3, 8, 16 }, + {}, + ov::element::i8, + { + ov::element::f32, + { {127.f}, ov::element::f32, {}, false, 1, ov::element::i8, true }, + {} + }, + }, + { + {}, + {{ -12.f }, ov::element::f32}, + ov::element::f32, + {} + }, + true + } + }, + { + LayerTransformation::createParamsU8I8(), + { + { + {}, + {{ 7.f }, ov::element::i8}, // Constant as input + ov::element::i8, + { + ov::element::f32, + { {127.f}, ov::element::f32, {}, false, 1, ov::element::i8, true }, + { 0.5f } + }, + }, + { + { Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic() }, + {}, + ov::element::i8, + { + ov::element::f32, + { {127.f}, ov::element::f32, {}, false, 1, ov::element::i8, true }, + { 0.2f } + }, + }, + false + }, + { + { + { Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic() }, + {}, + ov::element::i8, + { + ov::element::f32, + { {127.f}, ov::element::f32, {}, false, 1, ov::element::i8, true }, + {} + }, + }, + { + {}, + {{ -12.f }, ov::element::f32}, + ov::element::f32, + {} + }, + true + } + }, +}; + +INSTANTIATE_TEST_SUITE_P( + smoke_LPT, + MultiplyPartialTransformation, + ::testing::Combine( + ::testing::ValuesIn(precisions), + ::testing::ValuesIn(multiplyTransformationTestValues)), + MultiplyPartialTransformation::getTestCaseName); +} // namespace diff --git a/src/common/low_precision_transformations/tests/multiply_transformation.cpp b/src/common/low_precision_transformations/tests/multiply_transformation.cpp index f8925c75c9ee68..3ea4563f62ba62 100644 --- a/src/common/low_precision_transformations/tests/multiply_transformation.cpp +++ b/src/common/low_precision_transformations/tests/multiply_transformation.cpp @@ -14,6 +14,7 @@ #include "transformations/utils/utils.hpp" #include "transformations/init_node_info.hpp" #include "low_precision/multiply.hpp" +#include "low_precision/multiply_to_group_convolution.hpp" #include "lpt_ngraph_functions/common/dequantization_operations.hpp" #include "common_test_utils/ov_test_utils.hpp" @@ -26,8 +27,49 @@ using namespace ov; using namespace ov::pass; using namespace ngraph::builder::subgraph; +class MultiplyBranch { +public: + ngraph::builder::subgraph::Constant constant; + ngraph::element::Type input_precision; + ngraph::builder::subgraph::DequantizationOperations dequantization; + ngraph::builder::subgraph::FakeQuantizeOnData fake_quantize; +}; + +inline std::ostream& operator<<(std::ostream& out, const MultiplyBranch& branch) { + if (branch.input_precision != element::undefined) { + out << "_input=" << branch.input_precision; + } + if (!branch.constant.empty()) { + out << "_constant=" << branch.constant; + } + if (!branch.dequantization.empty()) { + out << "_dequantization=" << branch.dequantization; + } + if 
(!branch.fake_quantize.empty()) { + out << "_fake_quantize=" << branch.constant; + } + return out; +} + +class MultiplyValues { +public: + MultiplyBranch branch1; + MultiplyBranch branch2; + ngraph::builder::subgraph::DequantizationOperations after_dequantization; +}; + +inline std::ostream& operator<<(std::ostream& out, const MultiplyValues& values) { + return out << "_branch1=" << values.branch1 << "_branch2=" << values.branch2 << "_after=" << values.after_dequantization; +} + class MultiplyTransformationTestValues { public: + // use this value in test case declaration to set precision as input precision + static const ov::element::Type input_precision; + + // use this value in test case declaration to set precision as model precision + static const ov::element::Type model_precision; + TestTransformationParams transformationParams; MultiplyValues actual; MultiplyValues expected; @@ -43,35 +85,107 @@ class MultiplyTransformationTestValues { expected(std::move(expected)) {} }; +const ov::element::Type MultiplyTransformationTestValues::input_precision = ov::element::undefined; +const ov::element::Type MultiplyTransformationTestValues::model_precision = ov::element::undefined; + typedef std::tuple< - ov::element::Type, + ov::element::Type, // model precision + std::pair, // input_shapes + std::pair, // input precisions MultiplyTransformationTestValues> MultiplyTransformationParams; class MultiplyTransformation : public LayerTransformation, public testing::WithParamInterface { public: void SetUp() override { - const ov::element::Type precision = std::get<0>(GetParam()); - const MultiplyTransformationTestValues testParams = std::get<1>(GetParam()); + const auto model_precision = std::get<0>(GetParam()); + const auto input_shapes = std::get<1>(GetParam()); + const auto input_precisions = std::get<2>(GetParam()); + MultiplyTransformationTestValues testParams = std::get<3>(GetParam()); + + update_input_precisions(input_precisions, testParams); + update_dequantization_precision(model_precision, testParams); + + // output precision has to be defined by model precision + if (testParams.expected.after_dequantization.multiply.outPrecision == MultiplyTransformationTestValues::model_precision) { + testParams.expected.after_dequantization.multiply.outPrecision = model_precision; + } - actualFunction = MultiplyFunction::get(precision, testParams.actual); - SimpleLowPrecisionTransformer transform; + const auto to_multiply_values = [&input_shapes, &input_precisions](const MultiplyValues& values) { + return ngraph::builder::subgraph::MultiplyValues( + ngraph::builder::subgraph::MultiplyBranch( + input_shapes.first, values.branch1.constant, input_precisions.first, values.branch1.dequantization, values.branch1.fake_quantize), + ngraph::builder::subgraph::MultiplyBranch( + input_shapes.second, values.branch2.constant, input_precisions.second, values.branch2.dequantization, values.branch2.fake_quantize), + ngraph::builder::subgraph::DequantizationOperations(values.after_dequantization)); + }; + + actualFunction = MultiplyFunction::get(model_precision, to_multiply_values(testParams.actual)); + + SimpleLowPrecisionTransformer transform({}, {}, AttributeParameters(), true); transform.add(testParams.transformationParams); + transform.cleanup->get_pass_config()->disable(); transform.transform(actualFunction); - referenceFunction = MultiplyFunction::get(precision, testParams.expected); + referenceFunction = MultiplyFunction::get(model_precision, to_multiply_values(testParams.expected)); } static std::string 
getTestCaseName(testing::TestParamInfo obj) { - const ov::element::Type precision = std::get<0>(obj.param); - const MultiplyTransformationTestValues testParams = std::get<1>(obj.param); + const auto model_precision = std::get<0>(obj.param); + const auto input_shapes = std::get<1>(obj.param); + const auto input_precisions = std::get<2>(obj.param); + MultiplyTransformationTestValues testParams = std::get<3>(obj.param); std::ostringstream result; - result << - LayerTransformation::getTestCaseNameByParams(precision, testParams.expected.branch1.inputShape, testParams.transformationParams) << - testParams.actual << - testParams.expected; + result << LayerTransformation::getTestCaseNameByParams(model_precision, input_shapes.first, testParams.transformationParams) << + "_SH1=" << input_shapes.first << + "_TY1=" << input_precisions.first << + "_SH2=" << input_shapes.second << + "_TY2=" << input_precisions.second; + + update_input_precisions(input_precisions, testParams); + update_dequantization_precision(model_precision, testParams); + + result << testParams.actual << testParams.expected; return result.str(); } + +private: + // dequantization output precision has to be defined by input precision + static void update_dequantization_precision(const ov::element::Type& dequantization_precision, + MultiplyTransformationTestValues& test_values) { + if (!test_values.actual.after_dequantization.multiply.empty() && + test_values.actual.after_dequantization.multiply.outPrecision == MultiplyTransformationTestValues::input_precision) { + test_values.actual.after_dequantization.multiply.outPrecision = dequantization_precision; + } + + if (!test_values.expected.after_dequantization.multiply.empty() && + test_values.expected.after_dequantization.multiply.outPrecision == MultiplyTransformationTestValues::input_precision) { + test_values.expected.after_dequantization.multiply.outPrecision = dequantization_precision; + } + } + + // low precision has to be defined by tests parameters + static void update_input_precisions(const std::pair& input_precisions, + MultiplyTransformationTestValues& test_values) { + const auto update_values = [](const std::pair& input_precisions, MultiplyValues& values) { + const auto update_branch = [](const ov::element::Type& input_precision, MultiplyBranch& branch) { + if (branch.input_precision == MultiplyTransformationTestValues::input_precision) { + branch.input_precision = input_precision; + } + + if (!branch.constant.empty() && + (branch.constant.outPrecision == MultiplyTransformationTestValues::input_precision)) { + branch.constant.outPrecision = input_precision; + } + }; + + update_branch(input_precisions.first, values.branch1); + update_branch(input_precisions.second, values.branch2); + }; + + update_values(input_precisions, test_values.actual); + update_values(input_precisions, test_values.expected); + } }; TEST_P(MultiplyTransformation, CompareFunctions) { @@ -82,112 +196,121 @@ TEST_P(MultiplyTransformation, CompareFunctions) { ASSERT_TRUE(LayerTransformation::allNamesAreUnique(actualFunction)) << "Not all names are unique"; } -const std::vector precisions = { +const std::vector model_precisions = { ov::element::f32, ov::element::f16 }; -const std::vector multiplyTransformationTestValues = { - // U8 - { - LayerTransformation::createParamsU8I8(), +const std::vector> input_shapes = { + {{ 1, 3, 8, 16 }, { 1, 3, 8, 16 }}, + {{ 1, 3, 8, 16 }, { 1, 3, 1, 1 }}, + {{ 1, 3, 1, 1 }, { 1, 3, 8, 16 }}, { - { - { 1, 3, 8, 16 }, - {}, - ov::element::u8, - {ov::element::f32, { 2.f }, { 10.f 
}} - }, - { - { 1, 3, 8, 16 }, - {}, - ov::element::u8, - {ov::element::f32, { 3.f }, { 7.f }} - }, - false + { Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic() }, + { Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic() } }, { - { - { 1, 3, 8, 16 }, - {}, - ov::element::u8, - {ov::element::f32, { 2.f }, { 10.f }} - }, - { - { 1, 3, 8, 16 }, - {}, - ov::element::u8, - {ov::element::f32, { 3.f }, { 7.f }} - }, - false + { Dimension::dynamic(), 3, Dimension::dynamic(), Dimension::dynamic() }, + { Dimension::dynamic(), 3, Dimension::dynamic(), Dimension::dynamic() } } - }, +}; - { - LayerTransformation::createParamsU8I8(), +namespace multiply_channel_fq { + const std::vector> input_precisions = { + { ov::element::u8, ov::element::f32 }, + { ov::element::u8, ov::element::f16 }, + { ov::element::i8, ov::element::f32 }, + { ov::element::i8, ov::element::f16 } + }; + + const std::vector multiplyTransformationTestValues = { { + LayerTransformation::createParamsU8I8(), { - { Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic() }, - {}, - ov::element::u8, - {ov::element::f32, { 2.f }, { 10.f }} + { + {}, + MultiplyTransformationTestValues::input_precision, + {ov::element::f32, { 2.f }, { 10.f }} + }, + { + {{ 0.f, 1.27f, 2.55f }, MultiplyTransformationTestValues::input_precision, ov::Shape{1, 3, 1, 1}}, // Constant as input, + {}, + {}, + { + 256, + ov::Shape{1, 3, 1, 1}, + {0.f, 0.f, 0.f}, + {2.55f, 2.55f, 2.55f}, + {0.f, 0.f, 0.f}, + {2.55f, 2.55f, 2.55f}, + MultiplyTransformationTestValues::input_precision + } // FakeQuantize + }, }, { - { Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic() }, - {}, - ov::element::u8, - {ov::element::f32, { 3.f }, { 7.f }} + { + {}, + MultiplyTransformationTestValues::input_precision, + {{}, {{2.f}, ov::element::f32}, {}} + }, + { + {{ 0, 127, 255 }, ov::element::u8, ov::Shape{1, 3, 1, 1}}, // Constant as input, + {}, + {} + }, + {{}, {}, {{0.1f, 0.1f, 0.1f}}} }, - false }, - { - { - { Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic() }, - {}, - ov::element::u8, - {ov::element::f32, { 2.f }, { 10.f }} - }, - { - { Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic() }, - {}, - ov::element::u8, - {ov::element::f32, { 3.f }, { 7.f }} - }, - false - } - }, + }; + + INSTANTIATE_TEST_SUITE_P( + smoke_LPT, + MultiplyTransformation, + ::testing::Combine( + ::testing::ValuesIn(model_precisions), + ::testing::ValuesIn(input_shapes), + ::testing::ValuesIn(input_precisions), + ::testing::ValuesIn(multiplyTransformationTestValues)), + MultiplyTransformation::getTestCaseName); +} // namespace multiply_channel_fq + +const std::vector> input_precisions = { + { ov::element::u8, ov::element::u8 }, + { ov::element::i8, ov::element::i8 }, + { ov::element::u8, ov::element::i8 }, + { ov::element::i8, ov::element::u8 }, + { ov::element::f32, ov::element::f32 }, + { ov::element::f16, ov::element::f16 }, +}; +namespace multiply_channel { +const std::vector multiplyTransformationTestValues = { { LayerTransformation::createParamsU8I8(), { { - { 1, 3, 8, 16 }, {}, - ov::element::u8, + MultiplyTransformationTestValues::input_precision, {ov::element::f32, { 2.f }, { 10.f }} }, { - { 1, 3, 8, 16 }, {}, - ov::element::u8, - {ov::element::f32, { }, { 7.f }} + MultiplyTransformationTestValues::input_precision, + {ov::element::f32, { 3.f }, { 7.f }} }, - false }, { { - { 1, 3, 8, 16 }, 
{}, - ov::element::u8, - {ov::element::f32, { 2.f }, { 70.f }} + MultiplyTransformationTestValues::input_precision, + {{}, {{2.f}, ov::element::f32}, {}} }, { - { 1, 3, 8, 16 }, {}, - ov::element::u8, - {} + MultiplyTransformationTestValues::input_precision, + {{}, {{3.f}, ov::element::f32}, {}} }, - false + {{}, {}, {{70.f}, MultiplyTransformationTestValues::model_precision}} } }, @@ -195,33 +318,28 @@ const std::vector multiplyTransformationTestVa LayerTransformation::createParamsU8I8(), { { - { Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic() }, {}, - ov::element::u8, + MultiplyTransformationTestValues::input_precision, {ov::element::f32, { 2.f }, { 10.f }} }, { - { Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic() }, + {{ 7.f, 8.f, 9.f }, MultiplyTransformationTestValues::input_precision, ov::Shape{1, 3, 1, 1}}, // Constant as input, {}, - ov::element::u8, - {ov::element::f32, { }, { 7.f }} + {ov::element::f32, { 3.f }, { 7.f }} }, - false }, { { - { Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic() }, {}, - ov::element::u8, - {ov::element::f32, { 2.f }, { 70.f }} + MultiplyTransformationTestValues::input_precision, + {{}, {{2.f}, ov::element::f32}, {}} }, { - { Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic() }, + {{ 7.f, 8.f, 9.f }, MultiplyTransformationTestValues::input_precision, ov::Shape{1, 3, 1, 1}}, // Constant as input, {}, - ov::element::u8, - {} + {{}, {{3.f}, ov::element::f32}, {}} }, - false + {{}, {}, {{70.f}, MultiplyTransformationTestValues::model_precision}} } }, @@ -229,33 +347,28 @@ const std::vector multiplyTransformationTestVa LayerTransformation::createParamsU8I8(), { { - { 1, 3, 8, 16 }, + {{ 7.f, 8.f, 9.f }, MultiplyTransformationTestValues::input_precision, ov::Shape{1, 3, 1, 1}}, // Constant as input, {}, - ov::element::u8, - { ov::element::f32, { }, { 10.f }} + {ov::element::f32, { 3.f }, { 7.f }} }, { - { 1, 3, 8, 16 }, {}, - ov::element::u8, - { ov::element::f32, { }, { 7.f } } - }, - false + MultiplyTransformationTestValues::input_precision, + {ov::element::f32, { 2.f }, { 10.f }} + } }, { { - { 1, 3, 8, 16 }, + {{ 7.f, 8.f, 9.f }, MultiplyTransformationTestValues::input_precision, ov::Shape{1, 3, 1, 1}}, // Constant as input, {}, - ov::element::u8, - {ov::element::f32, { }, { 70.f }} + {{}, {{3.f}, ov::element::f32}, {}} }, { - { 1, 3, 8, 16 }, {}, - ov::element::u8, - {} + MultiplyTransformationTestValues::input_precision, + {{}, {{2.f}, ov::element::f32}, {}} }, - false + {{}, {}, {{70.f}, MultiplyTransformationTestValues::model_precision}} } }, @@ -263,369 +376,191 @@ const std::vector multiplyTransformationTestVa LayerTransformation::createParamsU8I8(), { { - { 1, 3, 8, 16 }, {}, - ov::element::u8, - {ov::element::f32, { 2.f }, { }} + MultiplyTransformationTestValues::input_precision, + {ov::element::f32, {}, { 10.f }} }, { - { 1, 3, 8, 16 }, {}, - ov::element::u8, - {ov::element::f32, { }, { 7.f } } - }, - false + MultiplyTransformationTestValues::input_precision, + {ov::element::f32, {}, { 7.f }} + } }, { { - { 1, 3, 8, 16 }, - {}, - ov::element::u8, - {ov::element::f32, { 2.f }, { 7.f }} - }, - { - { 1, 3, 8, 16 }, {}, - ov::element::u8, + MultiplyTransformationTestValues::input_precision, {} }, - false - } - }, - { - LayerTransformation::createParamsU8I8(), - { - { - { Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic() }, - {}, - ov::element::u8, - 
{ov::element::f32, { 2.f }, { }} - }, { - { Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic() }, {}, - ov::element::u8, - {ov::element::f32, { }, { 7.f } } - }, - false - }, - { - { - { Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic() }, - {}, - ov::element::u8, - {ov::element::f32, { 2.f }, { 7.f }} - }, - { - { Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic() }, - {}, - ov::element::u8, + MultiplyTransformationTestValues::input_precision, {} }, - false - } - }, - { - LayerTransformation::createParamsU8I8(), - { - { - PartialShape::dynamic(), - {}, - ov::element::u8, - {ov::element::f32, { 2.f }, { }} - }, - { - PartialShape::dynamic(), - {}, - ov::element::u8, - {ov::element::f32, { }, { 7.f } } - }, - false - }, - { - { - PartialShape::dynamic(), - {}, - ov::element::u8, - {ov::element::f32, { 2.f }, { }} - }, - { - PartialShape::dynamic(), - {}, - ov::element::u8, - {ov::element::f32, { }, { 7.f } } - }, - false + {{}, {}, {{70.f}, MultiplyTransformationTestValues::model_precision}} } }, - // I8 { - LayerTransformation::createParamsI8I8(), + LayerTransformation::createParamsU8I8(), { { - { 1, 3, 8, 16 }, {}, - ov::element::i8, - {ov::element::f32, { 2.f }, { 10.f }} + MultiplyTransformationTestValues::input_precision, + {ov::element::f32, {{ 1.f, 2.f, 3.f }}, {{ 10.f, 11.f, 12.f }}} }, { - { 1, 3, 8, 16 }, {}, - ov::element::i8, - {ov::element::f32, { 3.f }, { 7.f }} - }, - false + MultiplyTransformationTestValues::input_precision, + {ov::element::f32, {{ 3.f, 4.f, 5.f }}, {{ 7.f, 8.f, 9.f }}} + } }, { { - { 1, 3, 8, 16 }, {}, - ov::element::i8, - {ov::element::f32, { 2.f }, { 10.f }} + MultiplyTransformationTestValues::input_precision, + {{}, {{1.f, 2.f, 3.f}, ov::element::f32}, {}} }, { - { 1, 3, 8, 16 }, {}, - ov::element::i8, - {ov::element::f32, { 3.f }, { 7.f } } + MultiplyTransformationTestValues::input_precision, + {{}, {{3.f, 4.f, 5.f }, ov::element::f32}, {}} }, - false + {{}, {}, {{70.f, 88.f, 108.f}, MultiplyTransformationTestValues::model_precision}} } }, - // Actual: - // - // Parameter - // |I8 - // | - // Convert Constant Parameter - // \FP32 /FP32 |I8 - // \ / | - // Subtract Constant Convert Constant - // \FP32 /FP32 \FP32 /FP32 - // \ / \ / - // Multiply Multiply - // \FP32 /FP32 - // \ / - // Multiply - // Transformed: - // - // Parameter - // |I8 - // | - // Convert Constant - // \FP32 /FP32 - // \ / - // Subtract Constant - // \FP32 /FP32 - // \ / - // Multiply Parameter - // \FP32 /I8 - // \ / - // Multiply { - LayerTransformation::createParamsI8I8(), + LayerTransformation::createParamsU8I8(), { { - { 1, 3, 8, 16 }, {}, - ov::element::i8, + MultiplyTransformationTestValues::input_precision, {ov::element::f32, { 2.f }, { 10.f }} }, { - { 1, 3, 8, 16 }, {}, - ov::element::i8, + MultiplyTransformationTestValues::input_precision, {ov::element::f32, { }, { 7.f }} - }, - false + } }, { { - { 1, 3, 8, 16 }, {}, - ov::element::i8, - {ov::element::f32, { 2.f }, { 70.f }}, + MultiplyTransformationTestValues::input_precision, + {{}, {{2.f}, ov::element::f32}, {}} }, { - { 1, 3, 8, 16 }, {}, - ov::element::i8, + MultiplyTransformationTestValues::input_precision, {} }, - false + {{}, {}, {{70.f}, MultiplyTransformationTestValues::model_precision}} } }, - // Actual: - // - // Parameter Constant - // |I8 |I8 - // | | - // Convert Convert Parameter - // \FP32 /FP32 |I8 - // \ / | - // Subtract Constant Convert Constant - // \FP32 /FP32 \FP32 /FP32 - // \ / \ / - // 
Multiply Multiply - // \FP32 /FP32 - // \ / - // Multiply - // Transformed: - // - // Parameter - // |I8 - // | - // Convert Constant - // \FP32 /FP32 - // \ / - // Subtract Constant - // \FP32 /FP32 - // \ / - // Multiply Parameter - // \FP32 /I8 - // \ / - // Multiply { - LayerTransformation::createParamsI8I8(), + LayerTransformation::createParamsU8I8(), { { - { 1, 3, 8, 16 }, {}, - ov::element::i8, - { - ov::element::f32, - { {2.f}, ov::element::f32, {}, true, 1ul, ov::element::i8, true }, - { 10.f } - } + MultiplyTransformationTestValues::input_precision, + {ov::element::f32, {}, { 10.f }} }, { - { 1, 3, 8, 16 }, {}, - ov::element::i8, - {ov::element::f32, { }, { 7.f }} - }, - false + MultiplyTransformationTestValues::input_precision, + {ov::element::f32, { 3.f }, { 7.f }} + } }, { { - { 1, 3, 8, 16 }, - {}, - ov::element::i8, - {ov::element::f32, { 2.f }, { 70.f }}, - }, - { - { 1, 3, 8, 16 }, {}, - ov::element::i8, + MultiplyTransformationTestValues::input_precision, {} }, - false - } - }, - - { - LayerTransformation::createParamsI8I8(), - { { - { 1, 3, 8, 16 }, {}, - ov::element::i8, - {ov::element::f32, { }, { 10.f }} + MultiplyTransformationTestValues::input_precision, + {{}, {{3.f}, ov::element::f32}, {}} }, - { - { 1, 3, 8, 16 }, - {}, - ov::element::i8, - {ov::element::f32, { }, { 7.f } } - }, - false - }, - { - { - { 1, 3, 8, 16 }, - {}, - ov::element::i8, - { ov::element::f32, { }, { 70.f }} - }, - { - { 1, 3, 8, 16 }, - {}, - ov::element::i8, - { } - }, - false + {{}, {}, {{70.f}, MultiplyTransformationTestValues::model_precision}} } }, +}; +INSTANTIATE_TEST_SUITE_P( + smoke_LPT, + MultiplyTransformation, + ::testing::Combine( + ::testing::ValuesIn(model_precisions), + ::testing::ValuesIn(input_shapes), + ::testing::ValuesIn(input_precisions), + ::testing::ValuesIn(multiplyTransformationTestValues)), + MultiplyTransformation::getTestCaseName); +} // namespace multiply_channel + +namespace broadcast_right { +const std::vector> input_shapes = { + {{ 1, 3, 8, 16 }, { 1, 1, 1, 1 }} +}; + +const std::vector multiplyTransformationTestValues = { { - LayerTransformation::createParamsI8I8(), + LayerTransformation::createParamsU8I8(), { { - { 1, 3, 8, 16 }, {}, - ov::element::i8, - {ov::element::f32, { 2.f }, { }}, + MultiplyTransformationTestValues::input_precision, + {ov::element::f32, { 2.f }, { 10.f }} }, { - { 1, 3, 8, 16 }, {}, - ov::element::i8, - {ov::element::f32, { }, { 7.f } }, + MultiplyTransformationTestValues::input_precision, + {ov::element::f32, { 3.f }, { 7.f }} }, - false }, { { - { 1, 3, 8, 16 }, {}, - ov::element::i8, - {ov::element::f32, { 2.f }, { 7.f }}, + MultiplyTransformationTestValues::input_precision, + {{}, {{ 2.f }, ov::element::f32}, {}} }, { - { 1, 3, 8, 16 }, {}, - ov::element::i8, - {} + MultiplyTransformationTestValues::input_precision, + {{}, {{ 3.f }, ov::element::f32}, {}} }, - false + {{}, {}, {{ 70.f }, MultiplyTransformationTestValues::model_precision}} } }, - // Constant as input { LayerTransformation::createParamsU8I8(), { { - { 1, 3, 8, 16 }, {}, - ov::element::i8, - {ov::element::f32, { }, { 10.f }}, + MultiplyTransformationTestValues::input_precision, + {ov::element::f32, {}, { 10.f }} }, { {}, - {{ 7.f }, ov::element::f32}, // Constant as input - ov::element::f32, - {} - }, - false + MultiplyTransformationTestValues::input_precision, + {ov::element::f32, {}, { 7.f }} + } }, { { - { 1, 3, 8, 16 }, {}, - ov::element::i8, - {ov::element::f32, {}, {}}, + MultiplyTransformationTestValues::input_precision, + {} }, { {}, - {{ 70.f }, 
ov::element::f32}, - ov::element::f32, + MultiplyTransformationTestValues::input_precision, {} }, - true + {{}, {}, {{ 70.f }, MultiplyTransformationTestValues::model_precision}} } }, @@ -633,33 +568,28 @@ const std::vector multiplyTransformationTestVa LayerTransformation::createParamsU8I8(), { { - { Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic() }, {}, - ov::element::i8, - {ov::element::f32, { }, { 10.f }}, + MultiplyTransformationTestValues::input_precision, + {ov::element::f32, {{ 1.f, 2.f, 3.f }}, {{ 10.f, 11.f, 12.f }}} }, { {}, - {{ 7.f }, ov::element::f32}, // Constant as input - ov::element::f32, - {} - }, - false + MultiplyTransformationTestValues::input_precision, + {ov::element::f32, { 3.f }, { 7.f }} + } }, { { - { Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic() }, {}, - ov::element::i8, - {ov::element::f32, {}, {}}, + MultiplyTransformationTestValues::input_precision, + {{}, {{ 1.f, 2.f, 3.f }, ov::element::f32}, {}} }, { {}, - {{ 70.f }, ov::element::f32}, - ov::element::f32, - {} + MultiplyTransformationTestValues::input_precision, + {{}, {{ 3.f }, ov::element::f32}, {}} }, - true + {{}, {}, {{70.f, 77.f, 84.f}, MultiplyTransformationTestValues::model_precision}} } }, @@ -667,331 +597,220 @@ const std::vector multiplyTransformationTestVa LayerTransformation::createParamsU8I8(), { { - { 1, 3, 8, 16 }, {}, - ov::element::i8, - {ov::element::f32, { 18.f }, { 10.f }}, + MultiplyTransformationTestValues::input_precision, + {ov::element::f32, { 2.f }, { 10.f }} }, { {}, - {{ 7.f }, ov::element::f32}, - ov::element::f32, - {} - }, - false + MultiplyTransformationTestValues::input_precision, + {ov::element::f32, {}, { 7.f }} + } }, { { - { 1, 3, 8, 16 }, {}, - ov::element::i8, - {ov::element::f32, { 18.f }, { }}, + MultiplyTransformationTestValues::input_precision, + {{}, {{2.f}, ov::element::f32}, {}} }, { {}, - {{ 70.f }, ov::element::f32}, - ov::element::f32, + MultiplyTransformationTestValues::input_precision, {} }, - true + {{}, {}, {{70.f}, MultiplyTransformationTestValues::model_precision}} } }, - // Constant as input with empty shape { LayerTransformation::createParamsU8I8(), { { - { 1, 3, 8, 16 }, {}, - ov::element::i8, - {ov::element::f32, { }, { 0.2f }}, + MultiplyTransformationTestValues::input_precision, + {ov::element::f32, {}, { 10.f }} }, { {}, - {{ 7.f }, ov::element::i8}, // Constant as input - ov::element::i8, - {ov::element::f32, { }, { 0.5f }}, - }, - false + MultiplyTransformationTestValues::input_precision, + {ov::element::f32, { 3.f }, { 7.f }} + } }, { { - { 1, 3, 8, 16 }, {}, - ov::element::i8, - {ov::element::f32, {}, {}}, + MultiplyTransformationTestValues::input_precision, + {} }, { {}, - {{ 0.7f }, ov::element::f32}, - ov::element::f32, - {} + MultiplyTransformationTestValues::input_precision, + {{}, {{3.f}, ov::element::f32}, {}} }, - true + {{}, {}, {{70.f}, MultiplyTransformationTestValues::model_precision}} } }, +}; + +INSTANTIATE_TEST_SUITE_P( + smoke_LPT, + MultiplyTransformation, + ::testing::Combine( + ::testing::ValuesIn(model_precisions), + ::testing::ValuesIn(input_shapes), + ::testing::ValuesIn(input_precisions), + ::testing::ValuesIn(multiplyTransformationTestValues)), + MultiplyTransformation::getTestCaseName); +} // namespace broadcast_right - // Constant as input with 1 dimension shape +namespace broadcast_left { +const std::vector> input_shapes = { + {{ 1, 1, 1, 1 }, { 1, 3, 8, 16 }} +}; + +const std::vector multiplyTransformationTestValues = { { 
LayerTransformation::createParamsU8I8(), { { {}, - {{ 7.f, 8.f, 9.f }, ov::element::i8, ov::Shape{3}}, // Constant as input - ov::element::i8, - {ov::element::f32, { }, { {0.1f, 0.2f, 0.3f}, element::f32, ov::Shape{3} }}, + MultiplyTransformationTestValues::input_precision, + {ov::element::f32, { 2.f }, { 10.f }} }, { - { 1, 2, 3 }, {}, - ov::element::f32, - {{}, {}, {{0.2f, 0.3f, 0.4f}, element::f32, ov::Shape{3}}}, + MultiplyTransformationTestValues::input_precision, + {ov::element::f32, { 3.f }, { 7.f }} }, - false }, { { - { 1, 2, 3 }, - {}, - ov::element::f32, {}, + MultiplyTransformationTestValues::input_precision, + {{}, {{ 2.f }, ov::element::f32}, {}} }, { {}, - { {0.14f, 0.48f, 1.08f}, ov::element::f32, ov::Shape{3}}, // Constant as input - {}, - {}, + MultiplyTransformationTestValues::input_precision, + {{}, {{ 3.f }, ov::element::f32}, {}} }, - true + {{}, {}, {{ 70.f }, MultiplyTransformationTestValues::model_precision}} } }, - // Parameter as input with, Constant with 1 dimension shape { LayerTransformation::createParamsU8I8(), { { - { 1, 2, 3 }, {}, - ov::element::f32, - {{}, {}, {{0.2f, 0.3f, 0.4f}, element::f32, ov::Shape{3}}}, + MultiplyTransformationTestValues::input_precision, + {ov::element::f32, {}, { 10.f }} }, { {}, - {{ 7.f, 8.f, 9.f }, ov::element::i8, ov::Shape{3}}, // Constant as input - ov::element::i8, - {ov::element::f32, { }, { {0.1f, 0.2f, 0.3f}, element::f32, ov::Shape{3} }}, - }, - false + MultiplyTransformationTestValues::input_precision, + {ov::element::f32, {}, { 7.f }} + } }, { { - { 1, 2, 3 }, - {}, - ov::element::f32, {}, + MultiplyTransformationTestValues::input_precision, + {} }, { {}, - { {0.14f, 0.48f, 1.08f}, ov::element::f32, ov::Shape{3}}, // Constant as input - {}, - {}, + MultiplyTransformationTestValues::input_precision, + {} }, - true + {{}, {}, {{ 70.f }, MultiplyTransformationTestValues::model_precision}} } }, - // Actual: - // - // Parameter Constant Constant Constant - // |I8 |I8 |I8 |I8 - // | | | | - // Convert Convert Convert Convert - // \FP32 /FP32 |I8 /FP32 - // \ / | / - // Subtract Constant Subtract Constant - // \FP32 /FP32 \FP32 /FP32 - // \ / \ / - // Multiply Multiply - // \FP32 /FP32 - // \ / - // Multiply - // Transformed: - // - // Parameter Constant - // |I8 |I8 - // | | - // Convert Convert - // \FP32 /FP32 - // \ / - // Subtract Constant - // \FP32 /FP32 - // \ / - // Multiply - // { LayerTransformation::createParamsU8I8(), { { - { 1, 3, 8, 16 }, {}, - ov::element::i8, - { - ov::element::f32, - { {127.f}, ov::element::f32, {}, false, 1, ov::element::i8, true }, - { 0.2f } - }, + MultiplyTransformationTestValues::input_precision, + {ov::element::f32, { 2.f }, { 10.f }} }, { {}, - {{ 7.f }, ov::element::i8}, // Constant as input - ov::element::i8, - { - ov::element::f32, - { {127.f}, ov::element::f32, {}, false, 1, ov::element::i8, true }, - { 0.5f } - }, - }, - false + MultiplyTransformationTestValues::input_precision, + {ov::element::f32, {{ 3.f, 4.f, 5.f }}, {{ 7.f, 8.f, 9.f }}} + } }, { { - { 1, 3, 8, 16 }, {}, - ov::element::i8, - { - ov::element::f32, - { {127.f}, ov::element::f32, {}, false, 1, ov::element::i8, true }, - {} - }, + MultiplyTransformationTestValues::input_precision, + {{}, {{ 2.f }, ov::element::f32}, {}} }, { {}, - {{ -12.f }, ov::element::f32}, - ov::element::f32, - {} + MultiplyTransformationTestValues::input_precision, + {{}, {{ 3.f, 4.f, 5.f }, ov::element::f32}, {}} }, - true + {{}, {}, {{70.f, 80.f, 90.f}, MultiplyTransformationTestValues::model_precision}} } }, - // Actual: - // - // 
Constant Constant Parameter Constant - // |I8 |I8 |I8 |I8 - // | | | | - // Convert Convert Convert Convert - // \FP32 /FP32 |I8 /FP32 - // \ / | / - // Subtract Constant Subtract Constant - // \FP32 /FP32 \FP32 /FP32 - // \ / \ / - // Multiply Multiply - // \FP32 /FP32 - // \ / - // Multiply - // Transformed: - // - // Parameter Constant - // |I8 |I8 - // | | - // Convert Convert - // \FP32 /FP32 - // \ / - // Subtract Constant - // \FP32 /FP32 - // \ / - // Multiply - // { LayerTransformation::createParamsU8I8(), { { {}, - {{ 7.f }, ov::element::i8}, // Constant as input - ov::element::i8, - { - ov::element::f32, - { {127.f}, ov::element::f32, {}, false, 1, ov::element::i8, true }, - { 0.5f } - }, + MultiplyTransformationTestValues::input_precision, + {ov::element::f32, { 2.f }, { 10.f }} }, { - { 1, 3, 8, 16 }, {}, - ov::element::i8, - { - ov::element::f32, - { {127.f}, ov::element::f32, {}, false, 1, ov::element::i8, true }, - { 0.2f } - }, - }, - false + MultiplyTransformationTestValues::input_precision, + {ov::element::f32, {}, { 7.f }} + } }, { { - { 1, 3, 8, 16 }, {}, - ov::element::i8, - { - ov::element::f32, - { {127.f}, ov::element::f32, {}, false, 1, ov::element::i8, true }, - {} - }, + MultiplyTransformationTestValues::input_precision, + {{}, {{2.f}, ov::element::f32}, {}} }, { {}, - {{ -12.f }, ov::element::f32}, - ov::element::f32, + MultiplyTransformationTestValues::input_precision, {} }, - true + {{}, {}, {{70.f}, MultiplyTransformationTestValues::model_precision}} } }, + { LayerTransformation::createParamsU8I8(), { { {}, - {{ 7.f }, ov::element::i8}, // Constant as input - ov::element::i8, - { - ov::element::f32, - { {127.f}, ov::element::f32, {}, false, 1, ov::element::i8, true }, - { 0.5f } - }, + MultiplyTransformationTestValues::input_precision, + {ov::element::f32, {}, { 10.f }} }, { - { Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic() }, {}, - ov::element::i8, - { - ov::element::f32, - { {127.f}, ov::element::f32, {}, false, 1, ov::element::i8, true }, - { 0.2f } - }, - }, - false + MultiplyTransformationTestValues::input_precision, + {ov::element::f32, { 3.f }, { 7.f }} + } }, { { - { Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic() }, {}, - ov::element::i8, - { - ov::element::f32, - { {127.f}, ov::element::f32, {}, false, 1, ov::element::i8, true }, - {} - }, + MultiplyTransformationTestValues::input_precision, + {} }, { {}, - {{ -12.f }, ov::element::f32}, - ov::element::f32, - {} + MultiplyTransformationTestValues::input_precision, + {{}, {{3.f}, ov::element::f32}, {}} }, - true + {{}, {}, {{70.f}, MultiplyTransformationTestValues::model_precision}} } }, }; @@ -1000,7 +819,11 @@ INSTANTIATE_TEST_SUITE_P( smoke_LPT, MultiplyTransformation, ::testing::Combine( - ::testing::ValuesIn(precisions), + ::testing::ValuesIn(model_precisions), + ::testing::ValuesIn(input_shapes), + ::testing::ValuesIn(input_precisions), ::testing::ValuesIn(multiplyTransformationTestValues)), MultiplyTransformation::getTestCaseName); -} // namespace +} // namespace broadcast_left + +} // namespace \ No newline at end of file diff --git a/src/common/low_precision_transformations/tests/simple_low_precision_transformer.cpp b/src/common/low_precision_transformations/tests/simple_low_precision_transformer.cpp index a805aadb8f479a..9f39cc64de5827 100644 --- a/src/common/low_precision_transformations/tests/simple_low_precision_transformer.cpp +++ 
b/src/common/low_precision_transformations/tests/simple_low_precision_transformer.cpp @@ -12,17 +12,29 @@ #include "low_precision/markup_quantization_granularity.hpp" #include "low_precision/transformation_context.hpp" +// cleanup transformations +#include "low_precision/convert.hpp" +#include "low_precision/eliminate_fake_quantize.hpp" +#include "low_precision/fold_convert.hpp" +#include "low_precision/fold_fake_quantize.hpp" +#include "low_precision/fuse_convert.hpp" +#include "low_precision/fuse_multiply_to_fake_quantize.hpp" +#include "low_precision/fuse_subtract_to_fake_quantize.hpp" +#include "low_precision/multiply_to_group_convolution.hpp" + #include using namespace testing; using namespace ov::pass; +using namespace ov::pass::low_precision; OPENVINO_SUPPRESS_DEPRECATED_START SimpleLowPrecisionTransformer::SimpleLowPrecisionTransformer( const std::vector& precisionRestrictions, const std::vector& quantizationRestrictions, - const AttributeParameters& params) { + const AttributeParameters& params, + const bool addCleanup) { auto passConfig = get_pass_config(); // TODO: use one pass manager @@ -39,7 +51,20 @@ SimpleLowPrecisionTransformer::SimpleLowPrecisionTransformer( common = std::make_shared(passConfig); commonGraphRewrite = common->register_pass(); + cleanup = common->register_pass(); + if (addCleanup) { + ov::pass::low_precision::LayerTransformation::Params params; + cleanup->add_matcher(params); + cleanup->add_matcher(params); + cleanup->add_matcher(params); + cleanup->add_matcher(params); + cleanup->add_matcher(params); + + cleanup->add_matcher( + params, + PrecisionsRestriction::getPrecisionsByOperationType(precisionRestrictions)); + } } void SimpleLowPrecisionTransformer::transform(std::shared_ptr& model) { diff --git a/src/common/low_precision_transformations/tests/simple_low_precision_transformer.hpp b/src/common/low_precision_transformations/tests/simple_low_precision_transformer.hpp index d7f49649b01680..2c65f0b316bf46 100644 --- a/src/common/low_precision_transformations/tests/simple_low_precision_transformer.hpp +++ b/src/common/low_precision_transformations/tests/simple_low_precision_transformer.hpp @@ -19,7 +19,8 @@ class SimpleLowPrecisionTransformer : public ngraph::pass::FunctionPass{ SimpleLowPrecisionTransformer( const std::vector& precisionRestrictions = {}, const std::vector& quantizationRestrictions = {}, - const AttributeParameters& params = AttributeParameters()); + const AttributeParameters& params = AttributeParameters(), + const bool addCleanup = false); template void add(const TestTransformationParams& params) { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/multiply_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/multiply_transformation.cpp index 26846b5f97cb62..2088d4db87696a 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/multiply_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/multiply_transformation.cpp @@ -11,7 +11,7 @@ #include #include -#include "lpt_ngraph_functions/multiply_function.hpp" +#include "lpt_ngraph_functions/multiply_partial_function.hpp" #include "ngraph_functions/subgraph_builders.hpp" @@ -56,7 +56,7 @@ void MultiplyTransformation::SetUp() { MultiplyTestValues param; std::tie(precision, inputShape, targetDevice, param) = this->GetParam(); - function = ngraph::builder::subgraph::MultiplyFunction::getOriginal( + function = 
ngraph::builder::subgraph::MultiplyPartialFunction::get( precision, inputShape, param.broadcast1, diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/multiply_function.hpp b/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/multiply_function.hpp index b5b4c22e5fca6f..553a34b02d1533 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/multiply_function.hpp +++ b/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/multiply_function.hpp @@ -17,42 +17,39 @@ namespace subgraph { class MultiplyBranch { public: + MultiplyBranch(const PartialShape& inputShape, + const ngraph::builder::subgraph::Constant& constant, + const ngraph::element::Type& input_precision, + const ngraph::builder::subgraph::DequantizationOperations& dequantization, + const ngraph::builder::subgraph::FakeQuantizeOnData& fake_quantize) + : inputShape(inputShape), + constant(constant), + input_precision(input_precision), + dequantization(dequantization), + fake_quantize(fake_quantize) {} + PartialShape inputShape; ngraph::builder::subgraph::Constant constant; - ngraph::element::Type precisionBeforeDequantization; + ngraph::element::Type input_precision; ngraph::builder::subgraph::DequantizationOperations dequantization; + ngraph::builder::subgraph::FakeQuantizeOnData fake_quantize; }; -inline std::ostream& operator<<(std::ostream& out, const MultiplyBranch& branch) { - return out << "_" << branch.constant << "_" << branch.precisionBeforeDequantization << "_" << branch.dequantization; -} - class MultiplyValues { public: + MultiplyValues(const MultiplyBranch& branch1, + const MultiplyBranch& branch2, + const ngraph::builder::subgraph::DequantizationOperations& after_dequantization) + : branch1(branch1), branch2(branch2), after_dequantization(after_dequantization) {} + MultiplyBranch branch1; MultiplyBranch branch2; - bool isDequantization; + ngraph::builder::subgraph::DequantizationOperations after_dequantization; }; -inline std::ostream& operator<<(std::ostream& out, const MultiplyValues& values) { - return out << "_" << values.branch1 << "_" << values.branch2 << (values.isDequantization ? 
"_isDequantization" : ""); -} - class MultiplyFunction : public ElementwiseFunction { public: - static std::shared_ptr get( - const element::Type precision, - const MultiplyValues& actualValues); - - static std::shared_ptr getOriginal( - const ngraph::element::Type precision, - const ngraph::PartialShape& inputShape, - const bool broadcast1, - const ngraph::builder::subgraph::FakeQuantizeOnData& fq1, - const bool broadcast2, - const ngraph::builder::subgraph::FakeQuantizeOnData& fq2, - const ngraph::builder::subgraph::FakeQuantizeOnData& fqAfter, - const bool secondInputIsConstant = false); + static std::shared_ptr get(const element::Type model_precision, const MultiplyValues& actualValues); }; } // namespace subgraph diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/multiply_partial_function.hpp b/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/multiply_partial_function.hpp new file mode 100644 index 00000000000000..878554dd1df4e5 --- /dev/null +++ b/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/multiply_partial_function.hpp @@ -0,0 +1,60 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include + +#include "elementwise_function.hpp" +#include "lpt_ngraph_functions/common/constant.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" + +namespace ngraph { +namespace builder { +namespace subgraph { + +class MultiplyPartialBranch { +public: + PartialShape inputShape; + ngraph::builder::subgraph::Constant constant; + ngraph::element::Type precisionBeforeDequantization; + ngraph::builder::subgraph::DequantizationOperations dequantization; +}; + +inline std::ostream& operator<<(std::ostream& out, const MultiplyPartialBranch& branch) { + return out << "_" << branch.constant << "_" << branch.precisionBeforeDequantization << "_" << branch.dequantization; +} + +class MultiplyPartialValues { +public: + MultiplyPartialBranch branch1; + MultiplyPartialBranch branch2; + bool isDequantization; +}; + +inline std::ostream& operator<<(std::ostream& out, const MultiplyPartialValues& values) { + return out << "_" << values.branch1 << "_" << values.branch2 << (values.isDequantization ? 
"_isDequantization" : ""); +} + +class MultiplyPartialFunction : public ElementwiseFunction { +public: + static std::shared_ptr get( + const element::Type precision, + const MultiplyPartialValues& actualValues); + + static std::shared_ptr get( + const ngraph::element::Type precision, + const ngraph::PartialShape& inputShape, + const bool broadcast1, + const ngraph::builder::subgraph::FakeQuantizeOnData& fq1, + const bool broadcast2, + const ngraph::builder::subgraph::FakeQuantizeOnData& fq2, + const ngraph::builder::subgraph::FakeQuantizeOnData& fqAfter, + const bool secondInputIsConstant = false); +}; + +} // namespace subgraph +} // namespace builder +} // namespace ngraph diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/multiply_function.cpp b/src/tests/ngraph_helpers/lpt_ngraph_functions/src/multiply_function.cpp index 4628acb8f27b8b..e4ff86359f86db 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/multiply_function.cpp +++ b/src/tests/ngraph_helpers/lpt_ngraph_functions/src/multiply_function.cpp @@ -4,6 +4,8 @@ #include "lpt_ngraph_functions/multiply_function.hpp" +#include + #include #include #include "ngraph_functions/subgraph_builders.hpp" @@ -18,49 +20,52 @@ namespace ngraph { namespace builder { namespace subgraph { +namespace multiply_function { struct BranchNodes { std::shared_ptr input; std::shared_ptr dequantization; }; -BranchNodes getBranch(const MultiplyBranch& branch) { - const std::shared_ptr parent = branch.constant.empty() ? - std::make_shared(branch.precisionBeforeDequantization, branch.inputShape) : +BranchNodes makeBranch(const MultiplyBranch& branch) { + std::shared_ptr parent = branch.constant.empty() ? + std::make_shared(branch.input_precision, branch.inputShape) : std::dynamic_pointer_cast(std::make_shared( branch.constant.outPrecision, branch.constant.shape, branch.constant.values)); + if (!branch.fake_quantize.empty()) { + if ((parent->get_output_element_type(0) != element::f32) && + (parent->get_output_element_type(0) != element::f16)) { + throw std::runtime_error("unexpected precision before FakeQuantize"); + } + parent = makeFakeQuantize(parent, parent->get_output_element_type(0), branch.fake_quantize); + } + const auto dequantization = makeDequantization(parent, branch.dequantization); + return {parent, dequantization}; } +} // namespace multiply_function -std::shared_ptr MultiplyFunction::get( - const element::Type precision, - const MultiplyValues& actualValues) { - auto branch1Structure = actualValues.branch1; - branch1Structure.precisionBeforeDequantization = precision; - branch1Structure.dequantization.multiply.outPrecision = precision; - auto branch2Structure = actualValues.branch2; - branch2Structure.precisionBeforeDequantization = precision; - branch2Structure.dequantization.multiply.outPrecision = precision; - - const BranchNodes branchNodes1 = getBranch(actualValues.branch1); - const BranchNodes branchNodes2 = getBranch(actualValues.branch2); +std::shared_ptr MultiplyFunction::get(const element::Type model_precision, const MultiplyValues& actualValues) { + const auto branchNodes1 = multiply_function::makeBranch(actualValues.branch1); + const auto branchNodes2 = multiply_function::makeBranch(actualValues.branch2); - auto multiplyOriginal = opset1::Multiply( + // branchNodes1.dequantization & branchNodes2.dequantization can have different input types + std::shared_ptr parent = std::make_shared>( + std::vector{ element::f32, element::f32 }, + std::vector{ actualValues.after_dequantization.empty() ? 
model_precision : element::f32 }, ov::op::TemporaryReplaceOutputType(branchNodes1.dequantization, element::f32).get(), ov::op::TemporaryReplaceOutputType(branchNodes2.dequantization, element::f32).get()); - const std::shared_ptr multiply = std::make_shared>( - multiplyOriginal, - std::vector{element::f32, element::f32}, - std::vector{precision}); - auto& rtInfo = multiply->get_rt_info(); + auto& rtInfo = parent->get_rt_info(); rtInfo["Variant::std::string"] = "multiply"; - multiply->set_friendly_name("output"); - ngraph::ResultVector results{ std::make_shared(multiply) }; + parent = makeDequantization(parent, actualValues.after_dequantization); + parent->set_friendly_name("output"); + + ngraph::ResultVector results{ std::make_shared(parent) }; ngraph::ParameterVector inputs; if (is_type(branchNodes1.input)) { @@ -73,78 +78,6 @@ std::shared_ptr MultiplyFunction::get( return std::make_shared(results, inputs, "MultiplyTransformation"); } -std::shared_ptr MultiplyFunction::getOriginal( - const ngraph::element::Type precision, - const ngraph::PartialShape& inputShape, - const bool broadcast1, - const ngraph::builder::subgraph::FakeQuantizeOnData& fq1, - const bool broadcast2, - const ngraph::builder::subgraph::FakeQuantizeOnData& fq2, - const ngraph::builder::subgraph::FakeQuantizeOnData& fqAfter, - const bool secondInputIsConstant) { - auto inputShape1 = inputShape; - if (broadcast1) { - inputShape1[2] = 1; - inputShape1[3] = 1; - } - - ngraph::PartialShape inputShape2; - if (secondInputIsConstant) { - inputShape2 = {}; - } else { - inputShape2 = inputShape; - if (broadcast2) { - inputShape2[2] = 1; - inputShape2[3] = 1; - } - } - - const auto input1 = std::make_shared(precision, inputShape1); - const auto fakeQuantize1 = fq1.empty() ? - nullptr : - ngraph::builder::makeFakeQuantize( - input1, precision, fq1.quantizationLevel, fq1.constantShape, - fq1.inputLowValues, fq1.inputHighValues, fq1.outputLowValues, fq1.outputHighValues); - if (fakeQuantize1 != nullptr) { - fakeQuantize1->set_friendly_name("fakeQuantize1"); - } - - const std::shared_ptr input2 = secondInputIsConstant ? - makeConstant(element::f32, Shape{}, std::vector{0.5f}, false) : - std::make_shared(precision, inputShape2); - const auto fakeQuantize2 = fq2.empty() ? - nullptr : - ngraph::builder::makeFakeQuantize( - input2, precision, fq2.quantizationLevel, fq2.constantShape, - fq2.inputLowValues, fq2.inputHighValues, fq2.outputLowValues, fq2.outputHighValues); - if (fakeQuantize2 != nullptr) { - fakeQuantize2->set_friendly_name("fakeQuantize2"); - } - - const auto multiply = std::make_shared( - fq1.empty() ? input1 : fakeQuantize1, - fq2.empty() ? input2 : fakeQuantize2); - multiply->set_friendly_name("multiply"); - - auto const fakeQuantizeAfter = fqAfter.empty() ? - nullptr : - makeFakeQuantize(multiply, precision, fqAfter); - if (fakeQuantizeAfter != nullptr) { - fakeQuantizeAfter->set_friendly_name("fakeQuantizeAfter"); - } - - const std::shared_ptr result = fakeQuantizeAfter == nullptr ? std::dynamic_pointer_cast(multiply) : fakeQuantizeAfter; - ngraph::ResultVector results{ std::make_shared(result) }; - std::shared_ptr function = std::make_shared( - results, - secondInputIsConstant ? 
- ngraph::ParameterVector{ input1 } : - ngraph::ParameterVector{ input1, ngraph::as_type_ptr(input2) }, - "MultiplyTransformation"); - - return function; -} - } // namespace subgraph } // namespace builder } // namespace ngraph diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/multiply_partial_function.cpp b/src/tests/ngraph_helpers/lpt_ngraph_functions/src/multiply_partial_function.cpp new file mode 100644 index 00000000000000..e41d340a634d61 --- /dev/null +++ b/src/tests/ngraph_helpers/lpt_ngraph_functions/src/multiply_partial_function.cpp @@ -0,0 +1,154 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "lpt_ngraph_functions/multiply_partial_function.hpp" + +#include + +#include +#include +#include "ngraph_functions/subgraph_builders.hpp" +#include "low_precision/network_helper.hpp" + +#include "lpt_ngraph_functions/common/builders.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" + +using namespace ov::pass::low_precision; + +namespace ngraph { +namespace builder { +namespace subgraph { + +namespace multiply_partial_function { +struct BranchNodes { + std::shared_ptr input; + std::shared_ptr dequantization; +}; + +BranchNodes getBranch(const MultiplyPartialBranch& branch) { + const std::shared_ptr parent = branch.constant.empty() ? + std::make_shared(branch.precisionBeforeDequantization, branch.inputShape) : + std::dynamic_pointer_cast(std::make_shared( + branch.constant.outPrecision, + branch.constant.shape, + branch.constant.values)); + + const auto dequantization = makeDequantization(parent, branch.dequantization); + return {parent, dequantization}; +} +} // namespace multiply_partial_function + +std::shared_ptr MultiplyPartialFunction::get( + const element::Type precision, + const MultiplyPartialValues& actualValues) { + auto branch1Structure = actualValues.branch1; + branch1Structure.precisionBeforeDequantization = precision; + branch1Structure.dequantization.multiply.outPrecision = precision; + auto branch2Structure = actualValues.branch2; + branch2Structure.precisionBeforeDequantization = precision; + branch2Structure.dequantization.multiply.outPrecision = precision; + + const auto branchNodes1 = multiply_partial_function::getBranch(actualValues.branch1); + const auto branchNodes2 = multiply_partial_function::getBranch(actualValues.branch2); + + auto multiplyOriginal = opset1::Multiply( + ov::op::TemporaryReplaceOutputType(branchNodes1.dequantization, element::f32).get(), + ov::op::TemporaryReplaceOutputType(branchNodes2.dequantization, element::f32).get()); + + const std::shared_ptr multiply = std::make_shared>( + multiplyOriginal, + std::vector{element::f32, element::f32}, + std::vector{precision}); + auto& rtInfo = multiply->get_rt_info(); + rtInfo["Variant::std::string"] = "multiply"; + multiply->set_friendly_name("output"); + + ngraph::ResultVector results{ std::make_shared(multiply) }; + + ngraph::ParameterVector inputs; + if (is_type(branchNodes1.input)) { + inputs.push_back(std::dynamic_pointer_cast(branchNodes1.input)); + } + if (is_type(branchNodes2.input)) { + inputs.push_back(std::dynamic_pointer_cast(branchNodes2.input)); + } + + return std::make_shared(results, inputs, "MultiplyTransformation"); +} + +std::shared_ptr MultiplyPartialFunction::get( + const ngraph::element::Type precision, + const ngraph::PartialShape& inputShape, + const bool broadcast1, + const ngraph::builder::subgraph::FakeQuantizeOnData& fq1, + const bool broadcast2, + const 
ngraph::builder::subgraph::FakeQuantizeOnData& fq2, + const ngraph::builder::subgraph::FakeQuantizeOnData& fqAfter, + const bool secondInputIsConstant) { + auto inputShape1 = inputShape; + if (broadcast1) { + inputShape1[2] = 1; + inputShape1[3] = 1; + } + + ngraph::PartialShape inputShape2; + if (secondInputIsConstant) { + inputShape2 = {}; + } else { + inputShape2 = inputShape; + if (broadcast2) { + inputShape2[2] = 1; + inputShape2[3] = 1; + } + } + + const auto input1 = std::make_shared(precision, inputShape1); + const auto fakeQuantize1 = fq1.empty() ? + nullptr : + ngraph::builder::makeFakeQuantize( + input1, precision, fq1.quantizationLevel, fq1.constantShape, + fq1.inputLowValues, fq1.inputHighValues, fq1.outputLowValues, fq1.outputHighValues); + if (fakeQuantize1 != nullptr) { + fakeQuantize1->set_friendly_name("fakeQuantize1"); + } + + const std::shared_ptr input2 = secondInputIsConstant ? + makeConstant(element::f32, Shape{}, std::vector{0.5f}, false) : + std::make_shared(precision, inputShape2); + const auto fakeQuantize2 = fq2.empty() ? + nullptr : + ngraph::builder::makeFakeQuantize( + input2, precision, fq2.quantizationLevel, fq2.constantShape, + fq2.inputLowValues, fq2.inputHighValues, fq2.outputLowValues, fq2.outputHighValues); + if (fakeQuantize2 != nullptr) { + fakeQuantize2->set_friendly_name("fakeQuantize2"); + } + + const auto multiply = std::make_shared( + fq1.empty() ? input1 : fakeQuantize1, + fq2.empty() ? input2 : fakeQuantize2); + multiply->set_friendly_name("multiply"); + + auto const fakeQuantizeAfter = fqAfter.empty() ? + nullptr : + makeFakeQuantize(multiply, precision, fqAfter); + if (fakeQuantizeAfter != nullptr) { + fakeQuantizeAfter->set_friendly_name("fakeQuantizeAfter"); + } + + const std::shared_ptr result = fakeQuantizeAfter == nullptr ? std::dynamic_pointer_cast(multiply) : fakeQuantizeAfter; + ngraph::ResultVector results{ std::make_shared(result) }; + std::shared_ptr function = std::make_shared( + results, + secondInputIsConstant ? 
+ ngraph::ParameterVector{ input1 } : + ngraph::ParameterVector{ input1, ngraph::as_type_ptr(input2) }, + "MultiplyTransformation"); + + return function; +} + +} // namespace subgraph +} // namespace builder +} // namespace ngraph From d5ea8d5a9225e410628ec821ee5659aa4c1a97fd Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Tue, 3 Oct 2023 18:43:45 +0400 Subject: [PATCH 048/257] Explicitly set Python3_FIND_STRATEGY (#20215) --- cmake/developer_package/IEDevScriptsConfig.cmake | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cmake/developer_package/IEDevScriptsConfig.cmake b/cmake/developer_package/IEDevScriptsConfig.cmake index 6a3bd84b46516a..997f85b3a407c2 100644 --- a/cmake/developer_package/IEDevScriptsConfig.cmake +++ b/cmake/developer_package/IEDevScriptsConfig.cmake @@ -37,6 +37,9 @@ function(set_ci_build_number) endforeach() endfunction() +# explicitly configure FindPython3.cmake to find python3 in virtual environment first +ov_set_if_not_defined(Python3_FIND_STRATEGY LOCATION) + include(features) set_ci_build_number() From 379ff625751f5f1ee0f26d3882d3f580f9924276 Mon Sep 17 00:00:00 2001 From: Sofya Balandina Date: Tue, 3 Oct 2023 17:07:10 +0100 Subject: [PATCH 049/257] [apiConformance] Remove 1.0 api tests (#20185) --- .../executable_network/exec_graph_info.cpp | 29 --- .../executable_network/exec_network_base.cpp | 42 ----- .../executable_network/get_metric.cpp | 74 -------- .../behavior/executable_network/locale.cpp | 15 -- .../src/behavior/infer_request/callback.cpp | 17 -- .../behavior/infer_request/cancellation.cpp | 17 -- .../src/behavior/infer_request/io_blob.cpp | 20 -- .../behavior/infer_request/multitheading.cpp | 22 --- .../behavior/infer_request/perf_counters.cpp | 18 -- .../infer_request/set_blob_by_type.cpp | 24 --- .../src/behavior/infer_request/wait.cpp | 20 -- .../src/behavior/plugin/caching_tests.cpp | 62 ------- .../behavior/plugin/configuration_tests.cpp | 173 ------------------ .../src/behavior/plugin/core_integration.cpp | 88 --------- .../behavior/plugin/core_threading_tests.cpp | 36 ---- .../src/behavior/plugin/life_time.cpp | 33 ---- .../src/behavior/plugin/set_preprocess.cpp | 66 ------- .../src/behavior/plugin/version.cpp | 13 -- 18 files changed, 769 deletions(-) delete mode 100644 src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/executable_network/exec_graph_info.cpp delete mode 100644 src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/executable_network/exec_network_base.cpp delete mode 100644 src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/executable_network/get_metric.cpp delete mode 100644 src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/executable_network/locale.cpp delete mode 100644 src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/infer_request/callback.cpp delete mode 100644 src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/infer_request/cancellation.cpp delete mode 100644 src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/infer_request/io_blob.cpp delete mode 100644 src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/infer_request/multitheading.cpp delete mode 100644 src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/infer_request/perf_counters.cpp delete mode 100644 
src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/infer_request/set_blob_by_type.cpp delete mode 100644 src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/infer_request/wait.cpp delete mode 100644 src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/plugin/caching_tests.cpp delete mode 100644 src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/plugin/configuration_tests.cpp delete mode 100644 src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/plugin/core_integration.cpp delete mode 100644 src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/plugin/core_threading_tests.cpp delete mode 100644 src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/plugin/life_time.cpp delete mode 100644 src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/plugin/set_preprocess.cpp delete mode 100644 src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/plugin/version.cpp diff --git a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/executable_network/exec_graph_info.cpp b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/executable_network/exec_graph_info.cpp deleted file mode 100644 index 67083b18eec0fc..00000000000000 --- a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/executable_network/exec_graph_info.cpp +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include -#include "behavior/executable_network/exec_graph_info.hpp" -#include "api_conformance_helpers.hpp" - -namespace { -using namespace ExecutionGraphTests; - -INSTANTIATE_TEST_SUITE_P(ie_executable_network, ExecGraphSerializationTest, - ::testing::ValuesIn(ov::test::conformance::return_all_possible_device_combination()), - ExecGraphSerializationTest::getTestCaseName); - -const std::vector execGraphInfoElemTypes = { - InferenceEngine::Precision::FP32 -}; - -INSTANTIATE_TEST_SUITE_P(ie_executable_network, ExecGraphUniqueNodeNames, - ::testing::Combine( - ::testing::ValuesIn(execGraphInfoElemTypes), - ::testing::Values(InferenceEngine::SizeVector({1, 2, 5, 5})), - ::testing::ValuesIn(ov::test::conformance::return_all_possible_device_combination())), - ExecGraphUniqueNodeNames::getTestCaseName); - -} // namespace - diff --git a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/executable_network/exec_network_base.cpp b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/executable_network/exec_network_base.cpp deleted file mode 100644 index c0d33303b8a9ff..00000000000000 --- a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/executable_network/exec_network_base.cpp +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior/executable_network/exec_network_base.hpp" -#include "ie_plugin_config.hpp" - -#include "api_conformance_helpers.hpp" - -using namespace BehaviorTestsDefinitions; -using namespace ov::test::conformance; - -namespace { - INSTANTIATE_TEST_SUITE_P(ie_executable_network, ExecutableNetworkBaseTest, - ::testing::Combine( - 
::testing::ValuesIn(return_all_possible_device_combination()), - ::testing::Values(ie_config)), - ExecutableNetworkBaseTest::getTestCaseName); - - const std::vector execNetBaseElemTypes = { - InferenceEngine::Precision::FP32, - InferenceEngine::Precision::FP16, - InferenceEngine::Precision::FP64, - InferenceEngine::Precision::BF16, - InferenceEngine::Precision::U8, - InferenceEngine::Precision::U16, - InferenceEngine::Precision::U32, - InferenceEngine::Precision::U64, - InferenceEngine::Precision::I8, - InferenceEngine::Precision::I16, - InferenceEngine::Precision::I32, - InferenceEngine::Precision::I64, - InferenceEngine::Precision::BOOL, - }; - - INSTANTIATE_TEST_SUITE_P(ie_executable_network, ExecNetSetPrecision, - ::testing::Combine( - ::testing::ValuesIn(execNetBaseElemTypes), - ::testing::ValuesIn(return_all_possible_device_combination()), - ::testing::Values(ie_config)), - ExecNetSetPrecision::getTestCaseName); -} // namespace diff --git a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/executable_network/get_metric.cpp b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/executable_network/get_metric.cpp deleted file mode 100644 index 5aecaf50ddf9c9..00000000000000 --- a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/executable_network/get_metric.cpp +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior/executable_network/get_metric.hpp" -#include "api_conformance_helpers.hpp" - -using namespace BehaviorTestsDefinitions; -using namespace InferenceEngine::PluginConfigParams; -using namespace ov::test::conformance; - -namespace { - -INSTANTIATE_TEST_SUITE_P( - ie_executable_network, IEClassImportExportTestP, - ::testing::ValuesIn(return_all_possible_device_combination())); - -// -// Executable Network GetMetric -// - -INSTANTIATE_TEST_SUITE_P( - ie_executable_network, IEClassExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS, - ::testing::ValuesIn(return_all_possible_device_combination())); - -INSTANTIATE_TEST_SUITE_P( - ie_executable_network, IEClassExecutableNetworkGetMetricTest_SUPPORTED_METRICS, - ::testing::ValuesIn(return_all_possible_device_combination())); - -INSTANTIATE_TEST_SUITE_P( - ie_executable_network, IEClassExecutableNetworkGetMetricTest_NETWORK_NAME, - ::testing::ValuesIn(return_all_possible_device_combination())); - -INSTANTIATE_TEST_SUITE_P( - ie_executable_network, IEClassExecutableNetworkGetMetricTest_OPTIMAL_NUMBER_OF_INFER_REQUESTS, - ::testing::ValuesIn(return_all_possible_device_combination())); - -INSTANTIATE_TEST_SUITE_P( - ie_executable_network, IEClassExecutableNetworkGetMetricTest_ThrowsUnsupported, - ::testing::ValuesIn(return_all_possible_device_combination())); - -// -// Executable Network GetConfig / SetConfig -// - -INSTANTIATE_TEST_SUITE_P( - ie_executable_network, IEClassExecutableNetworkGetConfigTest, - ::testing::ValuesIn(return_all_possible_device_combination())); - -INSTANTIATE_TEST_SUITE_P( - ie_executable_network, IEClassExecutableNetworkSetConfigTest, - ::testing::ValuesIn(return_all_possible_device_combination())); - -// -// Hetero Executable Network GetMetric -// - -INSTANTIATE_TEST_SUITE_P( - ie_executable_network, IEClassHeteroExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS, - ::testing::Values(targetDevice)); - -INSTANTIATE_TEST_SUITE_P( - smoke_IEClassHeteroExecutableNetworkGetMetricTest, 
IEClassHeteroExecutableNetworkGetMetricTest_SUPPORTED_METRICS, - ::testing::Values(targetDevice)); - -INSTANTIATE_TEST_SUITE_P( - ie_executable_network, IEClassHeteroExecutableNetworkGetMetricTest_NETWORK_NAME, - ::testing::Values(targetDevice)); - -INSTANTIATE_TEST_SUITE_P( - ie_executable_network, IEClassHeteroExecutableNetworkGetMetricTest_TARGET_FALLBACK, - ::testing::Values(targetDevice)); - -} // namespace diff --git a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/executable_network/locale.cpp b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/executable_network/locale.cpp deleted file mode 100644 index d1d3647779594b..00000000000000 --- a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/executable_network/locale.cpp +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior/executable_network/locale.hpp" -#include "api_conformance_helpers.hpp" - -using namespace BehaviorTestsDefinitions; -namespace { - INSTANTIATE_TEST_SUITE_P(ie_executable_network, CustomLocaleTest, - ::testing::Combine( - ::testing::Values("ru_RU.UTF-8"), - ::testing::ValuesIn(ov::test::conformance::return_all_possible_device_combination())), - CustomLocaleTest::getTestCaseName); -} // namespace diff --git a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/infer_request/callback.cpp b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/infer_request/callback.cpp deleted file mode 100644 index bf4cf1c9b1a6b6..00000000000000 --- a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/infer_request/callback.cpp +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior/infer_request/callback.hpp" -#include "api_conformance_helpers.hpp" - -namespace { -using namespace BehaviorTestsDefinitions; -using namespace ov::test::conformance; - -INSTANTIATE_TEST_SUITE_P(ie_infer_request, InferRequestCallbackTests, - ::testing::Combine( - ::testing::ValuesIn(return_all_possible_device_combination()), - ::testing::Values(ie_config)), - InferRequestCallbackTests::getTestCaseName); -} // namespace diff --git a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/infer_request/cancellation.cpp b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/infer_request/cancellation.cpp deleted file mode 100644 index d061b866f2e3c9..00000000000000 --- a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/infer_request/cancellation.cpp +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior/infer_request/cancellation.hpp" -#include "api_conformance_helpers.hpp" - -namespace { -using namespace BehaviorTestsDefinitions; -using namespace ov::test::conformance; - -INSTANTIATE_TEST_SUITE_P(ie_infer_request, InferRequestCancellationTests, - ::testing::Combine( - ::testing::ValuesIn(return_all_possible_device_combination()), - ::testing::Values(ie_config)), - InferRequestCancellationTests::getTestCaseName); -} // namespace diff --git a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/infer_request/io_blob.cpp 
b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/infer_request/io_blob.cpp deleted file mode 100644 index 5075684542e10b..00000000000000 --- a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/infer_request/io_blob.cpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include - -#include "behavior/infer_request/io_blob.hpp" -#include "ie_plugin_config.hpp" -#include "api_conformance_helpers.hpp" - -namespace { -using namespace BehaviorTestsDefinitions; -using namespace ov::test::conformance; - -INSTANTIATE_TEST_SUITE_P(ie_infer_request, InferRequestIOBBlobTest, - ::testing::Combine( - ::testing::ValuesIn(return_all_possible_device_combination()), - ::testing::Values(ie_config)), - InferRequestIOBBlobTest::getTestCaseName); -} // namespace diff --git a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/infer_request/multitheading.cpp b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/infer_request/multitheading.cpp deleted file mode 100644 index 4d83d081e407bd..00000000000000 --- a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/infer_request/multitheading.cpp +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include - -#include "behavior/infer_request/multithreading.hpp" -#include "ie_plugin_config.hpp" - -#include "api_conformance_helpers.hpp" - -namespace { -using namespace ov::test::conformance; -using namespace BehaviorTestsDefinitions; - -INSTANTIATE_TEST_SUITE_P(ie_infer_request, InferRequestMultithreadingTests, - ::testing::Combine( - ::testing::ValuesIn(return_all_possible_device_combination()), - ::testing::Values(ie_config)), - InferRequestMultithreadingTests::getTestCaseName); - -} // namespace diff --git a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/infer_request/perf_counters.cpp b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/infer_request/perf_counters.cpp deleted file mode 100644 index bb8eb6c13680e5..00000000000000 --- a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/infer_request/perf_counters.cpp +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior/infer_request/perf_counters.hpp" -#include "api_conformance_helpers.hpp" - -namespace { -using namespace ov::test::conformance; -using namespace BehaviorTestsDefinitions; - -INSTANTIATE_TEST_SUITE_P(ie_infer_request, InferRequestPerfCountersTest, - ::testing::Combine( - ::testing::ValuesIn(return_all_possible_device_combination()), - ::testing::Values(ie_config)), - InferRequestPerfCountersTest::getTestCaseName); - -} // namespace diff --git a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/infer_request/set_blob_by_type.cpp b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/infer_request/set_blob_by_type.cpp deleted file mode 100644 index 142707332266f8..00000000000000 --- a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/infer_request/set_blob_by_type.cpp +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// 
SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior/infer_request/set_blob_by_type.hpp" -#include "api_conformance_helpers.hpp" - -namespace { -using namespace ov::test::conformance; -using namespace BehaviorTestsDefinitions; - -const std::vector setBlobTypes = { - FuncTestUtils::BlobType::Compound, - FuncTestUtils::BlobType::Batched, - FuncTestUtils::BlobType::Memory, - FuncTestUtils::BlobType::Remote, -}; - -INSTANTIATE_TEST_SUITE_P(ie_infer_request, InferRequestSetBlobByType, - ::testing::Combine(::testing::ValuesIn(setBlobTypes), - ::testing::ValuesIn(return_all_possible_device_combination()), - ::testing::Values(ie_config)), - InferRequestSetBlobByType::getTestCaseName); -} // namespace diff --git a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/infer_request/wait.cpp b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/infer_request/wait.cpp deleted file mode 100644 index dec1e4d09999a1..00000000000000 --- a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/infer_request/wait.cpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include - -#include "behavior/infer_request/wait.hpp" -#include "ie_plugin_config.hpp" -#include "api_conformance_helpers.hpp" - -namespace { -using namespace ov::test::conformance; -using namespace BehaviorTestsDefinitions; - -INSTANTIATE_TEST_SUITE_P(ie_infer_request, InferRequestWaitTests, - ::testing::Combine( - ::testing::ValuesIn(return_all_possible_device_combination()), - ::testing::Values(ie_config)), - InferRequestWaitTests::getTestCaseName); -} // namespace diff --git a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/plugin/caching_tests.cpp b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/plugin/caching_tests.cpp deleted file mode 100644 index 661e39a7153ba1..00000000000000 --- a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/plugin/caching_tests.cpp +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior/plugin/caching_tests.hpp" -#include -#include "api_conformance_helpers.hpp" - -namespace { -using namespace ov::test::conformance; -using namespace LayerTestsDefinitions; -using namespace ngraph; - -static const std::vector precisionsTemplate = { - ov::element::f64, - ov::element::f32, - ov::element::f16, - ov::element::i64, - ov::element::i32, - ov::element::i16, - ov::element::i8, - ov::element::u64, - ov::element::u32, - ov::element::u16, - ov::element::u8, - ov::element::boolean, -}; - -static const std::vector batchSizesTemplate = { - 1, 2 -}; - -static const std::vector numericPrecisionsTemplate(precisionsTemplate.begin(), - precisionsTemplate.end() - 1); - -static const std::vector floatingPointPrecisionsTemplate(precisionsTemplate.begin(), - precisionsTemplate.begin() + 3); - -INSTANTIATE_TEST_SUITE_P(ie_plugin_any_type, LoadNetworkCacheTestBase, - ::testing::Combine( - ::testing::ValuesIn(LoadNetworkCacheTestBase::getAnyTypeOnlyFunctions()), - ::testing::ValuesIn(precisionsTemplate), - ::testing::ValuesIn(batchSizesTemplate), - ::testing::ValuesIn(return_all_possible_device_combination())), - LoadNetworkCacheTestBase::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(ie_plugin_numeric, LoadNetworkCacheTestBase, - ::testing::Combine( - 
::testing::ValuesIn(LoadNetworkCacheTestBase::getNumericTypeOnlyFunctions()), - ::testing::ValuesIn(numericPrecisionsTemplate), - ::testing::ValuesIn(batchSizesTemplate), - ::testing::ValuesIn(return_all_possible_device_combination())), - LoadNetworkCacheTestBase::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(ie_plugin_float, LoadNetworkCacheTestBase, - ::testing::Combine( - ::testing::ValuesIn(LoadNetworkCacheTestBase::getFloatingPointOnlyFunctions()), - ::testing::ValuesIn(floatingPointPrecisionsTemplate), - ::testing::ValuesIn(batchSizesTemplate), - ::testing::ValuesIn(return_all_possible_device_combination())), - LoadNetworkCacheTestBase::getTestCaseName); -} // namespace diff --git a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/plugin/configuration_tests.cpp b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/plugin/configuration_tests.cpp deleted file mode 100644 index 90ed2d3be87c1a..00000000000000 --- a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/plugin/configuration_tests.cpp +++ /dev/null @@ -1,173 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ie_plugin_config.hpp" -#include "ie_system_conf.h" -#include "behavior/plugin/configuration_tests.hpp" -#include "api_conformance_helpers.hpp" - -using namespace BehaviorTestsDefinitions; -using namespace ov::test::conformance; - - -namespace { - #if (defined(__APPLE__) || defined(_WIN32)) - auto defaultBindThreadParameter = InferenceEngine::Parameter{[] { - auto numaNodes = InferenceEngine::getAvailableNUMANodes(); - if (numaNodes.size() > 1) { - return std::string{CONFIG_VALUE(NUMA)}; - } else { - return std::string{CONFIG_VALUE(NO)}; - } - }()}; - #else - auto defaultBindThreadParameter = InferenceEngine::Parameter{std::string{CONFIG_VALUE(YES)}}; - #endif - INSTANTIATE_TEST_SUITE_P( - ie_plugin, - DefaultConfigurationTest, - ::testing::Combine( - ::testing::ValuesIn(return_all_possible_device_combination(false)), - ::testing::Values(DefaultParameter{CONFIG_KEY(PERF_COUNT), CONFIG_VALUE(NO)})), - DefaultConfigurationTest::getTestCaseName); - - const std::vector> pluginConfigs = { - {{}}, - {{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT}}, - {{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}}, - {{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "1"}}, - // check that hints doesn't override customer value (now for streams and later for other config opts) - }; - - const std::vector> pluginMultiConfigs = { - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}, - 
{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "1"}} - }; - - INSTANTIATE_TEST_SUITE_P(ie_plugin, CorrectConfigTests, - ::testing::Combine( - ::testing::Values(ov::test::conformance::targetDevice), - ::testing::ValuesIn(pluginConfigs)), - CorrectConfigTests::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(ie_plugin_Hetero, CorrectConfigTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_HETERO), - ::testing::ValuesIn(generate_configs(ov::test::utils::DEVICE_HETERO, pluginConfigs))), - CorrectConfigTests::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(ie_plugin_Multi, CorrectConfigTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(generate_configs(ov::test::utils::DEVICE_MULTI, pluginMultiConfigs))), - CorrectConfigTests::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(ie_plugin_Auto, CorrectConfigTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(generate_configs(ov::test::utils::DEVICE_AUTO, pluginMultiConfigs))), - CorrectConfigTests::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(ie_plugin_AutoBatch, CorrectConfigTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_BATCH), - ::testing::ValuesIn(generate_configs(ov::test::utils::DEVICE_BATCH, pluginConfigs))), - CorrectConfigTests::getTestCaseName); - - const std::vector> inPluginConfigs = { - {{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, "DOESN'T EXIST"}}, - {{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "-1"}}, - {{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "should be int"}}, - }; - - const std::vector> pluginMultiInConfigs = { - {{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, "DOESN'T EXIST"}}, - {{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "-1"}}, - {{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "should be int"}} - }; - - INSTANTIATE_TEST_SUITE_P(ie_plugin, IncorrectConfigTests, - ::testing::Combine( - ::testing::Values(ov::test::conformance::targetDevice), - ::testing::ValuesIn(inPluginConfigs)), - IncorrectConfigTests::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(ie_plugin_Hetero, IncorrectConfigTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_HETERO), - ::testing::ValuesIn(generate_configs(ov::test::utils::DEVICE_HETERO, inPluginConfigs))), - IncorrectConfigTests::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(ie_plugin_Multi, IncorrectConfigTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(generate_configs(ov::test::utils::DEVICE_MULTI, pluginMultiInConfigs))), - IncorrectConfigTests::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(ie_plugin_Auto, IncorrectConfigTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(generate_configs(ov::test::utils::DEVICE_AUTO, pluginMultiInConfigs))), - IncorrectConfigTests::getTestCaseName); - - 
INSTANTIATE_TEST_SUITE_P(ie_plugin_AutoBatch, IncorrectConfigTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_BATCH), - ::testing::ValuesIn(generate_configs(ov::test::utils::DEVICE_BATCH, pluginMultiInConfigs))), - IncorrectConfigTests::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(ie_plugin, IncorrectConfigAPITests, - ::testing::Combine( - ::testing::Values(ov::test::conformance::targetDevice), - ::testing::ValuesIn(inPluginConfigs)), - IncorrectConfigAPITests::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(ie_plugin_Hetero, IncorrectConfigAPITests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_HETERO), - ::testing::ValuesIn(generate_configs(ov::test::utils::DEVICE_HETERO, inPluginConfigs))), - IncorrectConfigAPITests::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(ie_plugin_Multi, IncorrectConfigAPITests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(generate_configs(ov::test::utils::DEVICE_MULTI, pluginMultiInConfigs))), - IncorrectConfigAPITests::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(ie_plugin_Auto, IncorrectConfigAPITests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(generate_configs(ov::test::utils::DEVICE_AUTO, pluginMultiInConfigs))), - IncorrectConfigAPITests::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(ie_plugin_AutoBatch, IncorrectConfigAPITests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_BATCH), - ::testing::ValuesIn(generate_configs(ov::test::utils::DEVICE_BATCH, inPluginConfigs))), - IncorrectConfigAPITests::getTestCaseName); - - const std::vector> pluginConfigsCheck = { - {}, - {{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT}}, - {{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}}, - {{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "1"}}, - }; - - INSTANTIATE_TEST_SUITE_P(ie_plugin, CorrectConfigCheck, - ::testing::Combine( - ::testing::ValuesIn(return_all_possible_device_combination()), - ::testing::ValuesIn(pluginConfigsCheck)), - CorrectConfigCheck::getTestCaseName); -} // namespace diff --git a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/plugin/core_integration.cpp b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/plugin/core_integration.cpp deleted file mode 100644 index 12e4071694e44a..00000000000000 --- a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/plugin/core_integration.cpp +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior/plugin/core_integration.hpp" -#include "api_conformance_helpers.hpp" - -using namespace BehaviorTestsDefinitions; -using namespace InferenceEngine::PluginConfigParams; -using namespace ov::test::conformance; - -namespace { -// -// IE Class Common tests with -// - -INSTANTIATE_TEST_SUITE_P( - ie_plugin, IEClassBasicTestP, - ::testing::ValuesIn(generate_pairs_plugin_name_by_device())); - -INSTANTIATE_TEST_SUITE_P( - ie_plugin, IEClassNetworkTestP, - ::testing::ValuesIn(return_all_possible_device_combination())); - -// -// IE Class GetMetric -// - -INSTANTIATE_TEST_SUITE_P( - ie_plugin, 
IEClassGetMetricTest_SUPPORTED_CONFIG_KEYS, - ::testing::ValuesIn(return_all_possible_device_combination(false))); - -INSTANTIATE_TEST_SUITE_P( - ie_plugin, IEClassGetMetricTest_SUPPORTED_METRICS, - ::testing::ValuesIn(return_all_possible_device_combination(false))); - -INSTANTIATE_TEST_SUITE_P( - ie_plugin, IEClassGetMetricTest_AVAILABLE_DEVICES, - ::testing::ValuesIn(return_all_possible_device_combination(false))); - -INSTANTIATE_TEST_SUITE_P( - ie_plugin, IEClassGetMetricTest_FULL_DEVICE_NAME, - ::testing::ValuesIn(return_all_possible_device_combination(false))); - -INSTANTIATE_TEST_SUITE_P( - ie_plugin, IEClassGetMetricTest_OPTIMIZATION_CAPABILITIES, - ::testing::ValuesIn(return_all_possible_device_combination(false))); - -INSTANTIATE_TEST_SUITE_P( - ie_plugin, IEClassGetMetricTest_RANGE_FOR_ASYNC_INFER_REQUESTS, - ::testing::ValuesIn(return_all_possible_device_combination(false))); - -INSTANTIATE_TEST_SUITE_P( - ie_plugin, IEClassGetMetricTest_RANGE_FOR_STREAMS, - ::testing::ValuesIn(return_all_possible_device_combination(false))); - -INSTANTIATE_TEST_SUITE_P( - ie_plugin, IEClassGetMetricTest_ThrowUnsupported, - ::testing::ValuesIn(return_all_possible_device_combination(false))); - -INSTANTIATE_TEST_SUITE_P( - ie_plugin, IEClassGetConfigTest_ThrowUnsupported, - ::testing::ValuesIn(return_all_possible_device_combination(false))); - -INSTANTIATE_TEST_SUITE_P( - ie_plugin, IEClassGetAvailableDevices, - ::testing::Values(targetDevice)); - -// -// IE Class GetConfig -// - -INSTANTIATE_TEST_SUITE_P( - ie_plugin, IEClassGetConfigTest, - ::testing::ValuesIn(return_all_possible_device_combination(false))); - -// IE Class Query network - -INSTANTIATE_TEST_SUITE_P( - ie_plugin, IEClassQueryNetworkTest, - ::testing::ValuesIn(return_all_possible_device_combination())); - -// IE Class Load network - -INSTANTIATE_TEST_SUITE_P( - ie_plugin, IEClassLoadNetworkTest, - ::testing::ValuesIn(return_all_possible_device_combination())); -} // namespace diff --git a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/plugin/core_threading_tests.cpp b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/plugin/core_threading_tests.cpp deleted file mode 100644 index 06507ee6817786..00000000000000 --- a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/plugin/core_threading_tests.cpp +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include "api_conformance_helpers.hpp" - -using namespace ov::test::conformance; - -namespace { - -const Params coreThreadingParams[] = { - std::tuple{ ov::test::utils::DEVICE_HETERO, generate_configs(ov::test::utils::DEVICE_HETERO).front() }, - std::tuple{ ov::test::utils::DEVICE_MULTI, generate_configs(ov::test::utils::DEVICE_MULTI).front() }, - std::tuple{ ov::test::utils::DEVICE_AUTO, generate_configs(ov::test::utils::DEVICE_AUTO).front() }, - std::tuple{ ov::test::utils::DEVICE_BATCH, generate_configs(ov::test::utils::DEVICE_BATCH).front() }, -}; - -INSTANTIATE_TEST_SUITE_P(ie_plugin_, CoreThreadingTests, - testing::ValuesIn(coreThreadingParams), - CoreThreadingTests::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(ie_plugin, CoreThreadingTests, - ::testing::Combine( - ::testing::ValuesIn(return_all_possible_device_combination()), - ::testing::Values(Config{{ CONFIG_KEY(PERF_COUNT), CONFIG_VALUE(YES) }})), - CoreThreadingTests::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(ie_plugin, 
CoreThreadingTestsWithIterations, - testing::Combine(testing::ValuesIn(coreThreadingParams), - testing::Values(4), - testing::Values(50), - testing::Values(ModelClass::Default)), - CoreThreadingTestsWithIterations::getTestCaseName); - -} // namespace diff --git a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/plugin/life_time.cpp b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/plugin/life_time.cpp deleted file mode 100644 index 8a48ac9b44239c..00000000000000 --- a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/plugin/life_time.cpp +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior/plugin/life_time.hpp" -#include "api_conformance_helpers.hpp" - -using namespace BehaviorTestsDefinitions; -using namespace ov::test::conformance; - -namespace { - const std::vector> orders = { - // 0 - plugin - // 1 - executable_network - // 2 - infer_request - // 3 - variable state - {3, 0, 1, 2}, - {3, 0, 2, 1}, - {3, 1, 0, 2}, - {3, 1, 2, 0}, - {3, 2, 0, 1}, - {3, 2, 1, 0}, - {0, 3, 1, 2}, - {0, 1, 3, 2} - }; - - INSTANTIATE_TEST_SUITE_P(ie_plugin, HoldersTest, - ::testing::Combine( - ::testing::ValuesIn(return_all_possible_device_combination()), - ::testing::ValuesIn(orders)), - HoldersTest::getTestCaseName); - -} // namespace diff --git a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/plugin/set_preprocess.cpp b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/plugin/set_preprocess.cpp deleted file mode 100644 index 7b1b165f8ce073..00000000000000 --- a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/plugin/set_preprocess.cpp +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior/plugin/set_preprocess.hpp" -#include "api_conformance_helpers.hpp" - -namespace { - -using namespace BehaviorTestsDefinitions; -using namespace ov::test::conformance; - -const std::vector netPrecisionsPreprocess = { - InferenceEngine::Precision::FP32, - InferenceEngine::Precision::FP16 -}; - -INSTANTIATE_TEST_SUITE_P(ie_plugin, InferRequestPreprocessTest, - ::testing::Combine( - ::testing::ValuesIn(netPrecisionsPreprocess), - ::testing::ValuesIn(return_all_possible_device_combination()), - ::testing::Values(ie_config)), - InferRequestPreprocessTest::getTestCaseName); - -const std::vector ioPrecisionsPreprocess = { - InferenceEngine::Precision::FP32, - InferenceEngine::Precision::U8 -}; -const std::vector netLayoutsPreprocess = { - InferenceEngine::Layout::NCHW, - // InferenceEngine::Layout::NHWC -}; - -const std::vector ioLayoutsPreprocess = { - InferenceEngine::Layout::NCHW, - InferenceEngine::Layout::NHWC -}; - -INSTANTIATE_TEST_SUITE_P(ie_plugin, InferRequestPreprocessConversionTest, - ::testing::Combine( - ::testing::ValuesIn(netPrecisionsPreprocess), - ::testing::ValuesIn(ioPrecisionsPreprocess), - ::testing::ValuesIn(ioPrecisionsPreprocess), - ::testing::ValuesIn(netLayoutsPreprocess), - ::testing::ValuesIn(ioLayoutsPreprocess), - ::testing::ValuesIn(ioLayoutsPreprocess), - ::testing::Bool(), - ::testing::Bool(), - ::testing::ValuesIn(return_all_possible_device_combination()), - ::testing::Values(ie_config)), - InferRequestPreprocessConversionTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(ie_plugin, 
InferRequestPreprocessDynamicallyInSetBlobTest, - ::testing::Combine( - ::testing::ValuesIn(netPrecisionsPreprocess), - ::testing::Bool(), - ::testing::Bool(), - ::testing::ValuesIn(netLayoutsPreprocess), - ::testing::Bool(), - ::testing::Bool(), - ::testing::Values(true), // only SetBlob - ::testing::Values(true), // only SetBlob - ::testing::ValuesIn(return_all_possible_device_combination()), - ::testing::Values(ie_config)), - InferRequestPreprocessDynamicallyInSetBlobTest::getTestCaseName); -} // namespace diff --git a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/plugin/version.cpp b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/plugin/version.cpp deleted file mode 100644 index 53f06d725747f8..00000000000000 --- a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/plugin/version.cpp +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior/plugin/version.hpp" -#include "api_conformance_helpers.hpp" - -using namespace BehaviorTestsDefinitions; -namespace { - INSTANTIATE_TEST_SUITE_P(ie_plugin, VersionTest, - ::testing::ValuesIn(ov::test::conformance::return_all_possible_device_combination()), - VersionTest::getTestCaseName); -} // namespace From 749ed9dec7f2817fb3f27838ea3d307bf14c9ebf Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Tue, 3 Oct 2023 22:47:52 +0400 Subject: [PATCH 050/257] Added python search path for Conda (#19929) --- src/bindings/python/src/openvino/utils.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/bindings/python/src/openvino/utils.py b/src/bindings/python/src/openvino/utils.py index a1467b427b9a75..a62418d951fc24 100644 --- a/src/bindings/python/src/openvino/utils.py +++ b/src/bindings/python/src/openvino/utils.py @@ -18,9 +18,12 @@ def _add_openvino_libs_to_search_path() -> None: # If you're using a custom installation of openvino, # add the location of openvino dlls to your system PATH. openvino_libs = [] - # looking for the libs in the pip installation path. if os.path.isdir(os.path.join(os.path.dirname(__file__), "libs")): + # looking for the libs in the pip installation path. openvino_libs.append(os.path.join(os.path.dirname(__file__), "libs")) + elif os.path.isdir(os.path.join(os.path.dirname(__file__), "..", "..", "..", "Library", "bin")): + # looking for the libs in the conda installation path + openvino_libs.append(os.path.join(os.path.dirname(__file__), "..", "..", "..", "Library", "bin")) else: # setupvars.bat script set all libs paths to OPENVINO_LIB_PATHS environment variable. 
openvino_libs_installer = os.getenv("OPENVINO_LIB_PATHS") From 0ee0b4d95611fa697cd6311c71b2050262ea7266 Mon Sep 17 00:00:00 2001 From: Vladimir Paramuzov Date: Wed, 4 Oct 2023 10:18:34 +0400 Subject: [PATCH 051/257] [GPU] Fix custom layer (#20220) --- .../intel_gpu/src/plugin/ops/custom.cpp | 2 +- .../intel_gpu/tests/functional/CMakeLists.txt | 4 + .../tests/functional/custom_op/custom_op.cl | 5 + .../tests/functional/custom_op/custom_op.cpp | 97 +++++++++++++++++++ .../tests/functional/custom_op/custom_op.xml | 13 +++ 5 files changed, 120 insertions(+), 1 deletion(-) create mode 100644 src/plugins/intel_gpu/tests/functional/custom_op/custom_op.cl create mode 100644 src/plugins/intel_gpu/tests/functional/custom_op/custom_op.cpp create mode 100644 src/plugins/intel_gpu/tests/functional/custom_op/custom_op.xml diff --git a/src/plugins/intel_gpu/src/plugin/ops/custom.cpp b/src/plugins/intel_gpu/src/plugin/ops/custom.cpp index ff8f75aca966de..c5d61ceb8e2951 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/custom.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/custom.cpp @@ -229,6 +229,7 @@ void CreateCustomOp(ProgramBuilder& p, const std::shared_ptr& op, Cust outputLayout, gws, lws); + p.add_primitive(*op, customPrim); auto prevLayerName = genericLayerName; if (outputLayout.format != cldnn::format::any) { @@ -240,7 +241,6 @@ void CreateCustomOp(ProgramBuilder& p, const std::shared_ptr& op, Cust customPrim.output_layout.data_type)); prevLayerName = reorderPrimName; } - p.add_primitive(*op, customPrim); } } // namespace intel_gpu diff --git a/src/plugins/intel_gpu/tests/functional/CMakeLists.txt b/src/plugins/intel_gpu/tests/functional/CMakeLists.txt index e784d57d291c08..e40814d43eb7ff 100644 --- a/src/plugins/intel_gpu/tests/functional/CMakeLists.txt +++ b/src/plugins/intel_gpu/tests/functional/CMakeLists.txt @@ -12,6 +12,8 @@ if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") ov_add_compiler_flags(/wd4305) endif() +list(APPEND DEFINES TEST_CUSTOM_OP_CONFIG_PATH="${CMAKE_CURRENT_SOURCE_DIR}/custom_op/custom_op.xml") + addIeTargetTest( NAME ${TARGET_NAME} @@ -23,6 +25,8 @@ addIeTargetTest( ${CMAKE_CURRENT_SOURCE_DIR} $/include/ ${TEST_COMMON_INCLUDE_DIR} + DEFINES + ${DEFINES} DEPENDENCIES openvino_intel_gpu_plugin LINK_LIBRARIES diff --git a/src/plugins/intel_gpu/tests/functional/custom_op/custom_op.cl b/src/plugins/intel_gpu/tests/functional/custom_op/custom_op.cl new file mode 100644 index 00000000000000..86ee68586fc237 --- /dev/null +++ b/src/plugins/intel_gpu/tests/functional/custom_op/custom_op.cl @@ -0,0 +1,5 @@ +__kernel void custom_kernel(__global const INPUT0_TYPE* input, __global OUTPUT0_TYPE* output) { + uint id = get_global_id(0); + + output[id] = input[id] * alpha + beta; +} diff --git a/src/plugins/intel_gpu/tests/functional/custom_op/custom_op.cpp b/src/plugins/intel_gpu/tests/functional/custom_op/custom_op.cpp new file mode 100644 index 00000000000000..a7e37b8be8533b --- /dev/null +++ b/src/plugins/intel_gpu/tests/functional/custom_op/custom_op.cpp @@ -0,0 +1,97 @@ +// Copyright (C) 2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include +#include +#include + +#include "openvino/core/any.hpp" +#include "openvino/runtime/core.hpp" +#include "openvino/runtime/exec_model_info.hpp" +#include "openvino/runtime/properties.hpp" + +#include "base/ov_behavior_test_utils.hpp" + +using namespace ::testing; + +namespace ov { +namespace test { +namespace intel_gpu { + +class CustomOp : public ov::op::Op { +private: + float m_alpha; + float m_beta; + +public: + 
OPENVINO_OP("CustomOp"); + + CustomOp() = default; + + CustomOp(const ov::Output& input, float alpha, float beta) : Op({input}), m_alpha(alpha), m_beta(beta) { + constructor_validate_and_infer_types(); + } + + void validate_and_infer_types() override { + set_output_size(1); + set_output_type(0, get_input_element_type(0), get_input_partial_shape(0)); + } + + bool visit_attributes(ov::AttributeVisitor& visitor) override { + visitor.on_attribute("alpha", m_alpha); + visitor.on_attribute("beta", m_beta); + return true; + } + + std::shared_ptr clone_with_new_inputs(const ov::OutputVector& inputs) const override { + return std::make_shared(inputs[0], m_alpha, m_beta); + } + + bool has_evaluate() const override { + return true; + } + + bool evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const override { + auto in = inputs[0]; + auto out = outputs[0]; + out.set_shape(in.get_shape()); + for (size_t i = 0; i < out.get_size(); i++) { + out.data()[i] = in.data()[i] * m_alpha + m_beta; + } + return true; + } +}; + +static std::shared_ptr get_simple_model_with_custom_op() { + auto param = std::make_shared(ov::element::f32, ov::PartialShape{1, 2, 3, 4}); + auto op = std::make_shared(param, 1.0f, 2.0f); + auto result = std::make_shared(op); + + return std::make_shared(ov::ResultVector{result}, ov::ParameterVector{param}, "model_with_custom_op"); +} + +TEST(CustomOp, CanReadValidCustomOpConfig) { + ov::Core core; + core.set_property(ov::test::utils::DEVICE_GPU, {{"CONFIG_FILE", TEST_CUSTOM_OP_CONFIG_PATH}}); +} + +TEST(CustomOp, NoRedundantReordersInserted) { + ov::Core core; + auto model = get_simple_model_with_custom_op(); + ov::AnyMap config = { ov::hint::inference_precision(ov::element::f32), {"CONFIG_FILE", TEST_CUSTOM_OP_CONFIG_PATH}}; + auto compiled_model = core.compile_model(model, ov::test::utils::DEVICE_GPU, config); + + auto runtime_graph = compiled_model.get_runtime_model(); + + auto ops = runtime_graph->get_ordered_ops(); + ASSERT_EQ(ops.size(), 3); + ASSERT_STREQ(ops[0]->get_rt_info()[ov::exec_model_info::LAYER_TYPE].as().c_str(), "Input"); + ASSERT_STREQ(ops[1]->get_rt_info()[ov::exec_model_info::LAYER_TYPE].as().c_str(), "CustomGPUPrimitive"); + ASSERT_STREQ(ops[2]->get_rt_info()[ov::exec_model_info::LAYER_TYPE].as().c_str(), "Result"); +} + +} // namespace intel_gpu +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_gpu/tests/functional/custom_op/custom_op.xml b/src/plugins/intel_gpu/tests/functional/custom_op/custom_op.xml new file mode 100644 index 00000000000000..412aec3b35b513 --- /dev/null +++ b/src/plugins/intel_gpu/tests/functional/custom_op/custom_op.xml @@ -0,0 +1,13 @@ + + + + + + + + + + + + + From 7926302d35770d081de06e066e5e4c2e68938b8c Mon Sep 17 00:00:00 2001 From: Mateusz Mikolajczyk Date: Wed, 4 Oct 2023 09:38:33 +0200 Subject: [PATCH 052/257] [Ref][Core][Opset13] BitwiseAnd, BitwiseOr and BitwiseXor core shell and reference (#20058) * Add Bitwise binary core and refs * Add draft for tests * Formatting, build issues and tests * Fix tests * Add reference tests * Apply requested changes * Add requested changes * Rename * uncomment test * Update src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/src/op_impl_check/single_op_graph.cpp Co-authored-by: Tomasz Jankowski * change reference --------- Co-authored-by: Tomasz Jankowski --- src/core/include/openvino/op/bitwise_and.hpp | 39 + src/core/include/openvino/op/bitwise_or.hpp | 39 + src/core/include/openvino/op/bitwise_xor.hpp | 39 + 
src/core/include/openvino/op/ops.hpp | 3 + .../op/util/binary_elementwise_bitwise.hpp | 41 + .../include/openvino/opsets/opset13_tbl.hpp | 3 + .../openvino/reference/bitwise_and.hpp | 54 + .../include/openvino/reference/bitwise_or.hpp | 54 + .../openvino/reference/bitwise_xor.hpp | 54 + src/core/src/op/bitwise_and.cpp | 25 + src/core/src/op/bitwise_not.cpp | 1 - src/core/src/op/bitwise_or.cpp | 25 + src/core/src/op/bitwise_xor.cpp | 25 + .../op/util/binary_elementwise_bitwise.cpp | 41 + src/core/tests/op_version_tbl.hpp | 3 + src/core/tests/opset.cpp | 2 +- src/core/tests/type_prop/bitwise_and.cpp | 11 + src/core/tests/type_prop/bitwise_ops.hpp | 936 ++++++++++++++++++ src/core/tests/type_prop/bitwise_or.cpp | 11 + src/core/tests/type_prop/bitwise_xor.cpp | 11 + src/core/tests/visitors/op/bitwise_and.cpp | 11 + src/core/tests/visitors/op/bitwise_or.cpp | 11 + src/core/tests/visitors/op/bitwise_xor.cpp | 11 + .../template/backend/ops/bitwise_and.cpp | 56 ++ .../template/backend/ops/bitwise_not.cpp | 2 +- .../template/backend/ops/bitwise_or.cpp | 56 ++ .../template/backend/ops/bitwise_xor.cpp | 56 ++ .../template/backend/ops/ops_evaluates.hpp | 12 + .../template/backend/opset_int_tbl.hpp | 3 + .../tests/functional/op_reference/bitwise.hpp | 27 +- .../functional/op_reference/bitwise_and.cpp | 359 +++++++ .../functional/op_reference/bitwise_or.cpp | 385 +++++++ .../functional/op_reference/bitwise_xor.cpp | 385 +++++++ .../src/op_impl_check/single_op_graph.cpp | 20 + 34 files changed, 2801 insertions(+), 10 deletions(-) create mode 100644 src/core/include/openvino/op/bitwise_and.hpp create mode 100644 src/core/include/openvino/op/bitwise_or.hpp create mode 100644 src/core/include/openvino/op/bitwise_xor.hpp create mode 100644 src/core/include/openvino/op/util/binary_elementwise_bitwise.hpp create mode 100644 src/core/reference/include/openvino/reference/bitwise_and.hpp create mode 100644 src/core/reference/include/openvino/reference/bitwise_or.hpp create mode 100644 src/core/reference/include/openvino/reference/bitwise_xor.hpp create mode 100644 src/core/src/op/bitwise_and.cpp create mode 100644 src/core/src/op/bitwise_or.cpp create mode 100644 src/core/src/op/bitwise_xor.cpp create mode 100644 src/core/src/op/util/binary_elementwise_bitwise.cpp create mode 100644 src/core/tests/type_prop/bitwise_and.cpp create mode 100644 src/core/tests/type_prop/bitwise_ops.hpp create mode 100644 src/core/tests/type_prop/bitwise_or.cpp create mode 100644 src/core/tests/type_prop/bitwise_xor.cpp create mode 100644 src/core/tests/visitors/op/bitwise_and.cpp create mode 100644 src/core/tests/visitors/op/bitwise_or.cpp create mode 100644 src/core/tests/visitors/op/bitwise_xor.cpp create mode 100644 src/plugins/template/backend/ops/bitwise_and.cpp create mode 100644 src/plugins/template/backend/ops/bitwise_or.cpp create mode 100644 src/plugins/template/backend/ops/bitwise_xor.cpp create mode 100644 src/plugins/template/tests/functional/op_reference/bitwise_and.cpp create mode 100644 src/plugins/template/tests/functional/op_reference/bitwise_or.cpp create mode 100644 src/plugins/template/tests/functional/op_reference/bitwise_xor.cpp diff --git a/src/core/include/openvino/op/bitwise_and.hpp b/src/core/include/openvino/op/bitwise_and.hpp new file mode 100644 index 00000000000000..4a9867b222aef5 --- /dev/null +++ b/src/core/include/openvino/op/bitwise_and.hpp @@ -0,0 +1,39 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" 
+#include "openvino/op/util/binary_elementwise_bitwise.hpp" + +namespace ov { +namespace op { +namespace v13 { +/// \brief Elementwise bitwise AND operation. +/// \ingroup ov_ops_cpp_api +class OPENVINO_API BitwiseAnd : public util::BinaryElementwiseBitwise { +public: + OPENVINO_OP("BitwiseAnd", "opset13", util::BinaryElementwiseBitwise); + /// \brief Constructs a bitwise AND operation. + BitwiseAnd() = default; + /// \brief Constructs a bitwise AND operation. + /// + /// \param arg0 Output that produces the first input tensor.
+ /// `[d0, ...]`
+ /// \param arg1 Output that produces the second input tensor.
+ /// `[d0, ...]` + /// \param auto_broadcast Auto broadcast specification. Default is Numpy-style + /// implicit broadcasting. + /// + /// Output `[d0, ...]` + /// + BitwiseAnd(const Output& arg0, + const Output& arg1, + const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; +}; +} // namespace v13 +} // namespace op +} // namespace ov diff --git a/src/core/include/openvino/op/bitwise_or.hpp b/src/core/include/openvino/op/bitwise_or.hpp new file mode 100644 index 00000000000000..0f40a8500362a0 --- /dev/null +++ b/src/core/include/openvino/op/bitwise_or.hpp @@ -0,0 +1,39 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" +#include "openvino/op/util/binary_elementwise_bitwise.hpp" + +namespace ov { +namespace op { +namespace v13 { +/// \brief Elementwise bitwise OR operation. +/// \ingroup ov_ops_cpp_api +class OPENVINO_API BitwiseOr : public util::BinaryElementwiseBitwise { +public: + OPENVINO_OP("BitwiseOr", "opset13", util::BinaryElementwiseBitwise); + /// \brief Constructs a bitwise OR operation. + BitwiseOr() = default; + /// \brief Constructs a bitwise OR operation. + /// + /// \param arg0 Output that produces the first input tensor.
+ /// `[d0, ...]`
+ /// \param arg1 Output that produces the second input tensor.
+ /// `[d0, ...]` + /// \param auto_broadcast Auto broadcast specification. Default is Numpy-style + /// implicit broadcasting. + /// + /// Output `[d0, ...]` + /// + BitwiseOr(const Output& arg0, + const Output& arg1, + const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; +}; +} // namespace v13 +} // namespace op +} // namespace ov diff --git a/src/core/include/openvino/op/bitwise_xor.hpp b/src/core/include/openvino/op/bitwise_xor.hpp new file mode 100644 index 00000000000000..6ebb07bfe38d73 --- /dev/null +++ b/src/core/include/openvino/op/bitwise_xor.hpp @@ -0,0 +1,39 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" +#include "openvino/op/util/binary_elementwise_bitwise.hpp" + +namespace ov { +namespace op { +namespace v13 { +/// \brief Elementwise bitwise XOR operation. +/// \ingroup ov_ops_cpp_api +class OPENVINO_API BitwiseXor : public util::BinaryElementwiseBitwise { +public: + OPENVINO_OP("BitwiseXor", "opset13", util::BinaryElementwiseBitwise); + /// \brief Constructs a bitwise XOR operation. + BitwiseXor() = default; + /// \brief Constructs a bitwise XOR operation. + /// + /// \param arg0 Output that produces the first input tensor.
+ /// `[d0, ...]`
+ /// \param arg1 Output that produces the second input tensor.
+ /// `[d0, ...]` + /// \param auto_broadcast Auto broadcast specification. Default is Numpy-style + /// implicit broadcasting. + /// + /// Output `[d0, ...]` + /// + BitwiseXor(const Output& arg0, + const Output& arg1, + const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; +}; +} // namespace v13 +} // namespace op +} // namespace ov diff --git a/src/core/include/openvino/op/ops.hpp b/src/core/include/openvino/op/ops.hpp index 159a84176c427d..b57372f118f19b 100644 --- a/src/core/include/openvino/op/ops.hpp +++ b/src/core/include/openvino/op/ops.hpp @@ -21,7 +21,10 @@ #include "openvino/op/batch_norm.hpp" #include "openvino/op/batch_to_space.hpp" #include "openvino/op/binary_convolution.hpp" +#include "openvino/op/bitwise_and.hpp" #include "openvino/op/bitwise_not.hpp" +#include "openvino/op/bitwise_or.hpp" +#include "openvino/op/bitwise_xor.hpp" #include "openvino/op/broadcast.hpp" #include "openvino/op/bucketize.hpp" #include "openvino/op/ceiling.hpp" diff --git a/src/core/include/openvino/op/util/binary_elementwise_bitwise.hpp b/src/core/include/openvino/op/util/binary_elementwise_bitwise.hpp new file mode 100644 index 00000000000000..16096219e4d110 --- /dev/null +++ b/src/core/include/openvino/op/util/binary_elementwise_bitwise.hpp @@ -0,0 +1,41 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace util { +class OPENVINO_API BinaryElementwiseBitwise : public Op { +protected: + BinaryElementwiseBitwise(); + + /// \brief Constructs a binary elementwise bitwise operation. + /// + /// \param arg0 Output that produces the first input tensor. + /// \param arg1 Output that produces the second input tensor. + /// \param auto_broadcast Auto broadcast specification. Default is Numpy-style + /// implicit broadcasting. 
+ BinaryElementwiseBitwise(const Output& arg0, + const Output& arg1, + const AutoBroadcastSpec& autob = AutoBroadcastSpec()); + +public: + OPENVINO_OP("BinaryElementwiseBitwise", "util"); + + void validate_and_infer_types() override; + + virtual const AutoBroadcastSpec& get_autob() const override; + + void set_autob(const AutoBroadcastSpec& autob); + bool visit_attributes(AttributeVisitor& visitor) override; + +private: + AutoBroadcastSpec m_autob = AutoBroadcastType::NUMPY; +}; +} // namespace util +} // namespace op +} // namespace ov diff --git a/src/core/include/openvino/opsets/opset13_tbl.hpp b/src/core/include/openvino/opsets/opset13_tbl.hpp index 353124af1afd21..8d543e49b67614 100644 --- a/src/core/include/openvino/opsets/opset13_tbl.hpp +++ b/src/core/include/openvino/opsets/opset13_tbl.hpp @@ -209,5 +209,8 @@ _OPENVINO_OP_REG(Pad, ov::op::v12) _OPENVINO_OP_REG(ScatterElementsUpdate, ov::op::v12) // New operations added in opset13 +_OPENVINO_OP_REG(BitwiseAnd, ov::op::v13) _OPENVINO_OP_REG(BitwiseNot, ov::op::v13) +_OPENVINO_OP_REG(BitwiseOr, ov::op::v13) +_OPENVINO_OP_REG(BitwiseXor, ov::op::v13) _OPENVINO_OP_REG(NMSRotated, ov::op::v13) diff --git a/src/core/reference/include/openvino/reference/bitwise_and.hpp b/src/core/reference/include/openvino/reference/bitwise_and.hpp new file mode 100644 index 00000000000000..a6422b5d489342 --- /dev/null +++ b/src/core/reference/include/openvino/reference/bitwise_and.hpp @@ -0,0 +1,54 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include + +#include "openvino/reference/autobroadcast_binop.hpp" + +namespace ov { +namespace reference { +/** + * @brief Reference implementation of binary elementwise bitwise AND operator. + * + * @param arg0 Pointer to input 0 data. + * @param arg1 Pointer to input 1 data. + * @param out Pointer to output data. + * @param arg_shape0 Input 0 shape. + * @param arg_shape1 Input 1 shape. + * @param broadcast_spec Broadcast specification mode. + */ +template ::type, char>::value>::type* = nullptr> +// Check for char datatype used by ov::element::boolean +void bitwise_and(const T* arg0, + const T* arg1, + T* out, + const Shape& arg0_shape, + const Shape& arg1_shape, + const op::AutoBroadcastSpec& broadcast_spec) { + autobroadcast_binop(arg0, arg1, out, arg0_shape, arg1_shape, broadcast_spec, std::bit_and()); +} +/** + * @brief Reference implementation of binary elementwise bitwise AND operator. + * + * @param arg0 Pointer to input 0 data. + * @param arg1 Pointer to input 1 data. + * @param out Pointer to output data. + * @param arg_shape0 Input 0 shape. + * @param arg_shape1 Input 1 shape. + * @param broadcast_spec Broadcast specification mode. 
+ */ +template ::type, char>::value>::type* = nullptr> +void bitwise_and(const T* arg0, + const T* arg1, + T* out, + const Shape& arg0_shape, + const Shape& arg1_shape, + const op::AutoBroadcastSpec& broadcast_spec) { + autobroadcast_binop(arg0, arg1, out, arg0_shape, arg1_shape, broadcast_spec, std::bit_and()); +} +} // namespace reference +} // namespace ov diff --git a/src/core/reference/include/openvino/reference/bitwise_or.hpp b/src/core/reference/include/openvino/reference/bitwise_or.hpp new file mode 100644 index 00000000000000..54eb2fe91ffde0 --- /dev/null +++ b/src/core/reference/include/openvino/reference/bitwise_or.hpp @@ -0,0 +1,54 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include + +#include "openvino/reference/autobroadcast_binop.hpp" + +namespace ov { +namespace reference { +/** + * @brief Reference implementation of binary elementwise bitwise OR operator. + * + * @param arg0 Pointer to input 0 data. + * @param arg1 Pointer to input 1 data. + * @param out Pointer to output data. + * @param arg_shape0 Input 0 shape. + * @param arg_shape1 Input 1 shape. + * @param broadcast_spec Broadcast specification mode. + */ +template ::type, char>::value>::type* = nullptr> +// Check for char datatype used by ov::element::boolean +void bitwise_or(const T* arg0, + const T* arg1, + T* out, + const Shape& arg0_shape, + const Shape& arg1_shape, + const op::AutoBroadcastSpec& broadcast_spec) { + autobroadcast_binop(arg0, arg1, out, arg0_shape, arg1_shape, broadcast_spec, std::bit_or()); +} +/** + * @brief Reference implementation of binary elementwise bitwise OR operator. + * + * @param arg0 Pointer to input 0 data. + * @param arg1 Pointer to input 1 data. + * @param out Pointer to output data. + * @param arg_shape0 Input 0 shape. + * @param arg_shape1 Input 1 shape. + * @param broadcast_spec Broadcast specification mode. + */ +template ::type, char>::value>::type* = nullptr> +void bitwise_or(const T* arg0, + const T* arg1, + T* out, + const Shape& arg0_shape, + const Shape& arg1_shape, + const op::AutoBroadcastSpec& broadcast_spec) { + autobroadcast_binop(arg0, arg1, out, arg0_shape, arg1_shape, broadcast_spec, std::bit_or()); +} +} // namespace reference +} // namespace ov diff --git a/src/core/reference/include/openvino/reference/bitwise_xor.hpp b/src/core/reference/include/openvino/reference/bitwise_xor.hpp new file mode 100644 index 00000000000000..7204077c4abce7 --- /dev/null +++ b/src/core/reference/include/openvino/reference/bitwise_xor.hpp @@ -0,0 +1,54 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include + +#include "openvino/reference/autobroadcast_binop.hpp" + +namespace ov { +namespace reference { +/** + * @brief Reference implementation of binary elementwise bitwise XOR operator. + * + * @param arg0 Pointer to input 0 data. + * @param arg1 Pointer to input 1 data. + * @param out Pointer to output data. + * @param arg_shape0 Input 0 shape. + * @param arg_shape1 Input 1 shape. + * @param broadcast_spec Broadcast specification mode. 
+ */ +template ::type, char>::value>::type* = nullptr> +// Check for char datatype used by ov::element::boolean +void bitwise_xor(const T* arg0, + const T* arg1, + T* out, + const Shape& arg0_shape, + const Shape& arg1_shape, + const op::AutoBroadcastSpec& broadcast_spec) { + autobroadcast_binop(arg0, arg1, out, arg0_shape, arg1_shape, broadcast_spec, std::bit_xor()); +} +/** + * @brief Reference implementation of binary elementwise bitwise XOR operator. + * + * @param arg0 Pointer to input 0 data. + * @param arg1 Pointer to input 1 data. + * @param out Pointer to output data. + * @param arg_shape0 Input 0 shape. + * @param arg_shape1 Input 1 shape. + * @param broadcast_spec Broadcast specification mode. + */ +template ::type, char>::value>::type* = nullptr> +void bitwise_xor(const T* arg0, + const T* arg1, + T* out, + const Shape& arg0_shape, + const Shape& arg1_shape, + const op::AutoBroadcastSpec& broadcast_spec) { + autobroadcast_binop(arg0, arg1, out, arg0_shape, arg1_shape, broadcast_spec, std::bit_xor()); +} +} // namespace reference +} // namespace ov diff --git a/src/core/src/op/bitwise_and.cpp b/src/core/src/op/bitwise_and.cpp new file mode 100644 index 00000000000000..22da9e92f47386 --- /dev/null +++ b/src/core/src/op/bitwise_and.cpp @@ -0,0 +1,25 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#include "openvino/op/bitwise_and.hpp" + +#include "itt.hpp" +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v13 { +BitwiseAnd::BitwiseAnd(const Output& arg0, const Output& arg1, const AutoBroadcastSpec& auto_broadcast) + : BinaryElementwiseBitwise(arg0, arg1, auto_broadcast) { + constructor_validate_and_infer_types(); +} + +std::shared_ptr BitwiseAnd::clone_with_new_inputs(const OutputVector& new_args) const { + OV_OP_SCOPE(v13_BitwiseAnd_clone_with_new_inputs); + check_new_args_count(this, new_args); + return std::make_shared(new_args[0], new_args[1], get_autob()); +} + +} // namespace v13 +} // namespace op +} // namespace ov diff --git a/src/core/src/op/bitwise_not.cpp b/src/core/src/op/bitwise_not.cpp index 92aeace18ad501..257a796fda2d0f 100644 --- a/src/core/src/op/bitwise_not.cpp +++ b/src/core/src/op/bitwise_not.cpp @@ -4,7 +4,6 @@ #include "openvino/op/bitwise_not.hpp" #include "itt.hpp" -#include "openvino/core/validation_util.hpp" #include "openvino/op/op.hpp" namespace ov { diff --git a/src/core/src/op/bitwise_or.cpp b/src/core/src/op/bitwise_or.cpp new file mode 100644 index 00000000000000..02ff0ad0830f1f --- /dev/null +++ b/src/core/src/op/bitwise_or.cpp @@ -0,0 +1,25 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#include "openvino/op/bitwise_or.hpp" + +#include "itt.hpp" +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v13 { +BitwiseOr::BitwiseOr(const Output& arg0, const Output& arg1, const AutoBroadcastSpec& auto_broadcast) + : BinaryElementwiseBitwise(arg0, arg1, auto_broadcast) { + constructor_validate_and_infer_types(); +} + +std::shared_ptr BitwiseOr::clone_with_new_inputs(const OutputVector& new_args) const { + OV_OP_SCOPE(v13_BitwiseOr_clone_with_new_inputs); + check_new_args_count(this, new_args); + return std::make_shared(new_args[0], new_args[1], get_autob()); +} + +} // namespace v13 +} // namespace op +} // namespace ov diff --git a/src/core/src/op/bitwise_xor.cpp b/src/core/src/op/bitwise_xor.cpp new file mode 100644 index 00000000000000..320fe39f120359 --- /dev/null +++ b/src/core/src/op/bitwise_xor.cpp @@ -0,0 
+1,25 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#include "openvino/op/bitwise_xor.hpp" + +#include "itt.hpp" +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v13 { +BitwiseXor::BitwiseXor(const Output& arg0, const Output& arg1, const AutoBroadcastSpec& auto_broadcast) + : BinaryElementwiseBitwise(arg0, arg1, auto_broadcast) { + constructor_validate_and_infer_types(); +} + +std::shared_ptr BitwiseXor::clone_with_new_inputs(const OutputVector& new_args) const { + OV_OP_SCOPE(v13_BitwiseXor_clone_with_new_inputs); + check_new_args_count(this, new_args); + return std::make_shared(new_args[0], new_args[1], get_autob()); +} + +} // namespace v13 +} // namespace op +} // namespace ov diff --git a/src/core/src/op/util/binary_elementwise_bitwise.cpp b/src/core/src/op/util/binary_elementwise_bitwise.cpp new file mode 100644 index 00000000000000..342bcf9cd757a8 --- /dev/null +++ b/src/core/src/op/util/binary_elementwise_bitwise.cpp @@ -0,0 +1,41 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/op/util/binary_elementwise_bitwise.hpp" + +#include "itt.hpp" +#include "openvino/op/util/elementwise_args.hpp" + +ov::op::util::BinaryElementwiseBitwise::BinaryElementwiseBitwise() = default; + +ov::op::util::BinaryElementwiseBitwise::BinaryElementwiseBitwise(const Output& arg0, + const Output& arg1, + const AutoBroadcastSpec& autob) + : Op({arg0, arg1}), + m_autob(autob) {} + +void ov::op::util::BinaryElementwiseBitwise::validate_and_infer_types() { + OV_OP_SCOPE(v0_util_BinaryElementwiseBitwise_validate_and_infer_types); + auto args_et_pshape = op::util::validate_and_infer_elementwise_args(this); + const auto& args_et = std::get<0>(args_et_pshape); + const auto& args_pshape = std::get<1>(args_et_pshape); + + NODE_VALIDATION_CHECK(this, + args_et.is_dynamic() || args_et.is_integral(), + "The element type of the input tensor must be integer or boolean."); + + set_output_type(0, args_et, args_pshape); +} + +bool ov::op::util::BinaryElementwiseBitwise::visit_attributes(AttributeVisitor& visitor) { + OV_OP_SCOPE(v0_util_BinaryElementwiseBitwise_visit_attributes); + visitor.on_attribute("auto_broadcast", m_autob); + return true; +} +const ov::op::AutoBroadcastSpec& ov::op::util::BinaryElementwiseBitwise::get_autob() const { + return m_autob; +} +void ov::op::util::BinaryElementwiseBitwise::set_autob(const AutoBroadcastSpec& autob) { + m_autob = autob; +} diff --git a/src/core/tests/op_version_tbl.hpp b/src/core/tests/op_version_tbl.hpp index bf2fc789b12635..d861bfba0c4c50 100644 --- a/src/core/tests/op_version_tbl.hpp +++ b/src/core/tests/op_version_tbl.hpp @@ -26,7 +26,10 @@ _OPENVINO_OP_REG(AvgPool, ov::op::v1) _OPENVINO_OP_REG(BatchNormInference, ov::op::v0) _OPENVINO_OP_REG(BatchToSpace, ov::op::v1) _OPENVINO_OP_REG(BinaryConvolution, ov::op::v1) +_OPENVINO_OP_REG(BitwiseAnd, ov::op::v13) _OPENVINO_OP_REG(BitwiseNot, ov::op::v13) +_OPENVINO_OP_REG(BitwiseOr, ov::op::v13) +_OPENVINO_OP_REG(BitwiseXor, ov::op::v13) _OPENVINO_OP_REG(Broadcast, ov::op::v1) _OPENVINO_OP_REG(Broadcast, ov::op::v3) _OPENVINO_OP_REG(Bucketize, ov::op::v3) diff --git a/src/core/tests/opset.cpp b/src/core/tests/opset.cpp index 947d2cdfa1f392..204f43ae8ff906 100644 --- a/src/core/tests/opset.cpp +++ b/src/core/tests/opset.cpp @@ -71,7 +71,7 @@ INSTANTIATE_TEST_SUITE_P(opset, OpsetTestParams{ov::get_opset10, 177}, OpsetTestParams{ov::get_opset11, 177}, OpsetTestParams{ov::get_opset12, 178}, - 
OpsetTestParams{ov::get_opset13, 180}), + OpsetTestParams{ov::get_opset13, 183}), OpsetTestNameGenerator{}); class MyOpOld : public ov::op::Op { diff --git a/src/core/tests/type_prop/bitwise_and.cpp b/src/core/tests/type_prop/bitwise_and.cpp new file mode 100644 index 00000000000000..0490f79c96e61c --- /dev/null +++ b/src/core/tests/type_prop/bitwise_and.cpp @@ -0,0 +1,11 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/op/bitwise_and.hpp" + +#include "bitwise_ops.hpp" + +using Type = ::testing::Types; + +INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_bitwise_and, BitwiseOperator, Type); diff --git a/src/core/tests/type_prop/bitwise_ops.hpp b/src/core/tests/type_prop/bitwise_ops.hpp new file mode 100644 index 00000000000000..3a8dc24df1b3ec --- /dev/null +++ b/src/core/tests/type_prop/bitwise_ops.hpp @@ -0,0 +1,936 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include + +#include "common_test_utils/test_assertions.hpp" +#include "common_test_utils/type_prop.hpp" +#include "openvino/core/dimension_tracker.hpp" +#include "openvino/op/util/attr_types.hpp" + +using namespace ov; +using op::v0::Parameter; +using namespace testing; + +template +class BitwiseOperator : public TypePropOpTest {}; + +TYPED_TEST_SUITE_P(BitwiseOperator); + +TYPED_TEST_P(BitwiseOperator, default_constructor_integer) { + auto lhs = std::make_shared(element::i32, PartialShape{-1, 4, 1, 6, {1, 6}, {2, 6}}); + auto rhs = std::make_shared(element::i32, PartialShape{-1, 1, 5, 6, {5, 8}, {5, 8}}); + + const auto op = this->make_op(); + + op->set_argument(0, lhs); + op->set_argument(1, rhs); + + auto autob = op::AutoBroadcastSpec(op::AutoBroadcastType::NONE); + op->set_autob(autob); + EXPECT_EQ(op->get_autob(), op::AutoBroadcastType::NONE); + ASSERT_THROW(op->validate_and_infer_types(), NodeValidationFailure); + + autob = op::AutoBroadcastSpec(op::AutoBroadcastType::NUMPY); + op->set_autob(autob); + EXPECT_EQ(op->get_autob(), op::AutoBroadcastType::NUMPY); + + op->validate_and_infer_types(); + + EXPECT_EQ(op->get_element_type(), element::i32); + EXPECT_EQ(op->get_output_partial_shape(0), (PartialShape{-1, 4, 5, 6, {5, 8}, {5, 6}})); +} + +TYPED_TEST_P(BitwiseOperator, default_constructor_boolean) { + auto lhs = std::make_shared(element::boolean, PartialShape{-1, 4, 1, 6, {1, 6}, {2, 6}}); + auto rhs = std::make_shared(element::boolean, PartialShape{-1, 1, 5, 6, {5, 8}, {5, 8}}); + + const auto op = this->make_op(); + + op->set_argument(0, lhs); + op->set_argument(1, rhs); + + auto autob = op::AutoBroadcastSpec(op::AutoBroadcastType::NONE); + op->set_autob(autob); + EXPECT_EQ(op->get_autob(), op::AutoBroadcastType::NONE); + ASSERT_THROW(op->validate_and_infer_types(), NodeValidationFailure); + + autob = op::AutoBroadcastSpec(op::AutoBroadcastType::NUMPY); + op->set_autob(autob); + EXPECT_EQ(op->get_autob(), op::AutoBroadcastType::NUMPY); + + op->validate_and_infer_types(); + + EXPECT_EQ(op->get_element_type(), element::boolean); + EXPECT_EQ(op->get_output_partial_shape(0), (PartialShape{-1, 4, 5, 6, {5, 8}, {5, 6}})); +} + +TYPED_TEST_P(BitwiseOperator, shape_inference_2D) { + auto lhs = std::make_shared(element::i32, Shape{2, 2}); + auto rhs = std::make_shared(element::i32, Shape{2, 2}); + + const auto op = this->make_op(lhs, rhs); + + EXPECT_EQ(op->get_element_type(), element::i32); + EXPECT_EQ(op->get_shape(), (Shape{2, 2})); +} + +TYPED_TEST_P(BitwiseOperator, shape_inference_4D) { + auto lhs = 
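    // Aside (hedged sketch, not part of this suite): outside the typed fixture the
    // new ops are built like any other binary elementwise node, e.g.
    //   auto a  = std::make_shared<ov::op::v0::Parameter>(ov::element::i32, ov::Shape{2, 2});
    //   auto b  = std::make_shared<ov::op::v0::Parameter>(ov::element::i32, ov::Shape{2, 2});
    //   auto op = std::make_shared<ov::op::v13::BitwiseAnd>(a, b);  // NUMPY broadcast by default,
    //                                                               // as the default_autobroadcast test below checks
    //   auto model = std::make_shared<ov::Model>(ov::OutputVector{op}, ov::ParameterVector{a, b});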
std::make_shared(element::i32, Shape{2, 2, 3, 3}); + auto rhs = std::make_shared(element::i32, Shape{2, 2, 3, 3}); + + const auto op = this->make_op(lhs, rhs); + + EXPECT_EQ(op->get_element_type(), element::i32); + EXPECT_EQ(op->get_shape(), (Shape{2, 2, 3, 3})); +} + +TYPED_TEST_P(BitwiseOperator, default_autobroadcast) { + auto lhs = std::make_shared(element::i32, Shape{2, 2}); + auto rhs = std::make_shared(element::i32, Shape{2, 2}); + + const auto op = this->make_op(lhs, rhs); + + EXPECT_EQ(op->get_element_type(), element::i32); + EXPECT_EQ(op->get_shape(), (Shape{2, 2})); + EXPECT_EQ(op->get_autob(), op::AutoBroadcastType::NUMPY); +} + +TYPED_TEST_P(BitwiseOperator, no_autobroadcast) { + auto lhs = std::make_shared(element::i32, Shape{2, 2}); + auto rhs = std::make_shared(element::i32, Shape{2, 2}); + + const auto op = this->make_op(lhs, rhs, op::AutoBroadcastType::NONE); + + EXPECT_EQ(op->get_element_type(), element::i32); + EXPECT_EQ(op->get_shape(), (Shape{2, 2})); + EXPECT_EQ(op->get_autob(), op::AutoBroadcastType::NONE); +} + +TYPED_TEST_P(BitwiseOperator, shape_inference_4D_x_scalar_numpy_broadcast) { + auto lhs = std::make_shared(element::i32, Shape{2, 3, 4, 5}); + auto rhs = std::make_shared(element::i32, Shape{1}); + + const auto op = this->make_op(lhs, rhs); + + EXPECT_EQ(op->get_element_type(), element::i32); + EXPECT_EQ(op->get_shape(), (Shape{2, 3, 4, 5})); +} + +TYPED_TEST_P(BitwiseOperator, shape_inference_4D_x_1D_numpy_broadcast) { + auto lhs = std::make_shared(element::i32, Shape{2, 3, 4, 5}); + auto rhs = std::make_shared(element::i32, Shape{5}); + + const auto op = this->make_op(lhs, rhs); + + EXPECT_EQ(op->get_element_type(), element::i32); + EXPECT_EQ(op->get_shape(), (Shape{2, 3, 4, 5})); +} + +TYPED_TEST_P(BitwiseOperator, shape_inference_2D_x_4D_numpy_broadcast) { + auto lhs = std::make_shared(element::i32, Shape{4, 5}); + auto rhs = std::make_shared(element::i32, Shape{2, 3, 4, 5}); + + const auto op = this->make_op(lhs, rhs); + + EXPECT_EQ(op->get_element_type(), element::i32); + EXPECT_EQ(op->get_shape(), (Shape{2, 3, 4, 5})); +} + +TYPED_TEST_P(BitwiseOperator, shape_inference_3D_x_4D_numpy_broadcast) { + auto lhs = std::make_shared(element::i32, Shape{1, 4, 5}); + auto rhs = std::make_shared(element::i32, Shape{2, 3, 1, 1}); + + const auto op = this->make_op(lhs, rhs); + + EXPECT_EQ(op->get_element_type(), element::i32); + EXPECT_EQ(op->get_shape(), (Shape{2, 3, 4, 5})); +} + +TYPED_TEST_P(BitwiseOperator, shape_inference_4D_x_3D_numpy_broadcast) { + auto lhs = std::make_shared(element::i32, Shape{8, 1, 6, 1}); + auto rhs = std::make_shared(element::i32, Shape{7, 1, 5}); + + const auto op = this->make_op(lhs, rhs); + + EXPECT_EQ(op->get_element_type(), element::i32); + EXPECT_EQ(op->get_shape(), (Shape{8, 7, 6, 5})); + EXPECT_EQ(op->get_autob(), op::AutoBroadcastType::NUMPY); +} + +TYPED_TEST_P(BitwiseOperator, static_shape_pdpd_doc_examples) { + { + auto lhs = std::make_shared(element::i32, Shape{2, 3, 4, 5}); + auto rhs = std::make_shared(element::i32, Shape{3, 4}); + + const auto autob = op::AutoBroadcastSpec(op::AutoBroadcastType::PDPD, 1); + const auto op = this->make_op(lhs, rhs, autob); + + EXPECT_EQ(op->get_element_type(), element::i32); + EXPECT_EQ(op->get_shape(), (Shape{2, 3, 4, 5})); + EXPECT_EQ(op->get_autob().m_type, op::AutoBroadcastType::PDPD); + } + { + auto lhs = std::make_shared(element::i32, Shape{2, 3, 4, 5}); + auto rhs = std::make_shared(element::i32, Shape{3, 1}); + + const auto autob = 
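        // Aside (hedged summary, consistent with the cases in this test): PDPD
        // broadcasting aligns the second input against the first starting at the
        // given axis and stretches its 1-sized dimensions, so for lhs {2, 3, 4, 5}:
        //   const auto pdpd = ov::op::AutoBroadcastSpec(ov::op::AutoBroadcastType::PDPD, /*axis*/ 1);
        //   rhs {3, 4} with axis 1 maps onto dims 1..2,
        //   rhs {3, 1} with axis 1 maps onto dims 1..2 and the trailing 1 stretches to 4,
        //   rhs {5}    with axis 3 maps onto the last dim,
        //   rhs {1, 3} with axis 0 maps onto dims 0..1 and the leading 1 stretches to 2.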
op::AutoBroadcastSpec(op::AutoBroadcastType::PDPD, 1); + const auto op = this->make_op(lhs, rhs, autob); + + EXPECT_EQ(op->get_element_type(), element::i32); + EXPECT_EQ(op->get_shape(), (Shape{2, 3, 4, 5})); + EXPECT_EQ(op->get_autob().m_type, op::AutoBroadcastType::PDPD); + } + { + auto lhs = std::make_shared(element::i32, Shape{2, 3, 4, 5}); + auto rhs = std::make_shared(element::i32, Shape{}); + + const auto autob = op::AutoBroadcastSpec(op::AutoBroadcastType::PDPD); + const auto op = this->make_op(lhs, rhs, autob); + + EXPECT_EQ(op->get_element_type(), element::i32); + EXPECT_EQ(op->get_shape(), (Shape{2, 3, 4, 5})); + EXPECT_EQ(op->get_autob().m_type, op::AutoBroadcastType::PDPD); + } + { + auto lhs = std::make_shared(element::i32, Shape{2, 3, 4, 5}); + auto rhs = std::make_shared(element::i32, Shape{5}); + + const auto autob = op::AutoBroadcastSpec(op::AutoBroadcastType::PDPD, 3); + const auto op = this->make_op(lhs, rhs, autob); + + EXPECT_EQ(op->get_element_type(), element::i32); + EXPECT_EQ(op->get_shape(), (Shape{2, 3, 4, 5})); + EXPECT_EQ(op->get_autob().m_type, op::AutoBroadcastType::PDPD); + } + { + auto lhs = std::make_shared(element::i32, Shape{2, 3, 4, 5}); + auto rhs = std::make_shared(element::i32, Shape{1, 3}); + + const auto autob = op::AutoBroadcastSpec(op::AutoBroadcastType::PDPD, 0); + const auto op = this->make_op(lhs, rhs, autob); + + EXPECT_EQ(op->get_element_type(), element::i32); + EXPECT_EQ(op->get_shape(), (Shape{2, 3, 4, 5})); + EXPECT_EQ(op->get_autob().m_type, op::AutoBroadcastType::PDPD); + } + { + auto lhs = std::make_shared(element::i32, Shape{2, 3, 4, 5}); + auto rhs = std::make_shared(element::i32, Shape{3, 1, 5}); + + const auto autob = op::AutoBroadcastSpec(op::AutoBroadcastType::PDPD, 1); + const auto op = this->make_op(lhs, rhs, autob); + + EXPECT_EQ(op->get_element_type(), element::i32); + EXPECT_EQ(op->get_shape(), (Shape{2, 3, 4, 5})); + EXPECT_EQ(op->get_autob().m_type, op::AutoBroadcastType::PDPD); + } +} + +TYPED_TEST_P(BitwiseOperator, static_shape_inference_4D_x_4D_pdpd_broadcast) { + { + auto lhs = std::make_shared(element::i32, Shape{8, 1, 6, 5}); + auto rhs = std::make_shared(element::i32, Shape{8, 1, 6, 5}); + + const auto autob = op::AutoBroadcastSpec(op::AutoBroadcastType::PDPD); + const auto op = this->make_op(lhs, rhs, autob); + + EXPECT_EQ(op->get_element_type(), element::i32); + EXPECT_EQ(op->get_shape(), (Shape{8, 1, 6, 5})); + EXPECT_EQ(op->get_autob().m_type, op::AutoBroadcastType::PDPD); + } + { + auto lhs = std::make_shared(element::i32, Shape{8, 7, 6, 5}); + auto rhs = std::make_shared(element::i32, Shape{8, 1, 6, 5}); + + const auto autob = op::AutoBroadcastSpec(op::AutoBroadcastType::PDPD); + const auto op = this->make_op(lhs, rhs, autob); + + EXPECT_EQ(op->get_element_type(), element::i32); + EXPECT_EQ(op->get_shape(), (Shape{8, 7, 6, 5})); + EXPECT_EQ(op->get_autob().m_type, op::AutoBroadcastType::PDPD); + } +} + +TYPED_TEST_P(BitwiseOperator, static_shape_inference_4D_x_3D_ax_default_pdpd_broadcast) { + auto lhs = std::make_shared(element::i32, Shape{8, 7, 6, 5}); + auto rhs = std::make_shared(element::i32, Shape{7, 1, 5}); + + const auto op = this->make_op(lhs, rhs, op::AutoBroadcastType::PDPD); + + EXPECT_EQ(op->get_element_type(), element::i32); + EXPECT_EQ(op->get_shape(), (Shape{8, 7, 6, 5})); + EXPECT_EQ(op->get_autob().m_type, op::AutoBroadcastType::PDPD); +} + +TYPED_TEST_P(BitwiseOperator, incompatible_element_types_f32) { + auto lhs = std::make_shared(element::f32, Shape{2, 2, 3, 3}); + auto rhs = 
std::make_shared(element::f32, Shape{2, 2, 3, 3}); + + OV_EXPECT_THROW(std::ignore = this->make_op(lhs, rhs), + NodeValidationFailure, + HasSubstr("The element type of the input tensor must be integer or boolean.")); +} + +TYPED_TEST_P(BitwiseOperator, shape_inference_1D_x_1D_incompatible) { + auto lhs = std::make_shared(element::i32, Shape{3}); + auto rhs = std::make_shared(element::i32, Shape{4}); + + ASSERT_THROW(const auto unused = this->make_op(lhs, rhs), NodeValidationFailure); +} + +TYPED_TEST_P(BitwiseOperator, shape_inference_3D_x_3D_incompatible) { + auto lhs = std::make_shared(element::i32, Shape{3, 5, 6}); + auto rhs = std::make_shared(element::i32, Shape{4, 10, 12}); + + ASSERT_THROW(const auto unused = this->make_op(lhs, rhs), NodeValidationFailure); +} + +TYPED_TEST_P(BitwiseOperator, shape_inference_5D_x_5D_incompatible) { + auto lhs = std::make_shared(element::i32, Shape{389, 112, 12}); + auto rhs = std::make_shared(element::i32, Shape{389, 112, 19}); + + ASSERT_THROW(const auto unused = this->make_op(lhs, rhs), NodeValidationFailure); +} + +TYPED_TEST_P(BitwiseOperator, shape_inference_axis_less_than_negative_1_pdpd_incompatible) { + auto lhs = std::make_shared(element::i32, Shape{2, 3, 4, 5}); + auto rhs = std::make_shared(element::i32, Shape{3, 1}); + + const auto autob = op::AutoBroadcastSpec(op::AutoBroadcastType::PDPD, -2); + + ASSERT_THROW(const auto unused = this->make_op(lhs, rhs, autob), NodeValidationFailure); +} + +TYPED_TEST_P(BitwiseOperator, shape_inference_dst_smaller_than_src_pdpd_broadcast) { + auto lhs = std::make_shared(element::i32, Shape{2, 3, 4, 1}); + auto rhs = std::make_shared(element::i32, Shape{2, 3, 4, 5}); + + const auto autob = op::AutoBroadcastSpec(op::AutoBroadcastType::PDPD); + + ASSERT_THROW(const auto unused = this->make_op(lhs, rhs, autob), NodeValidationFailure); +} + +TYPED_TEST_P(BitwiseOperator, fully_dynamic_shape_broadcast_numpy) { + auto param = std::make_shared(element::i32, PartialShape::dynamic()); + const auto autob = op::AutoBroadcastSpec(op::AutoBroadcastType::NUMPY); + + const auto op = this->make_op(param, param, autob); + EXPECT_EQ(op->get_element_type(), element::i32); + EXPECT_EQ(op->get_output_partial_shape(0), PartialShape::dynamic()); +} + +TYPED_TEST_P(BitwiseOperator, fully_dynamic_shape_broadcast_none) { + auto param = std::make_shared(element::i32, PartialShape::dynamic()); + const auto autob = op::AutoBroadcastSpec(op::AutoBroadcastType::NONE); + + const auto op = this->make_op(param, param, autob); + EXPECT_EQ(op->get_element_type(), element::i32); + EXPECT_EQ(op->get_output_partial_shape(0), PartialShape::dynamic()); +} + +TYPED_TEST_P(BitwiseOperator, fully_dynamic_shape_broadcast_pdpd) { + auto param = std::make_shared(element::i32, PartialShape::dynamic()); + const auto autob = op::AutoBroadcastSpec(op::AutoBroadcastType::PDPD); + + const auto op = this->make_op(param, param, autob); + EXPECT_EQ(op->get_element_type(), element::i32); + EXPECT_EQ(op->get_output_partial_shape(0), PartialShape::dynamic()); +} + +TYPED_TEST_P(BitwiseOperator, dynamic_shape_3D) { + Dimension dynamic = Dimension::dynamic(); + auto lhs = std::make_shared(element::i32, PartialShape{dynamic, dynamic, 6}); + auto rhs = std::make_shared(element::i32, PartialShape{dynamic, dynamic, 6}); + + const auto op = this->make_op(lhs, rhs); + + EXPECT_EQ(op->get_element_type(), element::i32); + EXPECT_EQ(op->get_output_partial_shape(0), (PartialShape{dynamic, dynamic, 6})); +} + +TYPED_TEST_P(BitwiseOperator, dynamic_shape_5D) { + Dimension 
dynamic = Dimension::dynamic(); + auto lhs = std::make_shared(element::i32, PartialShape{dynamic, 4, dynamic, dynamic, 6}); + auto rhs = std::make_shared(element::i32, PartialShape{dynamic, 4, dynamic, dynamic, 6}); + + const auto op = this->make_op(lhs, rhs); + + EXPECT_EQ(op->get_element_type(), element::i32); + EXPECT_EQ(op->get_output_partial_shape(0), (PartialShape{dynamic, 4, dynamic, dynamic, 6})); +} + +TYPED_TEST_P(BitwiseOperator, dynamic_shape_intervals_broadcast_none) { + auto lhs = std::make_shared(element::i32, PartialShape{{1, 3}, {2, 7}, {6, -1}, {-1, 6}, -1, 8}); + auto rhs = std::make_shared(element::i32, PartialShape{{1, 3}, {2, 7}, {6, -1}, {-1, 6}, -1, 8}); + + const auto op = this->make_op(lhs, rhs, op::AutoBroadcastType::NONE); + + EXPECT_EQ(op->get_element_type(), element::i32); + EXPECT_EQ(op->get_output_partial_shape(0), (PartialShape{{1, 3}, {2, 7}, {6, -1}, {-1, 6}, -1, 8})); +} + +TYPED_TEST_P(BitwiseOperator, dynamic_shape_intervals_equal_rank_broadcast_numpy) { + // Equal rank + auto lhs = std::make_shared(element::i32, PartialShape{{1, 3}, {1, 3}, {1, 3}, {4, 8}, -1, 1, -1, 1, 3}); + auto rhs = std::make_shared(element::i32, PartialShape{{1, 3}, {2, 7}, -1, 1, {1, 3}, {4, 8}, -1, 1, 3}); + + const auto op = this->make_op(lhs, rhs); + + EXPECT_EQ(op->get_element_type(), element::i32); + EXPECT_EQ(op->get_output_partial_shape(0), (PartialShape{{1, 3}, {2, 7}, -1, {4, 8}, -1, {4, 8}, -1, 1, 3})); +} + +TYPED_TEST_P(BitwiseOperator, dynamic_shape_intervals_a_rank_smaller_broadcast_numpy) { + // `lhs` rank smaller + auto lhs = std::make_shared(element::i32, PartialShape{{1, 3}, {4, 8}, -1, 1, -1, 1, 3}); + auto rhs = std::make_shared(element::i32, PartialShape{{1, 3}, {2, 7}, -1, 1, {1, 3}, {4, 8}, -1, 1, 3}); + + const auto op = this->make_op(lhs, rhs); + + EXPECT_EQ(op->get_element_type(), element::i32); + EXPECT_EQ(op->get_output_partial_shape(0), (PartialShape{{1, 3}, {2, 7}, -1, {4, 8}, -1, {4, 8}, -1, 1, 3})); +} + +TYPED_TEST_P(BitwiseOperator, dynamic_shape_intervals_b_rank_smaller_broadcast_numpy) { + // `rhs` rank smaller + auto lhs = std::make_shared(element::i32, PartialShape{{1, 3}, {2, 7}, -1, 1, {1, 3}, {4, 8}, -1, 1, 3}); + auto rhs = std::make_shared(element::i32, PartialShape{{1, 3}, {4, 8}, -1, 1, -1, 1, 3}); + + const auto op = this->make_op(lhs, rhs); + + EXPECT_EQ(op->get_element_type(), element::i32); + EXPECT_EQ(op->get_output_partial_shape(0), (PartialShape{{1, 3}, {2, 7}, -1, {4, 8}, -1, {4, 8}, -1, 1, 3})); +} + +TYPED_TEST_P(BitwiseOperator, dynamic_shape_intervals_broadcast_pdpd) { + { // Equal rank + auto lhs = std::make_shared(element::i32, PartialShape{{1, 3}, {2, 7}, {1, 6}, {6, -1}, -1, 8}); + auto rhs = std::make_shared(element::i32, PartialShape{{1, 3}, {2, 7}, 1, 1, -1, 8}); + + const auto op = this->make_op(lhs, rhs, op::AutoBroadcastType::PDPD); + + EXPECT_EQ(op->get_element_type(), element::i32); + EXPECT_EQ(op->get_output_partial_shape(0), (PartialShape{{1, 3}, {2, 7}, {1, 6}, {6, -1}, -1, 8})); + } + { // `lhs` rank smaller + auto lhs = + std::make_shared(element::i32, PartialShape{{1, 3}, {1, 3}, {1, 3}, {4, 8}, -1, 1, -1, 1, 3}); + auto rhs = + std::make_shared(element::i32, PartialShape{{1, 3}, {2, 7}, -1, 1, {1, 3}, {4, 8}, -1, 1, 3}); + + const auto autob = op::AutoBroadcastSpec(op::AutoBroadcastType::PDPD, 0); + const auto op = this->make_op(lhs, rhs, autob); + + EXPECT_EQ(op->get_element_type(), element::i32); + EXPECT_EQ(op->get_output_partial_shape(0), (PartialShape{{1, 3}, {2, 7}, -1, {4, 8}, -1, {4, 8}, -1, 1, 
3})); + } + { // `rhs` rank smaller + auto lhs = + std::make_shared(element::i32, PartialShape{{1, 3}, {2, 7}, -1, 1, {1, 3}, {4, 8}, -1, 1, 3}); + auto rhs = std::make_shared(element::i32, PartialShape{{1, 3}, {4, 8}, -1, 1, -1, 1, 3}); + + const auto op = this->make_op(lhs, rhs); + + EXPECT_EQ(op->get_element_type(), element::i32); + EXPECT_EQ(op->get_output_partial_shape(0), (PartialShape{{1, 3}, {2, 7}, -1, {4, 8}, -1, {4, 8}, -1, 1, 3})); + } +} + +TYPED_TEST_P(BitwiseOperator, labels_a_dynamic_mixed_dims_broadcast_numpy) { + // All dimensions of lhs have labels, rhs without labels + PartialShape pshape_lhs{{-1}, {3}, {1}, {2, 128}}; + PartialShape pshape_rhs{{-1}, {3}, {2, 224}, {1}}; + + PartialShape expected_shape = {-1, 3, {2, 224}, {2, 128}}; + + set_shape_labels(pshape_lhs, {10, 11, 12, 13}); + set_shape_labels(expected_shape, {10, 11, 0, 13}); + + auto lhs = std::make_shared(element::i32, pshape_lhs); + auto rhs = std::make_shared(element::i32, pshape_rhs); + + const auto op = this->make_op(lhs, rhs); + + const auto out_shape = op->get_output_partial_shape(0); + + EXPECT_EQ(out_shape, expected_shape); + EXPECT_EQ(get_shape_labels(out_shape), get_shape_labels(expected_shape)); +} + +TYPED_TEST_P(BitwiseOperator, labels_b_dynamic_mixed_dims_broadcast_numpy) { + // All dimensions of rhs have labels, lhs without labels + PartialShape pshape_lhs{{-1}, {3}, {1}, {2, 128}}; + PartialShape pshape_rhs{{-1}, {3}, {2, 224}, {1}}; + + PartialShape expected_shape = {-1, 3, {2, 224}, {2, 128}}; + + set_shape_labels(pshape_rhs, {20, 21, 22, 23}); + set_shape_labels(expected_shape, {20, 21, 22, 0}); + + auto lhs = std::make_shared(element::i32, pshape_lhs); + auto rhs = std::make_shared(element::i32, pshape_rhs); + + const auto op = this->make_op(lhs, rhs); + + const auto out_shape = op->get_output_partial_shape(0); + + EXPECT_EQ(out_shape, expected_shape); + EXPECT_EQ(get_shape_labels(out_shape), get_shape_labels(expected_shape)); +} + +TYPED_TEST_P(BitwiseOperator, labels_different_interval_mixed_dims_broadcast_numpy) { + // Both params have dimensions with different labels + PartialShape pshape_lhs{{-1}, {3}, {1}, {2, 128}}; + PartialShape pshape_rhs{{-1}, {3}, {2, 224}, {1}}; + + PartialShape expected_shape = {-1, 3, {2, 224}, {2, 128}}; + + set_shape_labels(pshape_lhs, {10, 11, 12, 13}); + set_shape_labels(pshape_rhs, {20, 21, 22, 23}); + set_shape_labels(expected_shape, {0, 21, 22, 13}); + + auto lhs = std::make_shared(element::i32, pshape_lhs); + auto rhs = std::make_shared(element::i32, pshape_rhs); + + const auto op = this->make_op(lhs, rhs); + + const auto out_shape = op->get_output_partial_shape(0); + + EXPECT_EQ(out_shape, expected_shape); + EXPECT_EQ(get_shape_labels(out_shape), get_shape_labels(expected_shape)); +} + +TYPED_TEST_P(BitwiseOperator, labels_different_interval_b_and_fully_dyn_a_broadcast_numpy) { + // Both params have dimension labels, output has label rhs + Dimension dim_0_lhs = {-1}; + Dimension dim_0_rhs = {2, 4}; + + DimensionTracker::set_label(dim_0_lhs, 10); + DimensionTracker::set_label(dim_0_rhs, 20); + + PartialShape pshape_lhs = {dim_0_lhs, 3, 224, 1}, pshape_rhs = {dim_0_rhs, 3, 1, 224}; + PartialShape expected_shape = {{2, 4}, 3, 224, 224}; + TensorLabel expected_labels{20, 0, 0, 0}; + + auto lhs = std::make_shared(element::i32, pshape_lhs); + auto rhs = std::make_shared(element::i32, pshape_rhs); + + const auto op = this->make_op(lhs, rhs); + + const auto out_shape = op->get_output_partial_shape(0); + + EXPECT_EQ(out_shape, expected_shape); + 
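    // Hedged reading of the expectation above and below: when the fully dynamic
    // batch dimension (label 10) is merged with the interval {2, 4} (label 20),
    // the more constrained dimension wins and its label (20) is propagated.
    // The remaining output dims either merge unlabelled dimensions or broadcast
    // a 1 against 3/224, so their labels stay 0.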
EXPECT_EQ(get_shape_labels(out_shape), expected_labels); +} + +TYPED_TEST_P(BitwiseOperator, labels_different_interval_a_and_fully_dyn_b_broadcast_numpy) { + // Both params have dimension labels, output has label lhs + Dimension dim_0_lhs = {2, 4}; + Dimension dim_0_rhs = {-1}; + + DimensionTracker::set_label(dim_0_lhs, 10); + DimensionTracker::set_label(dim_0_rhs, 20); + + PartialShape pshape_lhs = {dim_0_lhs, 3, 224, 1}, pshape_rhs = {dim_0_rhs, 3, 1, 224}; + PartialShape expected_shape = {{2, 4}, 3, 224, 224}; + TensorLabel expected_labels{10, 0, 0, 0}; + + auto lhs = std::make_shared(element::i32, pshape_lhs); + auto rhs = std::make_shared(element::i32, pshape_rhs); + + const auto op = this->make_op(lhs, rhs); + + const auto out_shape = op->get_output_partial_shape(0); + + EXPECT_EQ(out_shape, expected_shape); + EXPECT_EQ(get_shape_labels(out_shape), expected_labels); +} + +TYPED_TEST_P(BitwiseOperator, labels_equal_interval_dims_without_one_broadcast_numpy) { + // Both params have dynamic interval dimension the same labels + PartialShape pshape_lhs{{2, 4}, {8, 16}, {8, 16}, {8, 16}}; + PartialShape pshape_rhs{{2, 4}, {4, 12}, {10, 12}, {16, 24}}; + + PartialShape expected_shape = {{2, 4}, {8, 12}, {10, 12}, 16}; + + set_shape_labels(pshape_lhs, {10, 11, 12, 13}); + set_shape_labels(pshape_rhs, {10, 11, 12, 13}); + set_shape_labels(expected_shape, {10, 11, 12, 13}); + + auto lhs = std::make_shared(element::i32, pshape_lhs); + auto rhs = std::make_shared(element::i32, pshape_rhs); + + const auto op = this->make_op(lhs, rhs); + + const auto out_shape = op->get_output_partial_shape(0); + + EXPECT_EQ(out_shape, expected_shape); + EXPECT_EQ(get_shape_labels(out_shape), get_shape_labels(expected_shape)); +} + +TYPED_TEST_P(BitwiseOperator, labels_different_interval_dims_without_one_broadcast_numpy) { + // Both params have dynamic interval dimension different labels + PartialShape pshape_lhs{{2, 4}, {8, 16}, {8, 16}, {8, 16}}; + PartialShape pshape_rhs{{2, 4}, {4, 12}, {10, 12}, {16, 24}}; + + PartialShape expected_shape = {{2, 4}, {8, 12}, {10, 12}, 16}; + TensorLabel expected_labels{20, 21, 22, 23}; + + set_shape_labels(pshape_lhs, {10, 11, 12, 13}); + set_shape_labels(pshape_rhs, {20, 21, 22, 23}); + + auto lhs = std::make_shared(element::i32, pshape_lhs); + auto rhs = std::make_shared(element::i32, pshape_rhs); + + const auto op = this->make_op(lhs, rhs); + + const auto out_shape = op->get_output_partial_shape(0); + + EXPECT_EQ(out_shape, expected_shape); + EXPECT_EQ(get_shape_labels(out_shape), expected_labels); +} + +TYPED_TEST_P(BitwiseOperator, labels_different_interval_batch_without_one_equivalence_table_broadcast_numpy) { + // Both params have dynamic interval dimension different labels, use table of equivalence + auto table_of_equivalence = std::make_shared(); + DimensionTracker dim_tracker(table_of_equivalence); + + Dimension dim_0_lhs = {2, 4}; + Dimension dim_0_rhs = {2, 4}; + + dim_tracker.set_up_for_tracking(dim_0_lhs, 10); + dim_tracker.set_up_for_tracking(dim_0_rhs, 20); + + PartialShape pshape_lhs = {dim_0_lhs, 3, 224, 1}, pshape_rhs = {dim_0_rhs, 3, 1, 224}; + + auto lhs = std::make_shared(element::i32, pshape_lhs); + auto rhs = std::make_shared(element::i32, pshape_rhs); + + const auto op = this->make_op(lhs, rhs); + + const auto out_shape = op->get_output_partial_shape(0); + + PartialShape expected_shape = {{2, 4}, 3, 224, 224}; + TensorLabel expected_labels{20, 0, 0, 0}; + + auto eq_table = table_of_equivalence->get_equivalence_table(); + 
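    // Hedged note: because both batch dimensions were registered with the same
    // TableOfEquivalence, merging them during shape inference records labels
    // 10 and 20 as equivalent. The first two checks below read that table back;
    // the output shape itself keeps rhs's label (20).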
EXPECT_EQ(*eq_table[DimensionTracker::get_label(dim_0_lhs)], std::set({10, 20})); + EXPECT_EQ(*eq_table[DimensionTracker::get_label(dim_0_rhs)], std::set({10, 20})); + + EXPECT_EQ(out_shape, expected_shape); + EXPECT_EQ(get_shape_labels(out_shape), expected_labels); +} + +TYPED_TEST_P(BitwiseOperator, labels_different_fully_dynamic_batch_broadcast_numpy) { + // Both params have fully dynamic dimension and different labels + Dimension dim_0_lhs = {-1}; + Dimension dim_0_rhs = {-1}; + + DimensionTracker::set_label(dim_0_lhs, 10); + DimensionTracker::set_label(dim_0_rhs, 20); + + PartialShape pshape_lhs = {dim_0_lhs, 3, 224, 1}, pshape_rhs = {dim_0_rhs, 3, 1, 224}; + PartialShape expected_shape = {-1, 3, 224, 224}; + TensorLabel expected_labels{0, 0, 0, 0}; + + auto lhs = std::make_shared(element::i32, pshape_lhs); + auto rhs = std::make_shared(element::i32, pshape_rhs); + + const auto op = this->make_op(lhs, rhs); + + const auto out_shape = op->get_output_partial_shape(0); + + EXPECT_EQ(out_shape, expected_shape); + EXPECT_EQ(get_shape_labels(out_shape), expected_labels); +} + +TYPED_TEST_P(BitwiseOperator, labels_equal_fully_dynamic_batch_broadcast_numpy) { + // Both params have fully dynamic dimension and the same labels + Dimension dim_0_lhs = {-1}; + Dimension dim_0_rhs = {-1}; + + DimensionTracker::set_label(dim_0_lhs, 10); + DimensionTracker::set_label(dim_0_rhs, 10); + + PartialShape pshape_lhs = {dim_0_lhs, 3, 224, 1}, pshape_rhs = {dim_0_rhs, 3, 1, 224}; + PartialShape expected_shape = {-1, 3, 224, 224}; + TensorLabel expected_labels{10, 0, 0, 0}; + + auto lhs = std::make_shared(element::i32, pshape_lhs); + auto rhs = std::make_shared(element::i32, pshape_rhs); + + const auto op = this->make_op(lhs, rhs); + + const auto out_shape = op->get_output_partial_shape(0); + + EXPECT_EQ(out_shape, expected_shape); + EXPECT_EQ(get_shape_labels(out_shape), expected_labels); +} + +TYPED_TEST_P(BitwiseOperator, labels_dyn_batch_a_broadcast_numpy) { + Dimension dim_0_lhs = -1; + DimensionTracker::set_label(dim_0_lhs, 10); + PartialShape pshape_lhs = {dim_0_lhs, 3, 224, 224}, pshape_rhs = {1, 3, 1, 1}; + PartialShape expected_shape{dim_0_lhs, 3, 224, 224}; + + TensorLabel expected_labels{10, 0, 0, 0}; + + auto lhs = std::make_shared(element::i64, pshape_lhs); + auto rhs = std::make_shared(element::i64, pshape_rhs); + + const auto op = this->make_op(lhs, rhs); + + const auto out_shape = op->get_output_partial_shape(0); + + EXPECT_EQ(out_shape, expected_shape); + EXPECT_EQ(get_shape_labels(out_shape), expected_labels); +} + +TYPED_TEST_P(BitwiseOperator, labels_dyn_batch_b_broadcast_numpy) { + Dimension dim_0_rhs = -1; + DimensionTracker::set_label(dim_0_rhs, 10); + PartialShape pshape_rhs = {dim_0_rhs, 3, 224, 224}, pshape_lhs = {1, 3, 1, 1}; + PartialShape expected_shape{dim_0_rhs, 3, 224, 224}; + + TensorLabel expected_labels{10, 0, 0, 0}; + + auto lhs = std::make_shared(element::i64, pshape_lhs); + auto rhs = std::make_shared(element::i64, pshape_rhs); + + const auto op = this->make_op(lhs, rhs); + + const auto out_shape = op->get_output_partial_shape(0); + + EXPECT_EQ(out_shape, expected_shape); + EXPECT_EQ(get_shape_labels(out_shape), expected_labels); +} + +TYPED_TEST_P(BitwiseOperator, labels_dyn_batch_and_higher_rank_a_broadcast_numpy) { + Dimension dim_0_lhs = -1; + DimensionTracker::set_label(dim_0_lhs, 10); + + PartialShape pshape_lhs{dim_0_lhs, -1, -1, -1}; + PartialShape pshape_rhs{3, 1, 1}; + PartialShape expected_shape{dim_0_lhs, 3, -1, -1}; + + TensorLabel expected_labels{10, 0, 0, 
0}; + + auto lhs = std::make_shared(element::i64, pshape_lhs); + auto rhs = std::make_shared(element::i64, pshape_rhs); + + const auto op = this->make_op(lhs, rhs); + + const auto out_shape = op->get_output_partial_shape(0); + + EXPECT_EQ(out_shape, expected_shape); + EXPECT_EQ(get_shape_labels(out_shape), expected_labels); +} + +TYPED_TEST_P(BitwiseOperator, labels_dyn_batch_and_higher_rank_b_broadcast_numpy) { + Dimension dim_0_rhs = -1; + DimensionTracker::set_label(dim_0_rhs, 10); + + PartialShape pshape_lhs{3, 1, 1}; + PartialShape pshape_rhs{dim_0_rhs, -1, -1, -1}; + PartialShape expected_shape{dim_0_rhs, 3, -1, -1}; + + TensorLabel expected_labels{10, 0, 0, 0}; + + auto lhs = std::make_shared(element::i64, pshape_lhs); + auto rhs = std::make_shared(element::i64, pshape_rhs); + + const auto op = this->make_op(lhs, rhs); + + const auto out_shape = op->get_output_partial_shape(0); + + EXPECT_EQ(out_shape, expected_shape); + EXPECT_EQ(get_shape_labels(out_shape), expected_labels); +} + +TYPED_TEST_P(BitwiseOperator, labels_different_static_shape_broadcast_numpy) { + // Static shape, different labels + PartialShape pshape_lhs{{2}, {1}, {224}, {1}}; + PartialShape pshape_rhs{{2}, {1}, {1}, {128}}; + PartialShape expected_shape{2, 1, 224, 128}; + + // Different labels + set_shape_labels(pshape_lhs, {10, 11, 12, 13}); + set_shape_labels(pshape_rhs, {20, 21, 22, 23}); + set_shape_labels(expected_shape, {20, 21, 12, 23}); + + auto lhs = std::make_shared(element::i32, pshape_lhs); + auto rhs = std::make_shared(element::i32, pshape_rhs); + const auto op = this->make_op(lhs, rhs, op::AutoBroadcastType::NUMPY); + + const auto out_shape = op->get_output_partial_shape(0); + + EXPECT_EQ(out_shape, expected_shape); + EXPECT_EQ(get_shape_labels(out_shape), get_shape_labels(expected_shape)); +} + +TYPED_TEST_P(BitwiseOperator, labels_equal_static_shape_broadcast_numpy) { + // Static shape, the same labels + PartialShape pshape_lhs{2, 1, 224, 1}; + PartialShape pshape_rhs{2, 1, 1, 128}; + PartialShape expected_shape{2, 1, 224, 128}; + + // Equal labels + set_shape_labels(pshape_lhs, {30, 31, 32, 33}); + set_shape_labels(pshape_rhs, {30, 31, 32, 33}); + set_shape_labels(expected_shape, {30, 31, 32, 33}); + + auto lhs = std::make_shared(element::i32, pshape_lhs); + auto rhs = std::make_shared(element::i32, pshape_rhs); + const auto op = this->make_op(lhs, rhs, op::AutoBroadcastType::NUMPY); + + const auto out_shape = op->get_output_partial_shape(0); + + EXPECT_EQ(out_shape, expected_shape); + EXPECT_EQ(get_shape_labels(out_shape), get_shape_labels(expected_shape)); +} + +TYPED_TEST_P(BitwiseOperator, labels_different_static_shape_broadcast_none) { + // Static shape + PartialShape pshape_lhs{2, 3, 224, 128}; + PartialShape pshape_rhs{2, 3, 224, 128}; + PartialShape expected_shape{2, 3, 224, 128}; + + // Different labels + set_shape_labels(pshape_lhs, {10, 11, 12, 13}); + set_shape_labels(pshape_rhs, {20, 21, 22, 23}); + set_shape_labels(expected_shape, {20, 21, 22, 23}); + + auto lhs = std::make_shared(element::i32, pshape_lhs); + auto rhs = std::make_shared(element::i32, pshape_rhs); + const auto op = this->make_op(lhs, rhs, op::AutoBroadcastType::NONE); + + auto out_shape = op->get_output_partial_shape(0); + + EXPECT_EQ(out_shape, expected_shape); + EXPECT_EQ(get_shape_labels(out_shape), get_shape_labels(expected_shape)); +} + +TYPED_TEST_P(BitwiseOperator, labels_equal_static_shape_broadcast_none) { + // Static shape + PartialShape pshape_lhs{2, 3, 224, 128}; + PartialShape pshape_rhs{2, 3, 224, 128}; + 
PartialShape expected_shape{2, 3, 224, 128}; + + // Equal labels + set_shape_labels(pshape_lhs, {30, 31, 32, 33}); + set_shape_labels(pshape_rhs, {30, 31, 32, 33}); + set_shape_labels(expected_shape, {30, 31, 32, 33}); + + auto lhs = std::make_shared(element::i32, pshape_lhs); + auto rhs = std::make_shared(element::i32, pshape_rhs); + const auto op = this->make_op(lhs, rhs, op::AutoBroadcastType::NONE); + + auto out_shape = op->get_output_partial_shape(0); + + EXPECT_EQ(out_shape, expected_shape); + EXPECT_EQ(get_shape_labels(out_shape), get_shape_labels(expected_shape)); +} + +TYPED_TEST_P(BitwiseOperator, labels_different_dynamic_shape_broadcast_none) { + // Dynamic shape + PartialShape pshape_lhs{{-1}, {3}, {2, 224}, {1, 128}}; + PartialShape pshape_rhs{{-1}, {3}, {2, 224}, {1, 128}}; + PartialShape expected_shape{-1, 3, {2, 224}, {1, 128}}; + + // Different labels + set_shape_labels(pshape_lhs, {10, 11, 12, 13}); + set_shape_labels(pshape_rhs, {20, 21, 22, 23}); + set_shape_labels(expected_shape, {20, 21, 22, 23}); + + auto lhs = std::make_shared(element::i32, pshape_lhs); + auto rhs = std::make_shared(element::i32, pshape_rhs); + const auto op = this->make_op(lhs, rhs, op::AutoBroadcastType::NONE); + + const auto out_shape = op->get_output_partial_shape(0); + + EXPECT_EQ(out_shape, expected_shape); + EXPECT_EQ(get_shape_labels(out_shape), get_shape_labels(expected_shape)); +} + +TYPED_TEST_P(BitwiseOperator, labels_equal_dynamic_shape_broadcast_none) { + // Dynamic shape + PartialShape pshape_lhs{{-1}, {3}, {2, 224}, {1, 128}}; + PartialShape pshape_rhs{{-1}, {3}, {2, 224}, {1, 128}}; + PartialShape expected_shape{-1, 3, {2, 224}, {1, 128}}; + + // Equal labels + set_shape_labels(pshape_lhs, {30, 31, 32, 33}); + set_shape_labels(pshape_rhs, {30, 31, 32, 33}); + set_shape_labels(expected_shape, {30, 31, 32, 33}); + + auto lhs = std::make_shared(element::i32, pshape_lhs); + auto rhs = std::make_shared(element::i32, pshape_rhs); + const auto op = this->make_op(lhs, rhs, op::AutoBroadcastType::NONE); + + const auto out_shape = op->get_output_partial_shape(0); + + EXPECT_EQ(out_shape, expected_shape); + EXPECT_EQ(get_shape_labels(out_shape), get_shape_labels(expected_shape)); +} + +REGISTER_TYPED_TEST_SUITE_P(BitwiseOperator, + default_constructor_integer, + default_constructor_boolean, + + // Static shapes + shape_inference_2D, + shape_inference_4D, + default_autobroadcast, + no_autobroadcast, + shape_inference_4D_x_scalar_numpy_broadcast, + shape_inference_4D_x_1D_numpy_broadcast, + shape_inference_2D_x_4D_numpy_broadcast, + shape_inference_3D_x_4D_numpy_broadcast, + shape_inference_4D_x_3D_numpy_broadcast, + static_shape_pdpd_doc_examples, + static_shape_inference_4D_x_4D_pdpd_broadcast, + static_shape_inference_4D_x_3D_ax_default_pdpd_broadcast, + incompatible_element_types_f32, + shape_inference_1D_x_1D_incompatible, + shape_inference_3D_x_3D_incompatible, + shape_inference_5D_x_5D_incompatible, + shape_inference_axis_less_than_negative_1_pdpd_incompatible, + shape_inference_dst_smaller_than_src_pdpd_broadcast, + + // Dynamic shapes + fully_dynamic_shape_broadcast_numpy, + fully_dynamic_shape_broadcast_none, + fully_dynamic_shape_broadcast_pdpd, + dynamic_shape_3D, + dynamic_shape_5D, + dynamic_shape_intervals_broadcast_none, + dynamic_shape_intervals_equal_rank_broadcast_numpy, + dynamic_shape_intervals_a_rank_smaller_broadcast_numpy, + dynamic_shape_intervals_b_rank_smaller_broadcast_numpy, + dynamic_shape_intervals_broadcast_pdpd, + + // Dimension labels (static and dynamic) + 
labels_a_dynamic_mixed_dims_broadcast_numpy, + labels_b_dynamic_mixed_dims_broadcast_numpy, + labels_different_interval_mixed_dims_broadcast_numpy, + labels_different_interval_b_and_fully_dyn_a_broadcast_numpy, + labels_different_interval_a_and_fully_dyn_b_broadcast_numpy, + labels_equal_interval_dims_without_one_broadcast_numpy, + labels_different_interval_dims_without_one_broadcast_numpy, + labels_different_interval_batch_without_one_equivalence_table_broadcast_numpy, + labels_different_fully_dynamic_batch_broadcast_numpy, + labels_equal_fully_dynamic_batch_broadcast_numpy, + labels_dyn_batch_a_broadcast_numpy, + labels_dyn_batch_b_broadcast_numpy, + labels_dyn_batch_and_higher_rank_a_broadcast_numpy, + labels_dyn_batch_and_higher_rank_b_broadcast_numpy, + labels_different_static_shape_broadcast_numpy, + labels_equal_static_shape_broadcast_numpy, + labels_different_static_shape_broadcast_none, + labels_equal_static_shape_broadcast_none, + labels_different_dynamic_shape_broadcast_none, + labels_equal_dynamic_shape_broadcast_none); diff --git a/src/core/tests/type_prop/bitwise_or.cpp b/src/core/tests/type_prop/bitwise_or.cpp new file mode 100644 index 00000000000000..bb41322f1dec49 --- /dev/null +++ b/src/core/tests/type_prop/bitwise_or.cpp @@ -0,0 +1,11 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/op/bitwise_or.hpp" + +#include "bitwise_ops.hpp" + +using Type = ::testing::Types; + +INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_bitwise_or, BitwiseOperator, Type); diff --git a/src/core/tests/type_prop/bitwise_xor.cpp b/src/core/tests/type_prop/bitwise_xor.cpp new file mode 100644 index 00000000000000..00a1a299573882 --- /dev/null +++ b/src/core/tests/type_prop/bitwise_xor.cpp @@ -0,0 +1,11 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/op/bitwise_xor.hpp" + +#include "bitwise_ops.hpp" + +using Type = ::testing::Types; + +INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_bitwise_xor, BitwiseOperator, Type); diff --git a/src/core/tests/visitors/op/bitwise_and.cpp b/src/core/tests/visitors/op/bitwise_and.cpp new file mode 100644 index 00000000000000..35c29762061283 --- /dev/null +++ b/src/core/tests/visitors/op/bitwise_and.cpp @@ -0,0 +1,11 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/op/bitwise_and.hpp" + +#include "binary_ops.hpp" + +using Type = ::testing::Types>; + +INSTANTIATE_TYPED_TEST_SUITE_P(visitor_with_auto_broadcast, BinaryOperatorVisitor, Type, BinaryOperatorTypeName); diff --git a/src/core/tests/visitors/op/bitwise_or.cpp b/src/core/tests/visitors/op/bitwise_or.cpp new file mode 100644 index 00000000000000..ebcff6e5e932b0 --- /dev/null +++ b/src/core/tests/visitors/op/bitwise_or.cpp @@ -0,0 +1,11 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/op/bitwise_or.hpp" + +#include "binary_ops.hpp" + +using Type = ::testing::Types>; + +INSTANTIATE_TYPED_TEST_SUITE_P(visitor_with_auto_broadcast, BinaryOperatorVisitor, Type, BinaryOperatorTypeName); diff --git a/src/core/tests/visitors/op/bitwise_xor.cpp b/src/core/tests/visitors/op/bitwise_xor.cpp new file mode 100644 index 00000000000000..ef36fc98ab707d --- /dev/null +++ b/src/core/tests/visitors/op/bitwise_xor.cpp @@ -0,0 +1,11 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/op/bitwise_xor.hpp" + +#include 
"binary_ops.hpp" + +using Type = ::testing::Types>; + +INSTANTIATE_TYPED_TEST_SUITE_P(visitor_with_auto_broadcast, BinaryOperatorVisitor, Type, BinaryOperatorTypeName); diff --git a/src/plugins/template/backend/ops/bitwise_and.cpp b/src/plugins/template/backend/ops/bitwise_and.cpp new file mode 100644 index 00000000000000..d0e5d05b11360d --- /dev/null +++ b/src/plugins/template/backend/ops/bitwise_and.cpp @@ -0,0 +1,56 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/op/bitwise_and.hpp" + +#include "evaluate_node.hpp" +#include "openvino/reference/bitwise_and.hpp" +#include "utils.hpp" + +using namespace ov; + +template +bool evaluate(const std::shared_ptr& node, + ov::TensorVector& outputs, + const ov::TensorVector& inputs) { + OPENVINO_ASSERT(inputs.size() == 2); + OPENVINO_ASSERT(outputs.size() == 1); + outputs[0].set_shape(infer_broadcast_shape(node.get(), inputs[0].get_shape(), inputs[1].get_shape())); + using T = typename ov::element_type_traits::value_type; + ov::reference::bitwise_and(inputs[0].data(), + inputs[1].data(), + outputs[0].data(), + inputs[0].get_shape(), + inputs[1].get_shape(), + node->get_autob()); + return true; +} + +template <> +bool evaluate_node(std::shared_ptr node, + ov::TensorVector& outputs, + const ov::TensorVector& inputs) { + switch (node->get_input_element_type(0)) { + case element::boolean: + return evaluate(as_type_ptr(node), outputs, inputs); + case element::u8: + return evaluate(as_type_ptr(node), outputs, inputs); + case element::i8: + return evaluate(as_type_ptr(node), outputs, inputs); + case element::u16: + return evaluate(as_type_ptr(node), outputs, inputs); + case element::i16: + return evaluate(as_type_ptr(node), outputs, inputs); + case element::u32: + return evaluate(as_type_ptr(node), outputs, inputs); + case element::i32: + return evaluate(as_type_ptr(node), outputs, inputs); + case element::u64: + return evaluate(as_type_ptr(node), outputs, inputs); + case element::i64: + return evaluate(as_type_ptr(node), outputs, inputs); + default: + OPENVINO_THROW("Unhandled data type ", node->get_element_type().get_type_name(), "in evaluate_node()"); + } +} diff --git a/src/plugins/template/backend/ops/bitwise_not.cpp b/src/plugins/template/backend/ops/bitwise_not.cpp index 91a73fa0dd1c3f..83f1d77750eeec 100644 --- a/src/plugins/template/backend/ops/bitwise_not.cpp +++ b/src/plugins/template/backend/ops/bitwise_not.cpp @@ -19,7 +19,7 @@ bool evaluate(const std::shared_ptr& node, outputs[0].set_shape(inputs[0].get_shape()); using T = typename ov::element_type_traits::value_type; - ov::reference::bitwise_not(inputs[0].data(), outputs[0].data(), shape_size(inputs[0].get_shape())); + ov::reference::bitwise_not(inputs[0].data(), outputs[0].data(), shape_size(inputs[0].get_shape())); return true; } diff --git a/src/plugins/template/backend/ops/bitwise_or.cpp b/src/plugins/template/backend/ops/bitwise_or.cpp new file mode 100644 index 00000000000000..fe163edeccb3a1 --- /dev/null +++ b/src/plugins/template/backend/ops/bitwise_or.cpp @@ -0,0 +1,56 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/op/bitwise_or.hpp" + +#include "evaluate_node.hpp" +#include "openvino/reference/bitwise_or.hpp" +#include "utils.hpp" + +using namespace ov; + +template +bool evaluate(const std::shared_ptr& node, + ov::TensorVector& outputs, + const ov::TensorVector& inputs) { + OPENVINO_ASSERT(inputs.size() == 2); + OPENVINO_ASSERT(outputs.size() == 
1); + outputs[0].set_shape(infer_broadcast_shape(node.get(), inputs[0].get_shape(), inputs[1].get_shape())); + using T = typename ov::element_type_traits::value_type; + ov::reference::bitwise_or(inputs[0].data(), + inputs[1].data(), + outputs[0].data(), + inputs[0].get_shape(), + inputs[1].get_shape(), + node->get_autob()); + return true; +} + +template <> +bool evaluate_node(std::shared_ptr node, + ov::TensorVector& outputs, + const ov::TensorVector& inputs) { + switch (node->get_input_element_type(0)) { + case element::boolean: + return evaluate(as_type_ptr(node), outputs, inputs); + case element::u8: + return evaluate(as_type_ptr(node), outputs, inputs); + case element::i8: + return evaluate(as_type_ptr(node), outputs, inputs); + case element::u16: + return evaluate(as_type_ptr(node), outputs, inputs); + case element::i16: + return evaluate(as_type_ptr(node), outputs, inputs); + case element::u32: + return evaluate(as_type_ptr(node), outputs, inputs); + case element::i32: + return evaluate(as_type_ptr(node), outputs, inputs); + case element::u64: + return evaluate(as_type_ptr(node), outputs, inputs); + case element::i64: + return evaluate(as_type_ptr(node), outputs, inputs); + default: + OPENVINO_THROW("Unhandled data type ", node->get_element_type().get_type_name(), "in evaluate_node()"); + } +} diff --git a/src/plugins/template/backend/ops/bitwise_xor.cpp b/src/plugins/template/backend/ops/bitwise_xor.cpp new file mode 100644 index 00000000000000..3fa98775a05e18 --- /dev/null +++ b/src/plugins/template/backend/ops/bitwise_xor.cpp @@ -0,0 +1,56 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/op/bitwise_xor.hpp" + +#include "evaluate_node.hpp" +#include "openvino/reference/bitwise_xor.hpp" +#include "utils.hpp" + +using namespace ov; + +template +bool evaluate(const std::shared_ptr& node, + ov::TensorVector& outputs, + const ov::TensorVector& inputs) { + OPENVINO_ASSERT(inputs.size() == 2); + OPENVINO_ASSERT(outputs.size() == 1); + outputs[0].set_shape(infer_broadcast_shape(node.get(), inputs[0].get_shape(), inputs[1].get_shape())); + using T = typename ov::element_type_traits::value_type; + ov::reference::bitwise_xor(inputs[0].data(), + inputs[1].data(), + outputs[0].data(), + inputs[0].get_shape(), + inputs[1].get_shape(), + node->get_autob()); + return true; +} + +template <> +bool evaluate_node(std::shared_ptr node, + ov::TensorVector& outputs, + const ov::TensorVector& inputs) { + switch (node->get_input_element_type(0)) { + case element::boolean: + return evaluate(as_type_ptr(node), outputs, inputs); + case element::u8: + return evaluate(as_type_ptr(node), outputs, inputs); + case element::i8: + return evaluate(as_type_ptr(node), outputs, inputs); + case element::u16: + return evaluate(as_type_ptr(node), outputs, inputs); + case element::i16: + return evaluate(as_type_ptr(node), outputs, inputs); + case element::u32: + return evaluate(as_type_ptr(node), outputs, inputs); + case element::i32: + return evaluate(as_type_ptr(node), outputs, inputs); + case element::u64: + return evaluate(as_type_ptr(node), outputs, inputs); + case element::i64: + return evaluate(as_type_ptr(node), outputs, inputs); + default: + OPENVINO_THROW("Unhandled data type ", node->get_element_type().get_type_name(), "in evaluate_node()"); + } +} diff --git a/src/plugins/template/backend/ops/ops_evaluates.hpp b/src/plugins/template/backend/ops/ops_evaluates.hpp index 0b860fcd4b28c6..040fd8334a3527 100644 --- 
a/src/plugins/template/backend/ops/ops_evaluates.hpp +++ b/src/plugins/template/backend/ops/ops_evaluates.hpp @@ -445,10 +445,22 @@ extern template bool evaluate_node(std::shared_ ov::TensorVector& outputs, const ov::TensorVector& inputs); +extern template bool evaluate_node(std::shared_ptr node, + ov::TensorVector& outputs, + const ov::TensorVector& inputs); + extern template bool evaluate_node(std::shared_ptr node, ov::TensorVector& outputs, const ov::TensorVector& inputs); +extern template bool evaluate_node(std::shared_ptr node, + ov::TensorVector& outputs, + const ov::TensorVector& inputs); + +extern template bool evaluate_node(std::shared_ptr node, + ov::TensorVector& outputs, + const ov::TensorVector& inputs); + extern template bool evaluate_node(std::shared_ptr node, ov::TensorVector& outputs, const ov::TensorVector& inputs); diff --git a/src/plugins/template/backend/opset_int_tbl.hpp b/src/plugins/template/backend/opset_int_tbl.hpp index 5ce73cbfa561c4..725fdd0621fc18 100644 --- a/src/plugins/template/backend/opset_int_tbl.hpp +++ b/src/plugins/template/backend/opset_int_tbl.hpp @@ -150,7 +150,10 @@ _OPENVINO_OP_REG(Interpolate, op::v11) _OPENVINO_OP_REG(GroupNormalization, ov::op::v12) +_OPENVINO_OP_REG(BitwiseAnd, ov::op::v13) _OPENVINO_OP_REG(BitwiseNot, ov::op::v13) +_OPENVINO_OP_REG(BitwiseOr, ov::op::v13) +_OPENVINO_OP_REG(BitwiseXor, ov::op::v13) _OPENVINO_OP_REG(NMSRotated, ov::op::v13) _OPENVINO_OP_REG(AUGRUCell, ov::op::internal) diff --git a/src/plugins/template/tests/functional/op_reference/bitwise.hpp b/src/plugins/template/tests/functional/op_reference/bitwise.hpp index 0e8ff7af32ce1b..8feb41378eb2f7 100644 --- a/src/plugins/template/tests/functional/op_reference/bitwise.hpp +++ b/src/plugins/template/tests/functional/op_reference/bitwise.hpp @@ -5,14 +5,17 @@ #include #include "base_reference_test.hpp" +#include "openvino/op/bitwise_and.hpp" #include "openvino/op/bitwise_not.hpp" +#include "openvino/op/bitwise_or.hpp" +#include "openvino/op/bitwise_xor.hpp" using namespace ov; namespace reference_tests { namespace BitwiseOpsRefTestDefinitions { -enum BitwiseTypes { BITWISE_NOT }; +enum BitwiseTypes { BITWISE_AND, BITWISE_NOT, BITWISE_OR, BITWISE_XOR }; struct RefBitwiseParams { BitwiseTypes opType; @@ -30,7 +33,7 @@ class ReferenceBitwiseLayerTest : public testing::TestWithParam CreateFunction(BitwiseTypes op_type, - const std::vector& inputs) { + static std::shared_ptr create_model(BitwiseTypes op_type, + const std::vector& inputs) { ov::ParameterVector params_vec; for (auto& input : inputs) { params_vec.push_back(std::make_shared(input.type, input.shape)); } - std::shared_ptr bitwise_op; + std::shared_ptr bitwise_op = nullptr; switch (op_type) { case BitwiseTypes::BITWISE_NOT: { bitwise_op = std::make_shared(params_vec[0]); break; } - default: { - throw std::runtime_error("Incorrect type of Bitwise operation"); + case BitwiseTypes::BITWISE_AND: { + bitwise_op = std::make_shared(params_vec[0], params_vec[1]); + break; + } + case BitwiseTypes::BITWISE_OR: { + bitwise_op = std::make_shared(params_vec[0], params_vec[1]); + break; + } + case BitwiseTypes::BITWISE_XOR: { + bitwise_op = std::make_shared(params_vec[0], params_vec[1]); + break; } } + EXPECT_TRUE(bitwise_op) << "Incorrect type of Bitwise operation"; return std::make_shared(ov::NodeVector{bitwise_op}, ov::ParameterVector{params_vec}); } }; diff --git a/src/plugins/template/tests/functional/op_reference/bitwise_and.cpp b/src/plugins/template/tests/functional/op_reference/bitwise_and.cpp new file mode 100644 
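Most of the hardcoded expectations in the new AND reference test below reduce to a few
identities, checked here in a standalone sketch (not part of the test suite): AND against
an all-ones mask is the identity, and AND against zero or a disjoint mask drops bits.

#include <cstdint>

// Compile-time spot checks of representative rows from the tables below.
static_assert((uint8_t{0xff} & uint8_t{0xbf}) == 0xbf, "all-ones mask is the identity");
static_assert((uint16_t{0x0} & uint16_t{0x3fff}) == 0x0, "zero operand yields zero");
static_assert((UINT64_C(0xc000000000000000) & UINT64_C(0xffffffffffffffff)) ==
                  UINT64_C(0xc000000000000000),
              "only bits common to both operands survive");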
index 00000000000000..a656f61826a675 --- /dev/null +++ b/src/plugins/template/tests/functional/op_reference/bitwise_and.cpp @@ -0,0 +1,359 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/op/bitwise_and.hpp" + +#include + +#include "bitwise.hpp" + +using namespace ov; + +namespace reference_tests { +namespace BitwiseOpsRefTestDefinitions { +namespace { + +std::vector generateBitwiseParams() { + std::vector bitwiseParams{ + Builder{} + .opType(BitwiseTypes::BITWISE_AND) + .inputs({{{2, 2}, element::boolean, std::vector{true, false, true, false}}, + {{2, 2}, element::boolean, std::vector{true, false, false, true}}}) + .expected({{2, 2}, element::boolean, std::vector{true, false, false, false}}), + Builder{} + .opType(BitwiseTypes::BITWISE_AND) + .inputs( + {{{3, 5}, + element::u8, + std::vector< + uint8_t>{0xff, 0xff, 0xff, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x1, 0x1, 0x1, 0x8, 0x8, 0xbf}}, + {{3, 5}, + element::u8, + std::vector< + uint8_t>{0x0, 0x1, 0x8, 0xbf, 0x3f, 0x1, 0x8, 0xbf, 0x3f, 0x8, 0xbf, 0x3f, 0xbf, 0x3f, 0x3f}}}) + .expected( + {{3, 5}, + element::u8, + std::vector{0x0, 0x1, 0x8, 0xbf, 0x3f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x1, 0x8, 0x8, 0x3f}}), + Builder{} + .opType(BitwiseTypes::BITWISE_AND) + .inputs({{{3, 5}, + element::u16, + std::vector{0xffff, + 0xffff, + 0xffff, + 0xffff, + 0xffff, + 0x0, + 0x0, + 0x0, + 0x0, + 0x1, + 0x1, + 0x1, + 0x8, + 0x8, + 0xbfff}}, + {{3, 5}, + element::u16, + std::vector{0x0, + 0x1, + 0x8, + 0xbfff, + 0x3fff, + 0x1, + 0x8, + 0xbfff, + 0x3fff, + 0x8, + 0xbfff, + 0x3fff, + 0xbfff, + 0x3fff, + 0x3fff}}}) + .expected( + {{3, 5}, + element::u16, + std::vector< + uint16_t>{0x0, 0x1, 0x8, 0xbfff, 0x3fff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x1, 0x8, 0x8, 0x3fff}}), + Builder{} + .opType(BitwiseTypes::BITWISE_AND) + .inputs({{{3, 5}, + element::u32, + std::vector{0xffffffff, + 0xffffffff, + 0xffffffff, + 0xffffffff, + 0xffffffff, + 0x0, + 0x0, + 0x0, + 0x0, + 0x1, + 0x1, + 0x1, + 0x8, + 0x8, + 0xbfffffff}}, + {{3, 5}, + element::u32, + std::vector{0x0, + 0x1, + 0x8, + 0xbfffffff, + 0x3fffffff, + 0x1, + 0x8, + 0xbfffffff, + 0x3fffffff, + 0x8, + 0xbfffffff, + 0x3fffffff, + 0xbfffffff, + 0x3fffffff, + 0x3fffffff}}}) + .expected({{3, 5}, + element::u32, + std::vector{0x0, + 0x1, + 0x8, + 0xbfffffff, + 0x3fffffff, + 0x0, + 0x0, + 0x0, + 0x0, + 0x0, + 0x1, + 0x1, + 0x8, + 0x8, + 0x3fffffff}}), + Builder{} + .opType(BitwiseTypes::BITWISE_AND) + .inputs({{{3, 5}, + element::u64, + std::vector{0x0, + 0x0, + 0x0, + 0x0, + 0x0, + 0x1, + 0x1, + 0x1, + 0x1, + 0x4000000000000000, + 0x4000000000000000, + 0x4000000000000000, + 0xc000000000000000, + 0xc000000000000000, + 0xffffffffffffffff}}, + {{3, 5}, + element::u64, + std::vector{0x1, + 0x4000000000000000, + 0xc000000000000000, + 0xffffffffffffffff, + 0x8, + 0x4000000000000000, + 0xc000000000000000, + 0xffffffffffffffff, + 0x8, + 0xc000000000000000, + 0xffffffffffffffff, + 0x8, + 0xffffffffffffffff, + 0x8, + 0x8}}}) + .expected({{3, 5}, + element::u64, + std::vector{0x0, + 0x0, + 0x0, + 0x0, + 0x0, + 0x0, + 0x0, + 0x1, + 0x0, + 0x4000000000000000, + 0x4000000000000000, + 0x0, + 0xc000000000000000, + 0x0, + 0x8}}), + Builder{} + .opType(BitwiseTypes::BITWISE_AND) + .inputs( + {{{3, 5}, + element::i8, + std::vector< + uint8_t>{0xff, 0xff, 0xff, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x1, 0x1, 0x1, 0x8, 0x8, 0xbf}}, + {{3, 5}, + element::i8, + std::vector< + uint8_t>{0x0, 0x1, 0x8, 0xbf, 0x3f, 0x1, 0x8, 0xbf, 0x3f, 0x8, 0xbf, 0x3f, 0xbf, 0x3f, 0x3f}}}) + .expected( + {{3, 
5}, + element::i8, + std::vector{0x0, 0x1, 0x8, 0xbf, 0x3f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x1, 0x8, 0x8, 0x3f}}), + Builder{} + .opType(BitwiseTypes::BITWISE_AND) + .inputs({{{3, 5}, + element::i16, + std::vector{0xffff, + 0xffff, + 0xffff, + 0xffff, + 0xffff, + 0x0, + 0x0, + 0x0, + 0x0, + 0x1, + 0x1, + 0x1, + 0x8, + 0x8, + 0xbfff}}, + {{3, 5}, + element::i16, + std::vector{0x0, + 0x1, + 0x8, + 0xbfff, + 0x3fff, + 0x1, + 0x8, + 0xbfff, + 0x3fff, + 0x8, + 0xbfff, + 0x3fff, + 0xbfff, + 0x3fff, + 0x3fff}}}) + .expected( + {{3, 5}, + element::i16, + std::vector< + uint16_t>{0x0, 0x1, 0x8, 0xbfff, 0x3fff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x1, 0x8, 0x8, 0x3fff}}), + Builder{} + .opType(BitwiseTypes::BITWISE_AND) + .inputs({{{3, 5}, + element::i32, + std::vector{0xffffffff, + 0xffffffff, + 0xffffffff, + 0xffffffff, + 0xffffffff, + 0x0, + 0x0, + 0x0, + 0x0, + 0x1, + 0x1, + 0x1, + 0x8, + 0x8, + 0xbfffffff}}, + {{3, 5}, + element::i32, + std::vector{0x0, + 0x1, + 0x8, + 0xbfffffff, + 0x3fffffff, + 0x1, + 0x8, + 0xbfffffff, + 0x3fffffff, + 0x8, + 0xbfffffff, + 0x3fffffff, + 0xbfffffff, + 0x3fffffff, + 0x3fffffff}}}) + .expected({{3, 5}, + element::i32, + std::vector{0x0, + 0x1, + 0x8, + 0xbfffffff, + 0x3fffffff, + 0x0, + 0x0, + 0x0, + 0x0, + 0x0, + 0x1, + 0x1, + 0x8, + 0x8, + 0x3fffffff}}), + Builder{} + .opType(BitwiseTypes::BITWISE_AND) + .inputs({{{3, 5}, + element::i64, + std::vector{0x0, + 0x0, + 0x0, + 0x0, + 0x0, + 0x1, + 0x1, + 0x1, + 0x1, + 0x4000000000000000, + 0x4000000000000000, + 0x4000000000000000, + 0xc000000000000000, + 0xc000000000000000, + 0xffffffffffffffff}}, + {{3, 5}, + element::i64, + std::vector{0x1, + 0x4000000000000000, + 0xc000000000000000, + 0xffffffffffffffff, + 0x8, + 0x4000000000000000, + 0xc000000000000000, + 0xffffffffffffffff, + 0x8, + 0xc000000000000000, + 0xffffffffffffffff, + 0x8, + 0xffffffffffffffff, + 0x8, + 0x8}}}) + .expected({{3, 5}, + element::i64, + std::vector{0x0, + 0x0, + 0x0, + 0x0, + 0x0, + 0x0, + 0x0, + 0x1, + 0x0, + 0x4000000000000000, + 0x4000000000000000, + 0x0, + 0xc000000000000000, + 0x0, + 0x8}}), + }; + return bitwiseParams; +} + +INSTANTIATE_TEST_SUITE_P(smoke_BitwiseAnd_With_Hardcoded_Refs, + ReferenceBitwiseLayerTest, + ::testing::ValuesIn(generateBitwiseParams()), + ReferenceBitwiseLayerTest::getTestCaseName); + +} // namespace +} // namespace BitwiseOpsRefTestDefinitions +} // namespace reference_tests diff --git a/src/plugins/template/tests/functional/op_reference/bitwise_or.cpp b/src/plugins/template/tests/functional/op_reference/bitwise_or.cpp new file mode 100644 index 00000000000000..418a2a293f41ba --- /dev/null +++ b/src/plugins/template/tests/functional/op_reference/bitwise_or.cpp @@ -0,0 +1,385 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/op/bitwise_or.hpp" + +#include + +#include "bitwise.hpp" + +using namespace ov; + +namespace reference_tests { +namespace BitwiseOpsRefTestDefinitions { +namespace { + +std::vector generateBitwiseParams() { + std::vector bitwiseParams{ + Builder{} + .opType(BitwiseTypes::BITWISE_OR) + .inputs({{{2, 2}, element::boolean, std::vector{true, false, true, false}}, + {{2, 2}, element::boolean, std::vector{true, false, false, true}}}) + .expected({{2, 2}, element::boolean, std::vector{true, false, true, true}}), + Builder{} + .opType(BitwiseTypes::BITWISE_OR) + .inputs( + {{{3, 5}, + element::u8, + std::vector< + uint8_t>{0xff, 0xff, 0xff, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x1, 0x1, 0x1, 0x8, 0x8, 0xbf}}, + {{3, 5}, + element::u8, + 
std::vector< + uint8_t>{0x0, 0x1, 0x8, 0xbf, 0x3f, 0x1, 0x8, 0xbf, 0x3f, 0x8, 0xbf, 0x3f, 0xbf, 0x3f, 0x3f}}}) + .expected( + {{3, 5}, + element::u8, + std::vector< + uint8_t>{0xff, 0xff, 0xff, 0xff, 0xff, 0x1, 0x8, 0xbf, 0x3f, 0x9, 0xbf, 0x3f, 0xbf, 0x3f, 0xbf}}), + Builder{} + .opType(BitwiseTypes::BITWISE_OR) + .inputs({{{3, 5}, + element::u16, + std::vector{0xffff, + 0xffff, + 0xffff, + 0xffff, + 0xffff, + 0x0, + 0x0, + 0x0, + 0x0, + 0x1, + 0x1, + 0x1, + 0x8, + 0x8, + 0xbfff}}, + {{3, 5}, + element::u16, + std::vector{0x0, + 0x1, + 0x8, + 0xbfff, + 0x3fff, + 0x1, + 0x8, + 0xbfff, + 0x3fff, + 0x8, + 0xbfff, + 0x3fff, + 0xbfff, + 0x3fff, + 0x3fff}}}) + .expected({{3, 5}, + element::u16, + std::vector{0xffff, + 0xffff, + 0xffff, + 0xffff, + 0xffff, + 0x1, + 0x8, + 0xbfff, + 0x3fff, + 0x9, + 0xbfff, + 0x3fff, + 0xbfff, + 0x3fff, + 0xbfff}}), + Builder{} + .opType(BitwiseTypes::BITWISE_OR) + .inputs({{{3, 5}, + element::u32, + std::vector{0xffffffff, + 0xffffffff, + 0xffffffff, + 0xffffffff, + 0xffffffff, + 0x0, + 0x0, + 0x0, + 0x0, + 0x1, + 0x1, + 0x1, + 0x8, + 0x8, + 0xbfffffff}}, + {{3, 5}, + element::u32, + std::vector{0x0, + 0x1, + 0x8, + 0xbfffffff, + 0x3fffffff, + 0x1, + 0x8, + 0xbfffffff, + 0x3fffffff, + 0x8, + 0xbfffffff, + 0x3fffffff, + 0xbfffffff, + 0x3fffffff, + 0x3fffffff}}}) + .expected({{3, 5}, + element::u32, + std::vector{0xffffffff, + 0xffffffff, + 0xffffffff, + 0xffffffff, + 0xffffffff, + 0x1, + 0x8, + 0xbfffffff, + 0x3fffffff, + 0x9, + 0xbfffffff, + 0x3fffffff, + 0xbfffffff, + 0x3fffffff, + 0xbfffffff}}), + Builder{} + .opType(BitwiseTypes::BITWISE_OR) + .inputs({{{3, 5}, + element::u64, + std::vector{0x0, + 0x0, + 0x0, + 0x0, + 0x0, + 0x1, + 0x1, + 0x1, + 0x1, + 0x4000000000000000, + 0x4000000000000000, + 0x4000000000000000, + 0xc000000000000000, + 0xc000000000000000, + 0xffffffffffffffff}}, + {{3, 5}, + element::u64, + std::vector{0x1, + 0x4000000000000000, + 0xc000000000000000, + 0xffffffffffffffff, + 0x8, + 0x4000000000000000, + 0xc000000000000000, + 0xffffffffffffffff, + 0x8, + 0xc000000000000000, + 0xffffffffffffffff, + 0x8, + 0xffffffffffffffff, + 0x8, + 0x8}}}) + .expected({{3, 5}, + element::u64, + std::vector{0x1, + 0x4000000000000000, + 0xc000000000000000, + 0xffffffffffffffff, + 0x8, + 0x4000000000000001, + 0xc000000000000001, + 0xffffffffffffffff, + 0x9, + 0xc000000000000000, + 0xffffffffffffffff, + 0x4000000000000008, + 0xffffffffffffffff, + 0xc000000000000008, + 0xffffffffffffffff}}), + Builder{} + .opType(BitwiseTypes::BITWISE_OR) + .inputs( + {{{3, 5}, + element::i8, + std::vector< + uint8_t>{0xff, 0xff, 0xff, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x1, 0x1, 0x1, 0x8, 0x8, 0xbf}}, + {{3, 5}, + element::i8, + std::vector< + uint8_t>{0x0, 0x1, 0x8, 0xbf, 0x3f, 0x1, 0x8, 0xbf, 0x3f, 0x8, 0xbf, 0x3f, 0xbf, 0x3f, 0x3f}}}) + .expected( + {{3, 5}, + element::i8, + std::vector< + uint8_t>{0xff, 0xff, 0xff, 0xff, 0xff, 0x1, 0x8, 0xbf, 0x3f, 0x9, 0xbf, 0x3f, 0xbf, 0x3f, 0xbf}}), + Builder{} + .opType(BitwiseTypes::BITWISE_OR) + .inputs({{{3, 5}, + element::i16, + std::vector{0xffff, + 0xffff, + 0xffff, + 0xffff, + 0xffff, + 0x0, + 0x0, + 0x0, + 0x0, + 0x1, + 0x1, + 0x1, + 0x8, + 0x8, + 0xbfff}}, + {{3, 5}, + element::i16, + std::vector{0x0, + 0x1, + 0x8, + 0xbfff, + 0x3fff, + 0x1, + 0x8, + 0xbfff, + 0x3fff, + 0x8, + 0xbfff, + 0x3fff, + 0xbfff, + 0x3fff, + 0x3fff}}}) + .expected({{3, 5}, + element::i16, + std::vector{0xffff, + 0xffff, + 0xffff, + 0xffff, + 0xffff, + 0x1, + 0x8, + 0xbfff, + 0x3fff, + 0x9, + 0xbfff, + 0x3fff, + 0xbfff, + 0x3fff, + 0xbfff}}), + Builder{} + 
.opType(BitwiseTypes::BITWISE_OR) + .inputs({{{3, 5}, + element::i32, + std::vector{0xffffffff, + 0xffffffff, + 0xffffffff, + 0xffffffff, + 0xffffffff, + 0x0, + 0x0, + 0x0, + 0x0, + 0x1, + 0x1, + 0x1, + 0x8, + 0x8, + 0xbfffffff}}, + {{3, 5}, + element::i32, + std::vector{0x0, + 0x1, + 0x8, + 0xbfffffff, + 0x3fffffff, + 0x1, + 0x8, + 0xbfffffff, + 0x3fffffff, + 0x8, + 0xbfffffff, + 0x3fffffff, + 0xbfffffff, + 0x3fffffff, + 0x3fffffff}}}) + .expected({{3, 5}, + element::i32, + std::vector{0xffffffff, + 0xffffffff, + 0xffffffff, + 0xffffffff, + 0xffffffff, + 0x1, + 0x8, + 0xbfffffff, + 0x3fffffff, + 0x9, + 0xbfffffff, + 0x3fffffff, + 0xbfffffff, + 0x3fffffff, + 0xbfffffff}}), + Builder{} + .opType(BitwiseTypes::BITWISE_OR) + .inputs({{{3, 5}, + element::i64, + std::vector{0x0, + 0x0, + 0x0, + 0x0, + 0x0, + 0x1, + 0x1, + 0x1, + 0x1, + 0x4000000000000000, + 0x4000000000000000, + 0x4000000000000000, + 0xc000000000000000, + 0xc000000000000000, + 0xffffffffffffffff}}, + {{3, 5}, + element::i64, + std::vector{0x1, + 0x4000000000000000, + 0xc000000000000000, + 0xffffffffffffffff, + 0x8, + 0x4000000000000000, + 0xc000000000000000, + 0xffffffffffffffff, + 0x8, + 0xc000000000000000, + 0xffffffffffffffff, + 0x8, + 0xffffffffffffffff, + 0x8, + 0x8}}}) + .expected({{3, 5}, + element::i64, + std::vector{0x1, + 0x4000000000000000, + 0xc000000000000000, + 0xffffffffffffffff, + 0x8, + 0x4000000000000001, + 0xc000000000000001, + 0xffffffffffffffff, + 0x9, + 0xc000000000000000, + 0xffffffffffffffff, + 0x4000000000000008, + 0xffffffffffffffff, + 0xc000000000000008, + 0xffffffffffffffff}}), + }; + return bitwiseParams; +} + +INSTANTIATE_TEST_SUITE_P(smoke_BitwiseAnd_With_Hardcoded_Refs, + ReferenceBitwiseLayerTest, + ::testing::ValuesIn(generateBitwiseParams()), + ReferenceBitwiseLayerTest::getTestCaseName); + +} // namespace +} // namespace BitwiseOpsRefTestDefinitions +} // namespace reference_tests diff --git a/src/plugins/template/tests/functional/op_reference/bitwise_xor.cpp b/src/plugins/template/tests/functional/op_reference/bitwise_xor.cpp new file mode 100644 index 00000000000000..49b113220f3211 --- /dev/null +++ b/src/plugins/template/tests/functional/op_reference/bitwise_xor.cpp @@ -0,0 +1,385 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/op/bitwise_xor.hpp" + +#include + +#include "bitwise.hpp" + +using namespace ov; + +namespace reference_tests { +namespace BitwiseOpsRefTestDefinitions { +namespace { + +std::vector generateBitwiseParams() { + std::vector bitwiseParams{ + Builder{} + .opType(BitwiseTypes::BITWISE_XOR) + .inputs({{{2, 2}, element::boolean, std::vector{true, false, true, false}}, + {{2, 2}, element::boolean, std::vector{true, false, false, true}}}) + .expected({{2, 2}, element::boolean, std::vector{false, false, true, true}}), + Builder{} + .opType(BitwiseTypes::BITWISE_XOR) + .inputs( + {{{3, 5}, + element::u8, + std::vector< + uint8_t>{0xff, 0xff, 0xff, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x1, 0x1, 0x1, 0x8, 0x8, 0xbf}}, + {{3, 5}, + element::u8, + std::vector< + uint8_t>{0x0, 0x1, 0x8, 0xbf, 0x3f, 0x1, 0x8, 0xbf, 0x3f, 0x8, 0xbf, 0x3f, 0xbf, 0x3f, 0x3f}}}) + .expected( + {{3, 5}, + element::u8, + std::vector< + uint8_t>{0xff, 0xfe, 0xf7, 0x40, 0xc0, 0x1, 0x8, 0xbf, 0x3f, 0x9, 0xbe, 0x3e, 0xb7, 0x37, 0x80}}), + Builder{} + .opType(BitwiseTypes::BITWISE_XOR) + .inputs({{{3, 5}, + element::u16, + std::vector{0xffff, + 0xffff, + 0xffff, + 0xffff, + 0xffff, + 0x0, + 0x0, + 0x0, + 0x0, + 0x1, + 0x1, + 0x1, + 0x8, + 0x8, + 
0xbfff}}, + {{3, 5}, + element::u16, + std::vector{0x0, + 0x1, + 0x8, + 0xbfff, + 0x3fff, + 0x1, + 0x8, + 0xbfff, + 0x3fff, + 0x8, + 0xbfff, + 0x3fff, + 0xbfff, + 0x3fff, + 0x3fff}}}) + .expected({{3, 5}, + element::u16, + std::vector{0xffff, + 0xfffe, + 0xfff7, + 0x4000, + 0xc000, + 0x1, + 0x8, + 0xbfff, + 0x3fff, + 0x9, + 0xbffe, + 0x3ffe, + 0xbff7, + 0x3ff7, + 0x8000}}), + Builder{} + .opType(BitwiseTypes::BITWISE_XOR) + .inputs({{{3, 5}, + element::u32, + std::vector{0xffffffff, + 0xffffffff, + 0xffffffff, + 0xffffffff, + 0xffffffff, + 0x0, + 0x0, + 0x0, + 0x0, + 0x1, + 0x1, + 0x1, + 0x8, + 0x8, + 0xbfffffff}}, + {{3, 5}, + element::u32, + std::vector{0x0, + 0x1, + 0x8, + 0xbfffffff, + 0x3fffffff, + 0x1, + 0x8, + 0xbfffffff, + 0x3fffffff, + 0x8, + 0xbfffffff, + 0x3fffffff, + 0xbfffffff, + 0x3fffffff, + 0x3fffffff}}}) + .expected({{3, 5}, + element::u32, + std::vector{0xffffffff, + 0xfffffffe, + 0xfffffff7, + 0x40000000, + 0xc0000000, + 0x1, + 0x8, + 0xbfffffff, + 0x3fffffff, + 0x9, + 0xbffffffe, + 0x3ffffffe, + 0xbffffff7, + 0x3ffffff7, + 0x80000000}}), + Builder{} + .opType(BitwiseTypes::BITWISE_XOR) + .inputs({{{3, 5}, + element::u64, + std::vector{0x0, + 0x0, + 0x0, + 0x0, + 0x0, + 0x1, + 0x1, + 0x1, + 0x1, + 0x4000000000000000, + 0x4000000000000000, + 0x4000000000000000, + 0xc000000000000000, + 0xc000000000000000, + 0xffffffffffffffff}}, + {{3, 5}, + element::u64, + std::vector{0x1, + 0x4000000000000000, + 0xc000000000000000, + 0xffffffffffffffff, + 0x8, + 0x4000000000000000, + 0xc000000000000000, + 0xffffffffffffffff, + 0x8, + 0xc000000000000000, + 0xffffffffffffffff, + 0x8, + 0xffffffffffffffff, + 0x8, + 0x8}}}) + .expected({{3, 5}, + element::u64, + std::vector{0x1, + 0x4000000000000000, + 0xc000000000000000, + 0xffffffffffffffff, + 0x8, + 0x4000000000000001, + 0xc000000000000001, + 0xfffffffffffffffe, + 0x9, + 0x8000000000000000, + 0xbfffffffffffffff, + 0x4000000000000008, + 0x3fffffffffffffff, + 0xc000000000000008, + 0xfffffffffffffff7}}), + Builder{} + .opType(BitwiseTypes::BITWISE_XOR) + .inputs( + {{{3, 5}, + element::i8, + std::vector< + uint8_t>{0xff, 0xff, 0xff, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x1, 0x1, 0x1, 0x8, 0x8, 0xbf}}, + {{3, 5}, + element::i8, + std::vector< + uint8_t>{0x0, 0x1, 0x8, 0xbf, 0x3f, 0x1, 0x8, 0xbf, 0x3f, 0x8, 0xbf, 0x3f, 0xbf, 0x3f, 0x3f}}}) + .expected( + {{3, 5}, + element::i8, + std::vector< + uint8_t>{0xff, 0xfe, 0xf7, 0x40, 0xc0, 0x1, 0x8, 0xbf, 0x3f, 0x9, 0xbe, 0x3e, 0xb7, 0x37, 0x80}}), + Builder{} + .opType(BitwiseTypes::BITWISE_XOR) + .inputs({{{3, 5}, + element::i16, + std::vector{0xffff, + 0xffff, + 0xffff, + 0xffff, + 0xffff, + 0x0, + 0x0, + 0x0, + 0x0, + 0x1, + 0x1, + 0x1, + 0x8, + 0x8, + 0xbfff}}, + {{3, 5}, + element::i16, + std::vector{0x0, + 0x1, + 0x8, + 0xbfff, + 0x3fff, + 0x1, + 0x8, + 0xbfff, + 0x3fff, + 0x8, + 0xbfff, + 0x3fff, + 0xbfff, + 0x3fff, + 0x3fff}}}) + .expected({{3, 5}, + element::i16, + std::vector{0xffff, + 0xfffe, + 0xfff7, + 0x4000, + 0xc000, + 0x1, + 0x8, + 0xbfff, + 0x3fff, + 0x9, + 0xbffe, + 0x3ffe, + 0xbff7, + 0x3ff7, + 0x8000}}), + Builder{} + .opType(BitwiseTypes::BITWISE_XOR) + .inputs({{{3, 5}, + element::i32, + std::vector{0xffffffff, + 0xffffffff, + 0xffffffff, + 0xffffffff, + 0xffffffff, + 0x0, + 0x0, + 0x0, + 0x0, + 0x1, + 0x1, + 0x1, + 0x8, + 0x8, + 0xbfffffff}}, + {{3, 5}, + element::i32, + std::vector{0x0, + 0x1, + 0x8, + 0xbfffffff, + 0x3fffffff, + 0x1, + 0x8, + 0xbfffffff, + 0x3fffffff, + 0x8, + 0xbfffffff, + 0x3fffffff, + 0xbfffffff, + 0x3fffffff, + 0x3fffffff}}}) + .expected({{3, 5}, + 
element::i32, + std::vector{0xffffffff, + 0xfffffffe, + 0xfffffff7, + 0x40000000, + 0xc0000000, + 0x1, + 0x8, + 0xbfffffff, + 0x3fffffff, + 0x9, + 0xbffffffe, + 0x3ffffffe, + 0xbffffff7, + 0x3ffffff7, + 0x80000000}}), + Builder{} + .opType(BitwiseTypes::BITWISE_XOR) + .inputs({{{3, 5}, + element::i64, + std::vector{0x0, + 0x0, + 0x0, + 0x0, + 0x0, + 0x1, + 0x1, + 0x1, + 0x1, + 0x4000000000000000, + 0x4000000000000000, + 0x4000000000000000, + 0xc000000000000000, + 0xc000000000000000, + 0xffffffffffffffff}}, + {{3, 5}, + element::i64, + std::vector{0x1, + 0x4000000000000000, + 0xc000000000000000, + 0xffffffffffffffff, + 0x8, + 0x4000000000000000, + 0xc000000000000000, + 0xffffffffffffffff, + 0x8, + 0xc000000000000000, + 0xffffffffffffffff, + 0x8, + 0xffffffffffffffff, + 0x8, + 0x8}}}) + .expected({{3, 5}, + element::i64, + std::vector{0x1, + 0x4000000000000000, + 0xc000000000000000, + 0xffffffffffffffff, + 0x8, + 0x4000000000000001, + 0xc000000000000001, + 0xfffffffffffffffe, + 0x9, + 0x8000000000000000, + 0xbfffffffffffffff, + 0x4000000000000008, + 0x3fffffffffffffff, + 0xc000000000000008, + 0xfffffffffffffff7}}), + }; + return bitwiseParams; +} + +INSTANTIATE_TEST_SUITE_P(smoke_BitwiseAnd_With_Hardcoded_Refs, + ReferenceBitwiseLayerTest, + ::testing::ValuesIn(generateBitwiseParams()), + ReferenceBitwiseLayerTest::getTestCaseName); + +} // namespace +} // namespace BitwiseOpsRefTestDefinitions +} // namespace reference_tests diff --git a/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/src/op_impl_check/single_op_graph.cpp b/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/src/op_impl_check/single_op_graph.cpp index 2ca8a76f667977..5d36ba62e3ecda 100644 --- a/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/src/op_impl_check/single_op_graph.cpp +++ b/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/src/op_impl_check/single_op_graph.cpp @@ -1474,6 +1474,24 @@ std::shared_ptr generateBinaryEltwise(const std::shared_ptr(results, params, "BinaryEltwiseGraph"); } +std::shared_ptr generateBinaryEltwiseBitwise(const std::shared_ptr &node) { + ov::ParameterVector params{std::make_shared(ov::element::i32, ov::PartialShape{1, 2}), + std::make_shared(ov::element::i32, ov::PartialShape{1, 2})}; + + std::shared_ptr eltwise; + if (ov::is_type(node)) { + eltwise = std::make_shared(params[0], params[1]); + } else if (ov::is_type(node)) { + eltwise = std::make_shared(params[0], params[1]); + } else if (ov::is_type(node)) { + eltwise = std::make_shared(params[0], params[1]); + } else { + return nullptr; + } + ov::ResultVector results{std::make_shared(eltwise)}; + return std::make_shared(results, params, "BinaryEltwiseBitwiseGraph"); +} + std::shared_ptr generateBinaryEltwiseComp(const std::shared_ptr &node) { ov::ParameterVector params{std::make_shared(ov::element::f32, ov::Shape{2}), std::make_shared(ov::element::f32, ov::Shape{2})}; @@ -1976,6 +1994,8 @@ std::shared_ptr generateGraph() { return generateScatterNDBase(node); } else if (ov::is_type(node)) { return generateUnaryEltwise(node); + } else if (ov::is_type(node)) { + return generateBinaryEltwiseBitwise(node); } else if (ov::is_type(node)) { return generateBinaryEltwiseComp(node); } else if (ov::is_type(node)) { From 78e3ed6a7583b19d29407f7a7cb8fa0c513d0103 Mon Sep 17 00:00:00 2001 From: Andrey Kashchikhin Date: Wed, 4 Oct 2023 08:50:57 +0100 Subject: [PATCH 053/257] [CI] [GHA] Introduce GHA Linux CUDA Pipeline (#19884) * add pipeline * rm triggers * 
address comments * use uninteractive as env * rm triggers * rm unused testdata * use better concurrency group Co-authored-by: Mikhail Ryzhov * use aks runner * correct path * provide path * add missing cmake options; rm unnecessary dir creation * use image from private docker * split OV and plugin cmake & build; do not fail on warning for plugin build * use different build_dir for nvidia plugin * add missing options * rm unnecessary options; add target for build * Apply suggestions from code review try fix for NVIDIA plugin * Apply suggestions from code review revert to default contrib repo, used ccache for CUDA files --------- Co-authored-by: Mikhail Ryzhov Co-authored-by: Ilya Lavrenov --- .github/workflows/linux_cuda.yml | 143 +++++++++++++++++++++++++++++++ 1 file changed, 143 insertions(+) create mode 100644 .github/workflows/linux_cuda.yml diff --git a/.github/workflows/linux_cuda.yml b/.github/workflows/linux_cuda.yml new file mode 100644 index 00000000000000..9e74ec11ec6be2 --- /dev/null +++ b/.github/workflows/linux_cuda.yml @@ -0,0 +1,143 @@ +name: Linux NVIDIA Plugin (Ubuntu 20.04) +on: + workflow_dispatch: + pull_request: + paths-ignore: + - '**/docs/**' + - 'docs/**' + - '**/**.md' + - '**.md' + - '**/layer_tests_summary/**' + - '**/conformance/**' + push: + paths-ignore: + - '**/docs/**' + - 'docs/**' + - '**/**.md' + - '**.md' + - '**/layer_tests_summary/**' + - '**/conformance/**' + branches: + - master + +concurrency: + # github.ref is not unique in post-commit + group: ${{ github.event_name == 'push' && github.run_id || github.ref }}-linux-nvidia + cancel-in-progress: true + +jobs: + Build: + defaults: + run: + shell: bash + runs-on: aks-linux-16-cores + container: + image: openvinogithubactions.azurecr.io/dockerhub/nvidia/cuda:11.8.0-runtime-ubuntu20.04 + volumes: + - /mount/caches:/mount/caches + env: + CMAKE_BUILD_TYPE: 'Release' + CMAKE_GENERATOR: 'Ninja Multi-Config' + CMAKE_CUDA_COMPILER_LAUNCHER: ccache + CMAKE_CXX_COMPILER_LAUNCHER: ccache + CMAKE_C_COMPILER_LAUNCHER: ccache + OPENVINO_REPO: /__w/openvino/openvino/openvino + OPENVINO_CONTRIB_REPO: /__w/openvino/openvino/openvino_contrib + OV_BUILD_DIR: /__w/openvino/openvino/openvino_build + NVIDIA_BUILD_DIR: /__w/openvino/openvino/nvidia_plugin_build + DEBIAN_FRONTEND: 'noninteractive' + CCACHE_DIR: /mount/caches/ccache/ubuntu20_x86_64_Release + CCACHE_TEMPDIR: /__w/openvino/openvino/ccache_temp + CCACHE_MAXSIZE: 50G + steps: + + - name: Install Prerequisites + run: | + apt update + apt install -y git curl git git-lfs unzip wget + + - name: Clone OpenVINO + uses: actions/checkout@v3 + with: + path: ${{ env.OPENVINO_REPO }} + submodules: 'true' + + - name: Clone OpenVINO Contrib + uses: actions/checkout@v3 + with: + repository: 'openvinotoolkit/openvino_contrib' + path: ${{ env.OPENVINO_CONTRIB_REPO }} + ref: 'master' + + # + # Dependencies + # + + - name: Install build dependencies + run: | + ${OPENVINO_REPO}/install_build_dependencies.sh + + apt -y --no-install-recommends install unzip wget software-properties-common + + - name: Install CUDA + run: | + wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/cuda-ubuntu2004.pin + mv cuda-ubuntu2004.pin /etc/apt/preferences.d/cuda-repository-pin-600 + + apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/3bf863cc.pub + add-apt-repository "deb https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/ /" + apt update + apt install -y \ + libcudnn8=8.9.4.*-1+cuda11.8 \ + 
libcudnn8-dev=8.9.4.*-1+cuda11.8 \ + libcudnn8-samples=8.9.4.*-1+cuda11.8 \ + cuda-runtime-11-8 \ + cuda-11-8 \ + libcutensor1=1.6.1.5-1 \ + libcutensor-dev=1.6.1.5-1 \ + cuda-drivers=520.61.05-1 + + # + # Build + # + + - name: CMake configure + run: | + cmake \ + -G "${{ env.CMAKE_GENERATOR }}" \ + -DENABLE_CPPLINT=OFF \ + -DENABLE_NCC_STYLE=OFF \ + -DENABLE_SYSTEM_PUGIXML=ON \ + -DENABLE_SYSTEM_OPENCL=ON \ + -DENABLE_STRICT_DEPENDENCIES=OFF \ + -DCMAKE_BUILD_TYPE=${{ env.CMAKE_BUILD_TYPE }} \ + -DENABLE_INTEL_CPU=OFF \ + -DENABLE_INTEL_GPU=OFF \ + -DENABLE_INTEL_GNA=OFF \ + -DENABLE_OV_TF_FRONTEND=OFF \ + -DENABLE_OV_TF_LITE=OFF \ + -DENABLE_OV_PADDLE_FRONTEND=OFF \ + -DENABLE_OV_PYTORCH_FRONTEND=OFF \ + -DENABLE_OV_ONNX_FRONTEND=OFF \ + -DENABLE_PYTHON=OFF \ + -DENABLE_TESTS=ON \ + -DCPACK_GENERATOR=TGZ \ + -DCMAKE_COMPILE_WARNING_AS_ERROR=ON \ + -S ${OPENVINO_REPO} \ + -B ${OV_BUILD_DIR} + + - name: Build - OpenVINO + run: | + cmake --build ${OV_BUILD_DIR} --parallel --config ${{ env.CMAKE_BUILD_TYPE }} --verbose --target ov_dev_targets + + - name: Cmake & Build - NVIDIA Plugin + run: | + cmake \ + -DOpenVINODeveloperPackage_DIR=${OV_BUILD_DIR} \ + -DCMAKE_COMPILE_WARNING_AS_ERROR=OFF \ + -S ${OPENVINO_CONTRIB_REPO}/modules/nvidia_plugin \ + -B ${NVIDIA_BUILD_DIR} + cmake --build ${NVIDIA_BUILD_DIR} --parallel --config ${{ env.CMAKE_BUILD_TYPE }} --verbose -- ov_nvidia_func_tests ov_nvidia_unit_tests + + - name: Show ccache stats + run: ccache --show-stats From b9ff53a5a4eb30560ff46964ddb6e0f7eb76ae1f Mon Sep 17 00:00:00 2001 From: Sergey Shlyapnikov Date: Wed, 4 Oct 2023 12:57:02 +0400 Subject: [PATCH 054/257] [GPU] Fix runtime reorders optimization and add runtime memory dependencies (#20202) --- .../src/graph/include/primitive_inst.h | 19 +++++- .../intel_gpu/src/graph/primitive_inst.cpp | 60 ++++++++++++++++--- src/plugins/intel_gpu/src/graph/program.cpp | 1 + .../skip_redundant_reorder_at_runtime.cpp | 49 +++++++++++++++ 4 files changed, 118 insertions(+), 11 deletions(-) diff --git a/src/plugins/intel_gpu/src/graph/include/primitive_inst.h b/src/plugins/intel_gpu/src/graph/include/primitive_inst.h index 2f25181376222b..e51a06e43386ea 100644 --- a/src/plugins/intel_gpu/src/graph/include/primitive_inst.h +++ b/src/plugins/intel_gpu/src/graph/include/primitive_inst.h @@ -168,6 +168,7 @@ class primitive_inst { } return _network.get_primitives(users); } + std::set get_runtime_memory_dependencies() { return _runtime_memory_dependencies; } const kernel_impl_params* get_impl_params() const { return _impl_params.get(); } // return pointer to const to prevent arbitrary 'execute' call -> use primitive_inst.execute() instead @@ -235,8 +236,19 @@ class primitive_inst { bool has_node() const { return _node != nullptr; } bool has_inner_networks() const; void allocate_internal_buffers(bool reset = true); - static memory::ptr allocate_output(engine& engine, memory_pool& pool, const program_node& _node, const kernel_impl_params& impl_params, uint32_t net_id, - bool is_internal, size_t idx = 0, bool reset_mem = true, bool is_output_buffer = false, memory* curr_memory = nullptr, bool runtime_alloc = false); + + static memory::ptr allocate_output(engine& engine, + memory_pool& pool, + const program_node& _node, + const kernel_impl_params& impl_params, + const std::set& memory_dependencies, + uint32_t net_id, + bool is_internal, + size_t idx = 0, + bool reset_mem = true, + bool is_output_buffer = false, + memory* curr_memory = nullptr, + bool runtime_alloc = false); std::vector 
get_intermediates_memories() const { return _intermediates_memory; } @@ -299,6 +311,9 @@ class primitive_inst { std::vector> _exec_deps; std::vector _exec_dep_ids; + // List of primitive ids that this primitive can't share memory buffers with + std::set _runtime_memory_dependencies; + // This is sub-network generated on demand to execute unfused primitives sequence instead of single fused primitive // Needed for dynamic path only, as fusion in some cases may be illegal, but it can't be checked on program build phase, // thus we do less restrictive fusion with runtime sanity check and unfusion when needed. diff --git a/src/plugins/intel_gpu/src/graph/primitive_inst.cpp b/src/plugins/intel_gpu/src/graph/primitive_inst.cpp index 5819d1c22e3fc1..eea18ca1fe6b79 100644 --- a/src/plugins/intel_gpu/src/graph/primitive_inst.cpp +++ b/src/plugins/intel_gpu/src/graph/primitive_inst.cpp @@ -142,6 +142,7 @@ static memory::ptr get_memory_from_pool(engine& _engine, const layout& layout, allocation_type type, bool reusable_across_network, + const std::set& memory_dependencies, bool reset = true, memory* curr_memory = nullptr) { OPENVINO_ASSERT(!layout.is_dynamic() || layout.has_upper_bound(), @@ -150,7 +151,7 @@ static memory::ptr get_memory_from_pool(engine& _engine, if (_node.get_program().get_config().get_property(ov::intel_gpu::enable_memory_pool)) { if (curr_memory != nullptr) pool.release_memory(curr_memory, _node.id(), net_id); - return pool.get_memory(layout, _node.id(), net_id, _node.get_memory_dependencies(), type, reusable_across_network, reset); + return pool.get_memory(layout, _node.id(), net_id, memory_dependencies, type, reusable_across_network, reset); } return pool.get_memory(layout, type, reset); } @@ -685,8 +686,20 @@ void primitive_inst::do_runtime_skip_reorder() { u->update_shape(); u->update_shape_done_by_other = true; if (u->_impl_params->get_input_layout() == u->_impl_params->get_output_layout()) { + std::function>)> update_memory_dependencies; + update_memory_dependencies = [&](std::vector> users) { + for (auto& user : users) { + GPU_DEBUG_TRACE_DETAIL << "[do runtime skip reorder] add " << id() << " to restriction list of " << user->id() << std::endl; + user->_runtime_memory_dependencies.insert(id()); + if (user->can_be_optimized()) + update_memory_dependencies(user->get_user_insts()); + } + }; + + update_memory_dependencies(u->get_user_insts()); + u->set_can_be_optimized(true); - GPU_DEBUG_TRACE_DETAIL << "[do runtime skip reorder] set user " << u->id() << " as can_be_optimized" << std::endl; + GPU_DEBUG_TRACE_DETAIL << "[do runtime skip reorder] set user " << u->id() << " as can_be_optimized" << std::endl; } else { GPU_DEBUG_TRACE_DETAIL << "[do runtime skip reorder] user " << u->id() << " cannot be optimized" << std::endl; } @@ -837,7 +850,9 @@ event::ptr primitive_inst::execute(const std::vector& events) { set_arguments(); } on_execute(); - GPU_DEBUG_TRACE << id() << ": execute " << _impl->get_kernel_name() << std::endl; + + GPU_DEBUG_TRACE << id() << ": execute " << _impl->get_kernel_name() << " (is_dynamic=" << _impl->is_dynamic() << ", " + << "can_be_optimized=" << can_be_optimized() << ")" << std::endl; if (_exec_deps.empty() && dependencies.empty()) { dependencies = events; @@ -951,6 +966,7 @@ primitive_inst::primitive_inst(network& network, program_node const& node, bool , _impl_params(node.get_kernel_impl_params()) , _impl(node.get_selected_impl() ? 
node.get_selected_impl()->clone() : nullptr) , _dynamic_impl(nullptr) + , _runtime_memory_dependencies(node.get_memory_dependencies()) , _outputs({memory::ptr()}) , _reordered_weights_cache(network.get_weights_cache_capacity()) , _output_changed(false) @@ -1090,6 +1106,7 @@ memory::ptr primitive_inst::allocate_internal_buffer(size_t idx, bool reset) { layout, alloc_type, reuse_internal_buf, + _runtime_memory_dependencies, reset, _intermediates_memory.size() > idx ? _intermediates_memory[idx].get() : nullptr); GPU_DEBUG_LOG << " [" << _network.get_id() << ":" << _node->id() << ": internal buf " << idx << "] " << alloc_type @@ -1240,8 +1257,18 @@ static bool user_requesting_mem_reuse_false(const program_node& node) { return false; } -memory::ptr primitive_inst::allocate_output(engine& _engine, memory_pool& pool, const program_node& _node, const kernel_impl_params& impl_params, - uint32_t net_id, bool is_internal, size_t idx, bool reset, bool is_output_buffer, memory* curr_memory, bool runtime_alloc) { +memory::ptr primitive_inst::allocate_output(engine& _engine, + memory_pool& pool, + const program_node& _node, + const kernel_impl_params& impl_params, + const std::set& memory_dependencies, + uint32_t net_id, + bool is_internal, + size_t idx, + bool reset, + bool is_output_buffer, + memory* curr_memory, + bool runtime_alloc) { auto layout = impl_params.get_output_layout(idx); OPENVINO_ASSERT(layout.is_static() || layout.has_upper_bound(), "[GPU] Can't allocate output for dynamic layout"); auto device_mem_acc = [&](size_t a, const cldnn::layout& l) { @@ -1295,6 +1322,7 @@ memory::ptr primitive_inst::allocate_output(engine& _engine, memory_pool& pool, layout, alloc_type, false, + memory_dependencies, reset, curr_memory); } else { @@ -1314,6 +1342,7 @@ memory::ptr primitive_inst::allocate_output(engine& _engine, memory_pool& pool, layout, alloc_type, reusable_across_network, + memory_dependencies, reset, curr_memory); } @@ -1322,10 +1351,22 @@ memory::ptr primitive_inst::allocate_output(engine& _engine, memory_pool& pool, std::vector primitive_inst::allocate_outputs(kernel_impl_params* updated_params, bool reset_mem, bool runtime_alloc) { std::vector outputs; for (size_t i = 0; i < get_node().get_outputs_count() ; ++i) { - outputs.push_back(allocate_output(get_network().get_engine(), _network.get_memory_pool(), - *_node, (updated_params != nullptr) ? *updated_params : *_impl_params, - get_network_id(), _network.is_internal(), i, reset_mem, is_output_buffer(this, runtime_alloc), - (_outputs.size() > i) ? output_memory_ptr(i).get() : nullptr, runtime_alloc)); + auto impl_params = updated_params != nullptr ? *updated_params : *_impl_params; + auto current_memory_ptr = _outputs.size() > i ? 
output_memory_ptr(i).get() : nullptr; + auto is_output = is_output_buffer(this, runtime_alloc); + + outputs.push_back(allocate_output(_network.get_engine(), + _network.get_memory_pool(), + *_node, + impl_params, + _runtime_memory_dependencies, + get_network_id(), + _network.is_internal(), + i, + reset_mem, + is_output, + current_memory_ptr, + runtime_alloc)); } return outputs; } @@ -1723,6 +1764,7 @@ void primitive_inst::load(cldnn::BinaryInputBuffer& ib) { std::set _node_mem_deps; ib >> _node_mem_deps; + _runtime_memory_dependencies = _node_mem_deps; size_t vector_size = 0UL; ib >> vector_size; diff --git a/src/plugins/intel_gpu/src/graph/program.cpp b/src/plugins/intel_gpu/src/graph/program.cpp index 4fcb120cdd4220..59af7125f9e4dc 100644 --- a/src/plugins/intel_gpu/src/graph/program.cpp +++ b/src/plugins/intel_gpu/src/graph/program.cpp @@ -1664,6 +1664,7 @@ std::pair program::get_estimated_device_mem_usage() { pool, *node, *node->get_kernel_impl_params(), + node->get_memory_dependencies(), 0, false, 0, diff --git a/src/plugins/intel_gpu/tests/unit/dynamic_execution/skip_redundant_reorder_at_runtime.cpp b/src/plugins/intel_gpu/tests/unit/dynamic_execution/skip_redundant_reorder_at_runtime.cpp index 5ee76f2c2b2de8..e80567ec7c9432 100644 --- a/src/plugins/intel_gpu/tests/unit/dynamic_execution/skip_redundant_reorder_at_runtime.cpp +++ b/src/plugins/intel_gpu/tests/unit/dynamic_execution/skip_redundant_reorder_at_runtime.cpp @@ -8,6 +8,7 @@ #include #include #include +#include #include "program_wrapper.h" @@ -49,4 +50,52 @@ TEST(remove_redundant_reorder, skip_reorder_at_runtime) { ASSERT_EQ(reorder_inst->can_be_optimized(), true); ASSERT_EQ(network.get_output_memory("reorder")->buffer_ptr(), network.get_primitive("fc")->output_memory_ptr()->buffer_ptr()); } + +TEST(skip_reorder_at_runtime, correct_memory_reuse) { + auto& engine = get_test_engine(); + + auto weight_mem = engine.allocate_memory({{2, 32}, data_types::f32, format::bfyx}); + std::vector weight_data(weight_mem->get_layout().count()); + std::iota(weight_data.begin(), weight_data.end(), 1.0f); + set_values(weight_mem, weight_data); + + auto input_l = layout{ov::PartialShape::dynamic(2), data_types::f32, format::bfyx}; + topology topology(input_layout("input", input_l), + data("weight", weight_mem), + fully_connected("fc", input_info("input"), {"weight"}, "", data_types::f32), + reorder("reorder", input_info("fc"), format::bfyx, data_types::f32), + reshape("reshape", input_info("reorder"), false, {}, {2, 1, 1, 1}), + reorder("reorder_fsv16", input_info("reshape"), format::b_fs_yx_fsv16, data_types::f32)); + + ExecutionConfig config = get_test_default_config(engine); + config.set_property(ov::intel_gpu::allow_new_shape_infer(true)); + config.set_property(ov::intel_gpu::optimize_data(true)); + + network network(engine, topology, config); + auto reorder_inst = network.get_primitive("reorder"); + auto reshape_inst = network.get_primitive("reshape"); + auto reorder_fsv16_inst = network.get_primitive("reorder_fsv16"); + ASSERT_EQ(reorder_inst->can_be_optimized(), false); + ASSERT_EQ(reshape_inst->can_be_optimized(), true); + ASSERT_EQ(reorder_fsv16_inst->can_be_optimized(), false); + + auto input_mem = engine.allocate_memory({{10, 32}, data_types::f32, format::bfyx}); + std::vector input_data(input_mem->get_layout().count()); + std::iota(input_data.begin(), input_data.end(), 0.5f); + set_values(input_mem, input_data); + + network.set_input_data("input", input_mem); + auto outputs = network.execute(); + outputs.begin()->second.get_memory(); 
+ + ASSERT_EQ(reorder_inst->can_be_optimized(), true); + ASSERT_EQ(reshape_inst->can_be_optimized(), true); + ASSERT_EQ(reorder_fsv16_inst->can_be_optimized(), false); + + auto reshape_memory_deps = reshape_inst->get_runtime_memory_dependencies(); + ASSERT_TRUE(reshape_memory_deps.find("fc") != reshape_memory_deps.end()); + + auto reorder_fsv16_memory_deps = reorder_fsv16_inst->get_runtime_memory_dependencies(); + ASSERT_TRUE(reorder_fsv16_memory_deps.find("fc") != reorder_fsv16_memory_deps.end()); +} } // memory_realloc_tests From 185d728b7ef9ae33d20989f5a00890f6135d54ed Mon Sep 17 00:00:00 2001 From: Sebastian Golebiewski Date: Wed, 4 Oct 2023 11:17:43 +0200 Subject: [PATCH 055/257] Changing file structure of Operation Sets (#20225) --- .../documentation/openvino_ir/ir_suitable_for_int8_inference.md} | 0 .../documentation/openvino_ir/operation_sets}/broadcast_rules.md | 0 .../openvino_ir/operation_sets/operations_specifications.md} | 0 .../operations_specifications}/activation/Clamp_1.md | 0 .../operation_sets/operations_specifications}/activation/Elu_1.md | 0 .../operation_sets/operations_specifications}/activation/Exp_1.md | 0 .../operations_specifications}/activation/GELU_2.md | 0 .../operations_specifications}/activation/GELU_7.md | 0 .../operations_specifications}/activation/HSigmoid_5.md | 0 .../operations_specifications}/activation/HSwish_4.md | 0 .../operations_specifications}/activation/HardSigmoid_1.md | 0 .../operations_specifications}/activation/LogSoftmax_5.md | 0 .../operations_specifications}/activation/Mish_4.md | 0 .../operations_specifications}/activation/PReLU_1.md | 0 .../operations_specifications}/activation/ReLU_1.md | 0 .../operations_specifications}/activation/Selu_1.md | 0 .../operations_specifications}/activation/Sigmoid_1.md | 0 .../operations_specifications}/activation/SoftMax_1.md | 0 .../operations_specifications}/activation/SoftMax_8.md | 0 .../operations_specifications}/activation/SoftPlus_4.md | 0 .../operations_specifications}/activation/SoftSign_9.md | 0 .../operations_specifications}/activation/Swish_4.md | 0 .../operation_sets/operations_specifications}/arithmetic/Abs_1.md | 0 .../operations_specifications}/arithmetic/Acos_1.md | 0 .../operations_specifications}/arithmetic/Acosh_3.md | 0 .../operation_sets/operations_specifications}/arithmetic/Add_1.md | 0 .../operations_specifications}/arithmetic/Asin_1.md | 0 .../operations_specifications}/arithmetic/Asinh_3.md | 0 .../operations_specifications}/arithmetic/Atan_1.md | 0 .../operations_specifications}/arithmetic/Atanh_3.md | 0 .../operations_specifications}/arithmetic/Ceiling_1.md | 0 .../operation_sets/operations_specifications}/arithmetic/Cos_1.md | 0 .../operations_specifications}/arithmetic/Cosh_1.md | 0 .../operations_specifications}/arithmetic/CumSum_3.md | 0 .../operations_specifications}/arithmetic/Divide_1.md | 0 .../operation_sets/operations_specifications}/arithmetic/Erf_1.md | 0 .../operations_specifications}/arithmetic/FloorMod_1.md | 0 .../operations_specifications}/arithmetic/Floor_1.md | 0 .../operation_sets/operations_specifications}/arithmetic/Log_1.md | 0 .../operations_specifications}/arithmetic/Maximum_1.md | 0 .../operations_specifications}/arithmetic/Minimum_1.md | 0 .../operation_sets/operations_specifications}/arithmetic/Mod_1.md | 0 .../operations_specifications}/arithmetic/Multiply_1.md | 0 .../operations_specifications}/arithmetic/Negative_1.md | 0 .../operations_specifications}/arithmetic/Power_1.md | 0 .../operations_specifications}/arithmetic/Round_5.md | 0 
.../operations_specifications}/arithmetic/Sign_1.md | 0 .../operation_sets/operations_specifications}/arithmetic/Sin_1.md | 0 .../operations_specifications}/arithmetic/Sinh_1.md | 0 .../operations_specifications}/arithmetic/Sqrt_1.md | 0 .../operations_specifications}/arithmetic/SquaredDifference_1.md | 0 .../operations_specifications}/arithmetic/Subtract_1.md | 0 .../operation_sets/operations_specifications}/arithmetic/Tan_1.md | 0 .../operations_specifications}/arithmetic/Tanh_1.md | 0 .../operations_specifications}/bitwise/BitwiseAnd_13.md | 0 .../operations_specifications}/bitwise/BitwiseNot_13.md | 0 .../operations_specifications}/bitwise/BitwiseOr_13.md | 0 .../operations_specifications}/bitwise/BitwiseXor_13.md | 0 .../operations_specifications}/comparison/Equal_1.md | 0 .../operations_specifications}/comparison/GreaterEqual_1.md | 0 .../operations_specifications}/comparison/Greater_1.md | 0 .../operations_specifications}/comparison/IsFinite_10.md | 0 .../operations_specifications}/comparison/IsInf_10.md | 0 .../operations_specifications}/comparison/IsNaN_10.md | 0 .../operations_specifications}/comparison/LessEqual_1.md | 0 .../operations_specifications}/comparison/Less_1.md | 0 .../operations_specifications}/comparison/NotEqual_1.md | 0 .../operations_specifications}/condition/Bucketize_3.md | 0 .../operation_sets/operations_specifications}/condition/If_8.md | 0 .../operations_specifications}/condition/NonZero_3.md | 0 .../operations_specifications}/condition/Select_1.md | 0 .../operations_specifications}/convolution/BinaryConvolution_1.md | 0 .../convolution/ConvolutionBackpropData_1.md | 0 .../operations_specifications}/convolution/Convolution_1.md | 0 .../convolution/DeformableConvolution_1.md | 0 .../convolution/DeformableConvolution_8.md | 0 .../convolution/GroupConvolutionBackpropData_1.md | 0 .../operations_specifications}/convolution/GroupConvolution_1.md | 0 .../detection/DeformablePSROIPooling_1.md | 0 .../operations_specifications}/detection/DetectionOutput_1.md | 0 .../operations_specifications}/detection/DetectionOutput_8.md | 0 .../detection/ExperimentalDetectronDetectionOutput_6.md | 0 .../ExperimentalDetectronGenerateProposalsSingleImage_6.md | 0 .../detection/ExperimentalDetectronPriorGridGenerator_6.md | 0 .../detection/ExperimentalDetectronROIFeatureExtractor_6.md | 0 .../operations_specifications}/detection/GenerateProposals_9.md | 0 .../operations_specifications}/detection/PSROIPooling_1.md | 0 .../operations_specifications}/detection/PriorBoxClustered_1.md | 0 .../operations_specifications}/detection/PriorBox_1.md | 0 .../operations_specifications}/detection/PriorBox_8.md | 0 .../operations_specifications}/detection/Proposal_1.md | 0 .../operations_specifications}/detection/Proposal_4.md | 0 .../operations_specifications}/detection/ROIAlign_3.md | 0 .../operations_specifications}/detection/ROIAlign_9.md | 0 .../operations_specifications}/detection/ROIPooling_1.md | 0 .../operations_specifications}/detection/RegionYolo_1.md | 0 .../operations_specifications}/detection/ReorgYolo_1.md | 0 .../operation_sets/operations_specifications}/generation/Eye_9.md | 0 .../operations_specifications}/generation/Multinomial_13.md | 0 .../operations_specifications}/generation/RandomUniform_8.md | 0 .../operations_specifications}/generation/Range_1.md | 0 .../operations_specifications}/generation/Range_4.md | 0 .../operations_specifications}/image/GridSample_9.md | 0 .../operations_specifications}/image/I420toBGR_8.md | 0 .../operations_specifications}/image/I420toRGB_8.md | 0 
.../operations_specifications}/image/Interpolate_1.md | 0 .../operations_specifications}/image/Interpolate_11.md | 0 .../operations_specifications}/image/Interpolate_4.md | 0 .../operations_specifications}/image/NV12toBGR_8.md | 0 .../operations_specifications}/image/NV12toRGB_8.md | 0 .../operations_specifications}/infrastructure/Assign_3.md | 0 .../operations_specifications}/infrastructure/Constant_1.md | 0 .../operations_specifications}/infrastructure/Loop_5.md | 0 .../operations_specifications}/infrastructure/Parameter_1.md | 0 .../operations_specifications}/infrastructure/ReadValue_3.md | 0 .../operations_specifications}/infrastructure/Result_1.md | 0 .../operations_specifications}/infrastructure/TensorIterator_1.md | 0 .../operations_specifications}/internal/AUGRUCell.md | 0 .../operations_specifications}/internal/AUGRUSequence.md | 0 .../operations_specifications}/logical/LogicalAnd_1.md | 0 .../operations_specifications}/logical/LogicalNot_1.md | 0 .../operations_specifications}/logical/LogicalOr_1.md | 0 .../operations_specifications}/logical/LogicalXor_1.md | 0 .../operation_sets/operations_specifications}/matrix/Einsum_7.md | 0 .../operation_sets/operations_specifications}/matrix/MatMul_1.md | 0 .../operations_specifications}/movement/BatchToSpace_2.md | 0 .../operations_specifications}/movement/Broadcast_1.md | 0 .../operations_specifications}/movement/Broadcast_3.md | 0 .../operations_specifications}/movement/Concat_1.md | 0 .../operations_specifications}/movement/DepthToSpace_1.md | 0 .../operations_specifications}/movement/ExtractImagePatches_3.md | 0 .../operations_specifications}/movement/GatherElements_6.md | 0 .../operations_specifications}/movement/GatherND_5.md | 0 .../operations_specifications}/movement/GatherND_8.md | 0 .../operations_specifications}/movement/GatherTree_1.md | 0 .../operations_specifications}/movement/Gather_1.md | 0 .../operations_specifications}/movement/Gather_7.md | 0 .../operations_specifications}/movement/Gather_8.md | 0 .../operation_sets/operations_specifications}/movement/Pad_1.md | 0 .../operation_sets/operations_specifications}/movement/Pad_12.md | 0 .../operations_specifications}/movement/ReverseSequence_1.md | 0 .../operations_specifications}/movement/Reverse_1.md | 0 .../operation_sets/operations_specifications}/movement/Roll_7.md | 0 .../movement/ScatterElementsUpdate_12.md | 0 .../movement/ScatterElementsUpdate_3.md | 0 .../operations_specifications}/movement/ScatterNDUpdate_3.md | 0 .../operations_specifications}/movement/ScatterUpdate_3.md | 0 .../operations_specifications}/movement/ShuffleChannels_1.md | 0 .../operation_sets/operations_specifications}/movement/Slice_8.md | 0 .../operations_specifications}/movement/SpaceToBatch_2.md | 0 .../operations_specifications}/movement/SpaceToDepth_1.md | 0 .../operation_sets/operations_specifications}/movement/Split_1.md | 0 .../operations_specifications}/movement/StridedSlice_1.md | 0 .../operation_sets/operations_specifications}/movement/Tile_1.md | 0 .../operations_specifications}/movement/Transpose_1.md | 0 .../operations_specifications}/movement/Unique_10.md | 0 .../operations_specifications}/movement/VariadicSplit_1.md | 0 .../normalization/BatchNormInference_1.md | 0 .../normalization/BatchNormInference_5.md | 0 .../operations_specifications}/normalization/GRN_1.md | 0 .../normalization/GroupNormalization_12.md | 0 .../operations_specifications}/normalization/LRN_1.md | 0 .../operations_specifications}/normalization/MVN_1.md | 0 .../operations_specifications}/normalization/MVN_6.md | 
0 .../operations_specifications}/normalization/NormalizeL2_1.md | 0 .../operations_specifications}/pooling/AdaptiveAvgPool_8.md | 0 .../operations_specifications}/pooling/AdaptiveMaxPool_8.md | 0 .../operations_specifications}/pooling/AvgPool_1.md | 0 .../operations_specifications}/pooling/MaxPool_1.md | 0 .../operations_specifications}/pooling/MaxPool_8.md | 0 .../operations_specifications}/quantization/FakeQuantize_1.md | 0 .../operations_specifications}/reduction/ReduceL1_4.md | 0 .../operations_specifications}/reduction/ReduceL2_4.md | 0 .../operations_specifications}/reduction/ReduceLogicalAnd_1.md | 0 .../operations_specifications}/reduction/ReduceLogicalOr_1.md | 0 .../operations_specifications}/reduction/ReduceMax_1.md | 0 .../operations_specifications}/reduction/ReduceMean_1.md | 0 .../operations_specifications}/reduction/ReduceMin_1.md | 0 .../operations_specifications}/reduction/ReduceProd_1.md | 0 .../operations_specifications}/reduction/ReduceSum_1.md | 0 .../sequence/CTCGreedyDecoderSeqLen_6.md | 0 .../operations_specifications}/sequence/CTCGreedyDecoder_1.md | 0 .../operations_specifications}/sequence/CTCLoss_4.md | 0 .../operations_specifications}/sequence/GRUCell_3.md | 0 .../operations_specifications}/sequence/GRUSequence_5.md | 0 .../operations_specifications}/sequence/LSTMCell_1.md | 0 .../operations_specifications}/sequence/LSTMSequence_1.md | 0 .../operations_specifications}/sequence/OneHot_1.md | 0 .../operations_specifications}/sequence/RNNCell_3.md | 0 .../operations_specifications}/sequence/RNNSequence_5.md | 0 .../operation_sets/operations_specifications}/shape/Reshape_1.md | 0 .../operation_sets/operations_specifications}/shape/ShapeOf_1.md | 0 .../operation_sets/operations_specifications}/shape/ShapeOf_3.md | 0 .../operation_sets/operations_specifications}/shape/Squeeze_1.md | 0 .../operations_specifications}/shape/Unsqueeze_1.md | 0 .../operation_sets/operations_specifications}/signals/DFT_7.md | 0 .../operation_sets/operations_specifications}/signals/IDFT_7.md | 0 .../operation_sets/operations_specifications}/signals/IRDFT_9.md | 0 .../operation_sets/operations_specifications}/signals/RDFT_9.md | 0 .../sort/ExperimentalDetectronTopKROIs_6.md | 0 .../operation_sets/operations_specifications}/sort/MatrixNMS_8.md | 0 .../sort/MulticlassNonMaxSuppression_8.md | 0 .../sort/MulticlassNonMaxSuppression_9.md | 0 .../operations_specifications}/sort/NMSRotated_13.md | 0 .../operations_specifications}/sort/NonMaxSuppression_1.md | 0 .../operations_specifications}/sort/NonMaxSuppression_3.md | 0 .../operations_specifications}/sort/NonMaxSuppression_4.md | 0 .../operations_specifications}/sort/NonMaxSuppression_5.md | 0 .../operations_specifications}/sort/NonMaxSuppression_9.md | 0 .../operation_sets/operations_specifications}/sort/TopK_1.md | 0 .../operation_sets/operations_specifications}/sort/TopK_11.md | 0 .../operation_sets/operations_specifications}/sort/TopK_3.md | 0 .../operations_specifications}/sparse/EmbeddingBagOffsetsSum_3.md | 0 .../operations_specifications}/sparse/EmbeddingBagPackedSum_3.md | 0 .../operations_specifications}/sparse/EmbeddingSegmentsSum_3.md | 0 .../operations_specifications}/type/ConvertLike_1.md | 0 .../operation_sets/operations_specifications}/type/Convert_1.md | 0 217 files changed, 0 insertions(+), 0 deletions(-) rename docs/{MO_DG/prepare_model/convert_model/IR_suitable_for_INT8_inference.md => articles_en/documentation/openvino_ir/ir_suitable_for_int8_inference.md} (100%) rename docs/{ops => 
articles_en/documentation/openvino_ir/operation_sets}/broadcast_rules.md (100%) rename docs/{OV_Runtime_UG/Operations_specifications.md => articles_en/documentation/openvino_ir/operation_sets/operations_specifications.md} (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/activation/Clamp_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/activation/Elu_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/activation/Exp_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/activation/GELU_2.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/activation/GELU_7.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/activation/HSigmoid_5.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/activation/HSwish_4.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/activation/HardSigmoid_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/activation/LogSoftmax_5.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/activation/Mish_4.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/activation/PReLU_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/activation/ReLU_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/activation/Selu_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/activation/Sigmoid_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/activation/SoftMax_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/activation/SoftMax_8.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/activation/SoftPlus_4.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/activation/SoftSign_9.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/activation/Swish_4.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/arithmetic/Abs_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/arithmetic/Acos_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/arithmetic/Acosh_3.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/arithmetic/Add_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/arithmetic/Asin_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/arithmetic/Asinh_3.md (100%) rename docs/{ops => 
articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/arithmetic/Atan_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/arithmetic/Atanh_3.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/arithmetic/Ceiling_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/arithmetic/Cos_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/arithmetic/Cosh_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/arithmetic/CumSum_3.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/arithmetic/Divide_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/arithmetic/Erf_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/arithmetic/FloorMod_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/arithmetic/Floor_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/arithmetic/Log_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/arithmetic/Maximum_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/arithmetic/Minimum_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/arithmetic/Mod_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/arithmetic/Multiply_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/arithmetic/Negative_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/arithmetic/Power_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/arithmetic/Round_5.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/arithmetic/Sign_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/arithmetic/Sin_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/arithmetic/Sinh_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/arithmetic/Sqrt_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/arithmetic/SquaredDifference_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/arithmetic/Subtract_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/arithmetic/Tan_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/arithmetic/Tanh_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/bitwise/BitwiseAnd_13.md (100%) rename docs/{ops => 
articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/bitwise/BitwiseNot_13.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/bitwise/BitwiseOr_13.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/bitwise/BitwiseXor_13.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/comparison/Equal_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/comparison/GreaterEqual_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/comparison/Greater_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/comparison/IsFinite_10.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/comparison/IsInf_10.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/comparison/IsNaN_10.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/comparison/LessEqual_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/comparison/Less_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/comparison/NotEqual_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/condition/Bucketize_3.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/condition/If_8.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/condition/NonZero_3.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/condition/Select_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/convolution/BinaryConvolution_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/convolution/ConvolutionBackpropData_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/convolution/Convolution_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/convolution/DeformableConvolution_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/convolution/DeformableConvolution_8.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/convolution/GroupConvolutionBackpropData_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/convolution/GroupConvolution_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/detection/DeformablePSROIPooling_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/detection/DetectionOutput_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/detection/DetectionOutput_8.md (100%) rename docs/{ops => 
articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/detection/ExperimentalDetectronDetectionOutput_6.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/detection/ExperimentalDetectronGenerateProposalsSingleImage_6.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/detection/ExperimentalDetectronPriorGridGenerator_6.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/detection/ExperimentalDetectronROIFeatureExtractor_6.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/detection/GenerateProposals_9.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/detection/PSROIPooling_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/detection/PriorBoxClustered_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/detection/PriorBox_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/detection/PriorBox_8.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/detection/Proposal_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/detection/Proposal_4.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/detection/ROIAlign_3.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/detection/ROIAlign_9.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/detection/ROIPooling_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/detection/RegionYolo_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/detection/ReorgYolo_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/generation/Eye_9.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/generation/Multinomial_13.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/generation/RandomUniform_8.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/generation/Range_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/generation/Range_4.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/image/GridSample_9.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/image/I420toBGR_8.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/image/I420toRGB_8.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/image/Interpolate_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/image/Interpolate_11.md (100%) rename 
docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/image/Interpolate_4.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/image/NV12toBGR_8.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/image/NV12toRGB_8.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/infrastructure/Assign_3.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/infrastructure/Constant_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/infrastructure/Loop_5.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/infrastructure/Parameter_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/infrastructure/ReadValue_3.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/infrastructure/Result_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/infrastructure/TensorIterator_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/internal/AUGRUCell.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/internal/AUGRUSequence.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/logical/LogicalAnd_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/logical/LogicalNot_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/logical/LogicalOr_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/logical/LogicalXor_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/matrix/Einsum_7.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/matrix/MatMul_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/movement/BatchToSpace_2.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/movement/Broadcast_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/movement/Broadcast_3.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/movement/Concat_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/movement/DepthToSpace_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/movement/ExtractImagePatches_3.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/movement/GatherElements_6.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/movement/GatherND_5.md (100%) rename docs/{ops => 
articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/movement/GatherND_8.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/movement/GatherTree_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/movement/Gather_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/movement/Gather_7.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/movement/Gather_8.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/movement/Pad_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/movement/Pad_12.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/movement/ReverseSequence_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/movement/Reverse_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/movement/Roll_7.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/movement/ScatterElementsUpdate_12.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/movement/ScatterElementsUpdate_3.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/movement/ScatterNDUpdate_3.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/movement/ScatterUpdate_3.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/movement/ShuffleChannels_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/movement/Slice_8.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/movement/SpaceToBatch_2.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/movement/SpaceToDepth_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/movement/Split_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/movement/StridedSlice_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/movement/Tile_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/movement/Transpose_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/movement/Unique_10.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/movement/VariadicSplit_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/normalization/BatchNormInference_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/normalization/BatchNormInference_5.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/normalization/GRN_1.md 
(100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/normalization/GroupNormalization_12.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/normalization/LRN_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/normalization/MVN_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/normalization/MVN_6.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/normalization/NormalizeL2_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/pooling/AdaptiveAvgPool_8.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/pooling/AdaptiveMaxPool_8.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/pooling/AvgPool_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/pooling/MaxPool_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/pooling/MaxPool_8.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/quantization/FakeQuantize_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/reduction/ReduceL1_4.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/reduction/ReduceL2_4.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/reduction/ReduceLogicalAnd_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/reduction/ReduceLogicalOr_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/reduction/ReduceMax_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/reduction/ReduceMean_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/reduction/ReduceMin_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/reduction/ReduceProd_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/reduction/ReduceSum_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/sequence/CTCGreedyDecoderSeqLen_6.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/sequence/CTCGreedyDecoder_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/sequence/CTCLoss_4.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/sequence/GRUCell_3.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/sequence/GRUSequence_5.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/sequence/LSTMCell_1.md (100%) rename docs/{ops => 
articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/sequence/LSTMSequence_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/sequence/OneHot_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/sequence/RNNCell_3.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/sequence/RNNSequence_5.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/shape/Reshape_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/shape/ShapeOf_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/shape/ShapeOf_3.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/shape/Squeeze_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/shape/Unsqueeze_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/signals/DFT_7.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/signals/IDFT_7.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/signals/IRDFT_9.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/signals/RDFT_9.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/sort/ExperimentalDetectronTopKROIs_6.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/sort/MatrixNMS_8.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/sort/MulticlassNonMaxSuppression_8.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/sort/MulticlassNonMaxSuppression_9.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/sort/NMSRotated_13.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/sort/NonMaxSuppression_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/sort/NonMaxSuppression_3.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/sort/NonMaxSuppression_4.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/sort/NonMaxSuppression_5.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/sort/NonMaxSuppression_9.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/sort/TopK_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/sort/TopK_11.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/sort/TopK_3.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/sparse/EmbeddingBagOffsetsSum_3.md (100%) rename docs/{ops => 
articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/sparse/EmbeddingBagPackedSum_3.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/sparse/EmbeddingSegmentsSum_3.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/type/ConvertLike_1.md (100%) rename docs/{ops => articles_en/documentation/openvino_ir/operation_sets/operations_specifications}/type/Convert_1.md (100%) diff --git a/docs/MO_DG/prepare_model/convert_model/IR_suitable_for_INT8_inference.md b/docs/articles_en/documentation/openvino_ir/ir_suitable_for_int8_inference.md similarity index 100% rename from docs/MO_DG/prepare_model/convert_model/IR_suitable_for_INT8_inference.md rename to docs/articles_en/documentation/openvino_ir/ir_suitable_for_int8_inference.md diff --git a/docs/ops/broadcast_rules.md b/docs/articles_en/documentation/openvino_ir/operation_sets/broadcast_rules.md similarity index 100% rename from docs/ops/broadcast_rules.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/broadcast_rules.md diff --git a/docs/OV_Runtime_UG/Operations_specifications.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications.md similarity index 100% rename from docs/OV_Runtime_UG/Operations_specifications.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications.md diff --git a/docs/ops/activation/Clamp_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/activation/Clamp_1.md similarity index 100% rename from docs/ops/activation/Clamp_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/activation/Clamp_1.md diff --git a/docs/ops/activation/Elu_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/activation/Elu_1.md similarity index 100% rename from docs/ops/activation/Elu_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/activation/Elu_1.md diff --git a/docs/ops/activation/Exp_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/activation/Exp_1.md similarity index 100% rename from docs/ops/activation/Exp_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/activation/Exp_1.md diff --git a/docs/ops/activation/GELU_2.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/activation/GELU_2.md similarity index 100% rename from docs/ops/activation/GELU_2.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/activation/GELU_2.md diff --git a/docs/ops/activation/GELU_7.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/activation/GELU_7.md similarity index 100% rename from docs/ops/activation/GELU_7.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/activation/GELU_7.md diff --git a/docs/ops/activation/HSigmoid_5.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/activation/HSigmoid_5.md similarity index 100% rename from docs/ops/activation/HSigmoid_5.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/activation/HSigmoid_5.md diff --git a/docs/ops/activation/HSwish_4.md 
b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/activation/HSwish_4.md similarity index 100% rename from docs/ops/activation/HSwish_4.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/activation/HSwish_4.md diff --git a/docs/ops/activation/HardSigmoid_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/activation/HardSigmoid_1.md similarity index 100% rename from docs/ops/activation/HardSigmoid_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/activation/HardSigmoid_1.md diff --git a/docs/ops/activation/LogSoftmax_5.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/activation/LogSoftmax_5.md similarity index 100% rename from docs/ops/activation/LogSoftmax_5.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/activation/LogSoftmax_5.md diff --git a/docs/ops/activation/Mish_4.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/activation/Mish_4.md similarity index 100% rename from docs/ops/activation/Mish_4.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/activation/Mish_4.md diff --git a/docs/ops/activation/PReLU_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/activation/PReLU_1.md similarity index 100% rename from docs/ops/activation/PReLU_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/activation/PReLU_1.md diff --git a/docs/ops/activation/ReLU_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/activation/ReLU_1.md similarity index 100% rename from docs/ops/activation/ReLU_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/activation/ReLU_1.md diff --git a/docs/ops/activation/Selu_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/activation/Selu_1.md similarity index 100% rename from docs/ops/activation/Selu_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/activation/Selu_1.md diff --git a/docs/ops/activation/Sigmoid_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/activation/Sigmoid_1.md similarity index 100% rename from docs/ops/activation/Sigmoid_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/activation/Sigmoid_1.md diff --git a/docs/ops/activation/SoftMax_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/activation/SoftMax_1.md similarity index 100% rename from docs/ops/activation/SoftMax_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/activation/SoftMax_1.md diff --git a/docs/ops/activation/SoftMax_8.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/activation/SoftMax_8.md similarity index 100% rename from docs/ops/activation/SoftMax_8.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/activation/SoftMax_8.md diff --git a/docs/ops/activation/SoftPlus_4.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/activation/SoftPlus_4.md similarity index 100% rename from 
docs/ops/activation/SoftPlus_4.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/activation/SoftPlus_4.md diff --git a/docs/ops/activation/SoftSign_9.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/activation/SoftSign_9.md similarity index 100% rename from docs/ops/activation/SoftSign_9.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/activation/SoftSign_9.md diff --git a/docs/ops/activation/Swish_4.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/activation/Swish_4.md similarity index 100% rename from docs/ops/activation/Swish_4.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/activation/Swish_4.md diff --git a/docs/ops/arithmetic/Abs_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Abs_1.md similarity index 100% rename from docs/ops/arithmetic/Abs_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Abs_1.md diff --git a/docs/ops/arithmetic/Acos_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Acos_1.md similarity index 100% rename from docs/ops/arithmetic/Acos_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Acos_1.md diff --git a/docs/ops/arithmetic/Acosh_3.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Acosh_3.md similarity index 100% rename from docs/ops/arithmetic/Acosh_3.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Acosh_3.md diff --git a/docs/ops/arithmetic/Add_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Add_1.md similarity index 100% rename from docs/ops/arithmetic/Add_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Add_1.md diff --git a/docs/ops/arithmetic/Asin_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Asin_1.md similarity index 100% rename from docs/ops/arithmetic/Asin_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Asin_1.md diff --git a/docs/ops/arithmetic/Asinh_3.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Asinh_3.md similarity index 100% rename from docs/ops/arithmetic/Asinh_3.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Asinh_3.md diff --git a/docs/ops/arithmetic/Atan_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Atan_1.md similarity index 100% rename from docs/ops/arithmetic/Atan_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Atan_1.md diff --git a/docs/ops/arithmetic/Atanh_3.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Atanh_3.md similarity index 100% rename from docs/ops/arithmetic/Atanh_3.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Atanh_3.md diff --git a/docs/ops/arithmetic/Ceiling_1.md 
b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Ceiling_1.md similarity index 100% rename from docs/ops/arithmetic/Ceiling_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Ceiling_1.md diff --git a/docs/ops/arithmetic/Cos_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Cos_1.md similarity index 100% rename from docs/ops/arithmetic/Cos_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Cos_1.md diff --git a/docs/ops/arithmetic/Cosh_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Cosh_1.md similarity index 100% rename from docs/ops/arithmetic/Cosh_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Cosh_1.md diff --git a/docs/ops/arithmetic/CumSum_3.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/CumSum_3.md similarity index 100% rename from docs/ops/arithmetic/CumSum_3.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/CumSum_3.md diff --git a/docs/ops/arithmetic/Divide_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Divide_1.md similarity index 100% rename from docs/ops/arithmetic/Divide_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Divide_1.md diff --git a/docs/ops/arithmetic/Erf_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Erf_1.md similarity index 100% rename from docs/ops/arithmetic/Erf_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Erf_1.md diff --git a/docs/ops/arithmetic/FloorMod_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/FloorMod_1.md similarity index 100% rename from docs/ops/arithmetic/FloorMod_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/FloorMod_1.md diff --git a/docs/ops/arithmetic/Floor_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Floor_1.md similarity index 100% rename from docs/ops/arithmetic/Floor_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Floor_1.md diff --git a/docs/ops/arithmetic/Log_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Log_1.md similarity index 100% rename from docs/ops/arithmetic/Log_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Log_1.md diff --git a/docs/ops/arithmetic/Maximum_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Maximum_1.md similarity index 100% rename from docs/ops/arithmetic/Maximum_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Maximum_1.md diff --git a/docs/ops/arithmetic/Minimum_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Minimum_1.md similarity index 100% rename from docs/ops/arithmetic/Minimum_1.md rename to 
docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Minimum_1.md diff --git a/docs/ops/arithmetic/Mod_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Mod_1.md similarity index 100% rename from docs/ops/arithmetic/Mod_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Mod_1.md diff --git a/docs/ops/arithmetic/Multiply_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Multiply_1.md similarity index 100% rename from docs/ops/arithmetic/Multiply_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Multiply_1.md diff --git a/docs/ops/arithmetic/Negative_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Negative_1.md similarity index 100% rename from docs/ops/arithmetic/Negative_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Negative_1.md diff --git a/docs/ops/arithmetic/Power_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Power_1.md similarity index 100% rename from docs/ops/arithmetic/Power_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Power_1.md diff --git a/docs/ops/arithmetic/Round_5.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Round_5.md similarity index 100% rename from docs/ops/arithmetic/Round_5.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Round_5.md diff --git a/docs/ops/arithmetic/Sign_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Sign_1.md similarity index 100% rename from docs/ops/arithmetic/Sign_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Sign_1.md diff --git a/docs/ops/arithmetic/Sin_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Sin_1.md similarity index 100% rename from docs/ops/arithmetic/Sin_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Sin_1.md diff --git a/docs/ops/arithmetic/Sinh_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Sinh_1.md similarity index 100% rename from docs/ops/arithmetic/Sinh_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Sinh_1.md diff --git a/docs/ops/arithmetic/Sqrt_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Sqrt_1.md similarity index 100% rename from docs/ops/arithmetic/Sqrt_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Sqrt_1.md diff --git a/docs/ops/arithmetic/SquaredDifference_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/SquaredDifference_1.md similarity index 100% rename from docs/ops/arithmetic/SquaredDifference_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/SquaredDifference_1.md diff --git a/docs/ops/arithmetic/Subtract_1.md 
b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Subtract_1.md similarity index 100% rename from docs/ops/arithmetic/Subtract_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Subtract_1.md diff --git a/docs/ops/arithmetic/Tan_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Tan_1.md similarity index 100% rename from docs/ops/arithmetic/Tan_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Tan_1.md diff --git a/docs/ops/arithmetic/Tanh_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Tanh_1.md similarity index 100% rename from docs/ops/arithmetic/Tanh_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Tanh_1.md diff --git a/docs/ops/bitwise/BitwiseAnd_13.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/bitwise/BitwiseAnd_13.md similarity index 100% rename from docs/ops/bitwise/BitwiseAnd_13.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/bitwise/BitwiseAnd_13.md diff --git a/docs/ops/bitwise/BitwiseNot_13.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/bitwise/BitwiseNot_13.md similarity index 100% rename from docs/ops/bitwise/BitwiseNot_13.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/bitwise/BitwiseNot_13.md diff --git a/docs/ops/bitwise/BitwiseOr_13.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/bitwise/BitwiseOr_13.md similarity index 100% rename from docs/ops/bitwise/BitwiseOr_13.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/bitwise/BitwiseOr_13.md diff --git a/docs/ops/bitwise/BitwiseXor_13.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/bitwise/BitwiseXor_13.md similarity index 100% rename from docs/ops/bitwise/BitwiseXor_13.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/bitwise/BitwiseXor_13.md diff --git a/docs/ops/comparison/Equal_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/comparison/Equal_1.md similarity index 100% rename from docs/ops/comparison/Equal_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/comparison/Equal_1.md diff --git a/docs/ops/comparison/GreaterEqual_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/comparison/GreaterEqual_1.md similarity index 100% rename from docs/ops/comparison/GreaterEqual_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/comparison/GreaterEqual_1.md diff --git a/docs/ops/comparison/Greater_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/comparison/Greater_1.md similarity index 100% rename from docs/ops/comparison/Greater_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/comparison/Greater_1.md diff --git a/docs/ops/comparison/IsFinite_10.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/comparison/IsFinite_10.md similarity index 100% rename 
from docs/ops/comparison/IsFinite_10.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/comparison/IsFinite_10.md diff --git a/docs/ops/comparison/IsInf_10.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/comparison/IsInf_10.md similarity index 100% rename from docs/ops/comparison/IsInf_10.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/comparison/IsInf_10.md diff --git a/docs/ops/comparison/IsNaN_10.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/comparison/IsNaN_10.md similarity index 100% rename from docs/ops/comparison/IsNaN_10.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/comparison/IsNaN_10.md diff --git a/docs/ops/comparison/LessEqual_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/comparison/LessEqual_1.md similarity index 100% rename from docs/ops/comparison/LessEqual_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/comparison/LessEqual_1.md diff --git a/docs/ops/comparison/Less_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/comparison/Less_1.md similarity index 100% rename from docs/ops/comparison/Less_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/comparison/Less_1.md diff --git a/docs/ops/comparison/NotEqual_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/comparison/NotEqual_1.md similarity index 100% rename from docs/ops/comparison/NotEqual_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/comparison/NotEqual_1.md diff --git a/docs/ops/condition/Bucketize_3.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/condition/Bucketize_3.md similarity index 100% rename from docs/ops/condition/Bucketize_3.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/condition/Bucketize_3.md diff --git a/docs/ops/condition/If_8.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/condition/If_8.md similarity index 100% rename from docs/ops/condition/If_8.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/condition/If_8.md diff --git a/docs/ops/condition/NonZero_3.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/condition/NonZero_3.md similarity index 100% rename from docs/ops/condition/NonZero_3.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/condition/NonZero_3.md diff --git a/docs/ops/condition/Select_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/condition/Select_1.md similarity index 100% rename from docs/ops/condition/Select_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/condition/Select_1.md diff --git a/docs/ops/convolution/BinaryConvolution_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/convolution/BinaryConvolution_1.md similarity index 100% rename from docs/ops/convolution/BinaryConvolution_1.md rename to 
docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/convolution/BinaryConvolution_1.md diff --git a/docs/ops/convolution/ConvolutionBackpropData_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/convolution/ConvolutionBackpropData_1.md similarity index 100% rename from docs/ops/convolution/ConvolutionBackpropData_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/convolution/ConvolutionBackpropData_1.md diff --git a/docs/ops/convolution/Convolution_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/convolution/Convolution_1.md similarity index 100% rename from docs/ops/convolution/Convolution_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/convolution/Convolution_1.md diff --git a/docs/ops/convolution/DeformableConvolution_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/convolution/DeformableConvolution_1.md similarity index 100% rename from docs/ops/convolution/DeformableConvolution_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/convolution/DeformableConvolution_1.md diff --git a/docs/ops/convolution/DeformableConvolution_8.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/convolution/DeformableConvolution_8.md similarity index 100% rename from docs/ops/convolution/DeformableConvolution_8.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/convolution/DeformableConvolution_8.md diff --git a/docs/ops/convolution/GroupConvolutionBackpropData_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/convolution/GroupConvolutionBackpropData_1.md similarity index 100% rename from docs/ops/convolution/GroupConvolutionBackpropData_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/convolution/GroupConvolutionBackpropData_1.md diff --git a/docs/ops/convolution/GroupConvolution_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/convolution/GroupConvolution_1.md similarity index 100% rename from docs/ops/convolution/GroupConvolution_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/convolution/GroupConvolution_1.md diff --git a/docs/ops/detection/DeformablePSROIPooling_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/DeformablePSROIPooling_1.md similarity index 100% rename from docs/ops/detection/DeformablePSROIPooling_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/DeformablePSROIPooling_1.md diff --git a/docs/ops/detection/DetectionOutput_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/DetectionOutput_1.md similarity index 100% rename from docs/ops/detection/DetectionOutput_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/DetectionOutput_1.md diff --git a/docs/ops/detection/DetectionOutput_8.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/DetectionOutput_8.md similarity index 100% rename from docs/ops/detection/DetectionOutput_8.md rename to 
docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/DetectionOutput_8.md diff --git a/docs/ops/detection/ExperimentalDetectronDetectionOutput_6.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/ExperimentalDetectronDetectionOutput_6.md similarity index 100% rename from docs/ops/detection/ExperimentalDetectronDetectionOutput_6.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/ExperimentalDetectronDetectionOutput_6.md diff --git a/docs/ops/detection/ExperimentalDetectronGenerateProposalsSingleImage_6.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/ExperimentalDetectronGenerateProposalsSingleImage_6.md similarity index 100% rename from docs/ops/detection/ExperimentalDetectronGenerateProposalsSingleImage_6.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/ExperimentalDetectronGenerateProposalsSingleImage_6.md diff --git a/docs/ops/detection/ExperimentalDetectronPriorGridGenerator_6.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/ExperimentalDetectronPriorGridGenerator_6.md similarity index 100% rename from docs/ops/detection/ExperimentalDetectronPriorGridGenerator_6.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/ExperimentalDetectronPriorGridGenerator_6.md diff --git a/docs/ops/detection/ExperimentalDetectronROIFeatureExtractor_6.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/ExperimentalDetectronROIFeatureExtractor_6.md similarity index 100% rename from docs/ops/detection/ExperimentalDetectronROIFeatureExtractor_6.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/ExperimentalDetectronROIFeatureExtractor_6.md diff --git a/docs/ops/detection/GenerateProposals_9.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/GenerateProposals_9.md similarity index 100% rename from docs/ops/detection/GenerateProposals_9.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/GenerateProposals_9.md diff --git a/docs/ops/detection/PSROIPooling_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/PSROIPooling_1.md similarity index 100% rename from docs/ops/detection/PSROIPooling_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/PSROIPooling_1.md diff --git a/docs/ops/detection/PriorBoxClustered_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/PriorBoxClustered_1.md similarity index 100% rename from docs/ops/detection/PriorBoxClustered_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/PriorBoxClustered_1.md diff --git a/docs/ops/detection/PriorBox_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/PriorBox_1.md similarity index 100% rename from docs/ops/detection/PriorBox_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/PriorBox_1.md diff --git a/docs/ops/detection/PriorBox_8.md 
b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/PriorBox_8.md similarity index 100% rename from docs/ops/detection/PriorBox_8.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/PriorBox_8.md diff --git a/docs/ops/detection/Proposal_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/Proposal_1.md similarity index 100% rename from docs/ops/detection/Proposal_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/Proposal_1.md diff --git a/docs/ops/detection/Proposal_4.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/Proposal_4.md similarity index 100% rename from docs/ops/detection/Proposal_4.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/Proposal_4.md diff --git a/docs/ops/detection/ROIAlign_3.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/ROIAlign_3.md similarity index 100% rename from docs/ops/detection/ROIAlign_3.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/ROIAlign_3.md diff --git a/docs/ops/detection/ROIAlign_9.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/ROIAlign_9.md similarity index 100% rename from docs/ops/detection/ROIAlign_9.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/ROIAlign_9.md diff --git a/docs/ops/detection/ROIPooling_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/ROIPooling_1.md similarity index 100% rename from docs/ops/detection/ROIPooling_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/ROIPooling_1.md diff --git a/docs/ops/detection/RegionYolo_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/RegionYolo_1.md similarity index 100% rename from docs/ops/detection/RegionYolo_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/RegionYolo_1.md diff --git a/docs/ops/detection/ReorgYolo_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/ReorgYolo_1.md similarity index 100% rename from docs/ops/detection/ReorgYolo_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/ReorgYolo_1.md diff --git a/docs/ops/generation/Eye_9.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/generation/Eye_9.md similarity index 100% rename from docs/ops/generation/Eye_9.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/generation/Eye_9.md diff --git a/docs/ops/generation/Multinomial_13.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/generation/Multinomial_13.md similarity index 100% rename from docs/ops/generation/Multinomial_13.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/generation/Multinomial_13.md diff --git a/docs/ops/generation/RandomUniform_8.md 
b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/generation/RandomUniform_8.md similarity index 100% rename from docs/ops/generation/RandomUniform_8.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/generation/RandomUniform_8.md diff --git a/docs/ops/generation/Range_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/generation/Range_1.md similarity index 100% rename from docs/ops/generation/Range_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/generation/Range_1.md diff --git a/docs/ops/generation/Range_4.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/generation/Range_4.md similarity index 100% rename from docs/ops/generation/Range_4.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/generation/Range_4.md diff --git a/docs/ops/image/GridSample_9.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/GridSample_9.md similarity index 100% rename from docs/ops/image/GridSample_9.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/GridSample_9.md diff --git a/docs/ops/image/I420toBGR_8.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/I420toBGR_8.md similarity index 100% rename from docs/ops/image/I420toBGR_8.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/I420toBGR_8.md diff --git a/docs/ops/image/I420toRGB_8.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/I420toRGB_8.md similarity index 100% rename from docs/ops/image/I420toRGB_8.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/I420toRGB_8.md diff --git a/docs/ops/image/Interpolate_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/Interpolate_1.md similarity index 100% rename from docs/ops/image/Interpolate_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/Interpolate_1.md diff --git a/docs/ops/image/Interpolate_11.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/Interpolate_11.md similarity index 100% rename from docs/ops/image/Interpolate_11.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/Interpolate_11.md diff --git a/docs/ops/image/Interpolate_4.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/Interpolate_4.md similarity index 100% rename from docs/ops/image/Interpolate_4.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/Interpolate_4.md diff --git a/docs/ops/image/NV12toBGR_8.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/NV12toBGR_8.md similarity index 100% rename from docs/ops/image/NV12toBGR_8.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/NV12toBGR_8.md diff --git a/docs/ops/image/NV12toRGB_8.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/NV12toRGB_8.md similarity index 100% rename from docs/ops/image/NV12toRGB_8.md rename to 
docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/NV12toRGB_8.md diff --git a/docs/ops/infrastructure/Assign_3.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/infrastructure/Assign_3.md similarity index 100% rename from docs/ops/infrastructure/Assign_3.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/infrastructure/Assign_3.md diff --git a/docs/ops/infrastructure/Constant_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/infrastructure/Constant_1.md similarity index 100% rename from docs/ops/infrastructure/Constant_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/infrastructure/Constant_1.md diff --git a/docs/ops/infrastructure/Loop_5.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/infrastructure/Loop_5.md similarity index 100% rename from docs/ops/infrastructure/Loop_5.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/infrastructure/Loop_5.md diff --git a/docs/ops/infrastructure/Parameter_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/infrastructure/Parameter_1.md similarity index 100% rename from docs/ops/infrastructure/Parameter_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/infrastructure/Parameter_1.md diff --git a/docs/ops/infrastructure/ReadValue_3.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/infrastructure/ReadValue_3.md similarity index 100% rename from docs/ops/infrastructure/ReadValue_3.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/infrastructure/ReadValue_3.md diff --git a/docs/ops/infrastructure/Result_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/infrastructure/Result_1.md similarity index 100% rename from docs/ops/infrastructure/Result_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/infrastructure/Result_1.md diff --git a/docs/ops/infrastructure/TensorIterator_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/infrastructure/TensorIterator_1.md similarity index 100% rename from docs/ops/infrastructure/TensorIterator_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/infrastructure/TensorIterator_1.md diff --git a/docs/ops/internal/AUGRUCell.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/internal/AUGRUCell.md similarity index 100% rename from docs/ops/internal/AUGRUCell.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/internal/AUGRUCell.md diff --git a/docs/ops/internal/AUGRUSequence.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/internal/AUGRUSequence.md similarity index 100% rename from docs/ops/internal/AUGRUSequence.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/internal/AUGRUSequence.md diff --git a/docs/ops/logical/LogicalAnd_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/logical/LogicalAnd_1.md similarity index 100% rename from docs/ops/logical/LogicalAnd_1.md rename to 
docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/logical/LogicalAnd_1.md diff --git a/docs/ops/logical/LogicalNot_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/logical/LogicalNot_1.md similarity index 100% rename from docs/ops/logical/LogicalNot_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/logical/LogicalNot_1.md diff --git a/docs/ops/logical/LogicalOr_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/logical/LogicalOr_1.md similarity index 100% rename from docs/ops/logical/LogicalOr_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/logical/LogicalOr_1.md diff --git a/docs/ops/logical/LogicalXor_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/logical/LogicalXor_1.md similarity index 100% rename from docs/ops/logical/LogicalXor_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/logical/LogicalXor_1.md diff --git a/docs/ops/matrix/Einsum_7.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/matrix/Einsum_7.md similarity index 100% rename from docs/ops/matrix/Einsum_7.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/matrix/Einsum_7.md diff --git a/docs/ops/matrix/MatMul_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/matrix/MatMul_1.md similarity index 100% rename from docs/ops/matrix/MatMul_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/matrix/MatMul_1.md diff --git a/docs/ops/movement/BatchToSpace_2.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/BatchToSpace_2.md similarity index 100% rename from docs/ops/movement/BatchToSpace_2.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/BatchToSpace_2.md diff --git a/docs/ops/movement/Broadcast_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Broadcast_1.md similarity index 100% rename from docs/ops/movement/Broadcast_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Broadcast_1.md diff --git a/docs/ops/movement/Broadcast_3.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Broadcast_3.md similarity index 100% rename from docs/ops/movement/Broadcast_3.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Broadcast_3.md diff --git a/docs/ops/movement/Concat_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Concat_1.md similarity index 100% rename from docs/ops/movement/Concat_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Concat_1.md diff --git a/docs/ops/movement/DepthToSpace_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/DepthToSpace_1.md similarity index 100% rename from docs/ops/movement/DepthToSpace_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/DepthToSpace_1.md diff --git a/docs/ops/movement/ExtractImagePatches_3.md 
b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/ExtractImagePatches_3.md similarity index 100% rename from docs/ops/movement/ExtractImagePatches_3.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/ExtractImagePatches_3.md diff --git a/docs/ops/movement/GatherElements_6.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/GatherElements_6.md similarity index 100% rename from docs/ops/movement/GatherElements_6.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/GatherElements_6.md diff --git a/docs/ops/movement/GatherND_5.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/GatherND_5.md similarity index 100% rename from docs/ops/movement/GatherND_5.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/GatherND_5.md diff --git a/docs/ops/movement/GatherND_8.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/GatherND_8.md similarity index 100% rename from docs/ops/movement/GatherND_8.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/GatherND_8.md diff --git a/docs/ops/movement/GatherTree_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/GatherTree_1.md similarity index 100% rename from docs/ops/movement/GatherTree_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/GatherTree_1.md diff --git a/docs/ops/movement/Gather_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Gather_1.md similarity index 100% rename from docs/ops/movement/Gather_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Gather_1.md diff --git a/docs/ops/movement/Gather_7.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Gather_7.md similarity index 100% rename from docs/ops/movement/Gather_7.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Gather_7.md diff --git a/docs/ops/movement/Gather_8.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Gather_8.md similarity index 100% rename from docs/ops/movement/Gather_8.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Gather_8.md diff --git a/docs/ops/movement/Pad_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Pad_1.md similarity index 100% rename from docs/ops/movement/Pad_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Pad_1.md diff --git a/docs/ops/movement/Pad_12.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Pad_12.md similarity index 100% rename from docs/ops/movement/Pad_12.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Pad_12.md diff --git a/docs/ops/movement/ReverseSequence_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/ReverseSequence_1.md similarity index 100% rename from 
docs/ops/movement/ReverseSequence_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/ReverseSequence_1.md diff --git a/docs/ops/movement/Reverse_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Reverse_1.md similarity index 100% rename from docs/ops/movement/Reverse_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Reverse_1.md diff --git a/docs/ops/movement/Roll_7.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Roll_7.md similarity index 100% rename from docs/ops/movement/Roll_7.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Roll_7.md diff --git a/docs/ops/movement/ScatterElementsUpdate_12.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/ScatterElementsUpdate_12.md similarity index 100% rename from docs/ops/movement/ScatterElementsUpdate_12.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/ScatterElementsUpdate_12.md diff --git a/docs/ops/movement/ScatterElementsUpdate_3.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/ScatterElementsUpdate_3.md similarity index 100% rename from docs/ops/movement/ScatterElementsUpdate_3.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/ScatterElementsUpdate_3.md diff --git a/docs/ops/movement/ScatterNDUpdate_3.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/ScatterNDUpdate_3.md similarity index 100% rename from docs/ops/movement/ScatterNDUpdate_3.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/ScatterNDUpdate_3.md diff --git a/docs/ops/movement/ScatterUpdate_3.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/ScatterUpdate_3.md similarity index 100% rename from docs/ops/movement/ScatterUpdate_3.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/ScatterUpdate_3.md diff --git a/docs/ops/movement/ShuffleChannels_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/ShuffleChannels_1.md similarity index 100% rename from docs/ops/movement/ShuffleChannels_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/ShuffleChannels_1.md diff --git a/docs/ops/movement/Slice_8.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Slice_8.md similarity index 100% rename from docs/ops/movement/Slice_8.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Slice_8.md diff --git a/docs/ops/movement/SpaceToBatch_2.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/SpaceToBatch_2.md similarity index 100% rename from docs/ops/movement/SpaceToBatch_2.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/SpaceToBatch_2.md diff --git a/docs/ops/movement/SpaceToDepth_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/SpaceToDepth_1.md similarity index 100% rename 
from docs/ops/movement/SpaceToDepth_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/SpaceToDepth_1.md diff --git a/docs/ops/movement/Split_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Split_1.md similarity index 100% rename from docs/ops/movement/Split_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Split_1.md diff --git a/docs/ops/movement/StridedSlice_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/StridedSlice_1.md similarity index 100% rename from docs/ops/movement/StridedSlice_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/StridedSlice_1.md diff --git a/docs/ops/movement/Tile_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Tile_1.md similarity index 100% rename from docs/ops/movement/Tile_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Tile_1.md diff --git a/docs/ops/movement/Transpose_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Transpose_1.md similarity index 100% rename from docs/ops/movement/Transpose_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Transpose_1.md diff --git a/docs/ops/movement/Unique_10.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Unique_10.md similarity index 100% rename from docs/ops/movement/Unique_10.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Unique_10.md diff --git a/docs/ops/movement/VariadicSplit_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/VariadicSplit_1.md similarity index 100% rename from docs/ops/movement/VariadicSplit_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/VariadicSplit_1.md diff --git a/docs/ops/normalization/BatchNormInference_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/normalization/BatchNormInference_1.md similarity index 100% rename from docs/ops/normalization/BatchNormInference_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/normalization/BatchNormInference_1.md diff --git a/docs/ops/normalization/BatchNormInference_5.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/normalization/BatchNormInference_5.md similarity index 100% rename from docs/ops/normalization/BatchNormInference_5.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/normalization/BatchNormInference_5.md diff --git a/docs/ops/normalization/GRN_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/normalization/GRN_1.md similarity index 100% rename from docs/ops/normalization/GRN_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/normalization/GRN_1.md diff --git a/docs/ops/normalization/GroupNormalization_12.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/normalization/GroupNormalization_12.md similarity index 100% rename from 
docs/ops/normalization/GroupNormalization_12.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/normalization/GroupNormalization_12.md diff --git a/docs/ops/normalization/LRN_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/normalization/LRN_1.md similarity index 100% rename from docs/ops/normalization/LRN_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/normalization/LRN_1.md diff --git a/docs/ops/normalization/MVN_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/normalization/MVN_1.md similarity index 100% rename from docs/ops/normalization/MVN_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/normalization/MVN_1.md diff --git a/docs/ops/normalization/MVN_6.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/normalization/MVN_6.md similarity index 100% rename from docs/ops/normalization/MVN_6.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/normalization/MVN_6.md diff --git a/docs/ops/normalization/NormalizeL2_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/normalization/NormalizeL2_1.md similarity index 100% rename from docs/ops/normalization/NormalizeL2_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/normalization/NormalizeL2_1.md diff --git a/docs/ops/pooling/AdaptiveAvgPool_8.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/pooling/AdaptiveAvgPool_8.md similarity index 100% rename from docs/ops/pooling/AdaptiveAvgPool_8.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/pooling/AdaptiveAvgPool_8.md diff --git a/docs/ops/pooling/AdaptiveMaxPool_8.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/pooling/AdaptiveMaxPool_8.md similarity index 100% rename from docs/ops/pooling/AdaptiveMaxPool_8.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/pooling/AdaptiveMaxPool_8.md diff --git a/docs/ops/pooling/AvgPool_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/pooling/AvgPool_1.md similarity index 100% rename from docs/ops/pooling/AvgPool_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/pooling/AvgPool_1.md diff --git a/docs/ops/pooling/MaxPool_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/pooling/MaxPool_1.md similarity index 100% rename from docs/ops/pooling/MaxPool_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/pooling/MaxPool_1.md diff --git a/docs/ops/pooling/MaxPool_8.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/pooling/MaxPool_8.md similarity index 100% rename from docs/ops/pooling/MaxPool_8.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/pooling/MaxPool_8.md diff --git a/docs/ops/quantization/FakeQuantize_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/quantization/FakeQuantize_1.md similarity index 100% rename from docs/ops/quantization/FakeQuantize_1.md rename to 
docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/quantization/FakeQuantize_1.md diff --git a/docs/ops/reduction/ReduceL1_4.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceL1_4.md similarity index 100% rename from docs/ops/reduction/ReduceL1_4.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceL1_4.md diff --git a/docs/ops/reduction/ReduceL2_4.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceL2_4.md similarity index 100% rename from docs/ops/reduction/ReduceL2_4.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceL2_4.md diff --git a/docs/ops/reduction/ReduceLogicalAnd_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceLogicalAnd_1.md similarity index 100% rename from docs/ops/reduction/ReduceLogicalAnd_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceLogicalAnd_1.md diff --git a/docs/ops/reduction/ReduceLogicalOr_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceLogicalOr_1.md similarity index 100% rename from docs/ops/reduction/ReduceLogicalOr_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceLogicalOr_1.md diff --git a/docs/ops/reduction/ReduceMax_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceMax_1.md similarity index 100% rename from docs/ops/reduction/ReduceMax_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceMax_1.md diff --git a/docs/ops/reduction/ReduceMean_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceMean_1.md similarity index 100% rename from docs/ops/reduction/ReduceMean_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceMean_1.md diff --git a/docs/ops/reduction/ReduceMin_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceMin_1.md similarity index 100% rename from docs/ops/reduction/ReduceMin_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceMin_1.md diff --git a/docs/ops/reduction/ReduceProd_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceProd_1.md similarity index 100% rename from docs/ops/reduction/ReduceProd_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceProd_1.md diff --git a/docs/ops/reduction/ReduceSum_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceSum_1.md similarity index 100% rename from docs/ops/reduction/ReduceSum_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceSum_1.md diff --git a/docs/ops/sequence/CTCGreedyDecoderSeqLen_6.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sequence/CTCGreedyDecoderSeqLen_6.md similarity index 100% rename from docs/ops/sequence/CTCGreedyDecoderSeqLen_6.md rename to 
docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sequence/CTCGreedyDecoderSeqLen_6.md diff --git a/docs/ops/sequence/CTCGreedyDecoder_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sequence/CTCGreedyDecoder_1.md similarity index 100% rename from docs/ops/sequence/CTCGreedyDecoder_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sequence/CTCGreedyDecoder_1.md diff --git a/docs/ops/sequence/CTCLoss_4.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sequence/CTCLoss_4.md similarity index 100% rename from docs/ops/sequence/CTCLoss_4.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sequence/CTCLoss_4.md diff --git a/docs/ops/sequence/GRUCell_3.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sequence/GRUCell_3.md similarity index 100% rename from docs/ops/sequence/GRUCell_3.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sequence/GRUCell_3.md diff --git a/docs/ops/sequence/GRUSequence_5.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sequence/GRUSequence_5.md similarity index 100% rename from docs/ops/sequence/GRUSequence_5.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sequence/GRUSequence_5.md diff --git a/docs/ops/sequence/LSTMCell_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sequence/LSTMCell_1.md similarity index 100% rename from docs/ops/sequence/LSTMCell_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sequence/LSTMCell_1.md diff --git a/docs/ops/sequence/LSTMSequence_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sequence/LSTMSequence_1.md similarity index 100% rename from docs/ops/sequence/LSTMSequence_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sequence/LSTMSequence_1.md diff --git a/docs/ops/sequence/OneHot_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sequence/OneHot_1.md similarity index 100% rename from docs/ops/sequence/OneHot_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sequence/OneHot_1.md diff --git a/docs/ops/sequence/RNNCell_3.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sequence/RNNCell_3.md similarity index 100% rename from docs/ops/sequence/RNNCell_3.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sequence/RNNCell_3.md diff --git a/docs/ops/sequence/RNNSequence_5.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sequence/RNNSequence_5.md similarity index 100% rename from docs/ops/sequence/RNNSequence_5.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sequence/RNNSequence_5.md diff --git a/docs/ops/shape/Reshape_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/shape/Reshape_1.md similarity index 100% rename from docs/ops/shape/Reshape_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/shape/Reshape_1.md diff --git 
a/docs/ops/shape/ShapeOf_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/shape/ShapeOf_1.md similarity index 100% rename from docs/ops/shape/ShapeOf_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/shape/ShapeOf_1.md diff --git a/docs/ops/shape/ShapeOf_3.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/shape/ShapeOf_3.md similarity index 100% rename from docs/ops/shape/ShapeOf_3.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/shape/ShapeOf_3.md diff --git a/docs/ops/shape/Squeeze_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/shape/Squeeze_1.md similarity index 100% rename from docs/ops/shape/Squeeze_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/shape/Squeeze_1.md diff --git a/docs/ops/shape/Unsqueeze_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/shape/Unsqueeze_1.md similarity index 100% rename from docs/ops/shape/Unsqueeze_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/shape/Unsqueeze_1.md diff --git a/docs/ops/signals/DFT_7.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/signals/DFT_7.md similarity index 100% rename from docs/ops/signals/DFT_7.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/signals/DFT_7.md diff --git a/docs/ops/signals/IDFT_7.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/signals/IDFT_7.md similarity index 100% rename from docs/ops/signals/IDFT_7.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/signals/IDFT_7.md diff --git a/docs/ops/signals/IRDFT_9.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/signals/IRDFT_9.md similarity index 100% rename from docs/ops/signals/IRDFT_9.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/signals/IRDFT_9.md diff --git a/docs/ops/signals/RDFT_9.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/signals/RDFT_9.md similarity index 100% rename from docs/ops/signals/RDFT_9.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/signals/RDFT_9.md diff --git a/docs/ops/sort/ExperimentalDetectronTopKROIs_6.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/ExperimentalDetectronTopKROIs_6.md similarity index 100% rename from docs/ops/sort/ExperimentalDetectronTopKROIs_6.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/ExperimentalDetectronTopKROIs_6.md diff --git a/docs/ops/sort/MatrixNMS_8.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/MatrixNMS_8.md similarity index 100% rename from docs/ops/sort/MatrixNMS_8.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/MatrixNMS_8.md diff --git a/docs/ops/sort/MulticlassNonMaxSuppression_8.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/MulticlassNonMaxSuppression_8.md similarity index 100% rename from docs/ops/sort/MulticlassNonMaxSuppression_8.md rename to 
docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/MulticlassNonMaxSuppression_8.md diff --git a/docs/ops/sort/MulticlassNonMaxSuppression_9.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/MulticlassNonMaxSuppression_9.md similarity index 100% rename from docs/ops/sort/MulticlassNonMaxSuppression_9.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/MulticlassNonMaxSuppression_9.md diff --git a/docs/ops/sort/NMSRotated_13.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/NMSRotated_13.md similarity index 100% rename from docs/ops/sort/NMSRotated_13.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/NMSRotated_13.md diff --git a/docs/ops/sort/NonMaxSuppression_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/NonMaxSuppression_1.md similarity index 100% rename from docs/ops/sort/NonMaxSuppression_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/NonMaxSuppression_1.md diff --git a/docs/ops/sort/NonMaxSuppression_3.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/NonMaxSuppression_3.md similarity index 100% rename from docs/ops/sort/NonMaxSuppression_3.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/NonMaxSuppression_3.md diff --git a/docs/ops/sort/NonMaxSuppression_4.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/NonMaxSuppression_4.md similarity index 100% rename from docs/ops/sort/NonMaxSuppression_4.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/NonMaxSuppression_4.md diff --git a/docs/ops/sort/NonMaxSuppression_5.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/NonMaxSuppression_5.md similarity index 100% rename from docs/ops/sort/NonMaxSuppression_5.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/NonMaxSuppression_5.md diff --git a/docs/ops/sort/NonMaxSuppression_9.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/NonMaxSuppression_9.md similarity index 100% rename from docs/ops/sort/NonMaxSuppression_9.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/NonMaxSuppression_9.md diff --git a/docs/ops/sort/TopK_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/TopK_1.md similarity index 100% rename from docs/ops/sort/TopK_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/TopK_1.md diff --git a/docs/ops/sort/TopK_11.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/TopK_11.md similarity index 100% rename from docs/ops/sort/TopK_11.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/TopK_11.md diff --git a/docs/ops/sort/TopK_3.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/TopK_3.md similarity index 100% rename from docs/ops/sort/TopK_3.md rename to 
docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/TopK_3.md diff --git a/docs/ops/sparse/EmbeddingBagOffsetsSum_3.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sparse/EmbeddingBagOffsetsSum_3.md similarity index 100% rename from docs/ops/sparse/EmbeddingBagOffsetsSum_3.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sparse/EmbeddingBagOffsetsSum_3.md diff --git a/docs/ops/sparse/EmbeddingBagPackedSum_3.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sparse/EmbeddingBagPackedSum_3.md similarity index 100% rename from docs/ops/sparse/EmbeddingBagPackedSum_3.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sparse/EmbeddingBagPackedSum_3.md diff --git a/docs/ops/sparse/EmbeddingSegmentsSum_3.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sparse/EmbeddingSegmentsSum_3.md similarity index 100% rename from docs/ops/sparse/EmbeddingSegmentsSum_3.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sparse/EmbeddingSegmentsSum_3.md diff --git a/docs/ops/type/ConvertLike_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/type/ConvertLike_1.md similarity index 100% rename from docs/ops/type/ConvertLike_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/type/ConvertLike_1.md diff --git a/docs/ops/type/Convert_1.md b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/type/Convert_1.md similarity index 100% rename from docs/ops/type/Convert_1.md rename to docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/type/Convert_1.md From e5233a2f2cbf6b5b8a02b693931a47da984a0595 Mon Sep 17 00:00:00 2001 From: Tatiana Savina Date: Wed, 4 Oct 2023 11:54:24 +0200 Subject: [PATCH 056/257] [DOCS] Port doc conversion change (#20232) * [DOCS] Fix conversion docs comments (#20144) * fix comments * more fixes * fix missing part * add ovc to img (#20192) --- docs/_static/images/BASIC_FLOW_IE_C.svg | 4 ++-- .../convert_python_model_objects.md | 2 +- .../supported_model_formats.md | 2 +- .../Convert_Model_From_PyTorch.md | 6 +++--- .../Convert_Model_From_TensorFlow.md | 6 +++--- .../openvino_workflow/model_introduction.md | 18 +++++++++--------- .../supported_model_formats.md | 2 +- .../Convert_Model_From_PyTorch.md | 10 +++++----- .../Convert_Model_From_TensorFlow.md | 2 +- .../Convert_Model_From_TensorFlow_Lite.md | 2 +- 10 files changed, 27 insertions(+), 27 deletions(-) diff --git a/docs/_static/images/BASIC_FLOW_IE_C.svg b/docs/_static/images/BASIC_FLOW_IE_C.svg index af2d88040ada55..65bb26020df9e4 100644 --- a/docs/_static/images/BASIC_FLOW_IE_C.svg +++ b/docs/_static/images/BASIC_FLOW_IE_C.svg @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:18bc08f90f844c09594cfa538f4ba2205ea2e67c849927490c01923e394ed11a -size 71578 +oid sha256:63301a7c31b6660fbdb55fb733e20af6a172c0512455f5de8c6be5e1a5b3ed0b +size 71728 diff --git a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/convert_python_model_objects.md b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/convert_python_model_objects.md index 7208052bdc2d8a..c9b9e2d276fcef 100644 --- 
a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/convert_python_model_objects.md +++ b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/convert_python_model_objects.md @@ -20,7 +20,7 @@ Example of converting a PyTorch model directly from memory: import torchvision - model = torchvision.models.resnet50(pretrained=True) + model = torchvision.models.resnet50(weights='DEFAULT') ov_model = convert_model(model) The following types are supported as an input model for ``convert_model()``: diff --git a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats.md b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats.md index 068ba7fca16297..fc0bf685c7f073 100644 --- a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats.md +++ b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats.md @@ -58,7 +58,7 @@ Here are code examples of how to use these methods with different model formats: .. code-block:: py :force: - model = torchvision.models.resnet50(pretrained=True) + model = torchvision.models.resnet50(weights='DEFAULT') ov_model = convert_model(model) compiled_model = core.compile_model(ov_model, "AUTO") diff --git a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/Convert_Model_From_PyTorch.md b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/Convert_Model_From_PyTorch.md index 0cafd3066535ab..3215573de5e990 100644 --- a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/Convert_Model_From_PyTorch.md +++ b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/Convert_Model_From_PyTorch.md @@ -26,7 +26,7 @@ To convert a PyTorch model to the OpenVINO IR format, use the OVC API (supersedi import torch from openvino.tools.mo import convert_model - model = torchvision.models.resnet50(pretrained=True) + model = torchvision.models.resnet50(weights='DEFAULT') ov_model = convert_model(model) Following PyTorch model formats are supported: @@ -45,7 +45,7 @@ parameter to be set, for example: import torch from openvino.tools.mo import convert_model - model = torchvision.models.resnet50(pretrained=True) + model = torchvision.models.resnet50(weights='DEFAULT') ov_model = convert_model(model, example_input=torch.randn(1, 3, 100, 100)) ``example_input`` accepts the following formats: @@ -70,7 +70,7 @@ Exporting a PyTorch Model to ONNX Format It is also possible to export a PyTorch model to ONNX and then convert it to OpenVINO IR. To convert and deploy a PyTorch model this way, follow these steps: 1. `Export a PyTorch model to ONNX <#exporting-a-pytorch-model-to-onnx-format>`__. -2. :doc:`Convert the ONNX model ` to produce an optimized :doc:`Intermediate Representation ` of the model based on the trained network topology, weights, and biases values. +2. :doc:`Convert an ONNX model ` to produce an optimized :doc:`Intermediate Representation ` of the model based on the trained network topology, weights, and biases values. PyTorch models are defined in Python. 
To export them, use the ``torch.onnx.export()`` method. The code to evaluate or test the model is usually provided with its code and can be used for its initialization and export. diff --git a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/Convert_Model_From_TensorFlow.md b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/Convert_Model_From_TensorFlow.md index 1d34263e65e72a..5edea16a92fb44 100644 --- a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/Convert_Model_From_TensorFlow.md +++ b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/Convert_Model_From_TensorFlow.md @@ -17,7 +17,7 @@ Converting TensorFlow 1 Models Converting Frozen Model Format +++++++++++++++++++++++++++++++ -To convert a TensorFlow model, use the ``*mo*`` script to simply convert a model with a path to the input model ``*.pb*`` file: +To convert a TensorFlow model, use the ``*mo*`` script to simply convert a model with a path to the input model *.pb* file: .. code-block:: sh @@ -30,7 +30,7 @@ Converting Non-Frozen Model Formats There are three ways to store non-frozen TensorFlow models and convert them by model conversion API: 1. **Checkpoint**. In this case, a model consists of two files: ``inference_graph.pb`` (or ``inference_graph.pbtxt``) and ``checkpoint_file.ckpt``. -If you do not have an inference graph file, refer to the `Freezing Custom Models in Python <#Freezing-Custom-Models-in-Python>`__ section. +If you do not have an inference graph file, refer to the `Freezing Custom Models in Python <#freezing-custom-models-in-python>`__ section. To convert the model with the inference graph in ``.pb`` format, run the `mo` script with a path to the checkpoint file: .. code-block:: sh @@ -139,7 +139,7 @@ It is essential to freeze the model before pruning. Use the following code snipp Keras H5 ++++++++ -If you have a model in the HDF5 format, load the model using TensorFlow 2 and serialize it in the +If you have a model in HDF5 format, load the model using TensorFlow 2 and serialize it to SavedModel format. Here is an example of how to do it: .. code-block:: py diff --git a/docs/articles_en/openvino_workflow/model_introduction.md b/docs/articles_en/openvino_workflow/model_introduction.md index cadd407ba0b6a6..c11beb17e67764 100644 --- a/docs/articles_en/openvino_workflow/model_introduction.md +++ b/docs/articles_en/openvino_workflow/model_introduction.md @@ -22,17 +22,17 @@ and run a pre-trained network from an online database, such as or `Torchvision models `__. If your selected model is in one of the :doc:`OpenVINO™ supported model formats `, -you can use it directly, without the need to save as the OpenVINO IR. +you can use it directly, without the need to save as OpenVINO IR (`openvino.Model `__ - -`ov.Model `__). +`ov.Model `__). For this purpose, you can use ``openvino.Core.read_model`` and ``openvino.Core.compile_model`` methods, so that conversion is performed automatically before inference, for -maximum convenience (note that working with PyTorch differs slightly, the Python API -being the only option, while TensorFlow may present additional considerations -:doc:`TensorFlow Frontend Capabilities and Limitations `). +maximum convenience. Note that for PyTorch models, Python API +is the only conversion option. 
TensorFlow may present additional considerations +:doc:`TensorFlow Frontend Capabilities and Limitations `. -For better performance and more optimization options, OpenVINO offers a conversion +For better performance and more optimization options, OpenVINO also offers a conversion API with two possible approaches: the Python API functions (``openvino.convert_model`` and ``openvino.save_model``) and the ``ovc`` command line tool, which are described in detail in this article. @@ -50,7 +50,7 @@ and ``openvino.save_model``) and the ``ovc`` command line tool, which are descri Convert a Model in Python: ``convert_model`` ############################################## -You can use the Model conversion API in Python with the ``openvino.convert_model`` function. This function converts a model from its original framework representation, for example PyTorch or TensorFlow, to the object of type ``openvino.Model``. The resulting ``openvino.Model`` can be inferred in the same application (Python script or Jupyter Notebook) or saved into a file using``openvino.save_model`` for future use. Below, there are examples of how to use the ``openvino.convert_model`` with models from popular public repositories: +You can use the Model conversion API in Python with the ``openvino.convert_model`` function. This function converts a model from its original framework representation, for example PyTorch or TensorFlow, to the object of type ``openvino.Model``. The resulting ``openvino.Model`` can be compiled with ``openvino.compile_model`` and inferred in the same application (Python script or Jupyter Notebook) or saved into a file using``openvino.save_model`` for future use. Below, there are examples of how to use the ``openvino.convert_model`` with models from popular public repositories: .. tab-set:: @@ -64,7 +64,7 @@ You can use the Model conversion API in Python with the ``openvino.convert_model import torch from torchvision.models import resnet50 - model = resnet50(pretrained=True) + model = resnet50(weights='DEFAULT') # prepare input_data input_data = torch.rand(1, 3, 224, 224) @@ -81,7 +81,7 @@ You can use the Model conversion API in Python with the ``openvino.convert_model # compile model compiled_model = ov.compile_model(ov_model) - # run the inference + # run inference result = compiled_model(input_data) .. 
tab-item:: Hugging Face Transformers diff --git a/docs/articles_en/openvino_workflow/model_introduction/supported_model_formats.md b/docs/articles_en/openvino_workflow/model_introduction/supported_model_formats.md index 903199e1547165..6ff5e620f10d78 100644 --- a/docs/articles_en/openvino_workflow/model_introduction/supported_model_formats.md +++ b/docs/articles_en/openvino_workflow/model_introduction/supported_model_formats.md @@ -21,7 +21,7 @@ * :doc:`How to convert TensorFlow Lite ` * :doc:`How to convert PaddlePaddle ` -To choose the best workflow for your application, read :doc:`Introduction to Model Preparation` +To choose the best workflow for your application, read the :doc:`Model Preparation section ` Refer to the list of all supported conversion options in :doc:`Conversion Parameters ` diff --git a/docs/articles_en/openvino_workflow/model_introduction/supported_model_formats/Convert_Model_From_PyTorch.md b/docs/articles_en/openvino_workflow/model_introduction/supported_model_formats/Convert_Model_From_PyTorch.md index 6fcd6d7c03aaa8..f6e6986c5d93ba 100644 --- a/docs/articles_en/openvino_workflow/model_introduction/supported_model_formats/Convert_Model_From_PyTorch.md +++ b/docs/articles_en/openvino_workflow/model_introduction/supported_model_formats/Convert_Model_From_PyTorch.md @@ -18,7 +18,7 @@ Here is the simplest example of PyTorch model conversion using a model from ``to import torch import openvino as ov - model = torchvision.models.resnet50(pretrained=True) + model = torchvision.models.resnet50(weights='DEFAULT') ov_model = ov.convert_model(model) ``openvino.convert_model`` function supports the following PyTorch model object types: @@ -27,9 +27,9 @@ Here is the simplest example of PyTorch model conversion using a model from ``to * ``torch.jit.ScriptModule`` * ``torch.jit.ScriptFunction`` -When passing a ``torch.nn.Module`` derived class object as an input model, converting PyTorch models often requires the ``example_input`` parameter to be specified in the ``openvino.convert_model`` function call. Internally it triggers the model tracing during the model conversion process, using the capabilities of the ``torch.jit.trace`` function. +When using ``torch.nn.Module`` as an input model, ``openvino.convert_model`` often requires the ``example_input`` parameter to be specified. Internally, it triggers the model tracing during the model conversion process, using the capabilities of the ``torch.jit.trace`` function. -The use of ``example_input`` can lead to a better quality of the resulting OpenVINO model in terms of correctness and performance compared to converting the same original model without specifying ``example_input``. While the necessity of ``example_input`` depends on the implementation details of a specific PyTorch model, it is recommended to always set the ``example_input`` parameter when it is available. +The use of ``example_input`` can lead to a better quality OpenVINO model in terms of correctness and performance compared to converting the same original model without specifying ``example_input``. While the necessity of ``example_input`` depends on the implementation details of a specific PyTorch model, it is recommended to always set the ``example_input`` parameter when it is available. The value for the ``example_input`` parameter can be easily derived from knowing the input tensor's element type and shape. 
While it may not be suitable for all cases, random numbers can frequently serve this purpose effectively: @@ -131,7 +131,7 @@ Exporting a PyTorch Model to ONNX Format An alternative method of converting PyTorch models is exporting a PyTorch model to ONNX with ``torch.onnx.export`` first and then converting the resulting ``.onnx`` file to OpenVINO Model with ``openvino.convert_model``. It can be considered as a backup solution if a model cannot be converted directly from PyTorch to OpenVINO as described in the above chapters. Converting through ONNX can be more expensive in terms of code, conversion time, and allocated memory. 1. Refer to the `Exporting PyTorch models to ONNX format `__ guide to learn how to export models from PyTorch to ONNX. -2. Follow :doc:`Convert the ONNX model ` chapter to produce OpenVINO model. +2. Follow :doc:`Convert an ONNX model ` chapter to produce OpenVINO model. Here is an illustration of using these two steps together: @@ -142,7 +142,7 @@ Here is an illustration of using these two steps together: import torch import openvino as ov - model = torchvision.models.resnet50(pretrained=True) + model = torchvision.models.resnet50(weights='DEFAULT') # 1. Export to ONNX torch.onnx.export(model, (torch.rand(1, 3, 224, 224), ), 'model.onnx') # 2. Convert to OpenVINO diff --git a/docs/articles_en/openvino_workflow/model_introduction/supported_model_formats/Convert_Model_From_TensorFlow.md b/docs/articles_en/openvino_workflow/model_introduction/supported_model_formats/Convert_Model_From_TensorFlow.md index 5a7a3ab3a7c706..bec51f537cd541 100644 --- a/docs/articles_en/openvino_workflow/model_introduction/supported_model_formats/Convert_Model_From_TensorFlow.md +++ b/docs/articles_en/openvino_workflow/model_introduction/supported_model_formats/Convert_Model_From_TensorFlow.md @@ -45,7 +45,7 @@ To convert a model, run conversion with the directory as the model argument: Keras H5 Format +++++++++++++++ -If you have a model in the HDF5 format, load the model using TensorFlow 2 and serialize it in the +If you have a model in HDF5 format, load the model using TensorFlow 2 and serialize it to SavedModel format. Here is an example of how to do it: .. code-block:: py diff --git a/docs/articles_en/openvino_workflow/model_introduction/supported_model_formats/Convert_Model_From_TensorFlow_Lite.md b/docs/articles_en/openvino_workflow/model_introduction/supported_model_formats/Convert_Model_From_TensorFlow_Lite.md index e25795c95a4b1f..642615f0173397 100644 --- a/docs/articles_en/openvino_workflow/model_introduction/supported_model_formats/Convert_Model_From_TensorFlow_Lite.md +++ b/docs/articles_en/openvino_workflow/model_introduction/supported_model_formats/Convert_Model_From_TensorFlow_Lite.md @@ -7,7 +7,7 @@ TensorFlow Lite format to the OpenVINO Model. -To convert an ONNX model, run model conversion with the path to the ``.tflite`` model file: +To convert an TensorFlow Lite model, run model conversion with the path to the ``.tflite`` model file: .. 
tab-set::

From ee8bd33c6db415f16104afb3c30cfd81be98f420 Mon Sep 17 00:00:00 2001
From: Alexandra Sidorova
Date: Wed, 4 Oct 2023 14:04:52 +0400
Subject: [PATCH 057/257] [CPU][Snippets] Fixed EnforcePrecision pass registration (#20051)

---
 src/plugins/intel_cpu/src/nodes/subgraph.cpp      |  2 +-
 .../shared_tests_instances/skip_tests_config.cpp  |  1 +
 .../shared_tests_instances/snippets/mha.cpp       | 12 ++++++++++++
 3 files changed, 14 insertions(+), 1 deletion(-)

diff --git a/src/plugins/intel_cpu/src/nodes/subgraph.cpp b/src/plugins/intel_cpu/src/nodes/subgraph.cpp
index 5a70cf89d2d273..286faee9b85d15 100644
--- a/src/plugins/intel_cpu/src/nodes/subgraph.cpp
+++ b/src/plugins/intel_cpu/src/nodes/subgraph.cpp
@@ -714,7 +714,7 @@ void Snippet::SnippetJitExecutor::generate(const jit_snippets_compile_args* jcp)
         // MatMul has to be decomposed to Brgemm operations before enforcement
         // Note, MatMul decomposition will be run later again for case if BF16 enforcement is not happened
         SNIPPETS_REGISTER_PASS(PassPosition(Place::PipelineStart), ov::snippets::pass::MatMulToBrgemm);
-        SNIPPETS_REGISTER_PASS(PassPosition(Place::PipelineStart), pass::EnforcePrecision, element::f32, element::bf16);
+        SNIPPETS_REGISTER_PASS(PassPosition(Place::After, "MatMulToBrgemm"), pass::EnforcePrecision, element::f32, element::bf16);
     }
     SNIPPETS_REGISTER_PASS(PassPosition(Place::Before, "PropagatePrecision"), ov::intel_cpu::pass::BrgemmToBrgemmCPU);
diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp
index 063424fa1bb22d..b3e7855c05086c 100644
--- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp
+++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp
@@ -302,6 +302,7 @@ std::vector disabledTestPatterns() {
         // ignored for not supported bf16 platforms
         retVector.emplace_back(R"(.*smoke_Snippets_EnforcePrecision_bf16.*)");
         retVector.emplace_back(R"(.*smoke_Snippets_MHAWOTransposeEnforceBF16.*)");
+        retVector.emplace_back(R"(.*smoke_Snippets_MHAEnforceBF16.*)");
     }
     return retVector;
diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/mha.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/mha.cpp
index eab92fd7ddd3f6..8193709b479741 100644
--- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/mha.cpp
+++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/mha.cpp
@@ -64,6 +64,18 @@ INSTANTIATE_TEST_SUITE_P(smoke_Snippets_MHABF16, MHA,
                          ::testing::Values(CPUTestUtils::cpuEmptyPluginConfig)),
                          MHA::getTestCaseName);
 
+INSTANTIATE_TEST_SUITE_P(smoke_Snippets_MHAEnforceBF16, MHA,
+                         ::testing::Combine(
+                                 ::testing::ValuesIn(inputShapes),
+                                 ::testing::ValuesIn(precision_f32(4)),
+                                 ::testing::Values(ov::element::bf16),
+                                 ::testing::ValuesIn({false}),
+                                 ::testing::Values(7),
+                                 ::testing::Values(7),
+                                 ::testing::Values(ov::test::utils::DEVICE_CPU),
+                                 ::testing::Values(CPUTestUtils::cpuBF16PluginConfig)),
+                         MHA::getTestCaseName);
+
 INSTANTIATE_TEST_SUITE_P(smoke_Snippets_MHAMulAdd, MHAMulAdd,
                          ::testing::Combine(

From 3b8ac28ced85d52861c6052dc8aecd2dfe24c82e Mon Sep 17 00:00:00 2001
From: Ilya Churaev
Date: Wed, 4 Oct 2023 14:18:05 +0400
Subject: [PATCH 058/257] Moved ConstantResultTest to new API (#20224)

---
 .../subgraph_tests/constant_result.cpp        | 62 ++++-----
 .../subgraph_tests/constant_result.cpp        |  3 +-
 .../subgraph_tests/constant_result.cpp        |  2 +-
.../subgraph_tests/constant_result.hpp | 9 +- .../subgraph_tests/constant_result_legacy.hpp | 15 +++ .../subgraph/constant_result.hpp | 58 +++++--- .../src/subgraph/constant_result.cpp | 127 +++++++++++++----- 7 files changed, 185 insertions(+), 91 deletions(-) create mode 100644 src/tests/functional/plugin/shared/include/subgraph_tests/constant_result_legacy.hpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/constant_result.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/constant_result.cpp index 0f71f32ef7b95f..4197076d6dc0d6 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/constant_result.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/constant_result.cpp @@ -2,44 +2,38 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "subgraph_tests/constant_result.hpp" + #include -#include "subgraph_tests/constant_result.hpp" #include "common_test_utils/test_constants.hpp" -using namespace SubgraphTestsDefinitions; -using namespace InferenceEngine; +using namespace ov::test; namespace { -const std::vector types = { - ConstantSubgraphType::SINGLE_COMPONENT, - ConstantSubgraphType::SEVERAL_COMPONENT -}; - -const std::vector shapes = { - {1, 3, 10, 10}, - {2, 3, 4, 5} -}; - -const std::vector precisions = { - Precision::U8, - Precision::I8, - Precision::U16, - Precision::I16, - Precision::I32, - Precision::U64, - Precision::I64, - Precision::FP32, - Precision::BOOL -}; - -INSTANTIATE_TEST_SUITE_P(smoke_Check, ConstantResultSubgraphTest, - ::testing::Combine( - ::testing::ValuesIn(types), - ::testing::ValuesIn(shapes), - ::testing::ValuesIn(precisions), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ConstantResultSubgraphTest::getTestCaseName); - -} // namespace +const std::vector types = {ConstantSubgraphType::SINGLE_COMPONENT, + ConstantSubgraphType::SEVERAL_COMPONENT}; + +const std::vector shapes = {{1, 3, 10, 10}, {2, 3, 4, 5}}; + +const std::vector precisions = {ov::element::u8, + ov::element::i8, + ov::element::u16, + ov::element::i16, + ov::element::u32, + ov::element::i32, + ov::element::u64, + ov::element::i64, + ov::element::f32, + ov::element::boolean}; + +INSTANTIATE_TEST_SUITE_P(smoke_Check, + ConstantResultSubgraphTest, + ::testing::Combine(::testing::ValuesIn(types), + ::testing::ValuesIn(shapes), + ::testing::ValuesIn(precisions), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ConstantResultSubgraphTest::getTestCaseName); + +} // namespace diff --git a/src/plugins/intel_gna/tests/functional/shared_tests_instances/subgraph_tests/constant_result.cpp b/src/plugins/intel_gna/tests/functional/shared_tests_instances/subgraph_tests/constant_result.cpp index a3f9aabb67c01f..312466a2f765f0 100644 --- a/src/plugins/intel_gna/tests/functional/shared_tests_instances/subgraph_tests/constant_result.cpp +++ b/src/plugins/intel_gna/tests/functional/shared_tests_instances/subgraph_tests/constant_result.cpp @@ -2,11 +2,10 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "subgraph_tests/constant_result.hpp" - #include #include "common_test_utils/test_constants.hpp" +#include "subgraph_tests/constant_result_legacy.hpp" using namespace SubgraphTestsDefinitions; using namespace InferenceEngine; diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/constant_result.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/constant_result.cpp index 
b23220562451bf..97614faf548515 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/constant_result.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/constant_result.cpp @@ -4,7 +4,7 @@ #include -#include "subgraph_tests/constant_result.hpp" +#include "subgraph_tests/constant_result_legacy.hpp" #include "common_test_utils/test_constants.hpp" using namespace SubgraphTestsDefinitions; diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/constant_result.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/constant_result.hpp index c3d8905fc219e2..e9aa3517cafcc7 100644 --- a/src/tests/functional/plugin/shared/include/subgraph_tests/constant_result.hpp +++ b/src/tests/functional/plugin/shared/include/subgraph_tests/constant_result.hpp @@ -6,11 +6,12 @@ #include "shared_test_classes/subgraph/constant_result.hpp" -namespace SubgraphTestsDefinitions { +namespace ov { +namespace test { TEST_P(ConstantResultSubgraphTest, CompareWithRefs) { - Run(); + run(); } -} // namespace SubgraphTestsDefinitions - +} // namespace test +} // namespace ov diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/constant_result_legacy.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/constant_result_legacy.hpp new file mode 100644 index 00000000000000..d41ceef9ed5a08 --- /dev/null +++ b/src/tests/functional/plugin/shared/include/subgraph_tests/constant_result_legacy.hpp @@ -0,0 +1,15 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "shared_test_classes/subgraph/constant_result.hpp" + +namespace SubgraphTestsDefinitions { + +TEST_P(ConstantResultSubgraphTest, CompareWithRefs) { + Run(); +} + +} // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/constant_result.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/constant_result.hpp index 5ffffb9ced5709..b979473ead34d1 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/constant_result.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/constant_result.hpp @@ -4,35 +4,63 @@ #pragma once -#include +#include #include +#include #include -#include +#include "openvino/core/type/element_type.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" -namespace SubgraphTestsDefinitions { +namespace ov { +namespace test { + +enum class ConstantSubgraphType { SINGLE_COMPONENT, SEVERAL_COMPONENT }; + +std::ostream& operator<<(std::ostream& os, ConstantSubgraphType type); + +typedef std::tuple + constResultParams; + +class ConstantResultSubgraphTest : public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseTest { +public: + static std::string getTestCaseName(const testing::TestParamInfo& obj); + void createGraph(const ConstantSubgraphType& type, + const ov::Shape& input_shape, + const ov::element::Type& input_type); -enum class ConstantSubgraphType { - SINGLE_COMPONENT, - SEVERAL_COMPONENT +protected: + void SetUp() override; }; -std::ostream& operator<<(std::ostream &os, ConstantSubgraphType type); +} // namespace test +} // namespace ov -typedef std::tuple < - ConstantSubgraphType, - InferenceEngine::SizeVector, // input shape - InferenceEngine::Precision, // input precision - 
std::string // Device name -> constResultParams; +namespace SubgraphTestsDefinitions { + +using ov::test::ConstantSubgraphType; + +typedef std::tuple + constResultParams; class ConstantResultSubgraphTest : public testing::WithParamInterface, virtual public LayerTestsUtils::LayerTestsCommon { public: static std::string getTestCaseName(const testing::TestParamInfo& obj); - void createGraph(const ConstantSubgraphType& type, const InferenceEngine::SizeVector &inputShape, const InferenceEngine::Precision &inputPrecision); + void createGraph(const ConstantSubgraphType& type, + const InferenceEngine::SizeVector& inputShape, + const InferenceEngine::Precision& inputPrecision); + protected: void SetUp() override; }; diff --git a/src/tests/functional/shared_test_classes/src/subgraph/constant_result.cpp b/src/tests/functional/shared_test_classes/src/subgraph/constant_result.cpp index e14239f14c56f1..9f2cb371469ec8 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/constant_result.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/constant_result.cpp @@ -4,29 +4,84 @@ #include "shared_test_classes/subgraph/constant_result.hpp" -using namespace InferenceEngine; -using namespace ngraph; +#include "ngraph_functions/builders.hpp" +#include "openvino/op/result.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" -namespace SubgraphTestsDefinitions { +namespace ov { +namespace test { -std::ostream& operator<<(std::ostream &os, ConstantSubgraphType type) { +std::ostream& operator<<(std::ostream& os, ConstantSubgraphType type) { switch (type) { - case ConstantSubgraphType::SINGLE_COMPONENT: - os << "SINGLE_COMPONENT"; - break; - case ConstantSubgraphType::SEVERAL_COMPONENT: - os << "SEVERAL_COMPONENT"; - break; - default: - os << "UNSUPPORTED_CONST_SUBGRAPH_TYPE"; + case ConstantSubgraphType::SINGLE_COMPONENT: + os << "SINGLE_COMPONENT"; + break; + case ConstantSubgraphType::SEVERAL_COMPONENT: + os << "SEVERAL_COMPONENT"; + break; + default: + os << "UNSUPPORTED_CONST_SUBGRAPH_TYPE"; } return os; } std::string ConstantResultSubgraphTest::getTestCaseName(const testing::TestParamInfo& obj) { ConstantSubgraphType type; - SizeVector IS; - Precision inputPrecision; + ov::Shape input_shape; + ov::element::Type input_type; + std::string target_device; + + std::tie(type, input_shape, input_type, target_device) = obj.param; + std::ostringstream result; + result << "SubgraphType=" << type << "_"; + result << "IS=" << input_shape << "_"; + result << "IT=" << input_type << "_"; + result << "Device=" << target_device; + return result.str(); +} + +void ConstantResultSubgraphTest::createGraph(const ConstantSubgraphType& type, + const ov::Shape& input_shape, + const ov::element::Type& input_type) { + ParameterVector params; + ResultVector results; + switch (type) { + case ConstantSubgraphType::SINGLE_COMPONENT: { + auto input = ngraph::builder::makeConstant(input_type, input_shape, {}, true); + results.push_back(std::make_shared(input)); + break; + } + case ConstantSubgraphType::SEVERAL_COMPONENT: { + auto input1 = ngraph::builder::makeConstant(input_type, input_shape, {}, true); + results.push_back(std::make_shared(input1)); + auto input2 = ngraph::builder::makeConstant(input_type, input_shape, {}, true); + results.push_back(std::make_shared(input2)); + break; + } + default: { + throw std::runtime_error("Unsupported constant graph type"); + } + } + function = std::make_shared(results, params, "ConstResult"); +} + +void ConstantResultSubgraphTest::SetUp() { + ConstantSubgraphType type; + 
ov::Shape input_shape; + ov::element::Type input_type; + std::tie(type, input_shape, input_type, targetDevice) = this->GetParam(); + + createGraph(type, input_shape, input_type); +} +} // namespace test +} // namespace ov + +namespace SubgraphTestsDefinitions { + +std::string ConstantResultSubgraphTest::getTestCaseName(const testing::TestParamInfo& obj) { + ConstantSubgraphType type; + InferenceEngine::SizeVector IS; + InferenceEngine::Precision inputPrecision; std::string targetDevice; std::tie(type, IS, inputPrecision, targetDevice) = obj.param; @@ -38,35 +93,37 @@ std::string ConstantResultSubgraphTest::getTestCaseName(const testing::TestParam return result.str(); } -void ConstantResultSubgraphTest::createGraph(const ConstantSubgraphType& type, const SizeVector &inputShape, const Precision &inputPrecision) { +void ConstantResultSubgraphTest::createGraph(const ConstantSubgraphType& type, + const InferenceEngine::SizeVector& inputShape, + const InferenceEngine::Precision& inputPrecision) { auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecision); - ParameterVector params; - ResultVector results; + ov::ParameterVector params; + ov::ResultVector results; switch (type) { - case ConstantSubgraphType::SINGLE_COMPONENT: { - auto input = builder::makeConstant(ngPrc, inputShape, {}, true); - results.push_back(std::make_shared(input)); - break; - } - case ConstantSubgraphType::SEVERAL_COMPONENT: { - auto input1 = builder::makeConstant(ngPrc, inputShape, {}, true); - results.push_back(std::make_shared(input1)); - auto input2 = builder::makeConstant(ngPrc, inputShape, {}, true); - results.push_back(std::make_shared(input2)); - break; - } - default: { - throw std::runtime_error("Unsupported constant graph type"); - } + case ConstantSubgraphType::SINGLE_COMPONENT: { + auto input = ngraph::builder::makeConstant(ngPrc, inputShape, {}, true); + results.push_back(std::make_shared(input)); + break; + } + case ConstantSubgraphType::SEVERAL_COMPONENT: { + auto input1 = ngraph::builder::makeConstant(ngPrc, inputShape, {}, true); + results.push_back(std::make_shared(input1)); + auto input2 = ngraph::builder::makeConstant(ngPrc, inputShape, {}, true); + results.push_back(std::make_shared(input2)); + break; + } + default: { + throw std::runtime_error("Unsupported constant graph type"); + } } - function = std::make_shared(results, params, "ConstResult"); + function = std::make_shared(results, params, "ConstResult"); } void ConstantResultSubgraphTest::SetUp() { ConstantSubgraphType type; - SizeVector IS; - Precision inputPrecision; + InferenceEngine::SizeVector IS; + InferenceEngine::Precision inputPrecision; std::tie(type, IS, inputPrecision, targetDevice) = this->GetParam(); createGraph(type, IS, inputPrecision); From 0b56e8d32d00dbed02ddc8a22540476e56e26f8f Mon Sep 17 00:00:00 2001 From: Nadezhda Ageeva Date: Wed, 4 Oct 2023 14:55:26 +0400 Subject: [PATCH 059/257] Keep precision of inputs/outputs in SubgraphBaseTest::calculate_refs (#20227) --- .../functional/shared_test_classes/src/base/ov_subgraph.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tests/functional/shared_test_classes/src/base/ov_subgraph.cpp b/src/tests/functional/shared_test_classes/src/base/ov_subgraph.cpp index 6d4567eb345b91..c7059063158d07 100644 --- a/src/tests/functional/shared_test_classes/src/base/ov_subgraph.cpp +++ b/src/tests/functional/shared_test_classes/src/base/ov_subgraph.cpp @@ -287,7 +287,7 @@ std::vector SubgraphBaseTest::calculate_refs() { auto functionToProcess = 
functionRefs->clone(); precisions_map convert_precisions = get_ref_precisions_convert_map(); pass::Manager manager; - manager.register_pass(convert_precisions); + manager.register_pass(convert_precisions, type_to_fuse_map{}, false, false); manager.run_passes(functionToProcess); functionToProcess->validate_nodes_and_infer_types(); From 8679414be4b20581e37e19da14b26f4af6bcc345 Mon Sep 17 00:00:00 2001 From: Irina Efode Date: Wed, 4 Oct 2023 14:57:32 +0400 Subject: [PATCH 060/257] [GHA][CONFORMANCE] Init gtest filter for parallel runner in case not used arg (#20234) --- .../functional_test_utils/layer_tests_summary/run_parallel.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/tests/test_utils/functional_test_utils/layer_tests_summary/run_parallel.py b/src/tests/test_utils/functional_test_utils/layer_tests_summary/run_parallel.py index b0832867ea219b..67077c9d055d4f 100644 --- a/src/tests/test_utils/functional_test_utils/layer_tests_summary/run_parallel.py +++ b/src/tests/test_utils/functional_test_utils/layer_tests_summary/run_parallel.py @@ -217,6 +217,7 @@ def __init__(self, exec_file_path: os.path, test_command_line: list, self._exec_file_path = exec_file_path self._working_dir = working_dir self._conformance_ir_filelists = list() + self._gtest_filter = "" self._command = self.__init_basic_command_line_for_exec_file(test_command_line) self._worker_num = worker_num if not os.path.exists(self._working_dir): From dc505e0bef3f0523c95a36941e9a4264083020ef Mon Sep 17 00:00:00 2001 From: jmacekx <115208282+jmacekx@users.noreply.github.com> Date: Wed, 4 Oct 2023 14:25:03 +0200 Subject: [PATCH 061/257] [DOCS] add pyyaml to docs requirements (#20236) --- docs/requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/requirements.txt b/docs/requirements.txt index e90da3b38bea18..ddeb91b02f16a2 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -26,6 +26,7 @@ pytest-html==3.1.1 pytest-metadata==1.11.0 py>=1.9.0 pytz==2022.7 +pyyaml==6.0.1 requests==2.31.0 six==1.15.0 snowballstemmer==2.1.0 From a13cc8c733c443bf51091e3cf8c9bddbeb11ccef Mon Sep 17 00:00:00 2001 From: Karol Blaszczak Date: Wed, 4 Oct 2023 14:51:35 +0200 Subject: [PATCH 062/257] [DOCS] conan fix (#20216) --- .../installing-openvino-overview.md | 16 ++++----- .../installing-openvino-linux-header.md | 2 +- .../installing-openvino-apt.md | 1 + .../installing-openvino-from-archive-linux.md | 2 +- .../installing-openvino-yum.md | 1 + .../installing-openvino-from-archive-macos.md | 2 +- .../installing-openvino-conan.md | 33 ++++++++++--------- .../installing-openvino-conda.md | 4 +-- .../installing-openvino-docker-linux.md | 20 ++++++++--- .../installing-openvino-pip.md | 3 +- .../installing-openvino-vcpkg.md | 3 +- ...nstalling-openvino-from-archive-windows.md | 2 +- 12 files changed, 52 insertions(+), 37 deletions(-) diff --git a/docs/articles_en/get started/installing-openvino-overview.md b/docs/articles_en/get started/installing-openvino-overview.md index 13201da5f8f082..c703ee386157f8 100644 --- a/docs/articles_en/get started/installing-openvino-overview.md +++ b/docs/articles_en/get started/installing-openvino-overview.md @@ -47,14 +47,14 @@ .. 
dropdown:: Distribution Comparison for OpenVINO 2023.1 - =============== ========== ====== ========= ======== ============ ========== - Device Archives PyPI APT/YUM Conda Homebrew vcpkg - =============== ========== ====== ========= ======== ============ ========== - CPU V V V V V V - GPU V V V V V V - GNA V n/a n/a n/a n/a n/a - NPU V n/a n/a n/a n/a n/a - =============== ========== ====== ========= ======== ============ ========== + =============== ========== ====== ========= ======== ============ ========== ========== + Device Archives PyPI APT/YUM Conda Homebrew vcpkg Conan + =============== ========== ====== ========= ======== ============ ========== ========== + CPU V V V V V V V + GPU V V V V V V V + GNA V n/a n/a n/a n/a n/a n/a + NPU V n/a n/a n/a n/a n/a n/a + =============== ========== ====== ========= ======== ============ ========== ========== | **Build OpenVINO from source** | OpenVINO Toolkit source files are available on GitHub as open source. If you want to build your own version of OpenVINO for your platform, diff --git a/docs/articles_en/get started/installing-openvino-overview/installing-openvino-linux-header.md b/docs/articles_en/get started/installing-openvino-overview/installing-openvino-linux-header.md index ee958af006480a..661e769407147a 100644 --- a/docs/articles_en/get started/installing-openvino-overview/installing-openvino-linux-header.md +++ b/docs/articles_en/get started/installing-openvino-overview/installing-openvino-linux-header.md @@ -19,8 +19,8 @@ Use Conda Forge Use vcpkg Use Homebrew - Use Docker Use Conan + Use Docker If you want to install OpenVINO™ Runtime on Linux, you have the following options: diff --git a/docs/articles_en/get started/installing-openvino-overview/installing-openvino-linux-header/installing-openvino-apt.md b/docs/articles_en/get started/installing-openvino-overview/installing-openvino-linux-header/installing-openvino-apt.md index 6462e6c60b251b..b70f91b611c924 100644 --- a/docs/articles_en/get started/installing-openvino-overview/installing-openvino-linux-header/installing-openvino-apt.md +++ b/docs/articles_en/get started/installing-openvino-overview/installing-openvino-linux-header/installing-openvino-apt.md @@ -12,6 +12,7 @@ * offers both C/C++ and Python APIs * does not offer support for GNA and NPU inference + * is dedicated to Linux users only * additionally includes code samples diff --git a/docs/articles_en/get started/installing-openvino-overview/installing-openvino-linux-header/installing-openvino-from-archive-linux.md b/docs/articles_en/get started/installing-openvino-overview/installing-openvino-linux-header/installing-openvino-from-archive-linux.md index f08ef101b25ec4..5034c94dc4ea5c 100644 --- a/docs/articles_en/get started/installing-openvino-overview/installing-openvino-linux-header/installing-openvino-from-archive-linux.md +++ b/docs/articles_en/get started/installing-openvino-overview/installing-openvino-linux-header/installing-openvino-from-archive-linux.md @@ -13,7 +13,7 @@ * offers both C/C++ and Python APIs * additionally includes code samples - * is dedicated to users of all major OSs: Windows, Linux, macOS and x86_64 / arm64 architectures + * is dedicated to Linux users (archives for other systems are also available) * may offer different hardware support under different operating systems (see the drop-down below for more details). 
diff --git a/docs/articles_en/get started/installing-openvino-overview/installing-openvino-linux-header/installing-openvino-yum.md b/docs/articles_en/get started/installing-openvino-overview/installing-openvino-linux-header/installing-openvino-yum.md index f4928cdceb3ae2..3730cdab8e7ff5 100644 --- a/docs/articles_en/get started/installing-openvino-overview/installing-openvino-linux-header/installing-openvino-yum.md +++ b/docs/articles_en/get started/installing-openvino-overview/installing-openvino-linux-header/installing-openvino-yum.md @@ -12,6 +12,7 @@ * offers C/C++ APIs only * does not offer support for GNA and NPU inference + * is dedicated to Linux users only * additionally includes code samples .. tab-set:: diff --git a/docs/articles_en/get started/installing-openvino-overview/installing-openvino-macos-header/installing-openvino-from-archive-macos.md b/docs/articles_en/get started/installing-openvino-overview/installing-openvino-macos-header/installing-openvino-from-archive-macos.md index 826fbe223e6374..316a42d6d8c5a1 100644 --- a/docs/articles_en/get started/installing-openvino-overview/installing-openvino-macos-header/installing-openvino-from-archive-macos.md +++ b/docs/articles_en/get started/installing-openvino-overview/installing-openvino-macos-header/installing-openvino-from-archive-macos.md @@ -13,7 +13,7 @@ * offers both C/C++ and Python APIs * additionally includes code samples - * is dedicated to users of all major OSs: Windows, Linux, macOS + * is dedicated to macOS users (archives for other systems are also available) .. tab-set:: diff --git a/docs/articles_en/get started/installing-openvino-overview/installing-openvino-shared/installing-openvino-conan.md b/docs/articles_en/get started/installing-openvino-overview/installing-openvino-shared/installing-openvino-conan.md index fcba7f0df4a5d5..5f444196c338fd 100644 --- a/docs/articles_en/get started/installing-openvino-overview/installing-openvino-shared/installing-openvino-conan.md +++ b/docs/articles_en/get started/installing-openvino-overview/installing-openvino-shared/installing-openvino-conan.md @@ -10,8 +10,10 @@ Note that the Conan Package Manager distribution: - * is dedicated to users of all major OSs: Windows, Linux, macOS - + * offers C/C++ API only + * does not offer support for GNA and NPU inference + * is dedicated to users of all major OSes: Windows, Linux, and macOS + (all x86_64 / arm64 architectures) .. tab-set:: @@ -22,7 +24,6 @@ Full requirement listing is available in: `System Requirements Page `__ - .. tab-item:: Processor Notes :sync: processor-notes @@ -38,9 +39,9 @@ Installing OpenVINO Runtime with Conan Package Manager ############################################################ -1. Install Conan 2.0 or higher: +1. Install Conan 2.0.8 or higher: - .. code-block:: + .. code-block:: console python3 -m pip install conan @@ -58,23 +59,22 @@ Installing OpenVINO Runtime with Conan Package Manager Run the command below to create ``conan_toolchain.cmake`` file, which will be used to compile your project with OpenVINO: - .. code-block:: + .. code-block:: console conan install conanfile.txt --build=missing - - .. note:: - By default, OpenVINO is statically compiled. All available plugins and frontends are compiled as well. You can build a tailored OpenVINO by using the command below: + By default, OpenVINO is statically compiled, together with all available + plugins and frontends. 
To build a version tailored to your needs, check + what options there are on the `Conan Package Manager page for OpenVINO `__ + and extend the command, like so: - .. code-block:: + .. code-block:: console - conan install conanfile.txt --build=missing -o:h openvino/*:enable_intel_gpu=False -o:h openvino/*:enable_onnx_frontend=False' -o:h openvino/*:shared=True. + conan install conanfile.txt --build=missing -o:h openvino/*:enable_intel_gpu=False -o:h openvino/*:enable_onnx_frontend=False' -o:h openvino/*:shared=True. - For more details on available options, see the `Conan Package Manager page on OpenVINO `__ - 3. Configure and compile your project with OpenVINO: - .. code-block:: + .. code-block:: console cmake -DCMAKE_TOOLCHAIN_FILE= -DCMAKE_BUILD_TYPE=Release -S -B cmake --build --parallel @@ -86,11 +86,12 @@ Installing OpenVINO Runtime with Conan Package Manager Additional Resources ######################## -* `Conan Package Manager `__ +* `Conan Package Manager `__. * Learn more about :doc:`OpenVINO Workflow `. * To prepare your models for working with OpenVINO, see :doc:`Model Preparation `. * Learn more about :doc:`Inference with OpenVINO Runtime `. * See sample applications in :doc:`OpenVINO toolkit Samples Overview `. -* Check out the OpenVINO product `home page `__ +* Check out the OpenVINO product `home page `__. + @endsphinxdirective diff --git a/docs/articles_en/get started/installing-openvino-overview/installing-openvino-shared/installing-openvino-conda.md b/docs/articles_en/get started/installing-openvino-overview/installing-openvino-shared/installing-openvino-conda.md index dd76f8b980cfe0..bed553e73ad6fb 100644 --- a/docs/articles_en/get started/installing-openvino-overview/installing-openvino-shared/installing-openvino-conda.md +++ b/docs/articles_en/get started/installing-openvino-overview/installing-openvino-shared/installing-openvino-conda.md @@ -13,8 +13,8 @@ * offers both C/C++ and Python APIs * does not offer support for GNA and NPU inference - * is dedicated to users of all major OSs: Windows, Linux, macOS and x86_64 / arm64 architectures - + * is dedicated to users of all major OSes: Windows, Linux, and macOS + (all x86_64 / arm64 architectures) .. tab-set:: diff --git a/docs/articles_en/get started/installing-openvino-overview/installing-openvino-shared/installing-openvino-docker-linux.md b/docs/articles_en/get started/installing-openvino-overview/installing-openvino-shared/installing-openvino-docker-linux.md index 740e3b0371e9a8..c97295194c578e 100644 --- a/docs/articles_en/get started/installing-openvino-overview/installing-openvino-shared/installing-openvino-docker-linux.md +++ b/docs/articles_en/get started/installing-openvino-overview/installing-openvino-shared/installing-openvino-docker-linux.md @@ -6,7 +6,7 @@ :description: Learn how to use a prebuilt Docker image or create an image manually to install OpenVINO™ Runtime on Linux and Windows operating systems. -This guide presents infromation on how to use a pre-built Docker image/create an image manually to install OpenVINO™ Runtime. +This guide presents information on how to use a pre-built Docker image/create an image manually to install OpenVINO™ Runtime. 
Supported host operating systems for the Docker Base image: @@ -22,7 +22,13 @@ You can get started easily with pre-built and published docker images, which are * `Red Hat Ecosystem Catalog (development image) `__ * `Azure Marketplace `__ -You can use the `available Dockerfiles on GitHub `__ or generate a Dockerfile with your settings via `DockerHub CI framework `__ , which can generate a Dockerfile, build, test, and deploy an image using the Intel® Distribution of OpenVINO™ toolkit. You can reuse available Dockerfiles, add your layer and customize the OpenVINO™ image to your needs. Docker CI repository includes guides on how to how to `get started with docker images `__ and how to use `OpenVINO™ Toolkit containers with GPU accelerators. `__ +You can use the `available Dockerfiles on GitHub `__ +or generate a Dockerfile with your settings via `DockerHub CI framework `__, +which can generate a Dockerfile, build, test, and deploy an image using the Intel® Distribution of OpenVINO™ toolkit. +You can reuse available Dockerfiles, add your layer and customize the OpenVINO™ image to your needs. +The Docker CI repository includes guides on how to +`get started with docker images `__ and how to use +`OpenVINO™ Toolkit containers with GPU accelerators. `__ To start using Dockerfiles the following conditions must be met: @@ -32,13 +38,17 @@ To start using Dockerfiles the following conditions must be met: .. note:: - OpenVINO's `Docker `__ and :doc:`Bare Metal ` distributions are identical, so the documentation applies to both. + OpenVINO's `Docker `__ and :doc:`Bare Metal ` + distributions are identical, so the documentation applies to both. .. note:: - OpenVINO development environment in a docker container is also available in the `notebook repository `__ . It can be implemented in `OpenShift RedHat OpenData Science (RHODS) `__. + OpenVINO development environment in a docker container is also available in the + `notebook repository `__. It can be implemented in + `OpenShift RedHat OpenData Science (RHODS) `__. -More information about Docker CI for Intel® Distribution of OpenVINO™ toolset can be found `here `__ +More information about Docker CI for Intel® Distribution of OpenVINO™ toolset can be found +`here `__ * `Docker CI framework for Intel® Distribution of OpenVINO™ toolkit `__ * `Get Started with DockerHub CI for Intel® Distribution of OpenVINO™ toolkit `__ diff --git a/docs/articles_en/get started/installing-openvino-overview/installing-openvino-shared/installing-openvino-pip.md b/docs/articles_en/get started/installing-openvino-overview/installing-openvino-shared/installing-openvino-pip.md index 01fe7e50d21498..1356373d1b5ad1 100644 --- a/docs/articles_en/get started/installing-openvino-overview/installing-openvino-shared/installing-openvino-pip.md +++ b/docs/articles_en/get started/installing-openvino-overview/installing-openvino-shared/installing-openvino-pip.md @@ -13,7 +13,8 @@ * offers the Python API only * does not offer support for GNA and NPU inference - * is dedicated to users of all major OSs: Windows, Linux, macOS and x86_64 / arm64 architectures + * is dedicated to users of all major OSes: Windows, Linux, and macOS + (all x86_64 / arm64 architectures) .. 
tab-set:: diff --git a/docs/articles_en/get started/installing-openvino-overview/installing-openvino-shared/installing-openvino-vcpkg.md b/docs/articles_en/get started/installing-openvino-overview/installing-openvino-shared/installing-openvino-vcpkg.md index eb508a520f2e1c..8202d3f7fc1fb7 100644 --- a/docs/articles_en/get started/installing-openvino-overview/installing-openvino-shared/installing-openvino-vcpkg.md +++ b/docs/articles_en/get started/installing-openvino-overview/installing-openvino-shared/installing-openvino-vcpkg.md @@ -12,7 +12,8 @@ * offers C/C++ API only * does not offer support for GNA and NPU inference - * is dedicated to users of all major OSs: Windows, Linux, macOS and x86_64 / arm64 architectures. + * is dedicated to users of all major OSes: Windows, Linux, and macOS + (all x86_64 / arm64 architectures) .. tab-set:: diff --git a/docs/articles_en/get started/installing-openvino-overview/installing-openvino-windows-header/installing-openvino-from-archive-windows.md b/docs/articles_en/get started/installing-openvino-overview/installing-openvino-windows-header/installing-openvino-from-archive-windows.md index c10564ef6a8141..270948d13406bd 100644 --- a/docs/articles_en/get started/installing-openvino-overview/installing-openvino-windows-header/installing-openvino-from-archive-windows.md +++ b/docs/articles_en/get started/installing-openvino-overview/installing-openvino-windows-header/installing-openvino-from-archive-windows.md @@ -13,7 +13,7 @@ * offers both C/C++ and Python APIs * additionally includes code samples - * is dedicated to users of all major OSs: Windows, Linux, macOS + * is dedicated to Windows users (archives for other systems are also available) System Requirements From bdb13aa28df9f9f3cf53db1df712707fc0635915 Mon Sep 17 00:00:00 2001 From: Katarzyna Mitrus Date: Wed, 4 Oct 2023 17:05:29 +0200 Subject: [PATCH 063/257] [Opset13][pyAPI] Python API opset13 init and NMSRotated-13 (#20204) * Opset13 init * Update py API to use Opset13 * Add nms_rotated op to py API * Add tests * Add trailng comma * Adjust blank spaces * Add nms_rotated to init file * Update tests * Update style * Adjust Optional keyword * Update op docs with shapes * Add Tensor rypes hints * Update tests to import opset13 --- .../python/src/openvino/runtime/__init__.py | 33 ++-- .../src/openvino/runtime/opset13/__init__.py | 180 ++++++++++++++++++ .../src/openvino/runtime/opset13/ops.py | 59 ++++++ .../openvino/runtime/utils/node_factory.py | 2 +- .../src/pyopenvino/graph/node_factory.cpp | 2 +- .../tests/test_graph/test_nms_rotated.py | 69 +++++++ .../tests/test_graph/test_normalization.py | 2 +- .../tests/test_graph/test_ops_scatter.py | 2 +- .../python/tests/test_graph/test_pad.py | 2 +- .../tests/test_runtime/test_infer_request.py | 2 +- .../tests/test_runtime/test_input_node.py | 2 +- .../python/tests/test_runtime/test_model.py | 2 +- .../python/tests/test_runtime/test_nogil.py | 2 +- .../test_runtime/test_output_const_node.py | 2 +- .../tests/test_runtime/test_output_node.py | 2 +- .../python/tests/test_runtime/test_ovdict.py | 2 +- .../test_transformations/test_compression.py | 2 +- src/bindings/python/tests/utils/helpers.py | 2 +- 18 files changed, 339 insertions(+), 30 deletions(-) create mode 100644 src/bindings/python/src/openvino/runtime/opset13/__init__.py create mode 100644 src/bindings/python/src/openvino/runtime/opset13/ops.py create mode 100644 src/bindings/python/tests/test_graph/test_nms_rotated.py diff --git a/src/bindings/python/src/openvino/runtime/__init__.py 
b/src/bindings/python/src/openvino/runtime/__init__.py index d3d2147b5fc20a..3e3d9972ff2590 100644 --- a/src/bindings/python/src/openvino/runtime/__init__.py +++ b/src/bindings/python/src/openvino/runtime/__init__.py @@ -56,6 +56,7 @@ from openvino.runtime import opset10 from openvino.runtime import opset11 from openvino.runtime import opset12 +from openvino.runtime import opset13 # Import properties API from openvino.runtime import properties @@ -65,19 +66,19 @@ from openvino.runtime.ie_api import compile_model # Extend Node class to support binary operators -Node.__add__ = opset12.add -Node.__sub__ = opset12.subtract -Node.__mul__ = opset12.multiply -Node.__div__ = opset12.divide -Node.__truediv__ = opset12.divide -Node.__radd__ = lambda left, right: opset12.add(right, left) -Node.__rsub__ = lambda left, right: opset12.subtract(right, left) -Node.__rmul__ = lambda left, right: opset12.multiply(right, left) -Node.__rdiv__ = lambda left, right: opset12.divide(right, left) -Node.__rtruediv__ = lambda left, right: opset12.divide(right, left) -Node.__eq__ = opset12.equal -Node.__ne__ = opset12.not_equal -Node.__lt__ = opset12.less -Node.__le__ = opset12.less_equal -Node.__gt__ = opset12.greater -Node.__ge__ = opset12.greater_equal +Node.__add__ = opset13.add +Node.__sub__ = opset13.subtract +Node.__mul__ = opset13.multiply +Node.__div__ = opset13.divide +Node.__truediv__ = opset13.divide +Node.__radd__ = lambda left, right: opset13.add(right, left) +Node.__rsub__ = lambda left, right: opset13.subtract(right, left) +Node.__rmul__ = lambda left, right: opset13.multiply(right, left) +Node.__rdiv__ = lambda left, right: opset13.divide(right, left) +Node.__rtruediv__ = lambda left, right: opset13.divide(right, left) +Node.__eq__ = opset13.equal +Node.__ne__ = opset13.not_equal +Node.__lt__ = opset13.less +Node.__le__ = opset13.less_equal +Node.__gt__ = opset13.greater +Node.__ge__ = opset13.greater_equal diff --git a/src/bindings/python/src/openvino/runtime/opset13/__init__.py b/src/bindings/python/src/openvino/runtime/opset13/__init__.py new file mode 100644 index 00000000000000..66d2b3e9d46096 --- /dev/null +++ b/src/bindings/python/src/openvino/runtime/opset13/__init__.py @@ -0,0 +1,180 @@ +# -*- coding: utf-8 -*- +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +from openvino.runtime.opset1.ops import absolute +from openvino.runtime.opset1.ops import absolute as abs +from openvino.runtime.opset1.ops import acos +from openvino.runtime.opset4.ops import acosh +from openvino.runtime.opset8.ops import adaptive_avg_pool +from openvino.runtime.opset8.ops import adaptive_max_pool +from openvino.runtime.opset1.ops import add +from openvino.runtime.opset1.ops import asin +from openvino.runtime.opset4.ops import asinh +from openvino.runtime.opset6.ops import assign +from openvino.runtime.opset1.ops import atan +from openvino.runtime.opset4.ops import atanh +from openvino.runtime.opset1.ops import avg_pool +from openvino.runtime.opset5.ops import batch_norm_inference +from openvino.runtime.opset2.ops import batch_to_space +from openvino.runtime.opset1.ops import binary_convolution +from openvino.runtime.opset3.ops import broadcast +from openvino.runtime.opset3.ops import bucketize +from openvino.runtime.opset1.ops import ceiling +from openvino.runtime.opset1.ops import ceiling as ceil +from openvino.runtime.opset1.ops import clamp +from openvino.runtime.opset1.ops import concat +from openvino.runtime.opset1.ops import constant +from openvino.runtime.opset1.ops import 
convert +from openvino.runtime.opset1.ops import convert_like +from openvino.runtime.opset1.ops import convolution +from openvino.runtime.opset1.ops import convolution_backprop_data +from openvino.runtime.opset1.ops import cos +from openvino.runtime.opset1.ops import cosh +from openvino.runtime.opset1.ops import ctc_greedy_decoder +from openvino.runtime.opset6.ops import ctc_greedy_decoder_seq_len +from openvino.runtime.opset4.ops import ctc_loss +from openvino.runtime.opset3.ops import cum_sum +from openvino.runtime.opset3.ops import cum_sum as cumsum +from openvino.runtime.opset8.ops import deformable_convolution +from openvino.runtime.opset1.ops import deformable_psroi_pooling +from openvino.runtime.opset1.ops import depth_to_space +from openvino.runtime.opset8.ops import detection_output +from openvino.runtime.opset7.ops import dft +from openvino.runtime.opset1.ops import divide +from openvino.runtime.opset7.ops import einsum +from openvino.runtime.opset1.ops import elu +from openvino.runtime.opset3.ops import embedding_bag_offsets_sum +from openvino.runtime.opset3.ops import embedding_bag_packed_sum +from openvino.runtime.opset3.ops import embedding_segments_sum +from openvino.runtime.opset3.ops import extract_image_patches +from openvino.runtime.opset1.ops import equal +from openvino.runtime.opset1.ops import erf +from openvino.runtime.opset1.ops import exp +from openvino.runtime.opset9.ops import eye +from openvino.runtime.opset1.ops import fake_quantize +from openvino.runtime.opset1.ops import floor +from openvino.runtime.opset1.ops import floor_mod +from openvino.runtime.opset8.ops import gather +from openvino.runtime.opset6.ops import gather_elements +from openvino.runtime.opset8.ops import gather_nd +from openvino.runtime.opset1.ops import gather_tree +from openvino.runtime.opset7.ops import gelu +from openvino.runtime.opset9.ops import generate_proposals +from openvino.runtime.opset1.ops import greater +from openvino.runtime.opset1.ops import greater_equal +from openvino.runtime.opset9.ops import grid_sample +from openvino.runtime.opset1.ops import grn +from openvino.runtime.opset1.ops import group_convolution +from openvino.runtime.opset1.ops import group_convolution_backprop_data +from openvino.runtime.opset12.ops import group_normalization +from openvino.runtime.opset3.ops import gru_cell +from openvino.runtime.opset5.ops import gru_sequence +from openvino.runtime.opset1.ops import hard_sigmoid +from openvino.runtime.opset5.ops import hsigmoid +from openvino.runtime.opset4.ops import hswish +from openvino.runtime.opset7.ops import idft +from openvino.runtime.opset8.ops import if_op +from openvino.runtime.opset11.ops import interpolate +from openvino.runtime.opset9.ops import irdft +from openvino.runtime.opset10.ops import is_finite +from openvino.runtime.opset10.ops import is_inf +from openvino.runtime.opset10.ops import is_nan +from openvino.runtime.opset8.ops import i420_to_bgr +from openvino.runtime.opset8.ops import i420_to_rgb +from openvino.runtime.opset1.ops import less +from openvino.runtime.opset1.ops import less_equal +from openvino.runtime.opset1.ops import log +from openvino.runtime.opset1.ops import logical_and +from openvino.runtime.opset1.ops import logical_not +from openvino.runtime.opset1.ops import logical_or +from openvino.runtime.opset1.ops import logical_xor +from openvino.runtime.opset5.ops import log_softmax +from openvino.runtime.opset5.ops import loop +from openvino.runtime.opset1.ops import lrn +from openvino.runtime.opset4.ops import lstm_cell 
+from openvino.runtime.opset5.ops import lstm_sequence +from openvino.runtime.opset1.ops import matmul +from openvino.runtime.opset8.ops import matrix_nms +from openvino.runtime.opset8.ops import max_pool +from openvino.runtime.opset1.ops import maximum +from openvino.runtime.opset1.ops import minimum +from openvino.runtime.opset4.ops import mish +from openvino.runtime.opset1.ops import mod +from openvino.runtime.opset9.ops import multiclass_nms +from openvino.runtime.opset1.ops import multiply +from openvino.runtime.opset6.ops import mvn +from openvino.runtime.opset1.ops import negative +from openvino.runtime.opset13.ops import nms_rotated +from openvino.runtime.opset9.ops import non_max_suppression +from openvino.runtime.opset3.ops import non_zero +from openvino.runtime.opset1.ops import normalize_l2 +from openvino.runtime.opset1.ops import not_equal +from openvino.runtime.opset8.ops import nv12_to_bgr +from openvino.runtime.opset8.ops import nv12_to_rgb +from openvino.runtime.opset1.ops import one_hot +from openvino.runtime.opset12.ops import pad +from openvino.runtime.opset1.ops import parameter +from openvino.runtime.opset1.ops import power +from openvino.runtime.opset1.ops import prelu +from openvino.runtime.opset8.ops import prior_box +from openvino.runtime.opset1.ops import prior_box_clustered +from openvino.runtime.opset1.ops import psroi_pooling +from openvino.runtime.opset4.ops import proposal +from openvino.runtime.opset4.ops import range +from openvino.runtime.opset8.ops import random_uniform +from openvino.runtime.opset9.ops import rdft +from openvino.runtime.opset3.ops import read_value +from openvino.runtime.opset4.ops import reduce_l1 +from openvino.runtime.opset4.ops import reduce_l2 +from openvino.runtime.opset1.ops import reduce_logical_and +from openvino.runtime.opset1.ops import reduce_logical_or +from openvino.runtime.opset1.ops import reduce_max +from openvino.runtime.opset1.ops import reduce_mean +from openvino.runtime.opset1.ops import reduce_min +from openvino.runtime.opset1.ops import reduce_prod +from openvino.runtime.opset1.ops import reduce_sum +from openvino.runtime.opset1.ops import region_yolo +from openvino.runtime.opset2.ops import reorg_yolo +from openvino.runtime.opset1.ops import relu +from openvino.runtime.opset1.ops import reshape +from openvino.runtime.opset1.ops import result +from openvino.runtime.opset1.ops import reverse_sequence +from openvino.runtime.opset3.ops import rnn_cell +from openvino.runtime.opset5.ops import rnn_sequence +from openvino.runtime.opset9.ops import roi_align +from openvino.runtime.opset2.ops import roi_pooling +from openvino.runtime.opset7.ops import roll +from openvino.runtime.opset5.ops import round +from openvino.runtime.opset12.ops import scatter_elements_update +from openvino.runtime.opset3.ops import scatter_update +from openvino.runtime.opset1.ops import select +from openvino.runtime.opset1.ops import selu +from openvino.runtime.opset3.ops import shape_of +from openvino.runtime.opset3.ops import shuffle_channels +from openvino.runtime.opset1.ops import sigmoid +from openvino.runtime.opset1.ops import sign +from openvino.runtime.opset1.ops import sin +from openvino.runtime.opset1.ops import sinh +from openvino.runtime.opset8.ops import slice +from openvino.runtime.opset8.ops import softmax +from openvino.runtime.opset4.ops import softplus +from openvino.runtime.opset9.ops import softsign +from openvino.runtime.opset2.ops import space_to_batch +from openvino.runtime.opset1.ops import space_to_depth +from 
openvino.runtime.opset1.ops import split +from openvino.runtime.opset1.ops import sqrt +from openvino.runtime.opset1.ops import squared_difference +from openvino.runtime.opset1.ops import squeeze +from openvino.runtime.opset1.ops import strided_slice +from openvino.runtime.opset1.ops import subtract +from openvino.runtime.opset4.ops import swish +from openvino.runtime.opset1.ops import tan +from openvino.runtime.opset1.ops import tanh +from openvino.runtime.opset1.ops import tensor_iterator +from openvino.runtime.opset1.ops import tile +from openvino.runtime.opset11.ops import topk +from openvino.runtime.opset1.ops import transpose +from openvino.runtime.opset10.ops import unique +from openvino.runtime.opset1.ops import unsqueeze +from openvino.runtime.opset1.ops import variadic_split diff --git a/src/bindings/python/src/openvino/runtime/opset13/ops.py b/src/bindings/python/src/openvino/runtime/opset13/ops.py new file mode 100644 index 00000000000000..f50d3cd91edaad --- /dev/null +++ b/src/bindings/python/src/openvino/runtime/opset13/ops.py @@ -0,0 +1,59 @@ +# -*- coding: utf-8 -*- +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +"""Factory functions for ops added to openvino opset13.""" +from functools import partial +from typing import Optional + +from openvino.runtime import Node +from openvino.runtime.opset_utils import _get_node_factory +from openvino.runtime.utils.decorators import nameable_op +from openvino.runtime.utils.types import ( + NodeInput, + as_nodes, + as_node, +) + +_get_node_factory_opset13 = partial(_get_node_factory, "opset13") + + +# -------------------------------------------- ops ------------------------------------------------ + + +@nameable_op +def nms_rotated( + boxes: NodeInput, + scores: NodeInput, + max_output_boxes_per_class: NodeInput, + iou_threshold: NodeInput, + score_threshold: NodeInput, + sort_result_descending: bool = True, + output_type: str = "i64", + clockwise: bool = True, + name: Optional[str] = None, +) -> Node: + """Return a node which performs NMSRotated. + + :param boxes: Tensor with box coordinates of floating point type and shape [num_batches, num_boxes, 5], + where the last dimension is defined as [x_ctr, y_ctr, width, height, angle_radians]. + :param scores: Tensor with box scores of floating point type and shape [num_batches, num_classes, num_boxes]. + :param max_output_boxes_per_class: Tensor (scalar or 1D) of integer type, specifying maximum number of boxes + to be selected per class. + :param iou_threshold: Tensor (scalar or 1D) of floating point type, specifying intersection over union threshold + :param score_threshold: Tensor (scalar or 1D) of floating point type, specifying minimum score to consider box for the processing. + :param sort_result_descending: Flag that specifies whenever it is necessary to sort selected + boxes across batches or not. + :param output_type: Output element type. + :param clockwise: Flag that specifies direction of the box rotation. 
+ :return: The new node which performs NMSRotated + """ + inputs = as_nodes(boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold) + + attributes = { + "sort_result_descending": sort_result_descending, + "output_type": output_type, + "clockwise": clockwise, + } + + return _get_node_factory_opset13().create("NMSRotated", inputs, attributes) diff --git a/src/bindings/python/src/openvino/runtime/utils/node_factory.py b/src/bindings/python/src/openvino/runtime/utils/node_factory.py index bc95e9d1071e52..4dfbcdc320c6e4 100644 --- a/src/bindings/python/src/openvino/runtime/utils/node_factory.py +++ b/src/bindings/python/src/openvino/runtime/utils/node_factory.py @@ -14,7 +14,7 @@ from openvino.runtime.exceptions import UserInputError -DEFAULT_OPSET = "opset12" +DEFAULT_OPSET = "opset13" class NodeFactory(object): diff --git a/src/bindings/python/src/pyopenvino/graph/node_factory.cpp b/src/bindings/python/src/pyopenvino/graph/node_factory.cpp index 54bf15db57755f..5c274d1bf3f6bd 100644 --- a/src/bindings/python/src/pyopenvino/graph/node_factory.cpp +++ b/src/bindings/python/src/pyopenvino/graph/node_factory.cpp @@ -127,7 +127,7 @@ class NodeFactory { return it->second(); } - const ov::OpSet& m_opset = ov::get_opset12(); + const ov::OpSet& m_opset = ov::get_opset13(); std::map> m_opset_so_extensions; std::unordered_map> m_variables; }; diff --git a/src/bindings/python/tests/test_graph/test_nms_rotated.py b/src/bindings/python/tests/test_graph/test_nms_rotated.py new file mode 100644 index 00000000000000..80273870472792 --- /dev/null +++ b/src/bindings/python/tests/test_graph/test_nms_rotated.py @@ -0,0 +1,69 @@ +# -*- coding: utf-8 -*- +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import pytest + +from openvino.runtime import PartialShape, Dimension, Model +from openvino.runtime.exceptions import UserInputError +from openvino.runtime.utils.types import make_constant_node + +import openvino.runtime.opset13 as ov_opset13 +from openvino.runtime import Type + + +@pytest.mark.parametrize( + ("boxes_shape", "scores_shape", "max_output_boxes", "iou_threshold", "score_threshold", "expected_shape"), + [ + ([1, 100, 5], [1, 1, 100], [100], 0.1, 0.4, [PartialShape([Dimension(0, 100), Dimension(3)]), PartialShape([Dimension(0, 100), Dimension(3)])]), + ([1, 700, 5], [1, 1, 700], [600], 0.1, 0.4, [PartialShape([Dimension(0, 600), Dimension(3)]), PartialShape([Dimension(0, 600), Dimension(3)])]), + ([1, 300, 5], [1, 1, 300], [300], 0.1, 0.4, [PartialShape([Dimension(0, 300), Dimension(3)]), PartialShape([Dimension(0, 300), Dimension(3)])]), + ], +) +def test_nms_rotated_default_attrs(boxes_shape, scores_shape, max_output_boxes, iou_threshold, score_threshold, expected_shape): + boxes_parameter = ov_opset13.parameter(boxes_shape, name="Boxes", dtype=np.float32) + scores_parameter = ov_opset13.parameter(scores_shape, name="Scores", dtype=np.float32) + + max_output_boxes = make_constant_node(max_output_boxes, np.int64) + iou_threshold = make_constant_node(iou_threshold, np.float32) + score_threshold = make_constant_node(score_threshold, np.float32) + + node = ov_opset13.nms_rotated(boxes_parameter, scores_parameter, max_output_boxes, iou_threshold, score_threshold) + assert node.get_type_name() == "NMSRotated" + assert node.get_output_size() == 3 + assert node.get_output_partial_shape(0) == expected_shape[0] + assert node.get_output_partial_shape(1) == expected_shape[1] + assert node.get_output_partial_shape(2) == PartialShape([1]) + 
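As an illustrative aside to the new binding above, here is a minimal sketch of how opset13.nms_rotated might be called outside the parametrized test harness, reusing the same make_constant_node helper. The shapes, scores and thresholds are made-up values, and the interpretation of the three output ports (index triplets, score triplets, valid-box count) is assumed to mirror the NonMaxSuppression-9 convention rather than being stated by this patch.

import numpy as np

import openvino.runtime.opset13 as ops
from openvino.runtime.utils.types import make_constant_node

# 1 batch, 4 rotated boxes [x_ctr, y_ctr, w, h, angle_radians], 1 class (illustrative values)
boxes = ops.constant(np.zeros((1, 4, 5), dtype=np.float32))
scores = ops.constant(np.array([[[0.9, 0.8, 0.7, 0.6]]], dtype=np.float32))

node = ops.nms_rotated(boxes,
                       scores,
                       make_constant_node(3, np.int64),      # max_output_boxes_per_class
                       make_constant_node(0.5, np.float32),  # iou_threshold
                       make_constant_node(0.1, np.float32))  # score_threshold

# Output 0: [batch_index, class_index, box_index] triplets of the kept boxes
# Output 1: [batch_index, class_index, box_score] triplets of the kept boxes
# Output 2: number of boxes actually selected
print(node.get_output_partial_shape(0))  # expected [0..3, 3]
print(node.get_output_partial_shape(1))  # expected [0..3, 3]
print(node.get_output_partial_shape(2))  # expected [1]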
+ +@pytest.mark.parametrize( + ("boxes_shape", "scores_shape", "max_output_boxes", "iou_threshold", "score_threshold", + "sort_result_descending", "output_type", "clockwise", "expected_shape"), + [ + ([1, 100, 5], [1, 1, 100], [100], 0.1, 0.4, False, "i64", False, + [PartialShape([Dimension(0, 100), Dimension(3)]), PartialShape([Dimension(0, 100), Dimension(3)])]), + ([1, 100, 5], [1, 1, 100], [100], 0.1, 0.4, True, "i32", True, + [PartialShape([Dimension(0, 100), Dimension(3)]), PartialShape([Dimension(0, 100), Dimension(3)])]), + ], +) +def test_nms_rotated_custom_attrs(boxes_shape, scores_shape, max_output_boxes, iou_threshold, score_threshold, + sort_result_descending, output_type, clockwise, expected_shape): + boxes_parameter = ov_opset13.parameter(boxes_shape, name="Boxes", dtype=np.float32) + scores_parameter = ov_opset13.parameter(scores_shape, name="Scores", dtype=np.float32) + + max_output_boxes = make_constant_node(max_output_boxes, np.int64) + iou_threshold = make_constant_node(iou_threshold, np.float32) + score_threshold = make_constant_node(score_threshold, np.float32) + + node = ov_opset13.nms_rotated(boxes_parameter, scores_parameter, max_output_boxes, iou_threshold, + score_threshold, sort_result_descending, output_type, clockwise) + assert node.get_type_name() == "NMSRotated" + assert node.get_output_size() == 3 + assert node.get_output_partial_shape(0) == expected_shape[0] + assert node.get_output_partial_shape(1) == expected_shape[1] + assert node.get_output_partial_shape(2) == PartialShape([1]) + + assert node.get_output_element_type(0) == Type.i32 if output_type == "i32" else Type.i64 + assert node.get_output_element_type(1) == Type.f32 + assert node.get_output_element_type(2) == Type.i32 if output_type == "i32" else Type.i64 diff --git a/src/bindings/python/tests/test_graph/test_normalization.py b/src/bindings/python/tests/test_graph/test_normalization.py index 344013f375939f..40f0aa64832b68 100644 --- a/src/bindings/python/tests/test_graph/test_normalization.py +++ b/src/bindings/python/tests/test_graph/test_normalization.py @@ -5,7 +5,7 @@ import numpy as np from openvino.runtime import Type -import openvino.runtime.opset12 as ov +import openvino.runtime.opset13 as ov def test_lrn(): diff --git a/src/bindings/python/tests/test_graph/test_ops_scatter.py b/src/bindings/python/tests/test_graph/test_ops_scatter.py index db6b7b796ced1e..d7477a3188f848 100644 --- a/src/bindings/python/tests/test_graph/test_ops_scatter.py +++ b/src/bindings/python/tests/test_graph/test_ops_scatter.py @@ -5,7 +5,7 @@ import numpy as np import pytest -import openvino.runtime.opset12 as ov +import openvino.runtime.opset13 as ov from openvino.runtime import Type diff --git a/src/bindings/python/tests/test_graph/test_pad.py b/src/bindings/python/tests/test_graph/test_pad.py index af573869e61d4b..8cc299ee9a34e5 100644 --- a/src/bindings/python/tests/test_graph/test_pad.py +++ b/src/bindings/python/tests/test_graph/test_pad.py @@ -6,7 +6,7 @@ import numpy as np import pytest -import openvino.runtime.opset12 as ov +import openvino.runtime.opset13 as ov from openvino.runtime import Type diff --git a/src/bindings/python/tests/test_runtime/test_infer_request.py b/src/bindings/python/tests/test_runtime/test_infer_request.py index 8fb76032831fc3..85e4296f691081 100644 --- a/src/bindings/python/tests/test_runtime/test_infer_request.py +++ b/src/bindings/python/tests/test_runtime/test_infer_request.py @@ -10,7 +10,7 @@ import datetime import time -import openvino.runtime.opset12 as ops +import 
openvino.runtime.opset13 as ops from openvino import ( Core, CompiledModel, diff --git a/src/bindings/python/tests/test_runtime/test_input_node.py b/src/bindings/python/tests/test_runtime/test_input_node.py index 6bdc421e9b5339..3d98525d223617 100644 --- a/src/bindings/python/tests/test_runtime/test_input_node.py +++ b/src/bindings/python/tests/test_runtime/test_input_node.py @@ -6,7 +6,7 @@ from openvino.runtime import Input, RTMap from openvino._pyopenvino import DescriptorTensor -import openvino.runtime.opset12 as ops +import openvino.runtime.opset13 as ops from openvino import Core, OVAny, Shape, PartialShape, Type from tests.utils.helpers import get_relu_model diff --git a/src/bindings/python/tests/test_runtime/test_model.py b/src/bindings/python/tests/test_runtime/test_model.py index 9806fb1b0e0b75..b8823b732943de 100644 --- a/src/bindings/python/tests/test_runtime/test_model.py +++ b/src/bindings/python/tests/test_runtime/test_model.py @@ -7,7 +7,7 @@ import pytest import math -import openvino.runtime.opset12 as ops +import openvino.runtime.opset13 as ops from openvino import ( Core, Model, diff --git a/src/bindings/python/tests/test_runtime/test_nogil.py b/src/bindings/python/tests/test_runtime/test_nogil.py index 542791fc7b099c..edb8b505da518a 100644 --- a/src/bindings/python/tests/test_runtime/test_nogil.py +++ b/src/bindings/python/tests/test_runtime/test_nogil.py @@ -9,7 +9,7 @@ import threading import numpy as np -from openvino.runtime import Core, Model, AsyncInferQueue, PartialShape, Layout, opset12 as ops, serialize +from openvino.runtime import Core, Model, AsyncInferQueue, PartialShape, Layout, opset13 as ops, serialize from openvino.preprocess import PrePostProcessor from tests import skip_devtest diff --git a/src/bindings/python/tests/test_runtime/test_output_const_node.py b/src/bindings/python/tests/test_runtime/test_output_const_node.py index 5411658aef1fe6..2ae2ecad78b853 100644 --- a/src/bindings/python/tests/test_runtime/test_output_const_node.py +++ b/src/bindings/python/tests/test_runtime/test_output_const_node.py @@ -6,7 +6,7 @@ import pytest from copy import copy, deepcopy -import openvino.runtime.opset12 as ops +import openvino.runtime.opset13 as ops from openvino import ( Shape, PartialShape, diff --git a/src/bindings/python/tests/test_runtime/test_output_node.py b/src/bindings/python/tests/test_runtime/test_output_node.py index 24faabc9f56475..d10f7e46afeef1 100644 --- a/src/bindings/python/tests/test_runtime/test_output_node.py +++ b/src/bindings/python/tests/test_runtime/test_output_node.py @@ -4,7 +4,7 @@ import os -import openvino.runtime.opset12 as ops +import openvino.runtime.opset13 as ops from openvino import Type diff --git a/src/bindings/python/tests/test_runtime/test_ovdict.py b/src/bindings/python/tests/test_runtime/test_ovdict.py index f51f7197539674..d5713732c7967d 100644 --- a/src/bindings/python/tests/test_runtime/test_ovdict.py +++ b/src/bindings/python/tests/test_runtime/test_ovdict.py @@ -6,7 +6,7 @@ import numpy as np import pytest -import openvino.runtime.opset12 as ops +import openvino.runtime.opset13 as ops from openvino import Core, CompiledModel, InferRequest, Model from openvino.runtime import ConstOutput from openvino.runtime.ie_api import OVDict diff --git a/src/bindings/python/tests/test_transformations/test_compression.py b/src/bindings/python/tests/test_transformations/test_compression.py index f35b423b7df416..db15d24592cf98 100644 --- a/src/bindings/python/tests/test_transformations/test_compression.py +++ 
b/src/bindings/python/tests/test_transformations/test_compression.py @@ -6,7 +6,7 @@ import numpy as np from openvino.runtime.op import Parameter, Constant -from openvino.runtime.opset12 import add, multiply +from openvino.runtime.opset13 import add, multiply import openvino as ov from tests.utils.helpers import create_filename_for_test diff --git a/src/bindings/python/tests/utils/helpers.py b/src/bindings/python/tests/utils/helpers.py index 680e3ff41de2f9..7ebbdbdbbf32c7 100644 --- a/src/bindings/python/tests/utils/helpers.py +++ b/src/bindings/python/tests/utils/helpers.py @@ -12,7 +12,7 @@ from pathlib import Path import openvino -import openvino.runtime.opset12 as ops +import openvino.runtime.opset13 as ops from openvino.runtime import Model, Core, Shape From 48164e2279d1c6f9a75fda87287f58cf44cfeeba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Piotr=20Krzemi=C5=84ski?= Date: Wed, 4 Oct 2023 17:14:32 +0200 Subject: [PATCH 064/257] [Ref][Core][Opset13] Add Multinomial Operation (#19655) * [Ref] Multinomial base file * [Ref] Add core & reference implementation reusing other ops * [Ref] Fix reference implementation, add missing parameters, add tests * [Core] Add opset13, register multinomial, add shape inference * [Ref][Core] Fix compile errors * [Ref][Core] Clang fix * [TEMPLATE] Remove bf16, f16, f64 types * [TEMPLATE] Remove incorrect input types for 'input' parameter * [Ref][Tests] Remove deleted test types * [Ref] Fix & optimize shape inference * [PT FE] Apply suggestions from review * [Template] Migrate to new API * [Core] Add a clause for dynamic input in shape inference * [Tests] Add missing type_prop test (?) * Update multinomial_shape_inference.hpp * Update multinomial.hpp * [Ref] Fix build issues * [Ref] Fix clang and style * [Ref] Fix tests without replacement * [Ref] Fix with_replacement sampling error * [Ref] Remove debugging artifacts * [Ref] Cast to 64-bit size for 32-bit systems * Update multinomial.hpp * [Ref] Add missing type_prop tests, add shape inference tests * Update multinomial.cpp * Update multinomial_shape_inference_test.cpp * Update multinomial.cpp * Update multinomial.hpp * [Ref] Fix compilation errors from shape inference test * [Ref] Fix compilation error of type_prop, apply recommendations from review * [Ref] Add multiple shape inference tests * [Ref] Change TEST to TEST_F, add more type_prop tests * [Ref] Clang fixes * [Ref] Fix shape inference tests with mismatching args * [Ref] Fix remaining type_prop errors * [Ref] Replace HostTensor with normal Tensor in shape inference tests * Update opset.cpp * [Ref] Possible fix for 'function empty' error * [Ref] Add a cast to remove conversion warning * [Ref] Add conformance test of Multinomial * [Ref] Match style of conf test to the remaining tests * Update single_op_graph.cpp --- src/core/include/openvino/op/multinomial.hpp | 67 +++++++ src/core/include/openvino/op/ops.hpp | 1 + .../include/openvino/opsets/opset13_tbl.hpp | 1 + .../openvino/reference/multinomial.hpp | 163 ++++++++++++++++++ .../include/multinomial_shape_inference.hpp | 58 +++++++ src/core/src/op/multinomial.cpp | 129 ++++++++++++++ src/core/tests/opset.cpp | 2 +- src/core/tests/type_prop/multinomial.cpp | 62 +++++++ src/core/tests/visitors/op/multinomial.cpp | 31 ++++ .../multinomial_shape_inference_test.cpp | 127 ++++++++++++++ .../template/backend/ops/multinomial.cpp | 93 ++++++++++ .../template/backend/ops/ops_evaluates.hpp | 4 + .../template/backend/opset_int_tbl.hpp | 1 + .../functional/op_reference/multinomial.cpp | 161 +++++++++++++++++ 
.../src/op_impl_check/single_op_graph.cpp | 9 + 15 files changed, 908 insertions(+), 1 deletion(-) create mode 100644 src/core/include/openvino/op/multinomial.hpp create mode 100644 src/core/reference/include/openvino/reference/multinomial.hpp create mode 100644 src/core/shape_inference/include/multinomial_shape_inference.hpp create mode 100644 src/core/src/op/multinomial.cpp create mode 100644 src/core/tests/type_prop/multinomial.cpp create mode 100644 src/core/tests/visitors/op/multinomial.cpp create mode 100644 src/plugins/intel_cpu/tests/unit/shape_inference_test/multinomial_shape_inference_test.cpp create mode 100644 src/plugins/template/backend/ops/multinomial.cpp create mode 100644 src/plugins/template/tests/functional/op_reference/multinomial.cpp diff --git a/src/core/include/openvino/op/multinomial.hpp b/src/core/include/openvino/op/multinomial.hpp new file mode 100644 index 00000000000000..7cf6318e5035e8 --- /dev/null +++ b/src/core/include/openvino/op/multinomial.hpp @@ -0,0 +1,67 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v13 { +/// \brief Multinomial operation creates a sequence of indices of classes sampled from the multinomial distribution. +/// +/// \ingroup ov_ops_cpp_api +class OPENVINO_API Multinomial : public Op { +public: + OPENVINO_OP("Multinomial", "opset13"); + Multinomial() = default; + /** + * @brief Multinomial operation creates a sequence of indices of classes sampled from the multinomial distribution. + * + * @param probs Input tensor containing at each index poisition probability/log probability of sampling a given + * class. Any floating-point precision values are allowed. + * @param num_samples Scalar or 1D tensor with a single value that determines the number of samples to generate per + * batch. Values should be of an integer type. + * @param convert_type Data type to which to convert the output class indices. Allowed values: i32/i64 + * @param with_replacement Boolean that determines whether a sampled class can appear more than once in the output. + * @param log_probs Boolean that determines whether to treat input probabilities as log probabilities. + * @param global_seed First seed value (key) of Phillox random number generation algorithm. (See RandomUniform for + * details) + * @param op_seed Second seed value (counter) of Phillox random number generation algorithm. 
(See RandomUniform for + * details) + */ + Multinomial(const Output& input, + const Output& num_samples, + const ov::element::Type_t output_type, + const bool with_replacement, + const bool log_probs, + const uint64_t global_seed = 0, + const uint64_t op_seed = 0); + + bool visit_attributes(AttributeVisitor& visitor) override; + void validate_and_infer_types() override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + ov::element::Type_t get_convert_type() const; + bool get_with_replacement() const; + bool get_log_probs() const; + uint64_t get_global_seed() const; + uint64_t get_op_seed() const; + + void set_convert_type(const ov::element::Type_t output_type); + void set_with_replacement(const bool with_replacement); + void set_log_probs(const bool log_probs); + void set_global_seed(const uint64_t global_seed); + void set_op_seed(const uint64_t op_seed); + +private: + ov::element::Type_t m_convert_type; + bool m_with_replacement; + bool m_log_probs; + uint64_t m_global_seed; + uint64_t m_op_seed; +}; +} // namespace v13 +} // namespace op +} // namespace ov diff --git a/src/core/include/openvino/op/ops.hpp b/src/core/include/openvino/op/ops.hpp index b57372f118f19b..24ba54ce37b94a 100644 --- a/src/core/include/openvino/op/ops.hpp +++ b/src/core/include/openvino/op/ops.hpp @@ -110,6 +110,7 @@ #include "openvino/op/mish.hpp" #include "openvino/op/mod.hpp" #include "openvino/op/multiclass_nms.hpp" +#include "openvino/op/multinomial.hpp" #include "openvino/op/multiply.hpp" #include "openvino/op/mvn.hpp" #include "openvino/op/negative.hpp" diff --git a/src/core/include/openvino/opsets/opset13_tbl.hpp b/src/core/include/openvino/opsets/opset13_tbl.hpp index 8d543e49b67614..95d4ca0f375511 100644 --- a/src/core/include/openvino/opsets/opset13_tbl.hpp +++ b/src/core/include/openvino/opsets/opset13_tbl.hpp @@ -214,3 +214,4 @@ _OPENVINO_OP_REG(BitwiseNot, ov::op::v13) _OPENVINO_OP_REG(BitwiseOr, ov::op::v13) _OPENVINO_OP_REG(BitwiseXor, ov::op::v13) _OPENVINO_OP_REG(NMSRotated, ov::op::v13) +_OPENVINO_OP_REG(Multinomial, ov::op::v13) diff --git a/src/core/reference/include/openvino/reference/multinomial.hpp b/src/core/reference/include/openvino/reference/multinomial.hpp new file mode 100644 index 00000000000000..fc141d1204cbea --- /dev/null +++ b/src/core/reference/include/openvino/reference/multinomial.hpp @@ -0,0 +1,163 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include + +#include "openvino/reference/broadcast.hpp" +#include "openvino/reference/convert.hpp" +#include "openvino/reference/copy.hpp" +#include "openvino/reference/cum_sum.hpp" +#include "openvino/reference/divide.hpp" +#include "openvino/reference/exp.hpp" +#include "openvino/reference/random_uniform.hpp" +#include "openvino/reference/slice.hpp" + +namespace ov { +namespace reference { +namespace multinomial { +/** + * @brief Multinomial operation creates a sequence of indices of classes sampled from the multinomial distribution. + * + * @tparam T Data type of the probs' values. + * @tparam U Data type of num_samples' values. + * @tparam V Data type of output's values. + * @param probs Input tensor containing at each index poisition probability/log probability of sampling a given class. + * @param probs_shape Shape of the 'probs' tensor. + * @param num_samples Scalar or 1D tensor with a single value that determines the number of samples to generate per + * batch. 
+ * @param num_samples_shape Shape of the 'num_samples' tensor. + * @param output Output tensor for the generated class indices. + * @param output_shape Shape of the 'output' tensor. + * @param with_replacement Boolean that determines whether a sampled class can appear more than once in the output. + * @param log_probs Boolean that determines whether to treat input probabilities as log probabilities. + * @param global_seed First seed value (key) of Phillox random number generation algorithm. (See RandomUniform for + * details) + * @param op_seed Second seed value (counter) of Phillox random number generation algorithm. (See RandomUniform for + * details) + */ +template +void multinomial(const T* probs, + const Shape& probs_shape, + const U* num_samples, + const Shape& num_samples_shape, + V* output, + const Shape& output_shape, + const bool with_replacement, + const bool log_probs, + const uint64_t global_seed, + const uint64_t op_seed) { + const auto total_inputs_elements_count = shape_size(probs_shape); + const auto total_output_elements_count = shape_size(output_shape); + + // If probabilities are log probabilities, exponentiate to get normal probabilities + std::vector input_vals(total_inputs_elements_count); + if (log_probs) { + exp(probs, input_vals.data(), total_inputs_elements_count); + } else { + copy(probs, input_vals.data(), total_inputs_elements_count); + } + + // Create a cdf of probabilties on the last axis, per batch. Note cumsum exclusive == false + std::vector cdf(total_inputs_elements_count); + const auto last_axis = probs_shape.size() - 1; + cumsum(input_vals.data(), last_axis, cdf.data(), probs_shape, false, false); + + // Obtain max value from cdf, per batch (from cumsum it is the last element) + std::vector max_value_per_batch(total_inputs_elements_count / probs_shape[last_axis]); + Shape max_value_per_batch_shape(probs_shape); + max_value_per_batch_shape[last_axis] = 1; + const std::vector start{static_cast(probs_shape[last_axis] - 1)}; + const std::vector step{1}; + const std::vector target_axis_vec{static_cast(last_axis)}; + slice(reinterpret_cast(cdf.data()), + probs_shape, // == cdf shape + reinterpret_cast(max_value_per_batch.data()), + max_value_per_batch_shape, + sizeof(T), + start, + step, + target_axis_vec); + + // Normalize the cdf by dividing all elements by the max value in each batch + std::vector max_value_per_batch_divisor(total_inputs_elements_count); + ov::AxisSet target_axis_set = ov::AxisSet({last_axis}); + broadcast(reinterpret_cast(max_value_per_batch.data()), + reinterpret_cast(max_value_per_batch_divisor.data()), + max_value_per_batch_shape, + probs_shape, // expand to original shape (expands last dim) + target_axis_set, + sizeof(T)); + divide(cdf.data(), max_value_per_batch_divisor.data(), cdf.data(), total_inputs_elements_count, false); + + // Generate random probability samples + std::vector uniform_samples(total_output_elements_count); + const double zero = 0; + const double one = 1; + const ov::Shape output_shape_shape{output_shape.size()}; + const std::vector output_shape_u64(output_shape.begin(), output_shape.end()); + const std::pair initial_state(0, 0); + random_uniform(output_shape_u64.data(), + reinterpret_cast(&zero), + reinterpret_cast(&one), + reinterpret_cast(uniform_samples.data()), + output_shape_shape, + ov::element::f64, + global_seed, + op_seed, + initial_state); + + auto batch_size = probs_shape.size() == 2 ? static_cast(probs_shape[0]) : static_cast(1); + auto class_size = + probs_shape.size() == 2 ? 
static_cast(probs_shape[1]) : static_cast(probs_shape[0]); + auto samples_size = + probs_shape.size() == 2 ? static_cast(num_samples[0]) : static_cast(probs_shape[0]); + + // Iterate over each channel in uniform samples + std::vector output_samples(total_output_elements_count); + for (size_t i = 0; i < batch_size * samples_size; i += samples_size) { + for (size_t j = 0; j < samples_size; ++j) { + // Iterate over cdf to find the index for a given sample + // If no class found (all have 0 probability), selects last - undefined behavior + auto i_translated = i / samples_size * class_size; + auto selected_class_idx = class_size; + auto sample_value = uniform_samples[i + j]; + for (size_t k = 0; k < class_size; ++k) { + if (sample_value <= cdf[i_translated + k]) { + output_samples[i + j] = static_cast(k); + selected_class_idx = k; + break; + } + } + // Additional step with replacement - change probability of a given class to 0, and update the cdf + if (with_replacement) { + T class_probability = selected_class_idx ? cdf[i_translated + selected_class_idx] - + cdf[i_translated + selected_class_idx - 1] + : cdf[i_translated + selected_class_idx]; + T divisor = 1 - class_probability; + for (size_t k = 0; k < class_size; ++k) { + if (k >= selected_class_idx) { + cdf[i_translated + k] -= class_probability; + } + cdf[i_translated + k] /= divisor; + } + } + } + } + // Finally convert the samples to the requested data type + convert(output_samples.data(), output, total_output_elements_count); +} +} // namespace multinomial +} // namespace reference + +namespace op { +namespace multinomial { +namespace validate { +void input_types(const Node* op); +} // namespace validate +} // namespace multinomial +} // namespace op +} // namespace ov diff --git a/src/core/shape_inference/include/multinomial_shape_inference.hpp b/src/core/shape_inference/include/multinomial_shape_inference.hpp new file mode 100644 index 00000000000000..950014d9a759b0 --- /dev/null +++ b/src/core/shape_inference/include/multinomial_shape_inference.hpp @@ -0,0 +1,58 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "dimension_util.hpp" +#include "openvino/op/multinomial.hpp" +#include "utils.hpp" + +namespace ov { +namespace op { +namespace v13 { +template > +std::vector shape_infer(const Multinomial* op, + const std::vector& input_shapes, + const ITensorAccessor& ta = make_tensor_accessor()) { + NODE_VALIDATION_CHECK(op, input_shapes.size() == 2); + + const auto& input_shape = input_shapes[0]; + NODE_SHAPE_INFER_CHECK(op, + input_shapes, + input_shape.rank().compatible(1) || input_shape.rank().compatible(2), + "The rank of the 'probs' tensor defining output shape must be either 1 or 2."); + + const auto& num_samples_shape = input_shapes[1]; + NODE_SHAPE_INFER_CHECK(op, + input_shapes, + num_samples_shape.compatible(TRShape{}) || num_samples_shape.compatible(TRShape{1}), + "Number of samples must be a scalar or one element 1D tensor."); + + auto output_shapes = std::vector(1); + auto& result_shape = output_shapes[0]; + const auto input_rank_static = input_shape.rank().is_static(); + if (input_rank_static) { + const auto& num_samples = get_input_const_data_as_shape(op, 1, ta); + if (num_samples) { + NODE_VALIDATION_CHECK(op, + (*num_samples)[0].get_min_length() >= 0, + "Number of samples must be non-negative. 
Got number of samples: ", + (*num_samples)[0].get_min_length()); + result_shape = *num_samples; + } else { + result_shape = ov::PartialShape::dynamic(1); + } + + if (input_shape.rank().compatible(2)) { + result_shape.insert(result_shape.begin(), input_shape[0]); + } + } else { + result_shape = ov::PartialShape::dynamic(); + } + + return output_shapes; +} +} // namespace v13 +} // namespace op +} // namespace ov diff --git a/src/core/src/op/multinomial.cpp b/src/core/src/op/multinomial.cpp new file mode 100644 index 00000000000000..0dd4a93867d74a --- /dev/null +++ b/src/core/src/op/multinomial.cpp @@ -0,0 +1,129 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/op/multinomial.hpp" + +#include + +#include "bound_evaluate.hpp" +#include "itt.hpp" +#include "multinomial_shape_inference.hpp" +#include "openvino/core/attribute_visitor.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/util/op_types.hpp" +#include "openvino/reference/multinomial.hpp" + +namespace ov { + +// ------------------------------ v13 ------------------------------ + +op::v13::Multinomial::Multinomial(const Output& probs, + const Output& num_samples, + const ov::element::Type_t convert_type, + const bool with_replacement, + const bool log_probs, + const uint64_t global_seed, + const uint64_t op_seed) + : Op({probs, num_samples}), + m_convert_type(convert_type), + m_with_replacement(with_replacement), + m_log_probs(log_probs), + m_global_seed(global_seed), + m_op_seed(op_seed) { + constructor_validate_and_infer_types(); +} + +bool op::v13::Multinomial::visit_attributes(AttributeVisitor& visitor) { + OV_OP_SCOPE(v13_Multinomial_visit_attributes); + visitor.on_attribute("convert_type", m_convert_type); + visitor.on_attribute("with_replacement", m_with_replacement); + visitor.on_attribute("log_probs", m_log_probs); + visitor.on_attribute("global_seed", m_global_seed); + visitor.on_attribute("op_seed", m_op_seed); + return true; +} + +void op::v13::Multinomial::validate_and_infer_types() { + OV_OP_SCOPE(v13_Multinomial_validate_and_infer_types); + + OPENVINO_SUPPRESS_DEPRECATED_START + const auto input_shapes = get_node_input_partial_shapes(*this); + OPENVINO_SUPPRESS_DEPRECATED_END + + const auto output_shapes = shape_infer(this, input_shapes); + + multinomial::validate::input_types(this); + + set_output_type(0, m_convert_type, output_shapes[0]); +} + +std::shared_ptr op::v13::Multinomial::clone_with_new_inputs(const OutputVector& new_args) const { + OV_OP_SCOPE(v13_Multinomial_clone_with_new_inputs); + check_new_args_count(this, new_args); + + return std::make_shared(new_args.at(0), + new_args.at(1), + m_convert_type, + m_with_replacement, + m_log_probs, + m_global_seed, + m_op_seed); +} + +ov::element::Type_t op::v13::Multinomial::get_convert_type() const { + return m_convert_type; +} + +bool op::v13::Multinomial::get_with_replacement() const { + return m_with_replacement; +} + +bool op::v13::Multinomial::get_log_probs() const { + return m_log_probs; +} + +uint64_t op::v13::Multinomial::get_global_seed() const { + return m_global_seed; +} + +uint64_t op::v13::Multinomial::get_op_seed() const { + return m_op_seed; +} + +void op::v13::Multinomial::set_convert_type(const ov::element::Type_t convert_type) { + m_convert_type = convert_type; +} + +void op::v13::Multinomial::set_with_replacement(const bool with_replacement) { + m_with_replacement = with_replacement; +} + +void op::v13::Multinomial::set_log_probs(const bool log_probs) { + m_log_probs = 
log_probs; +} + +void op::v13::Multinomial::set_global_seed(const uint64_t global_seed) { + m_global_seed = global_seed; +} + +void op::v13::Multinomial::set_op_seed(const uint64_t op_seed) { + m_op_seed = op_seed; +} + +namespace op { +namespace multinomial { +namespace validate { +void input_types(const Node* op) { + NODE_VALIDATION_CHECK(op, + op->get_input_element_type(0).is_real(), + "Expected floating point type as element type for the 'probs' input."); + + NODE_VALIDATION_CHECK(op, + op->get_input_element_type(1).is_integral_number(), + "Expected integer type as element type for the 'num_samples' input."); +} +} // namespace validate +} // namespace multinomial +} // namespace op +} // namespace ov diff --git a/src/core/tests/opset.cpp b/src/core/tests/opset.cpp index 204f43ae8ff906..ee055befd4ba58 100644 --- a/src/core/tests/opset.cpp +++ b/src/core/tests/opset.cpp @@ -71,7 +71,7 @@ INSTANTIATE_TEST_SUITE_P(opset, OpsetTestParams{ov::get_opset10, 177}, OpsetTestParams{ov::get_opset11, 177}, OpsetTestParams{ov::get_opset12, 178}, - OpsetTestParams{ov::get_opset13, 183}), + OpsetTestParams{ov::get_opset13, 184}), OpsetTestNameGenerator{}); class MyOpOld : public ov::op::Op { diff --git a/src/core/tests/type_prop/multinomial.cpp b/src/core/tests/type_prop/multinomial.cpp new file mode 100644 index 00000000000000..faa6b17b4f790b --- /dev/null +++ b/src/core/tests/type_prop/multinomial.cpp @@ -0,0 +1,62 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/op/multinomial.hpp" + +#include + +#include "common_test_utils/test_assertions.hpp" +#include "common_test_utils/type_prop.hpp" + +using namespace testing; + +class TypePropMultinomialV13Test : public TypePropOpTest {}; + +TEST_F(TypePropMultinomialV13Test, input_probs_f64_num_samples_i32_convert_i32) { + const auto probs = std::make_shared(ov::element::f64, ov::Shape{4}); + const auto num_samples = std::make_shared(ov::element::i32, ov::Shape{1}); + const auto op = make_op(probs, num_samples, ov::element::i32, false, false, 0, 0); + EXPECT_EQ(op->get_element_type(), ov::element::i32); + EXPECT_EQ(op->get_output_partial_shape(0), (ov::PartialShape::dynamic(1))); +} + +TEST_F(TypePropMultinomialV13Test, input_probs_f32_num_samples_i32_convert_i64) { + const auto probs = std::make_shared(ov::element::f32, ov::Shape{4, 4}); + const auto num_samples = std::make_shared(ov::element::i32, ov::Shape{}); + const auto op = make_op(probs, num_samples, ov::element::i64, false, false, 0, 0); + EXPECT_EQ(op->get_element_type(), ov::element::i64); + EXPECT_EQ(op->get_output_partial_shape(0), (ov::PartialShape{4, -1})); +} + +TEST_F(TypePropMultinomialV13Test, probs_incompatibile_data_type) { + const auto probs = std::make_shared(ov::element::i32, ov::Shape{4}); + const auto num_samples = std::make_shared(ov::element::i32, ov::Shape{}); + OV_EXPECT_THROW(std::ignore = make_op(probs, num_samples, ov::element::u64, false, false, 0, 0), + ov::NodeValidationFailure, + HasSubstr("Expected floating point type as element type for the 'probs' input.")); +} + +TEST_F(TypePropMultinomialV13Test, num_samples_incompatibile_data_type) { + const auto probs = std::make_shared(ov::element::f32, ov::Shape{4}); + const auto num_samples = std::make_shared(ov::element::f32, ov::Shape{}); + OV_EXPECT_THROW(std::ignore = make_op(probs, num_samples, ov::element::u64, false, false, 0, 0), + ov::NodeValidationFailure, + HasSubstr("Expected integer type as element type for the 'num_samples' input.")); +} + 
+TEST_F(TypePropMultinomialV13Test, probs_incompatibile_rank) { + const auto probs = std::make_shared(ov::element::f32, ov::Shape{4, 4, 4}); + const auto num_samples = std::make_shared(ov::element::i32, ov::Shape{1}); + OV_EXPECT_THROW(std::ignore = make_op(probs, num_samples, ov::element::boolean, false, false, 0, 0), + ov::NodeValidationFailure, + HasSubstr("The rank of the 'probs' tensor defining output shape must be either 1 or 2.")); +} + +TEST_F(TypePropMultinomialV13Test, num_samples_incompatibile_rank) { + const auto probs = std::make_shared(ov::element::f32, ov::Shape{4}); + const auto num_samples = std::make_shared(ov::element::i32, ov::Shape{1, 2}); + OV_EXPECT_THROW(std::ignore = make_op(probs, num_samples, ov::element::boolean, false, false, 0, 0), + ov::NodeValidationFailure, + HasSubstr("Number of samples must be a scalar or one element 1D tensor.")); +} diff --git a/src/core/tests/visitors/op/multinomial.cpp b/src/core/tests/visitors/op/multinomial.cpp new file mode 100644 index 00000000000000..f6f19129a57979 --- /dev/null +++ b/src/core/tests/visitors/op/multinomial.cpp @@ -0,0 +1,31 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/op/multinomial.hpp" + +#include + +#include "openvino/op/unique.hpp" +#include "visitors/visitors.hpp" + +using namespace ov; +using ov::test::NodeBuilder; + +TEST(attributes, multinomial) { + NodeBuilder::get_ops().register_factory(); + const auto probs = std::make_shared(element::f32, Shape{2, 2}); + const auto num_samples = std::make_shared(element::i32, Shape{1}); + + const auto op = std::make_shared(probs, num_samples, element::f32, false, true, 0, 0); + NodeBuilder builder(op, {probs, num_samples}); + auto g_multi = ov::as_type_ptr(builder.create()); + + constexpr auto expected_attr_count = 5; + EXPECT_EQ(builder.get_value_map_size(), expected_attr_count); + EXPECT_EQ(op->get_with_replacement(), g_multi->get_with_replacement()); + EXPECT_EQ(op->get_global_seed(), g_multi->get_global_seed()); + EXPECT_EQ(op->get_convert_type(), g_multi->get_convert_type()); + EXPECT_EQ(op->get_log_probs(), g_multi->get_log_probs()); + EXPECT_EQ(op->get_op_seed(), g_multi->get_op_seed()); +} diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/multinomial_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/multinomial_shape_inference_test.cpp new file mode 100644 index 00000000000000..c3253be8a5d775 --- /dev/null +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/multinomial_shape_inference_test.cpp @@ -0,0 +1,127 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "multinomial_shape_inference.hpp" + +#include + +#include "utils.hpp" + +using namespace ov; +using namespace ov::intel_cpu; + +TEST(StaticShapeInferenceTest, MultinomialStaticShapeInferenceTest1D) { + auto probs = std::make_shared(element::f32, Shape{4}); + auto num_samples = std::make_shared(element::i32, Shape{1}); + auto multinomial = std::make_shared(probs, num_samples, element::i32, false, false, 0, 0); + + // Test Static Shape 1D input + std::vector static_input_shapes = {StaticShape{4}, StaticShape{1}}; + int32_t num_elements_val = 2; + auto const_data = + std::unordered_map{{1, {element::i32, Shape{1}, &num_elements_val}}}; + auto acc = make_tensor_accessor(const_data); + auto static_output_shapes = shape_infer(multinomial.get(), static_input_shapes, acc); + ASSERT_EQ(static_output_shapes[0], StaticShape({2})); +} + 
+TEST(StaticShapeInferenceTest, MultinomialStaticShapeInferenceTest2D) { + auto probs = std::make_shared(element::f32, Shape{4, 4}); + auto num_samples = std::make_shared(element::i32, Shape{1}); + auto multinomial = std::make_shared(probs, num_samples, element::i32, false, false, 0, 0); + + // Test Static Shape 2D input + std::vector static_input_shapes = {StaticShape{4, 4}, StaticShape{1}}; + int32_t num_elements_val = 2; + auto const_data = + std::unordered_map{{1, {element::i32, Shape{1}, &num_elements_val}}}; + auto acc = make_tensor_accessor(const_data); + auto static_output_shapes = shape_infer(multinomial.get(), static_input_shapes, acc); + ASSERT_EQ(static_output_shapes[0], StaticShape({4, 2})); +} + +TEST(StaticShapeInferenceTest, MultinomialDynamicShapeInferenceTestAllDimKnown1D) { + auto probs = std::make_shared(element::f32, PartialShape{3}); + auto num_samples = std::make_shared(element::i32, PartialShape{1}); + auto multinomial = std::make_shared(probs, num_samples, element::i32, false, false, 0, 0); + + // Test Partial Shape 1D input + std::vector partial_input_shapes = {PartialShape{3}, PartialShape{1}}; + int32_t num_elements_val = 2; + auto const_data = + std::unordered_map{{1, {element::i32, Shape{1}, &num_elements_val}}}; + auto acc = make_tensor_accessor(const_data); + auto partial_output_shapes = shape_infer(multinomial.get(), partial_input_shapes, acc); + ASSERT_EQ(partial_output_shapes[0], PartialShape({2})); +} + +TEST(StaticShapeInferenceTest, MultinomialDynamicShapeInferenceTestAllDimKnown2D) { + auto probs = std::make_shared(element::f32, PartialShape{2, 3}); + auto num_samples = std::make_shared(element::i32, PartialShape{1}); + auto multinomial = std::make_shared(probs, num_samples, element::i32, false, false, 0, 0); + + // Test Partial Shape 2D input + std::vector partial_input_shapes = {PartialShape{2, 3}, PartialShape{1}}; + int32_t num_elements_val = 2; + auto const_data = + std::unordered_map{{1, {element::i32, Shape{1}, &num_elements_val}}}; + auto acc = make_tensor_accessor(const_data); + auto partial_output_shapes = shape_infer(multinomial.get(), partial_input_shapes, acc); + ASSERT_EQ(partial_output_shapes[0], PartialShape({2, 2})); +} + +TEST(StaticShapeInferenceTest, MultinomialDynamicShapeInferenceTestDynamicNumSamples1D) { + auto probs = std::make_shared(element::f32, PartialShape{4}); + auto num_samples = std::make_shared(element::i32, PartialShape{-1}); + auto multinomial = std::make_shared(probs, num_samples, element::i32, false, false, 0, 0); + + // Test Partial Shape 1D input, unknown num_samples + std::vector partial_input_shapes = {PartialShape{4}, PartialShape{-1}}; + auto partial_output_shapes = shape_infer(multinomial.get(), partial_input_shapes, make_tensor_accessor()); + ASSERT_EQ(partial_output_shapes[0], PartialShape({-1})); +} + +TEST(StaticShapeInferenceTest, MultinomialDynamicShapeInferenceTestDynamicNumSamples2D) { + auto probs = std::make_shared(element::f32, PartialShape{4, 4}); + auto num_samples = std::make_shared(element::i32, PartialShape{-1}); + auto multinomial = std::make_shared(probs, num_samples, element::i32, false, false, 0, 0); + + // Test Partial Shape 2D input, unknown num_samples + std::vector partial_input_shapes = {PartialShape{4, 4}, PartialShape{-1}}; + auto partial_output_shapes = shape_infer(multinomial.get(), partial_input_shapes, make_tensor_accessor()); + ASSERT_EQ(partial_output_shapes[0], PartialShape({4, -1})); +} + +TEST(StaticShapeInferenceTest, 
MultinomialDynamicShapeInferenceTestDynamicProbsDynamicNumSamples1D) { + auto probs = std::make_shared(element::f32, PartialShape{-1}); + auto num_samples = std::make_shared(element::i32, PartialShape{-1}); + auto multinomial = std::make_shared(probs, num_samples, element::i32, false, false, 0, 0); + + // Test Partial Shape 1D input, unknown num_samples and probs shape + std::vector partial_input_shapes = {PartialShape{-1}, PartialShape{-1}}; + auto partial_output_shapes = shape_infer(multinomial.get(), partial_input_shapes, make_tensor_accessor()); + ASSERT_EQ(partial_output_shapes[0], PartialShape({-1})); +} + +TEST(StaticShapeInferenceTest, MultinomialDynamicShapeInferenceTestDynamicProbsDynamicNumSamples2D) { + auto probs = std::make_shared(element::f32, PartialShape{-1, -1}); + auto num_samples = std::make_shared(element::i32, PartialShape{-1}); + auto multinomial = std::make_shared(probs, num_samples, element::i32, false, false, 0, 0); + + // Test Partial Shape 2D input, unknown num_samples and probs shape + std::vector partial_input_shapes = {PartialShape{-1, -1}, PartialShape{-1}}; + auto partial_output_shapes = shape_infer(multinomial.get(), partial_input_shapes, make_tensor_accessor()); + ASSERT_EQ(partial_output_shapes[0], PartialShape({-1, -1})); +} + +TEST(StaticShapeInferenceTest, MultinomialDynamicShapeInferenceTestDynamicProbsDynamicNumSamplesDynamicRank) { + auto probs = std::make_shared(element::f32, PartialShape::dynamic()); + auto num_samples = std::make_shared(element::i32, PartialShape{-1}); + auto multinomial = std::make_shared(probs, num_samples, element::i32, false, false, 0, 0); + + // Test Partial Shape dynamic input, unknown num_samples and probs shape + std::vector partial_input_shapes = {PartialShape::dynamic(), PartialShape{-1}}; + auto partial_output_shapes = shape_infer(multinomial.get(), partial_input_shapes, make_tensor_accessor()); + ASSERT_EQ(partial_output_shapes[0], PartialShape::dynamic()); +} diff --git a/src/plugins/template/backend/ops/multinomial.cpp b/src/plugins/template/backend/ops/multinomial.cpp new file mode 100644 index 00000000000000..0cb340588eabf1 --- /dev/null +++ b/src/plugins/template/backend/ops/multinomial.cpp @@ -0,0 +1,93 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/reference/multinomial.hpp" + +#include "evaluate_node.hpp" +#include "multinomial_shape_inference.hpp" + +template +inline void evaluate_output_t(const std::shared_ptr& op, + ov::TensorVector& outputs, + const ov::TensorVector& inputs) { + using T1 = typename ov::element_type_traits::value_type; + using T2 = typename ov::element_type_traits::value_type; + using T3 = typename ov::element_type_traits::value_type; + + const auto tensor_acc = make_tensor_accessor(inputs); + const std::vector input_shapes{op->get_input_shape(0), op->get_input_shape(1)}; + const auto out_shape = ov::op::v13::shape_infer(op.get(), input_shapes, tensor_acc).front().to_shape(); + outputs[0].set_shape(out_shape); + + ov::reference::multinomial::multinomial(inputs[0].data(), + op->get_input_shape(0), + inputs[1].data(), + op->get_input_shape(1), + outputs[0].data(), + out_shape, + op->get_with_replacement(), + op->get_log_probs(), + op->get_global_seed(), + op->get_op_seed()); +} + +template +inline void evaluate_samples_t(const std::shared_ptr& op, + ov::TensorVector& outputs, + const ov::TensorVector& inputs) { + switch (op->get_convert_type()) { + case ov::element::Type_t::i32: + evaluate_output_t(op, outputs, inputs); + 
return; + case ov::element::Type_t::i64: + evaluate_output_t(op, outputs, inputs); + return; + default: + OPENVINO_THROW(std::string("Unhandled convert data type '") + + ov::element::Type(op->get_convert_type()).get_type_name() + + std::string("' in evaluate_node(). Use either i32 or i64 and apply conversion manually.")); + } +} + +template +bool evaluate_input_t(const std::shared_ptr& op, + ov::TensorVector& outputs, + const ov::TensorVector& inputs) { + switch (inputs[1].get_element_type()) { + case ov::element::Type_t::i64: + evaluate_samples_t(op, outputs, inputs); + break; + default: + evaluate_samples_t(op, outputs, inputs); + break; + } + return true; +} + +template <> +bool evaluate_node(std::shared_ptr node, + ov::TensorVector& outputs, + const ov::TensorVector& inputs) { + switch (node->get_input_element_type(0)) { + case ov::element::Type_t::f16: + return evaluate_input_t(ov::as_type_ptr(node), + outputs, + inputs); + case ov::element::Type_t::f32: + return evaluate_input_t(ov::as_type_ptr(node), + outputs, + inputs); + case ov::element::Type_t::f64: + return evaluate_input_t(ov::as_type_ptr(node), + outputs, + inputs); + case ov::element::Type_t::bf16: + return evaluate_input_t(ov::as_type_ptr(node), + outputs, + inputs); + default: + OPENVINO_THROW(std::string("Unhandled input data type ") + node->get_input_element_type(0).get_type_name() + + std::string(" in evaluate_node().")); + } +} diff --git a/src/plugins/template/backend/ops/ops_evaluates.hpp b/src/plugins/template/backend/ops/ops_evaluates.hpp index 040fd8334a3527..1cf9f91ea9812e 100644 --- a/src/plugins/template/backend/ops/ops_evaluates.hpp +++ b/src/plugins/template/backend/ops/ops_evaluates.hpp @@ -465,6 +465,10 @@ extern template bool evaluate_node(std::shared_ptr(std::shared_ptr node, + ov::TensorVector& outputs, + const ov::TensorVector& inputs); + extern template bool evaluate_node(std::shared_ptr node, ov::TensorVector& outputs, const ov::TensorVector& inputs); diff --git a/src/plugins/template/backend/opset_int_tbl.hpp b/src/plugins/template/backend/opset_int_tbl.hpp index 725fdd0621fc18..837efcd9ae1876 100644 --- a/src/plugins/template/backend/opset_int_tbl.hpp +++ b/src/plugins/template/backend/opset_int_tbl.hpp @@ -155,6 +155,7 @@ _OPENVINO_OP_REG(BitwiseNot, ov::op::v13) _OPENVINO_OP_REG(BitwiseOr, ov::op::v13) _OPENVINO_OP_REG(BitwiseXor, ov::op::v13) _OPENVINO_OP_REG(NMSRotated, ov::op::v13) +_OPENVINO_OP_REG(Multinomial, ov::op::v13) _OPENVINO_OP_REG(AUGRUCell, ov::op::internal) _OPENVINO_OP_REG(AUGRUSequence, ov::op::internal) diff --git a/src/plugins/template/tests/functional/op_reference/multinomial.cpp b/src/plugins/template/tests/functional/op_reference/multinomial.cpp new file mode 100644 index 00000000000000..d2edf5bedd9d60 --- /dev/null +++ b/src/plugins/template/tests/functional/op_reference/multinomial.cpp @@ -0,0 +1,161 @@ +// Copyright (C) 2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/op/multinomial.hpp" + +#include "base_reference_test.hpp" +#include "gtest/gtest.h" +#include "openvino/op/parameter.hpp" + +namespace { +struct MultinomialParams { + MultinomialParams(const reference_tests::Tensor& probabilities, + const reference_tests::Tensor& num_samples, + const reference_tests::Tensor& expected_tensor, + ov::element::Type_t convert_type, + bool log_probs, + bool with_replacement, + std::string name) + : probabilities{probabilities}, + num_samples{num_samples}, + expected_tensor(expected_tensor), + convert_type{convert_type}, + 
log_probs(log_probs), + with_replacement(with_replacement), + test_case_name{std::move(name)} {} + + reference_tests::Tensor probabilities; + reference_tests::Tensor num_samples; + reference_tests::Tensor expected_tensor; + + ov::element::Type_t convert_type; + bool log_probs; + bool with_replacement; + std::string test_case_name; +}; + +class ReferenceMultinomial : public testing::TestWithParam, + public reference_tests::CommonReferenceTest { +public: + void SetUp() override { + const auto& params = GetParam(); + function = CreateFunction(params); + inputData = {params.probabilities.data, params.num_samples.data}; + refOutData = {params.expected_tensor.data}; + } + + static std::string getTestCaseName(const testing::TestParamInfo& obj) { + std::ostringstream name; + name << obj.param.test_case_name; + name << "_input_type_"; + name << obj.param.probabilities.type; + name << "_samples_type_"; + name << obj.param.num_samples.type; + name << "_convert_type_"; + name << obj.param.convert_type; + name << "_log_"; + name << obj.param.log_probs; + name << "_replacement_"; + name << obj.param.with_replacement; + return name.str(); + } + +private: + static std::shared_ptr CreateFunction(const MultinomialParams& params) { + const auto in_probabilities = + std::make_shared(params.probabilities.type, params.probabilities.shape); + const auto in_num_samples = + std::make_shared(params.num_samples.type, params.num_samples.shape); + const auto multinomial = std::make_shared(in_probabilities, + in_num_samples, + params.convert_type, + params.with_replacement, + params.log_probs, + 1, + 1); + return std::make_shared(multinomial->outputs(), + ov::ParameterVector{in_probabilities, in_num_samples}); + } +}; + +template +std::vector generateMultinomialParams() { + using vt = typename ov::element_type_traits::value_type; + + const ov::Shape prob_2d_shape{2, 4}; + const ov::Shape prob_1d_shape{4}; + const ov::Shape num_samples_shape{1}; + + reference_tests::Tensor num_samples(num_samples_shape, ov::element::Type_t::i32, std::vector{4}); + + reference_tests::Tensor probabilities_2d_no_log(prob_2d_shape, + et, + std::vector{0.001, 0.01, 0.1, 0.899, 0.899, 0.1, 0.01, 0.001}); + reference_tests::Tensor probabilities_2d_log(prob_2d_shape, et, std::vector{1, 2, 3, 4, 2, 4, 6, 8}); + reference_tests::Tensor probabilities_1d_no_log(prob_1d_shape, et, std::vector{0.001, 0.01, 0.1, 0.899}); + reference_tests::Tensor probabilities_1d_log(prob_1d_shape, et, std::vector{1, 10, 7, 3}); + + reference_tests::Tensor output_2d_no_log_no_replacement(prob_2d_shape, + ov::element::Type_t::i32, + std::vector{3, 3, 3, 3, 0, 0, 0, 0}); + reference_tests::Tensor output_2d_log_no_replacement(prob_2d_shape, + ov::element::Type_t::i32, + std::vector{3, 3, 2, 3, 3, 3, 3, 3}); + reference_tests::Tensor output_1d_no_log_replacement(prob_1d_shape, + ov::element::Type_t::i64, + std::vector{3, 2, 1, 0}); + reference_tests::Tensor output_1d_log_replacement(prob_1d_shape, + ov::element::Type_t::i64, + std::vector{1, 2, 3, 0}); + + std::vector params; + // probabilities, num_samples, output, convert_type, log_probs, with_replacement, name + params.emplace_back(probabilities_2d_no_log, + num_samples, + output_2d_no_log_no_replacement, + ov::element::Type_t::i32, + false, + false, + "input_2d"); + params.emplace_back(probabilities_2d_log, + num_samples, + output_2d_log_no_replacement, + ov::element::Type_t::i32, + true, + false, + "input_2d"); + params.emplace_back(probabilities_1d_no_log, + num_samples, + output_1d_no_log_replacement, + 
ov::element::Type_t::i64, + false, + true, + "input_1d"); + params.emplace_back(probabilities_1d_log, + num_samples, + output_1d_log_replacement, + ov::element::Type_t::i64, + true, + true, + "input_1d"); + return params; +} + +std::vector generateMultinomialParams() { + std::vector> combo_params{generateMultinomialParams()}; + std::vector test_params; + for (auto& params : combo_params) + std::move(params.begin(), params.end(), std::back_inserter(test_params)); + return test_params; +} +} // namespace + +TEST_P(ReferenceMultinomial, CompareWithRefs) { + Exec(); +} + +INSTANTIATE_TEST_SUITE_P(smoke, + ReferenceMultinomial, + ::testing::ValuesIn(generateMultinomialParams()), + ReferenceMultinomial::getTestCaseName); diff --git a/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/src/op_impl_check/single_op_graph.cpp b/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/src/op_impl_check/single_op_graph.cpp index 5d36ba62e3ecda..05328c6b5fb8d0 100644 --- a/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/src/op_impl_check/single_op_graph.cpp +++ b/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/src/op_impl_check/single_op_graph.cpp @@ -606,6 +606,15 @@ std::shared_ptr generate(const std::shared_ptr &n return std::make_shared(results, params, "MatMul-1"); } +std::shared_ptr generate(const std::shared_ptr& node) { + ov::ParameterVector params{std::make_shared(ov::element::f32, ov::Shape{{1, 5}}), + std::make_shared(ov::element::i32, ov::Shape{1})}; + auto multinomial = + std::make_shared(params[0], params[1], ov::element::i32, false, false, 0, 0); + ov::ResultVector results{std::make_shared(multinomial)}; + return std::make_shared(results, params, "Multinomial-13"); +} + std::shared_ptr generate(const std::shared_ptr &node) { ov::ParameterVector params{std::make_shared(ov::element::f32, ov::Shape{{1, 6, 5}}), std::make_shared(ov::element::f32, ov::Shape{{1, 1, 6}}), From 3d6fb85a994ec73548087e7149ec07f26f162e5d Mon Sep 17 00:00:00 2001 From: Ivan Tikhonov Date: Wed, 4 Oct 2023 19:38:24 +0330 Subject: [PATCH 065/257] Model builders refactoring: rename dirs, targets, file names (#19885) * Model builders refactoring * Apply review comments * resolve review commets: update cmake target names * fix build: use correct headers * fix headers * fix build * fix docs --- .../openvino_plugin_library/Building.md | 2 +- .../openvino_plugin_library/PluginTesting.md | 2 +- src/bindings/c/tests/test_model_repo.hpp | 4 ++-- .../tests/CMakeLists.txt | 2 +- .../tests/add_transformation.cpp | 4 ++-- ...ncat_quantization_parameters_transformation.cpp | 4 ++-- .../tests/assign_and_read_value_transformation.cpp | 4 ++-- .../tests/avg_pool_transformation.cpp | 4 ++-- .../tests/avg_pool_with_child_transformation.cpp | 4 ++-- .../tests/batch_to_space_transformation.cpp | 4 ++-- .../tests/clamp_transformation.cpp | 4 ++-- .../tests/compose_fake_quantize_transformation.cpp | 6 +++--- ..._selection_with_intermediate_transformation.cpp | 4 ++-- .../tests/concat_transformation.cpp | 4 ++-- ...concat_with_different_precision_on_children.cpp | 4 ++-- .../tests/concat_with_fq_tranformation.cpp | 6 +++--- ...rmediate_precision_selection_transformation.cpp | 4 ++-- ...at_with_intermediate_reshape_transformation.cpp | 4 ++-- .../concat_with_intermediate_transformation.cpp | 4 ++-- ...h_intermediate_with_constant_transformation.cpp | 4 ++-- .../tests/concat_with_neighbors_transformation.cpp | 4 ++-- 
...h_neighbors_transformation_with_convolution.cpp | 6 +++--- ...at_with_not_quantized_parent_transformation.cpp | 6 +++--- ...ncat_with_reshape_at_the_end_transformation.cpp | 4 ++-- .../tests/concat_with_split_transformation.cpp | 4 ++-- .../concat_with_strided_slice_transformation.cpp | 4 ++-- .../convert_subtract_constant_transformation.cpp | 2 +- .../convolution_backprop_data_transformation.cpp | 2 +- .../tests/convolution_qdq_transformation.cpp | 2 +- .../tests/convolution_transformation.cpp | 2 +- .../tests/convolution_with_incorrect_weights.cpp | 8 ++++---- .../tests/depth_to_space_transformation.cpp | 2 +- ..._multi_parent_dequantization_transformation.cpp | 4 ++-- .../eliminate_fake_quantize_transformation.cpp | 4 ++-- ...ze_and_two_output_branches_with_convolution.cpp | 8 ++++---- ..._quantize_on_weights_with_unsupported_child.cpp | 2 +- ...quantize_precision_selection_transformation.cpp | 2 +- .../tests/fake_quantize_transformation.cpp | 4 ++-- ...quantize_with_dq_not_optimal_transformation.cpp | 10 +++++----- .../tests/fold_convert_transformation.cpp | 4 ++-- .../fold_fake_quantize_in_transformations.cpp | 6 +++--- .../tests/fuse_convert_transformation.cpp | 4 ++-- ..._dequantize_to_fake_quantize_transformation.cpp | 8 ++++---- ...e_quantize_with_multi_inputs_transformation.cpp | 6 +++--- ...se_multiply_to_fake_quantize_transformation.cpp | 6 +++--- ...se_subtract_to_fake_quantize_transformation.cpp | 6 +++--- .../tests/gather_transformation.cpp | 4 ++-- .../get_dequantization_below_transformation.cpp | 2 +- .../tests/get_dequantization_test.cpp | 4 ++-- .../tests/get_dequantization_transformation.cpp | 2 +- .../tests/group_convolution_transformation.cpp | 6 +++--- .../tests/interpolate_transformation.cpp | 4 ++-- .../is_asymmetric_on_weights_dequantization.cpp | 2 +- .../tests/is_asymmetric_on_weights_fq.cpp | 2 +- .../tests/is_constant_path_transformation.cpp | 10 +++++----- .../tests/is_function_quantized_transformation.cpp | 2 +- .../tests/layer_transformation.hpp | 2 +- .../tests/lpt_avoid_shapeof_propagation_test.cpp | 2 +- .../markup_avg_pool_precisions_transformation.cpp | 4 ++-- .../tests/markup_bias_transformation.cpp | 2 +- .../tests/mat_mul_transformation.cpp | 6 +++--- .../tests/mat_mul_with_constant_transformation.cpp | 8 ++++---- .../tests/max_pool_transformation.cpp | 4 ++-- .../move_dequantization_after_transformation.cpp | 4 ++-- .../tests/move_fake_quantize_transformation.cpp | 8 ++++---- .../tests/multiply_partial_transformation.cpp | 4 ++-- ...ultiply_to_group_convolution_transformation.cpp | 4 ++-- .../tests/multiply_transformation.cpp | 8 +++----- .../tests/mvn_transformation.cpp | 4 ++-- .../normalize_dequantization_transformation.cpp | 2 +- .../tests/normalize_l2_transformation.cpp | 4 ++-- .../tests/pad_transformation.cpp | 4 ++-- .../tests/prelu_transformation.cpp | 4 ++-- ...shape_through_dequantization_transformation.cpp | 2 +- ...spose_through_dequantization_transformation.cpp | 2 +- .../quantization_granularity_restriction_test.cpp | 2 +- .../tests/recurrent_cell_transformation.cpp | 6 +++--- .../tests/reduce_max_transformation.cpp | 6 +++--- .../tests/reduce_mean_transformation.cpp | 6 +++--- .../tests/reduce_min_transformation.cpp | 6 +++--- .../tests/reduce_sum_transformation.cpp | 6 +++--- .../tests/reduce_transformation.hpp | 6 +++--- .../tests/relu_transformation.cpp | 4 ++-- .../tests/reshape_transformation.cpp | 4 ++-- .../tests/round_transformation.cpp | 4 ++-- ...eparate_in_standalone_branch_transformation.cpp | 10 +++++----- 
.../tests/shuffle_channels_transformation.cpp | 4 ++-- .../tests/space_to_batch_transformation.cpp | 4 ++-- .../tests/split_transformation.cpp | 4 ++-- .../tests/squeeze_transformation.cpp | 2 +- .../tests/strided_slice_transformation.cpp | 4 ++-- .../src/fq_decomposition_with_shared_constants.cpp | 6 +++--- .../transformations_after_split_transformation.cpp | 2 +- .../tests/transformer_is_function_quantized.cpp | 6 +++--- .../tests/transpose_transformation.cpp | 4 ++-- .../tests/unit/data_precision_check.cpp | 2 +- .../layer_transformation_get_data_precision.cpp | 2 +- .../tests/unsqueeze_transformation.cpp | 2 +- .../tests/variadic_split_transformation.cpp | 4 ++-- src/common/snippets/tests/CMakeLists.txt | 4 ++-- .../src/pass/fake_quantize_decomposition_test.cpp | 2 +- .../tests/src/pass/precision_propagation.cpp | 2 +- src/common/transformations/tests/CMakeLists.txt | 2 +- .../common_optimizations/dimension_tracking.cpp | 2 +- .../eliminate_unsqueeze_gather.cpp | 2 +- .../tests/common_optimizations/nop_elimination.cpp | 4 ++-- .../preprocessing_fusion_tests.cpp | 2 +- .../transpose_sinking_test.cpp | 2 +- src/core/tests/type_relaxed_copy.cpp | 2 +- src/inference/tests/functional/caching_test.cpp | 2 +- src/plugins/auto/tests/unit/CMakeLists.txt | 4 ++-- .../behavior/plugin/auto_batching_tests.hpp | 2 +- src/plugins/auto_batch/tests/unit/CMakeLists.txt | 4 ++-- .../tests/unit/async_infer_request_test.cpp | 2 +- .../compile_model_create_infer_request_test.cpp | 2 +- .../tests/unit/compile_model_get_property_test.cpp | 2 +- .../unit/compile_model_get_runtime_model_test.cpp | 2 +- .../tests/unit/compile_model_set_property_test.cpp | 2 +- .../tests/unit/plugin_compile_model_test.cpp | 2 +- .../tests/unit/plugin_query_model_test.cpp | 2 +- .../tests/unit/sync_infer_request_test.cpp | 2 +- src/plugins/hetero/tests/unit/CMakeLists.txt | 4 ++-- .../intel_cpu/tests/functional/CMakeLists.txt | 2 +- .../tests/functional/behavior/export_import.cpp | 2 +- .../behavior/ov_executable_network/properties.cpp | 2 +- .../behavior/infer_request/memory_states.cpp | 2 +- .../behavior/plugin/hetero_synthetic.cpp | 4 ++-- .../execution_graph_tests/add_output.cpp | 2 +- .../fq_and_avg_pool_transformation.cpp | 2 +- .../fq_and_max_pool_transformation.cpp | 2 +- .../fq_precision_selection_transformation.cpp | 2 +- .../fq_transformation.cpp | 2 +- .../fq_with_dq_not_optimal_transformation.cpp | 2 +- .../fuse_fq_and_scale_shift_transformation.cpp | 2 +- .../subgraph_tests/concat_resize_concat.cpp | 2 +- .../single_layer_tests/adaptive_pooling.cpp | 4 ++-- .../functional/single_layer_tests/augru_cell.cpp | 2 +- .../single_layer_tests/augru_sequence.cpp | 2 +- .../single_layer_tests/batch_to_space.cpp | 2 +- .../functional/single_layer_tests/broadcast.cpp | 2 +- .../functional/single_layer_tests/bucketize.cpp | 2 +- .../single_layer_tests/classes/conversion.hpp | 2 +- .../functional/single_layer_tests/classes/mvn.hpp | 2 +- .../single_layer_tests/classes/reduce.hpp | 2 +- .../single_layer_tests/classes/softmax.hpp | 2 +- .../single_layer_tests/classes/transpose.hpp | 2 +- .../tests/functional/single_layer_tests/concat.cpp | 2 +- .../convert_to_plugin_specific_node.cpp | 2 +- .../functional/single_layer_tests/convolution.cpp | 4 ++-- .../convolution_backprop_data.cpp | 2 +- .../single_layer_tests/ctc_greedy_decoder.cpp | 2 +- .../ctc_greedy_decoder_seq_len.cpp | 2 +- .../functional/single_layer_tests/ctc_loss.cpp | 2 +- .../functional/single_layer_tests/cum_sum.cpp | 2 +- 
.../single_layer_tests/custom_op_internal_dyn.cpp | 5 ++--- .../single_layer_tests/deformable_convolution.cpp | 4 ++-- .../single_layer_tests/detection_output.cpp | 2 +- .../embedding_bag_offsets_sum.cpp | 2 +- .../embedding_bag_packed_sum.cpp | 2 +- .../single_layer_tests/embedding_segments_sum.cpp | 2 +- .../single_layer_tests/extract_image_patches.cpp | 2 +- .../tests/functional/single_layer_tests/eye.cpp | 2 +- .../single_layer_tests/fake_quantize.cpp | 2 +- .../tests/functional/single_layer_tests/gather.cpp | 2 +- .../single_layer_tests/gather_elements.cpp | 2 +- .../functional/single_layer_tests/gather_nd.cpp | 2 +- .../functional/single_layer_tests/gather_tree.cpp | 4 ++-- .../functional/single_layer_tests/grid_sample.cpp | 2 +- .../tests/functional/single_layer_tests/grn.cpp | 2 +- .../group_convolution_backprop_data.cpp | 2 +- .../functional/single_layer_tests/gru_cell.cpp | 2 +- .../functional/single_layer_tests/gru_sequence.cpp | 2 +- .../single_layer_tests/instances/x64/eltwise.cpp | 2 +- .../single_layer_tests/instances/x64/mvn.cpp | 2 +- .../single_layer_tests/instances/x64/reduce.cpp | 2 +- .../functional/single_layer_tests/interpolate.cpp | 2 +- .../functional/single_layer_tests/log_softmax.cpp | 2 +- .../functional/single_layer_tests/logical.cpp | 2 +- .../tests/functional/single_layer_tests/loop.cpp | 2 +- .../tests/functional/single_layer_tests/lrn.cpp | 2 +- .../functional/single_layer_tests/lstm_cell.cpp | 2 +- .../single_layer_tests/lstm_sequence.cpp | 2 +- .../tests/functional/single_layer_tests/matmul.cpp | 2 +- .../single_layer_tests/matmul_sparse.cpp | 2 +- .../single_layer_tests/non_max_suppression.cpp | 2 +- .../functional/single_layer_tests/nonzero.cpp | 4 ++-- .../functional/single_layer_tests/normalize.cpp | 2 +- .../functional/single_layer_tests/one_hot.cpp | 2 +- .../functional/single_layer_tests/pooling.cpp | 2 +- .../functional/single_layer_tests/prior_box.cpp | 2 +- .../single_layer_tests/prior_box_clustered.cpp | 2 +- .../functional/single_layer_tests/proposal.cpp | 2 +- .../single_layer_tests/psroi_pooling.cpp | 4 ++-- .../tests/functional/single_layer_tests/range.cpp | 4 ++-- .../tests/functional/single_layer_tests/rdft.cpp | 2 +- .../functional/single_layer_tests/region_yolo.cpp | 2 +- .../functional/single_layer_tests/reorg_yolo.cpp | 2 +- .../single_layer_tests/reverse_sequence.cpp | 2 +- .../functional/single_layer_tests/rnn_cell.cpp | 2 +- .../functional/single_layer_tests/rnn_sequence.cpp | 2 +- .../functional/single_layer_tests/roialign.cpp | 4 ++-- .../tests/functional/single_layer_tests/roll.cpp | 2 +- .../single_layer_tests/scatter_ND_update.cpp | 2 +- .../single_layer_tests/scatter_elements_update.cpp | 2 +- .../single_layer_tests/scatter_update.cpp | 2 +- .../tests/functional/single_layer_tests/select.cpp | 2 +- .../functional/single_layer_tests/shape_ops.cpp | 2 +- .../functional/single_layer_tests/shapeof.cpp | 4 ++-- .../single_layer_tests/shuffle_channels.cpp | 4 ++-- .../tests/functional/single_layer_tests/slice.cpp | 2 +- .../single_layer_tests/space_to_batch.cpp | 2 +- .../tests/functional/single_layer_tests/split.cpp | 2 +- .../single_layer_tests/strided_slice.cpp | 2 +- .../single_layer_tests/tensor_iterator.cpp | 2 +- .../tests/functional/single_layer_tests/tile.cpp | 2 +- .../tests/functional/single_layer_tests/topk.cpp | 2 +- .../tests/functional/single_layer_tests/unique.cpp | 2 +- .../single_layer_tests/variadic_split.cpp | 2 +- .../subgraph_tests/include/conv_concat.hpp | 4 ++-- .../include/conv_with_zero_point_fuse.hpp | 4 
++-- .../include/fuse_transpose_reorder.hpp | 4 ++-- .../subgraph_tests/src/add_convert_to_reorder.cpp | 4 ++-- .../src/align_matmul_input_ranks.cpp | 2 +- .../subgraph_tests/src/arm/convert_group_conv.cpp | 2 +- .../src/arm/convert_group_conv1d.cpp | 2 +- .../src/arm/convert_reduce_multi_axis.cpp | 2 +- .../subgraph_tests/src/broadcast_eltwise.cpp | 4 ++-- .../subgraph_tests/src/concat_const_inplace.cpp | 4 ++-- .../subgraph_tests/src/concat_conv_sum_inplace.cpp | 4 ++-- .../subgraph_tests/src/concat_reorder_inplace.cpp | 4 ++-- .../subgraph_tests/src/concat_reshape_concat.cpp | 4 ++-- .../subgraph_tests/src/conv3d_reshape.cpp | 2 +- .../functional/subgraph_tests/src/conv_dw_conv.cpp | 4 ++-- .../subgraph_tests/src/conv_maxpool_activ.cpp | 2 +- .../subgraph_tests/src/conv_sum_broadcast.cpp | 4 ++-- .../src/convert_fq_rnn_to_quantized_rnn.cpp | 2 +- .../subgraph_tests/src/convert_range.cpp | 4 ++-- .../subgraph_tests/src/convs_and_sums.cpp | 4 ++-- .../src/custom_op_insert_convert_i64.cpp | 3 +-- .../subgraph_tests/src/denormal_check.cpp | 4 ++-- .../subgraph_tests/src/eltwise_caching.cpp | 2 +- .../subgraph_tests/src/eltwise_chain.cpp | 2 +- .../functional/subgraph_tests/src/fq_caching.cpp | 2 +- .../subgraph_tests/src/fq_fused_with_ss.cpp | 2 +- .../subgraph_tests/src/fq_layer_dq_bias.cpp | 6 +++--- .../src/fullyconnected_strided_inputs_outputs.cpp | 2 +- .../src/fuse_conv_fq_with_shared_constants.cpp | 4 ++-- .../subgraph_tests/src/fuse_muladd_ewsimple.cpp | 2 +- .../subgraph_tests/src/fuse_non0_output_port.cpp | 4 ++-- .../src/fuse_scaleshift_and_fakequantize.cpp | 2 +- .../src/fuse_split_concat_pair_to_interpolate.cpp | 2 +- .../subgraph_tests/src/fuse_transpose_reorder.cpp | 2 +- .../src/input_noreorder_eltwise_bf16.cpp | 4 ++-- .../src/input_output_tensor_reuse.cpp | 4 ++-- .../subgraph_tests/src/input_tensor_roi.cpp | 2 +- .../functional/subgraph_tests/src/interaction.cpp | 2 +- .../src/matmul_decompress_convert.cpp | 2 +- .../src/matmul_quantized_subgraph.cpp | 2 +- .../src/matmul_strided_inputs_outputs.cpp | 2 +- .../src/matmul_weights_decompression.cpp | 2 +- .../subgraph_tests/src/memory_sharing_test.cpp | 2 +- .../tests/functional/subgraph_tests/src/mha.cpp | 2 +- .../tests/functional/subgraph_tests/src/ngram.cpp | 2 +- .../src/not_fused_conv_simple_op.cpp | 2 +- .../subgraph_tests/src/remove_convert.cpp | 4 ++-- .../subgraph_tests/src/reshape_chain.cpp | 2 +- .../functional/subgraph_tests/src/reshape_fc.cpp | 2 +- .../subgraph_tests/src/reshape_inplace.cpp | 4 ++-- .../subgraph_tests/src/seq_native_order.cpp | 2 +- .../subgraph_tests/src/split_concat_add.cpp | 4 ++-- .../subgraph_tests/src/split_matmul_concat.cpp | 2 +- .../subgraph_tests/src/static_zero_dims.cpp | 2 +- .../subgraph_tests/src/strided_slice_zero_dims.cpp | 4 ++-- .../subgraph_tests/src/subgraph_serialize.cpp | 2 +- .../src/subgraph_with_blocked_format.cpp | 2 +- .../src/tile_with_two_output_edges.cpp | 2 +- .../functional/test_utils/properties_test.hpp | 2 +- src/plugins/intel_cpu/tests/unit/CMakeLists.txt | 4 ++-- src/plugins/intel_cpu/tests/unit/generate_add.cpp | 2 +- .../snippets_transformations/enforce_precision.cpp | 2 +- .../fake_quantize_tokenization_test.cpp | 3 ++- src/plugins/intel_gna/legacy/tests/CMakeLists.txt | 2 +- .../legacy/tests/keep_constant_inputs_tests.cpp | 2 +- .../legacy/tests/mul_add_conversion_test.cpp | 2 +- .../unit/engines/gna/i16_quantisation_test.cpp | 2 +- .../backward_compatibility.cpp | 2 +- .../import_export_act_conv_act.cpp | 4 ++-- .../import_export_batch_size.cpp | 2 +- 
.../import_export_memory_layer.cpp | 4 ++-- .../import_export_multi_inputs.cpp | 2 +- .../import_reshape_permute_conv.cpp | 2 +- .../tests/functional/limitations/layers_limit.cpp | 4 ++-- .../tests/functional/pass_tests/4d_eltwise.cpp | 6 +++--- .../pass_tests/act_maxpool_reordering.cpp | 6 +++--- .../pass_tests/broadcast_const_with_fq.cpp | 6 +++--- .../functional/pass_tests/concat_memory_param.cpp | 6 +++--- .../functional/pass_tests/concat_restrictions.cpp | 6 +++--- .../functional/pass_tests/concat_transpose.cpp | 2 +- .../functional/pass_tests/conv_with_padding.cpp | 2 +- .../pass_tests/convert_dwsc_to_scaleshifts.cpp | 2 +- .../convert_matmul_to_fullyconnected.cpp | 4 ++-- .../convert_matmul_to_pointwise_conv.cpp | 6 +++--- .../pass_tests/convert_padded_to_valid_conv.cpp | 2 +- .../pass_tests/convolution_align_filter.cpp | 6 +++--- .../pass_tests/convolution_crop_axis_h.cpp | 6 +++--- .../functional/pass_tests/decompose_2d_conv.cpp | 2 +- .../tests/functional/pass_tests/decompose_mvn.cpp | 2 +- .../pass_tests/diagonal_insertion_test.cpp | 6 +++--- .../eltwise_split_over_channels_pass.cpp | 4 ++-- .../tests/functional/pass_tests/fq_activation.cpp | 6 +++--- .../pass_tests/fq_fusion_with_multiple_weights.cpp | 6 +++--- .../pass_tests/fq_fusion_with_sigmoid.cpp | 2 +- .../pass_tests/fq_maxpool_reordering.cpp | 6 +++--- .../pass_tests/fq_outputs_activation_.cpp | 6 +++--- .../fq_with_multiple_out_connections.cpp | 6 +++--- .../insert_copy_layer_before_self_concat.cpp | 6 +++--- .../pass_tests/insert_transpose_before_matmul.cpp | 6 +++--- .../pass_tests/insert_transpose_between_convs.cpp | 6 +++--- .../functional/pass_tests/layers_restrictions.cpp | 6 +++--- .../remove_permutations_NHWC_to_NCHW_pass.cpp | 6 +++--- .../preprocess_tests/gather_transpose_cpu.cpp | 2 +- .../preprocess_tests/gather_transpose_merge.cpp | 2 +- .../preprocess_tests/precision_convert.cpp | 4 ++-- .../add_overload_correction.cpp | 6 +++--- .../scale_factors_tests/const_input_add.cpp | 6 +++--- .../scale_factors_tests/eltwise_act_fq.cpp | 6 +++--- .../matmul_overload_correction.cpp | 6 +++--- .../scale_factors_tests/perchannel_quant_test.cpp | 6 +++--- .../scale_factors_tests/test_fq_scale_factors.cpp | 6 +++--- .../weighable_layer_without_fq.cpp | 4 ++-- .../behavior/infer_request/memory_states.cpp | 2 +- .../execution_graph_tests/add_output.cpp | 2 +- .../single_layer_tests/conv_low_precision.cpp | 6 +++--- .../single_layer_tests/convolution.cpp | 4 ++-- .../single_layer_tests/memory.cpp | 2 +- .../subgraph_tests/add_transpose_detection.cpp | 2 +- src/plugins/intel_gna/tests/unit/CMakeLists.txt | 2 +- .../unit/gna_executable_network_metrics_test.cpp | 2 +- .../tests/unit/gna_export_import_test.cpp | 2 +- .../tests/unit/gna_extra_pwl_segments_tests.cpp | 2 +- .../intel_gna/tests/unit/gna_hw_precision_test.cpp | 2 +- .../tests/unit/gna_infer_request_test.cpp | 2 +- .../intel_gna/tests/unit/gna_memory_alignment.cpp | 2 +- .../tests/unit/gna_memory_compact_test.cpp | 2 +- .../tests/unit/gna_plugin_load_network_test.cpp | 2 +- .../transformations/gather_transpose_merge.cpp | 2 +- ...gna_convert_matmul_to_pointwise_convolution.cpp | 2 +- .../unit/transformations/gna_insert_copy_layer.cpp | 2 +- .../transformations/gna_insert_identity_layer.cpp | 2 +- .../unit/transformations/gna_remove_convert.cpp | 2 +- .../tests/functional/behavior/infer_request.cpp | 2 +- .../tests/functional/behavior/memory_dyn_batch.cpp | 2 +- .../concurrency/gpu_concurrency_tests.cpp | 2 +- .../dynamic_tests/gpu_dyn_batch_shape_tests.cpp | 2 +- 
.../remote_blob_tests/cldnn_remote_blob_tests.cpp | 2 +- .../remote_blob_tests/dx11_remote_ctx_test.cpp | 2 +- .../remote_blob_tests/gpu_remote_tensor_tests.cpp | 2 +- .../behavior/infer_request/memory_states.cpp | 2 +- .../fq_and_avg_pool_transformation.cpp | 2 +- .../fq_and_max_pool_transformation.cpp | 2 +- .../fq_precision_selection_transformation.cpp | 2 +- .../fq_transformation.cpp | 2 +- .../fq_with_dq_not_optimal_transformation.cpp | 2 +- .../fuse_fq_and_scale_shift_transformation.cpp | 2 +- .../single_layer_tests/topk.cpp | 2 +- .../functional/single_layer_tests/convolution.cpp | 4 ++-- .../single_layer_tests/dynamic/batch_to_space.cpp | 2 +- .../single_layer_tests/dynamic/broadcast.cpp | 2 +- .../single_layer_tests/dynamic/convolution.cpp | 4 ++-- .../dynamic/convolution_backprop_data.cpp | 4 ++-- .../single_layer_tests/dynamic/cum_sum.cpp | 2 +- .../dynamic/detection_output.cpp | 2 +- .../single_layer_tests/dynamic/deth_to_space.cpp | 2 +- .../single_layer_tests/dynamic/gather.cpp | 2 +- .../single_layer_tests/dynamic/gather_elements.cpp | 2 +- .../single_layer_tests/dynamic/gather_nd.cpp | 2 +- .../single_layer_tests/dynamic/gather_tree.cpp | 2 +- .../single_layer_tests/dynamic/grid_sample.cpp | 2 +- .../dynamic/group_convolution_backprop_data.cpp | 4 ++-- .../dynamic/groupconvolution.cpp | 4 ++-- .../single_layer_tests/dynamic/interpolate.cpp | 2 +- .../single_layer_tests/dynamic/matmul.cpp | 2 +- .../functional/single_layer_tests/dynamic/mvn.cpp | 2 +- .../dynamic/non_max_suppression.cpp | 4 ++-- .../single_layer_tests/dynamic/normalize_l2.cpp | 2 +- .../functional/single_layer_tests/dynamic/pad.cpp | 2 +- .../single_layer_tests/dynamic/pooling.cpp | 2 +- .../single_layer_tests/dynamic/prior_box.cpp | 2 +- .../single_layer_tests/dynamic/random_uniform.cpp | 2 +- .../single_layer_tests/dynamic/range.cpp | 2 +- .../single_layer_tests/dynamic/reduce.cpp | 2 +- .../single_layer_tests/dynamic/region_yolo.cpp | 2 +- .../single_layer_tests/dynamic/roi_pooling.cpp | 2 +- .../dynamic/scatter_nd_update.cpp | 2 +- .../single_layer_tests/dynamic/select.cpp | 2 +- .../single_layer_tests/dynamic/shapeof.cpp | 2 +- .../single_layer_tests/dynamic/softmax.cpp | 2 +- .../single_layer_tests/dynamic/space_to_batch.cpp | 2 +- .../single_layer_tests/dynamic/split.cpp | 2 +- .../single_layer_tests/dynamic/strided_slice.cpp | 2 +- .../functional/single_layer_tests/dynamic/tile.cpp | 4 ++-- .../single_layer_tests/dynamic/top_k.cpp | 4 ++-- .../single_layer_tests/dynamic/unique.cpp | 2 +- .../single_layer_tests/tensor_iterator.cpp | 4 ++-- .../tests/functional/subgraph_tests/condition.cpp | 4 ++-- .../subgraph_tests/dynamic/broadcast_eltwise.cpp | 4 ++-- .../dynamic/dynamic_model_static_split_layer.cpp | 4 ++-- .../dynamic/dynamic_smoke_test_gen_impl_key.cpp | 4 ++-- ...amic_smoke_test_reduce_deconvolution_concat.cpp | 4 ++-- .../dynamic_smoke_test_shape_of_activation.cpp | 4 ++-- .../dynamic_smoke_test_shape_of_reduce_reshape.cpp | 4 ++-- .../dynamic_smoke_test_with_empty_tensor.cpp | 4 ++-- .../dynamic/matmul_weights_decompression.cpp | 2 +- .../subgraph_tests/dynamic/read_value_assign.cpp | 4 ++-- .../behavior/plugin/synthetic.cpp | 4 ++-- .../subgraph_reference/preprocess_opencv.cpp | 2 +- src/tests/CMakeLists.txt | 2 +- .../include/op_impl_check/single_op_graph.hpp | 2 +- .../op_conformance_runner/src/read_ir/read_ir.cpp | 2 +- src/tests/functional/plugin/shared/CMakeLists.txt | 6 +++--- .../shared/include/base/multi/multi_helpers.hpp | 2 +- .../shared/include/base/ov_behavior_test_utils.hpp | 2 
+- .../executable_network/exec_graph_info.hpp | 2 +- .../include/behavior/executable_network/locale.hpp | 2 +- .../ov_infer_request/infer_request_dynamic.hpp | 6 +++--- .../include/behavior/ov_plugin/caching_tests.hpp | 2 +- .../behavior/plugin/auto_batching_tests.hpp | 2 +- .../include/behavior/plugin/caching_tests.hpp | 2 +- .../behavior/plugin/configuration_tests.hpp | 2 +- .../include/behavior/plugin/core_threading.hpp | 2 +- .../include/behavior/plugin/hetero_synthetic.hpp | 2 +- .../num_inputs_fusing_bin_conv.hpp | 2 +- .../execution_graph_tests/runtime_precision.hpp | 2 +- .../add_transformation.hpp | 2 +- .../assign_and_read_value_transformation.hpp | 4 ++-- .../batch_to_space_transformation.hpp | 2 +- .../clamp_transformation.hpp | 4 ++-- .../concat_transformation.hpp | 4 ++-- .../concat_with_child_and_output.hpp | 2 +- ...concat_with_different_precision_on_children.hpp | 2 +- .../concat_with_split_transformation.hpp | 2 +- .../convolution_backprop_data_transformation.hpp | 6 +++--- .../convolution_qdq_transformation.hpp | 8 ++++---- .../convolution_transformation.hpp | 4 ++-- .../convolution_with_incorrect_weights.hpp | 4 ++-- ...elementwise_branch_selection_transformation.hpp | 4 ++-- .../eliminate_fake_quantize_transformation.hpp | 6 +++--- .../fake_quantize_and_avg_pool_transformation.hpp | 2 +- .../fake_quantize_and_max_pool_transformation.hpp | 2 +- ...ze_and_two_output_branches_with_convolution.hpp | 6 +++--- ...quantize_precision_selection_transformation.hpp | 6 +++--- .../fake_quantize_transformation.hpp | 2 +- ...quantize_with_dq_not_optimal_transformation.hpp | 12 ++++++------ .../fuse_convert_transformation.hpp | 4 ++-- ..._dequantize_to_fake_quantize_transformation.hpp | 6 +++--- ...ake_quantize_and_scale_shift_transformation.hpp | 2 +- ...se_multiply_to_fake_quantize_transformation.hpp | 6 +++--- ...se_subtract_to_fake_quantize_transformation.hpp | 6 +++--- .../gather_transformation.hpp | 2 +- .../group_convolution_transformation.hpp | 4 ++-- .../groupconvolution_qdq_transformation.hpp | 10 +++++----- .../mat_mul_transformation.hpp | 4 ++-- .../mat_mul_with_constant_transformation.hpp | 10 +++++----- .../mat_mul_with_optimized_constant_fq.hpp | 2 +- .../move_fake_quantize_transformation.hpp | 6 +++--- ...ultiply_to_group_convolution_transformation.hpp | 6 +++--- .../multiply_transformation.hpp | 2 +- .../multiply_with_one_parent_transformation.hpp | 2 +- .../mvn_transformation.hpp | 2 +- .../normalize_transformation.hpp | 2 +- .../pad_transformation.hpp | 2 +- .../prelu_transformation.hpp | 2 +- ...shape_through_dequantization_transformation.hpp | 12 ++++++------ .../recurrent_cell_transformation.hpp | 6 +++--- .../reduce_max_transformation.hpp | 4 ++-- .../reduce_mean_transformation.hpp | 4 ++-- .../reduce_min_transformation.hpp | 4 ++-- .../reduce_sum_transformation.hpp | 4 ++-- .../relu_transformation.hpp | 2 +- .../reshape_transformation.hpp | 2 +- .../shuffle_channels_transformation.hpp | 2 +- .../space_to_batch_transformation.hpp | 2 +- .../split_transformation.hpp | 2 +- .../squeeze_transformation.hpp | 2 +- .../strided_slice_transformation.hpp | 4 ++-- ...act_multiply_to_multiply_add_transformation.hpp | 2 +- .../transpose_transformation.hpp | 2 +- .../unsqueeze_transformation.hpp | 2 +- .../variadic_split_transformation.hpp | 2 +- .../include/single_layer_tests/batch_norm.hpp | 2 +- .../shared/include/snippets/codegen_bert.hpp | 4 ++-- .../snippets/fake_quantize_decomposition_test.hpp | 4 ++-- .../infer_request/set_io_blob_precision.cpp | 2 +- 
.../ov_infer_request/infer_correctness.cpp | 2 +- .../ov_infer_request/infer_request_dynamic.cpp | 6 +++--- .../ov_infer_request/inference_chaining.cpp | 2 +- .../ov_infer_request/iteration_chaining.cpp | 2 +- .../src/behavior/ov_plugin/caching_tests.cpp | 4 ++-- .../shared/src/behavior/ov_plugin/life_time.cpp | 2 +- .../shared/src/behavior/ov_plugin/remote.cpp | 2 +- .../shared/src/behavior/plugin/caching_tests.cpp | 4 ++-- .../src/behavior/plugin/hetero_synthetic.cpp | 4 ++-- .../shared/src/behavior/plugin/life_time.cpp | 2 +- .../shared/src/behavior/plugin/stress_tests.cpp | 2 +- .../add_transformation.cpp | 4 ++-- .../assign_and_read_value_transformation.cpp | 2 +- .../batch_to_space_transformation.cpp | 2 +- .../clamp_transformation.cpp | 2 +- .../concat_transformation.cpp | 4 ++-- .../concat_with_child_and_output.cpp | 4 ++-- ...concat_with_different_precision_on_children.cpp | 4 ++-- .../concat_with_intermediate_transformation.cpp | 4 ++-- .../concat_with_neighbors_graph_transformation.cpp | 4 ++-- .../concat_with_split_transformation.cpp | 4 ++-- .../convolution_backprop_data_transformation.cpp | 2 +- .../convolution_qdq_transformation.cpp | 4 ++-- .../convolution_transformation.cpp | 4 ++-- .../convolution_with_incorrect_weights.cpp | 4 ++-- .../depth_to_space_transformation.cpp | 6 +++--- ...elementwise_branch_selection_transformation.cpp | 2 +- .../eliminate_fake_quantize_transformation.cpp | 2 +- .../fake_quantize_and_avg_pool_transformation.cpp | 4 ++-- .../fake_quantize_and_max_pool_transformation.cpp | 4 ++-- ...ze_and_two_output_branches_with_convolution.cpp | 2 +- ...quantize_precision_selection_transformation.cpp | 2 +- ...quantize_with_dq_not_optimal_transformation.cpp | 2 +- .../fully_connected_transformation.cpp | 6 +++--- .../fuse_convert_transformation.cpp | 4 ++-- ..._dequantize_to_fake_quantize_transformation.cpp | 2 +- ...se_multiply_to_fake_quantize_transformation.cpp | 2 +- ...se_subtract_to_fake_quantize_transformation.cpp | 2 +- .../gather_transformation.cpp | 2 +- .../gemm_transformation.cpp | 4 ++-- .../group_convolution_transformation.cpp | 4 ++-- .../groupconvolution_qdq_transformation.cpp | 4 ++-- .../interpolate_transformation.cpp | 2 +- .../mat_mul_transformation.cpp | 4 ++-- .../mat_mul_with_constant_transformation.cpp | 4 ++-- .../mat_mul_with_optimized_constant_fq.cpp | 4 ++-- .../move_fake_quantize_transformation.cpp | 2 +- ...ultiply_to_group_convolution_transformation.cpp | 4 ++-- .../multiply_transformation.cpp | 4 ++-- .../multiply_with_one_parent_transformation.cpp | 2 +- .../mvn_transformation.cpp | 4 ++-- .../normalize_transformation.cpp | 4 ++-- .../output_layers_concat.cpp | 4 ++-- .../output_layers_concat_multi_channel.cpp | 4 ++-- .../output_layers_handling_in_transformations.cpp | 4 ++-- .../pad_transformation.cpp | 2 +- .../prelu_transformation.cpp | 2 +- ...shape_through_dequantization_transformation.cpp | 4 ++-- .../recurrent_cell_transformation.cpp | 2 +- .../reduce_max_transformation.cpp | 2 +- .../reduce_mean_transformation.cpp | 2 +- .../reduce_min_transformation.cpp | 2 +- .../reduce_sum_transformation.cpp | 2 +- .../relu_transformation.cpp | 2 +- .../reshape_transformation.cpp | 2 +- .../shuffle_channels_transformation.cpp | 2 +- .../space_to_batch_transformation.cpp | 2 +- .../split_transformation.cpp | 2 +- .../squeeze_transformation.cpp | 4 ++-- .../strided_slice_transformation.cpp | 2 +- ...act_multiply_to_multiply_add_transformation.cpp | 2 +- .../subtract_transformation.cpp | 2 +- 
.../transpose_after_matmul_transformation.cpp | 2 +- .../transpose_transformation.cpp | 2 +- .../unsqueeze_transformation.cpp | 4 ++-- .../variadic_split_transformation.cpp | 2 +- .../functional/plugin/shared/src/snippets/add.cpp | 2 +- .../plugin/shared/src/snippets/codegen_bert.cpp | 2 +- .../plugin/shared/src/snippets/codegen_gelu.cpp | 2 +- .../plugin/shared/src/snippets/edge_replace.cpp | 2 +- .../snippets/fake_quantize_decomposition_test.cpp | 2 +- .../snippets/precision_propagation_convertion.cpp | 2 +- .../plugin/shared/src/snippets/softmax.cpp | 2 +- .../shared/src/snippets/transpose_softmax.cpp | 2 +- .../functional/shared_test_classes/CMakeLists.txt | 2 +- .../shared_test_classes/base/layer_test_utils.hpp | 4 ++-- .../single_layer/activation.hpp | 4 ++-- .../single_layer/batch_norm.hpp | 2 +- .../single_layer/binary_convolution.hpp | 4 ++-- .../shared_test_classes/single_layer/bucketize.hpp | 4 ++-- .../shared_test_classes/single_layer/clamp.hpp | 4 ++-- .../shared_test_classes/single_layer/concat.hpp | 4 ++-- .../shared_test_classes/single_layer/constant.hpp | 4 ++-- .../single_layer/convert_color_i420.hpp | 4 ++-- .../single_layer/convert_color_nv12.hpp | 4 ++-- .../single_layer/convolution.hpp | 4 ++-- .../single_layer/convolution_backprop.hpp | 4 ++-- .../single_layer/convolution_backprop_data.hpp | 4 ++-- .../shared_test_classes/single_layer/cum_sum.hpp | 2 +- .../single_layer/deformable_convolution.hpp | 4 ++-- .../single_layer/deformable_psroi_pooling.hpp | 4 ++-- .../single_layer/depth_to_space.hpp | 4 ++-- .../shared_test_classes/single_layer/dft.hpp | 2 +- .../shared_test_classes/single_layer/eltwise.hpp | 2 +- .../experimental_detectron_detection_output.hpp | 2 +- ...l_detectron_generate_proposals_single_image.hpp | 2 +- ...experimental_detectron_prior_grid_generator.hpp | 2 +- .../experimental_detectron_roifeatureextractor.hpp | 2 +- .../experimental_detectron_topkrois.hpp | 2 +- .../single_layer/fake_quantize.hpp | 4 ++-- .../shared_test_classes/single_layer/gather.hpp | 4 ++-- .../shared_test_classes/single_layer/gather_nd.hpp | 4 ++-- .../single_layer/gather_tree.hpp | 4 ++-- .../single_layer/generate_proposals.hpp | 2 +- .../shared_test_classes/single_layer/grn.hpp | 4 ++-- .../single_layer/group_convolution.hpp | 4 ++-- .../group_convolution_backprop_data.hpp | 4 ++-- .../shared_test_classes/single_layer/gru_cell.hpp | 4 ++-- .../single_layer/gru_sequence.hpp | 4 ++-- .../single_layer/interpolate.hpp | 4 ++-- .../single_layer/log_softmax.hpp | 4 ++-- .../shared_test_classes/single_layer/loop.hpp | 4 ++-- .../single_layer/low_precision.hpp | 4 ++-- .../shared_test_classes/single_layer/lrn.hpp | 4 ++-- .../shared_test_classes/single_layer/lstm_cell.hpp | 4 ++-- .../single_layer/lstm_cell_basic.hpp | 4 ++-- .../single_layer/lstm_sequence.hpp | 4 ++-- .../single_layer/matrix_nms.hpp | 2 +- .../single_layer/minimum_maximum.hpp | 4 ++-- .../single_layer/multiclass_nms.hpp | 2 +- .../single_layer/non_max_suppression.hpp | 2 +- .../shared_test_classes/single_layer/nonzero.hpp | 4 ++-- .../single_layer/normalize_l2.hpp | 2 +- .../shared_test_classes/single_layer/one_hot.hpp | 2 +- .../shared_test_classes/single_layer/pad.hpp | 2 +- .../shared_test_classes/single_layer/pooling.hpp | 4 ++-- .../shared_test_classes/single_layer/power.hpp | 2 +- .../shared_test_classes/single_layer/prior_box.hpp | 4 ++-- .../single_layer/prior_box_clustered.hpp | 4 ++-- .../shared_test_classes/single_layer/proposal.hpp | 4 ++-- .../single_layer/psroi_pooling.hpp | 4 ++-- 
.../shared_test_classes/single_layer/range.hpp | 2 +- .../shared_test_classes/single_layer/rdft.hpp | 2 +- .../single_layer/reduce_ops.hpp | 2 +- .../single_layer/region_yolo.hpp | 4 ++-- .../single_layer/reorg_yolo.hpp | 4 ++-- .../shared_test_classes/single_layer/reshape.hpp | 4 ++-- .../shared_test_classes/single_layer/result.hpp | 4 ++-- .../single_layer/reverse_sequence.hpp | 4 ++-- .../shared_test_classes/single_layer/rnn_cell.hpp | 4 ++-- .../single_layer/rnn_sequence.hpp | 4 ++-- .../single_layer/roi_pooling.hpp | 4 ++-- .../shared_test_classes/single_layer/roll.hpp | 2 +- .../shared_test_classes/single_layer/select.hpp | 2 +- .../shared_test_classes/single_layer/shape_of.hpp | 4 ++-- .../shared_test_classes/single_layer/softmax.hpp | 2 +- .../single_layer/space_to_depth.hpp | 4 ++-- .../shared_test_classes/single_layer/split.hpp | 2 +- .../single_layer/squeeze_unsqueeze.hpp | 2 +- .../single_layer/tensor_iterator.hpp | 4 ++-- .../shared_test_classes/single_layer/tile.hpp | 2 +- .../shared_test_classes/single_layer/topk.hpp | 2 +- .../shared_test_classes/single_layer/transpose.hpp | 2 +- .../single_layer/variadic_split.hpp | 2 +- .../shared_test_classes/single_op/comparison.hpp | 2 +- .../shared_test_classes/subgraph/basic_lstm.hpp | 4 ++-- .../subgraph/broadcast_power.hpp | 4 ++-- .../subgraph/cascade_concat.hpp | 2 +- .../shared_test_classes/subgraph/concat_conv.hpp | 4 ++-- .../subgraph/concat_multi_input.hpp | 4 ++-- .../subgraph/concat_quantization.hpp | 4 ++-- ...t_quantization_during_memory_requantization.hpp | 4 ++-- .../subgraph/connect_split_concat_concat.hpp | 4 ++-- .../subgraph/const_conv_concat.hpp | 4 ++-- .../subgraph/const_strided_slice_concat.hpp | 4 ++-- .../subgraph/conv_eltwise_fusion.hpp | 2 +- .../subgraph/conv_fq_eltwise.hpp | 4 ++-- .../shared_test_classes/subgraph/conv_fq_relu.hpp | 4 ++-- .../subgraph/conv_strides_opt.hpp | 2 +- .../subgraph/convert_pad_to_group_conv.hpp | 2 +- .../subgraph/convolution_relu_sequence.hpp | 4 ++-- .../subgraph/copy_before_squeeze.hpp | 4 ++-- .../subgraph/delayed_copy_layer.hpp | 4 ++-- .../subgraph/eltwise_conv_eltwise.hpp | 4 ++-- .../shared_test_classes/subgraph/fc_conv_fc.hpp | 4 ++-- .../subgraph/first_connect_input_concat.hpp | 2 +- .../subgraph/fq_conv_fq_affine.hpp | 4 ++-- .../subgraph/fq_with_mixed_levels.hpp | 4 ++-- .../subgraph/handling_orientation_conv.hpp | 4 ++-- .../shared_test_classes/subgraph/input_conv.hpp | 4 ++-- .../subgraph/input_split_concat.hpp | 4 ++-- .../subgraph/matmul_act_add.hpp | 4 ++-- .../subgraph/matmul_squeeze_add.hpp | 4 ++-- .../subgraph/memory_fq_concat_prelu.hpp | 4 ++-- .../subgraph/multi_crops_to_concat.hpp | 4 ++-- .../multioutput_eltwise_squeeze_eltwise.hpp | 4 ++-- .../subgraph/multiple_connect_split_concat.hpp | 4 ++-- .../shared_test_classes/subgraph/multiply_add.hpp | 4 ++-- .../subgraph/mvn_multiply_add.hpp | 2 +- .../subgraph/negative_memory_layer_offset.hpp | 4 ++-- .../subgraph/parameter_reshape_result.hpp | 2 +- .../subgraph/parameter_result.hpp | 2 +- .../subgraph/parameter_shapeof_result.hpp | 2 +- .../subgraph/perm_conv_perm_concat.hpp | 4 ++-- .../subgraph/permute_concat_concat_permute.hpp | 2 +- .../shared_test_classes/subgraph/preprocess.hpp | 6 +++--- .../quantized_convolution_backprop_data.hpp | 4 ++-- .../subgraph/quantized_group_convolution.hpp | 4 ++-- .../quantized_group_convolution_backprop_data.hpp | 4 ++-- .../shared_test_classes/subgraph/range_add.hpp | 2 +- .../subgraph/reduce_eltwise.hpp | 4 ++-- .../shared_test_classes/subgraph/relu_shape_of.hpp | 2 +- 
.../subgraph/relu_split_reshape.hpp | 2 +- .../reshape_permute_conv_permute_reshape_act.hpp | 4 ++-- .../subgraph/reshape_permute_reshape.hpp | 4 ++-- .../subgraph/reshape_squeeze_reshape_relu.hpp | 4 ++-- .../subgraph/scaleshift_conv_scaleshift.hpp | 4 ++-- .../shared_test_classes/subgraph/softsign.hpp | 4 ++-- .../subgraph/split_concat_multi_inputs.hpp | 4 ++-- .../shared_test_classes/subgraph/split_conv.hpp | 4 ++-- .../subgraph/split_conv_concat.hpp | 4 ++-- .../shared_test_classes/subgraph/split_relu.hpp | 2 +- .../subgraph/split_trivial_permute_concat.hpp | 4 ++-- .../subgraph/stridedslice_concat.hpp | 4 ++-- .../subgraph/stridedslice_conv.hpp | 4 ++-- .../shared_test_classes/subgraph/tensor_names.hpp | 2 +- .../shared_test_classes/subgraph/transpose_add.hpp | 4 ++-- .../subgraph/transpose_conv_transpose_squeeze.hpp | 4 ++-- .../subgraph/trivial_concat.hpp | 4 ++-- .../two_fake_quantize_to_fullyconnected.hpp | 4 ++-- .../subgraph/variadic_split_pad.hpp | 2 +- .../layer_transformation.cpp | 2 +- .../shared_test_classes/src/base/ov_subgraph.cpp | 2 +- .../functional/shared_test_classes/src/precomp.hpp | 4 ++-- .../src/single_layer/adaptive_pooling.cpp | 2 +- .../src/single_layer/batch_to_space.cpp | 2 +- .../src/single_layer/broadcast.cpp | 2 +- .../src/single_layer/comparison.cpp | 2 +- .../src/single_layer/conversion.cpp | 2 +- .../src/single_layer/ctc_greedy_decoder.cpp | 2 +- .../single_layer/ctc_greedy_decoder_seq_len.cpp | 2 +- .../src/single_layer/ctc_loss.cpp | 2 +- .../src/single_layer/depth_to_space.cpp | 2 +- .../src/single_layer/detection_output.cpp | 2 +- .../src/single_layer/einsum.cpp | 2 +- .../src/single_layer/eltwise.cpp | 2 +- .../src/single_layer/embedding_bag_offsets_sum.cpp | 2 +- .../src/single_layer/embedding_bag_packed_sum.cpp | 2 +- .../src/single_layer/embedding_segments_sum.cpp | 2 +- .../experimental_detectron_detection_output.cpp | 2 +- ...l_detectron_generate_proposals_single_image.cpp | 2 +- ...experimental_detectron_prior_grid_generator.cpp | 2 +- .../experimental_detectron_roifeatureextractor.cpp | 2 +- .../experimental_detectron_topkrois.cpp | 2 +- .../src/single_layer/extract_image_patches.cpp | 2 +- .../shared_test_classes/src/single_layer/eye.cpp | 2 +- .../src/single_layer/gather_elements.cpp | 2 +- .../src/single_layer/generate_proposals.cpp | 2 +- .../src/single_layer/interpolate.cpp | 4 ++-- .../src/single_layer/is_inf.cpp | 2 +- .../src/single_layer/logical.cpp | 2 +- .../src/single_layer/low_precision.cpp | 2 +- .../src/single_layer/mat_mul.cpp | 2 +- .../src/single_layer/matrix_nms.cpp | 2 +- .../src/single_layer/memory.cpp | 2 +- .../src/single_layer/multiclass_nms.cpp | 2 +- .../shared_test_classes/src/single_layer/mvn.cpp | 2 +- .../src/single_layer/random_uniform.cpp | 2 +- .../src/single_layer/reverse.cpp | 2 +- .../src/single_layer/roi_align.cpp | 2 +- .../src/single_layer/scatter_ND_update.cpp | 2 +- .../src/single_layer/scatter_elements_update.cpp | 2 +- .../src/single_layer/scatter_update.cpp | 2 +- .../src/single_layer/shuffle_channels.cpp | 2 +- .../shared_test_classes/src/single_layer/slice.cpp | 2 +- .../src/single_layer/space_to_batch.cpp | 2 +- .../src/single_layer/space_to_depth.cpp | 2 +- .../src/single_layer/strided_slice.cpp | 2 +- .../src/single_op/comparison.cpp | 2 +- .../src/single_op/convolution_backprop_data.cpp | 2 +- .../shared_test_classes/src/single_op/dft.cpp | 2 +- .../shared_test_classes/src/single_op/eltwise.cpp | 2 +- .../src/single_op/embedding_bag_offsets_sum.cpp | 2 +- 
.../src/single_op/embedding_bag_packed_sum.cpp | 2 +- .../src/single_op/embedding_segments_sum.cpp | 2 +- .../src/single_op/fake_quantize.cpp | 2 +- .../src/single_op/gather_nd.cpp | 2 +- .../src/single_op/group_convolution.cpp | 2 +- .../single_op/group_convolution_backprop_data.cpp | 2 +- .../src/single_op/gru_sequence.cpp | 2 +- .../src/single_op/tensor_iterator.cpp | 2 +- .../src/subgraph/activation_concats_eltwise.cpp | 2 +- .../src/subgraph/activation_fq.cpp | 2 +- .../src/subgraph/basic_lstm.cpp | 2 +- .../shared_test_classes/src/subgraph/clamp_fq.cpp | 2 +- .../src/subgraph/concat_conv.cpp | 2 +- .../src/subgraph/const_conv_concat.cpp | 2 +- .../src/subgraph/const_strided_slice_concat.cpp | 2 +- .../src/subgraph/constant_result.cpp | 2 +- .../src/subgraph/eltwise_conv_eltwise.cpp | 2 +- .../src/subgraph/eltwise_reshape_activation.cpp | 2 +- .../src/subgraph/fc_conv_fc.cpp | 2 +- .../src/subgraph/fq_with_mixed_levels.cpp | 2 +- .../src/subgraph/get_output_before_activation.cpp | 2 +- .../src/subgraph/input_conv.cpp | 2 +- .../src/subgraph/input_split_concat.cpp | 2 +- .../matmul_const_transposes_extraction.cpp | 2 +- .../src/subgraph/matmul_multiply_fusion.cpp | 2 +- .../src/subgraph/matmul_squeeze_add.cpp | 2 +- .../src/subgraph/memory_LSTMCell.cpp | 2 +- .../src/subgraph/memory_eltwise_reshape_concat.cpp | 2 +- .../src/subgraph/mul_conv_fusion.cpp | 2 +- .../src/subgraph/multi_crops_to_concat.cpp | 2 +- .../src/subgraph/multi_input_scale.cpp | 2 +- .../src/subgraph/multiple_LSTMCell.cpp | 4 ++-- .../src/subgraph/multiple_concat.cpp | 2 +- .../src/subgraph/multiple_input_fq.cpp | 2 +- .../src/subgraph/permute_concat_permute.cpp | 2 +- .../src/subgraph/preprocess.cpp | 2 +- .../subgraph/quantized_convolution_batch_norm.cpp | 2 +- .../src/subgraph/quantized_mat_mul.cpp | 2 +- .../src/subgraph/reduce_eltwise.cpp | 2 +- .../src/subgraph/scale_shift.cpp | 2 +- .../src/subgraph/scaleshift_conv_scaleshift.cpp | 2 +- .../shared_test_classes/src/subgraph/simple_if.cpp | 2 +- .../shared_test_classes/src/subgraph/softsign.cpp | 2 +- .../src/subgraph/split_concat_multi_inputs.cpp | 2 +- .../src/subgraph/split_conv.cpp | 2 +- .../src/subgraph/strided_slice.cpp | 2 +- .../src/subgraph/stridedslice_concat.cpp | 2 +- .../src/subgraph/stridedslice_conv.cpp | 2 +- .../{ngraph_helpers => ov_helpers}/CMakeLists.txt | 6 +++--- .../ov_lpt_models}/CMakeLists.txt | 6 +++--- .../ov_lpt_models/include/ov_lpt_models/add.hpp} | 10 +++++----- .../align_concat_quantization_parameters.hpp} | 0 .../ov_lpt_models/assign_and_read_value.hpp} | 4 ++-- .../include/ov_lpt_models/avg_pool.hpp} | 0 .../include/ov_lpt_models/batch_to_space.hpp} | 4 ++-- .../ov_lpt_models/include/ov_lpt_models/clamp.hpp} | 4 ++-- .../include/ov_lpt_models}/common/add.hpp | 0 .../include/ov_lpt_models}/common/builders.hpp | 12 ++++++------ .../include/ov_lpt_models}/common/constant.hpp | 0 .../include/ov_lpt_models}/common/convolution.hpp | 0 .../common/dequantization_operations.hpp | 0 .../common/fake_quantize_on_data.hpp | 0 .../common/fake_quantize_on_weights.hpp | 0 .../include/ov_lpt_models}/common/multiply.hpp | 0 .../include/ov_lpt_models}/common/reshape.hpp | 0 .../include/ov_lpt_models}/common/transpose.hpp | 0 .../ov_lpt_models/compose_fake_quantize.hpp} | 4 ++-- .../include/ov_lpt_models/concat.hpp} | 0 .../include/ov_lpt_models/convolution.hpp} | 6 +++--- .../ov_lpt_models/convolution_backprop_data.hpp} | 6 +++--- .../include/ov_lpt_models/depth_to_space.hpp} | 2 +- .../include/ov_lpt_models/elementwise.hpp} | 8 ++++---- 
...ementwise_with_multi_parent_dequantization.hpp} | 4 ++-- .../include/ov_lpt_models/fake_quantize.hpp} | 2 +- .../fake_quantize_and_convolution.hpp} | 14 +++++++------- ...e_and_two_output_branches_with_convolution.hpp} | 6 +++--- ..._quantize_on_weights_and_unsupported_child.hpp} | 2 +- .../fake_quantize_precision_selection.hpp} | 4 ++-- .../include/ov_lpt_models/fold_fake_quantize.hpp} | 0 .../include/ov_lpt_models/fuse_convert.hpp} | 4 ++-- .../include/ov_lpt_models/fuse_fake_quantize.hpp} | 0 .../fuse_fake_quantize_and_scale_shift.hpp} | 0 .../fuse_multiply_to_fake_quantize.hpp} | 0 .../fuse_subtract_to_fake_quantize.hpp} | 0 .../include/ov_lpt_models/gather.hpp} | 4 ++-- .../include/ov_lpt_models/get_dequantization.hpp} | 4 ++-- .../include/ov_lpt_models/group_convolution.hpp} | 4 ++-- .../include/ov_lpt_models/interpolate.hpp} | 2 +- .../ov_lpt_models/markup_avg_pool_precisions.hpp} | 0 .../include/ov_lpt_models/markup_bias.hpp} | 0 .../include/ov_lpt_models/mat_mul.hpp} | 8 ++++---- ..._mul_with_optimized_constant_fake_quantize.hpp} | 2 +- .../include/ov_lpt_models/max_pool.hpp} | 2 +- .../ov_lpt_models/move_dequantization_after.hpp} | 4 ++-- .../include/ov_lpt_models/move_fake_quantize.hpp} | 0 .../mul_add_to_scaleshift_or_power.hpp} | 0 .../include/ov_lpt_models/multiply.hpp} | 6 +++--- .../ov_lpt_models}/multiply_partial_function.hpp | 6 +++--- .../multiply_to_group_convolution.hpp} | 6 +++--- .../ov_lpt_models/multiply_with_one_parent.hpp} | 2 +- .../ov_lpt_models/include/ov_lpt_models/mvn.hpp} | 2 +- .../ov_lpt_models/normalize_dequantization.hpp} | 2 +- .../include/ov_lpt_models/normalize_l2.hpp} | 2 +- .../ov_lpt_models/include/ov_lpt_models/pad.hpp} | 4 ++-- .../ov_lpt_models/precision_propagation.hpp} | 0 .../ov_lpt_models/include/ov_lpt_models/prelu.hpp} | 2 +- .../include/ov_lpt_models/recurrent_cell.hpp} | 0 .../include/ov_lpt_models/reduce.hpp} | 6 +++--- .../ov_lpt_models/include/ov_lpt_models/relu.hpp} | 2 +- .../include/ov_lpt_models/reshape.hpp} | 4 ++-- .../ov_lpt_models/include/ov_lpt_models/round.hpp} | 4 ++-- .../include/ov_lpt_models/shuffle_channels.hpp} | 4 ++-- .../include/ov_lpt_models/space_to_batch.hpp} | 4 ++-- .../ov_lpt_models/include/ov_lpt_models/split.hpp} | 4 ++-- .../include/ov_lpt_models/squeeze.hpp} | 2 +- .../include/ov_lpt_models/strided_slice.hpp} | 4 ++-- .../include/ov_lpt_models/subtract.hpp} | 6 +++--- .../subtract_multiply_to_multiply_add.hpp} | 8 ++++---- .../ov_lpt_models/transformations_after_split.hpp} | 0 .../include/ov_lpt_models/transpose.hpp} | 4 ++-- .../ov_lpt_models/transpose_after_mat_mul.hpp} | 6 +++--- .../include/ov_lpt_models/unsqueeze.hpp} | 2 +- .../include/ov_lpt_models/variadic_split.hpp} | 4 ++-- .../ov_lpt_models/src/add.cpp} | 6 +++--- .../src/align_concat_quantization_parameters.cpp} | 6 +++--- .../ov_lpt_models/src/assign_and_read_value.cpp} | 6 +++--- .../ov_lpt_models/src/avg_pool.cpp} | 6 +++--- .../ov_lpt_models/src/batch_to_space.cpp} | 4 ++-- .../ov_lpt_models/src/clamp.cpp} | 6 +++--- .../ov_lpt_models}/src/common/add.cpp | 2 +- .../ov_lpt_models}/src/common/builders.cpp | 4 ++-- .../ov_lpt_models}/src/common/constant.cpp | 2 +- .../ov_lpt_models}/src/common/convolution.cpp | 2 +- .../src/common/dequantization_operations.cpp | 2 +- .../src/common/fake_quantize_on_data.cpp | 2 +- .../src/common/fake_quantize_on_weights.cpp | 2 +- .../ov_lpt_models}/src/common/multiply.cpp | 2 +- .../ov_lpt_models}/src/common/reshape.cpp | 2 +- .../ov_lpt_models}/src/common/transpose.cpp | 2 +- 
.../ov_lpt_models/src/compose_fake_quantize.cpp} | 6 +++--- .../ov_lpt_models/src/concat.cpp} | 10 +++++----- .../ov_lpt_models/src/convolution.cpp} | 12 ++++++------ .../src/convolution_backprop_data.cpp} | 12 ++++++------ .../ov_lpt_models/src/depth_to_space.cpp} | 6 +++--- .../ov_lpt_models/src/elementwise.cpp} | 4 ++-- ...ementwise_with_multi_parent_dequantization.cpp} | 6 +++--- .../ov_lpt_models/src/fake_quantize.cpp} | 6 +++--- .../src/fake_quantize_and_convolution.cpp} | 6 +++--- ...e_and_two_output_branches_with_convolution.cpp} | 8 ++++---- ..._quantize_on_weights_and_unsupported_child.cpp} | 8 ++++---- .../src/fake_quantize_precision_selection.cpp} | 8 ++++---- .../ov_lpt_models/src/fold_fake_quantize.cpp} | 4 ++-- .../ov_lpt_models/src/fuse_convert.cpp} | 6 +++--- .../ov_lpt_models/src/fuse_fake_quantize.cpp} | 10 +++++----- .../src/fuse_fake_quantize_and_scale_shift.cpp} | 4 ++-- .../src/fuse_multiply_to_fake_quantize.cpp} | 10 +++++----- .../src/fuse_subtract_to_fake_quantize.cpp} | 10 +++++----- .../ov_lpt_models/src/gather.cpp} | 4 ++-- .../ov_lpt_models/src/get_dequantization.cpp} | 6 +++--- .../ov_lpt_models/src/group_convolution.cpp} | 12 ++++++------ .../ov_lpt_models/src/interpolate.cpp} | 6 +++--- .../src/markup_avg_pool_precisions.cpp} | 6 +++--- .../ov_lpt_models/src/markup_bias.cpp} | 6 +++--- .../ov_lpt_models/src/mat_mul.cpp} | 6 +++--- ..._mul_with_optimized_constant_fake_quantize.cpp} | 4 ++-- .../ov_lpt_models/src/max_pool.cpp} | 6 +++--- .../src/move_dequantization_after.cpp} | 6 +++--- .../ov_lpt_models/src/move_fake_quantize.cpp} | 8 ++++---- .../ov_lpt_models/src/multiply.cpp} | 8 ++++---- .../src/multiply_partial_function.cpp | 8 ++++---- .../src/multiply_to_group_convolution.cpp} | 6 +++--- .../src/multiply_with_one_parent.cpp} | 4 ++-- .../ov_lpt_models/src/mvn.cpp} | 6 +++--- .../src/normalize_dequantization.cpp} | 6 +++--- .../ov_lpt_models/src/normalize_l2.cpp} | 6 +++--- .../ov_lpt_models/src/pad.cpp} | 7 +++---- .../ov_lpt_models/src/precision_propagation.cpp} | 12 ++++++------ .../ov_lpt_models}/src/precomp.hpp | 0 .../ov_lpt_models/src/prelu.cpp} | 6 +++--- .../ov_lpt_models/src/recurrent_cell.cpp} | 12 ++++++------ .../ov_lpt_models/src/relu.cpp} | 4 ++-- .../ov_lpt_models/src/reshape.cpp} | 4 ++-- .../ov_lpt_models/src/round.cpp} | 6 +++--- .../ov_lpt_models/src/shuffle_channels.cpp} | 6 +++--- .../ov_lpt_models/src/space_to_batch.cpp} | 4 ++-- .../ov_lpt_models/src/split.cpp} | 8 ++++---- .../ov_lpt_models/src/squeeze.cpp} | 6 +++--- .../ov_lpt_models/src/strided_slice.cpp} | 6 +++--- .../ov_lpt_models/src/subtract.cpp} | 6 +++--- .../src/subtract_multiply_to_multiply_add.cpp} | 4 ++-- .../src/transformations_after_split.cpp} | 8 ++++---- .../ov_lpt_models/src/transpose.cpp} | 4 ++-- .../ov_lpt_models/src/transpose_after_mat_mul.cpp} | 6 +++--- .../ov_lpt_models/src/unsqueeze.cpp} | 6 +++--- .../ov_lpt_models/src/variadic_split.cpp} | 8 ++++---- .../ov_models}/CMakeLists.txt | 2 +- .../ov_models/include/ov_models}/builders.hpp | 4 ++-- .../include/ov_models}/pass/convert_prc.hpp | 0 .../ov_models}/preprocess/preprocess_builders.hpp | 0 .../include/ov_models}/subgraph_builders.hpp | 2 +- .../include/ov_models}/utils/data_utils.hpp | 0 .../include/ov_models/utils/ov_helpers.hpp} | 0 .../ov_models}/src/activation.cpp | 0 .../ov_models}/src/augru_cell.cpp | 2 +- .../ov_models}/src/batch_norm.cpp | 2 +- .../ov_models}/src/batch_to_space.cpp | 2 +- .../ov_models}/src/binary_convolution.cpp | 4 ++-- .../ov_models}/src/broadcast.cpp | 2 +- 
.../ov_models}/src/comparison.cpp | 2 +- .../ov_models}/src/concat.cpp | 2 +- .../ov_models}/src/conversion.cpp | 0 .../ov_models}/src/convolution.cpp | 2 +- .../ov_models}/src/convolution_backprop_data.cpp | 2 +- .../ov_models}/src/ctc_greedy_decoder.cpp | 2 +- .../ov_models}/src/ctc_greedy_decoder_seq_len.cpp | 2 +- .../ov_models}/src/ctc_loss.cpp | 2 +- .../ov_models}/src/cum_sum.cpp | 2 +- .../ov_models}/src/depth_to_space.cpp | 2 +- .../ov_models}/src/detection_output.cpp | 2 +- .../ov_models}/src/dft.cpp | 2 +- .../ov_models}/src/einsum.cpp | 2 +- .../ov_models}/src/eltwise.cpp | 2 +- .../ov_models}/src/embedding_bag_offsets_sum.cpp | 2 +- .../ov_models}/src/embedding_bag_packed_sum.cpp | 2 +- .../ov_models}/src/embedding_segments_sum.cpp | 2 +- .../ov_models}/src/fake_quantize.cpp | 2 +- .../ov_models}/src/fully_connected.cpp | 2 +- .../ov_models}/src/gather_elements.cpp | 2 +- .../ov_models}/src/gather_nd.cpp | 2 +- .../ov_models}/src/group_convolution.cpp | 2 +- .../src/group_convolution_backprop_data.cpp | 2 +- .../ov_models}/src/gru_cell.cpp | 2 +- .../ov_models}/src/input_layer.cpp | 4 ++-- .../ov_models}/src/logical.cpp | 2 +- .../ov_models}/src/lstm_cell.cpp | 2 +- .../ov_models}/src/mat_mul.cpp | 2 +- .../ov_models}/src/minimum_maximum.cpp | 2 +- .../ov_models}/src/mvn.cpp | 2 +- .../ov_models}/src/non_max_suppression.cpp | 2 +- .../ov_models}/src/normalize_l2.cpp | 2 +- .../ov_models}/src/one_hot.cpp | 2 +- .../ov_models}/src/pad.cpp | 2 +- .../ov_models}/src/pooling.cpp | 2 +- .../ov_models}/src/precomp.hpp | 0 .../ov_models}/src/proposal.cpp | 2 +- .../ov_models}/src/rdft.cpp | 0 .../ov_models}/src/reduce.cpp | 0 .../ov_models}/src/rnn_cell.cpp | 2 +- .../ov_models}/src/roi_pooling.cpp | 0 .../ov_models}/src/roll.cpp | 0 .../ov_models}/src/scatter_ND_update.cpp | 0 .../ov_models}/src/scatter_elements_update.cpp | 2 +- .../ov_models}/src/scatter_update.cpp | 2 +- .../ov_models}/src/select.cpp | 2 +- .../ov_models}/src/shuffle_channels.cpp | 2 +- .../ov_models}/src/space_to_batch.cpp | 2 +- .../ov_models}/src/space_to_depth.cpp | 2 +- .../ov_models}/src/split.cpp | 2 +- .../ov_models}/src/squeeze_unsqueeze.cpp | 2 +- .../ov_models}/src/strided_slice.cpp | 2 +- .../ov_models}/src/subgraph_builders.cpp | 4 ++-- .../ov_models}/src/tile.cpp | 2 +- .../ov_models/src/utils/ov_helpers.cpp} | 2 +- .../ov_models}/src/variadic_split.cpp | 2 +- .../ov_snippets_models}/CMakeLists.txt | 4 ++-- .../include/fake_quantize_helper.hpp} | 0 .../include/function_helper.hpp | 0 .../include/precision_propagation.hpp} | 0 .../include/precision_propagation_convertion.hpp} | 0 .../include/snippets_helpers.hpp | 0 .../include/subgraph_converts.hpp | 0 .../include/subgraph_customizable.hpp | 0 .../ov_snippets_models}/include/subgraph_fq.hpp | 0 .../include/subgraph_lowered.hpp | 0 .../include/subgraph_matmul.hpp | 0 .../ov_snippets_models}/include/subgraph_mha.hpp | 0 .../include/subgraph_roll_matmul_roll.hpp | 0 .../include/subgraph_simple.hpp | 0 .../include/subgraph_softmax.hpp | 0 .../include/subgraph_transpose.hpp | 0 .../ov_snippets_models/include/two_binary_ops.hpp} | 0 .../src/fake_quantize_helper.cpp} | 4 ++-- .../ov_snippets_models}/src/function_helper.cpp | 2 +- .../src/precision_propagation.cpp} | 2 +- .../src/precision_propagation_convertion.cpp} | 2 +- .../ov_snippets_models}/src/precomp.hpp | 0 .../ov_snippets_models}/src/snippets_helpers.cpp | 0 .../ov_snippets_models}/src/subgraph_convert.cpp | 0 .../src/subgraph_customizable.cpp | 0 .../ov_snippets_models}/src/subgraph_fq.cpp | 2 +- 
.../ov_snippets_models}/src/subgraph_lowered.cpp | 2 +- .../ov_snippets_models}/src/subgraph_matmul.cpp | 2 +- .../ov_snippets_models}/src/subgraph_mha.cpp | 4 ++-- .../src/subgraph_roll_matmul_roll.cpp | 0 .../ov_snippets_models}/src/subgraph_simple.cpp | 0 .../ov_snippets_models}/src/subgraph_softmax.cpp | 2 +- .../ov_snippets_models}/src/subgraph_transpose.cpp | 0 .../ov_snippets_models/src/two_binary_ops.cpp} | 2 +- .../test_utils/common_test_utils/CMakeLists.txt | 2 +- .../common_test_utils/src/graph_comparator.cpp | 2 +- .../functional_test_utils/CMakeLists.txt | 2 +- .../src/test_model/test_model.cpp | 2 +- 1078 files changed, 1669 insertions(+), 1673 deletions(-) rename src/tests/{ngraph_helpers => ov_helpers}/CMakeLists.txt (50%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions => ov_helpers/ov_lpt_models}/CMakeLists.txt (87%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/add_function.hpp => ov_helpers/ov_lpt_models/include/ov_lpt_models/add.hpp} (92%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/align_concat_quantization_parameters_function.hpp => ov_helpers/ov_lpt_models/include/ov_lpt_models/align_concat_quantization_parameters.hpp} (100%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/assign_and_read_value_function.hpp => ov_helpers/ov_lpt_models/include/ov_lpt_models/assign_and_read_value.hpp} (92%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/avg_pool_function.hpp => ov_helpers/ov_lpt_models/include/ov_lpt_models/avg_pool.hpp} (100%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/batch_to_space_function.hpp => ov_helpers/ov_lpt_models/include/ov_lpt_models/batch_to_space.hpp} (92%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/clamp_function.hpp => ov_helpers/ov_lpt_models/include/ov_lpt_models/clamp.hpp} (91%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions => ov_helpers/ov_lpt_models/include/ov_lpt_models}/common/add.hpp (100%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions => ov_helpers/ov_lpt_models/include/ov_lpt_models}/common/builders.hpp (92%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions => ov_helpers/ov_lpt_models/include/ov_lpt_models}/common/constant.hpp (100%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions => ov_helpers/ov_lpt_models/include/ov_lpt_models}/common/convolution.hpp (100%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions => ov_helpers/ov_lpt_models/include/ov_lpt_models}/common/dequantization_operations.hpp (100%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions => ov_helpers/ov_lpt_models/include/ov_lpt_models}/common/fake_quantize_on_data.hpp (100%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions => ov_helpers/ov_lpt_models/include/ov_lpt_models}/common/fake_quantize_on_weights.hpp (100%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions => ov_helpers/ov_lpt_models/include/ov_lpt_models}/common/multiply.hpp (100%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions => ov_helpers/ov_lpt_models/include/ov_lpt_models}/common/reshape.hpp (100%) rename 
src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions => ov_helpers/ov_lpt_models/include/ov_lpt_models}/common/transpose.hpp (100%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/compose_fake_quantize_function.hpp => ov_helpers/ov_lpt_models/include/ov_lpt_models/compose_fake_quantize.hpp} (85%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/concat_function.hpp => ov_helpers/ov_lpt_models/include/ov_lpt_models/concat.hpp} (100%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/convolution_function.hpp => ov_helpers/ov_lpt_models/include/ov_lpt_models/convolution.hpp} (94%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/convolution_backprop_data_function.hpp => ov_helpers/ov_lpt_models/include/ov_lpt_models/convolution_backprop_data.hpp} (91%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/depth_to_space_function.hpp => ov_helpers/ov_lpt_models/include/ov_lpt_models/depth_to_space.hpp} (95%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/elementwise_function.hpp => ov_helpers/ov_lpt_models/include/ov_lpt_models/elementwise.hpp} (82%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/elementwise_with_multi_parent_dequantization_function.hpp => ov_helpers/ov_lpt_models/include/ov_lpt_models/elementwise_with_multi_parent_dequantization.hpp} (94%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/fake_quantize_function.hpp => ov_helpers/ov_lpt_models/include/ov_lpt_models/fake_quantize.hpp} (96%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/fake_quantize_and_convolution_function.hpp => ov_helpers/ov_lpt_models/include/ov_lpt_models/fake_quantize_and_convolution.hpp} (84%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/fake_quantize_and_two_output_branches_with_convolution_function.hpp => ov_helpers/ov_lpt_models/include/ov_lpt_models/fake_quantize_and_two_output_branches_with_convolution.hpp} (87%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/fake_quantize_on_weights_and_unsupported_child_function.hpp => ov_helpers/ov_lpt_models/include/ov_lpt_models/fake_quantize_on_weights_and_unsupported_child.hpp} (90%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/fake_quantize_precision_selection_function.hpp => ov_helpers/ov_lpt_models/include/ov_lpt_models/fake_quantize_precision_selection.hpp} (93%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/fold_fake_quantize_function.hpp => ov_helpers/ov_lpt_models/include/ov_lpt_models/fold_fake_quantize.hpp} (100%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/fuse_convert_function.hpp => ov_helpers/ov_lpt_models/include/ov_lpt_models/fuse_convert.hpp} (88%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/fuse_fake_quantize_function.hpp => ov_helpers/ov_lpt_models/include/ov_lpt_models/fuse_fake_quantize.hpp} (100%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/fuse_fake_quantize_and_scale_shift_function.hpp => ov_helpers/ov_lpt_models/include/ov_lpt_models/fuse_fake_quantize_and_scale_shift.hpp} (100%) rename 
src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/fuse_multiply_to_fake_quantize_function.hpp => ov_helpers/ov_lpt_models/include/ov_lpt_models/fuse_multiply_to_fake_quantize.hpp} (100%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/fuse_subtract_to_fake_quantize_function.hpp => ov_helpers/ov_lpt_models/include/ov_lpt_models/fuse_subtract_to_fake_quantize.hpp} (100%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/gather_function.hpp => ov_helpers/ov_lpt_models/include/ov_lpt_models/gather.hpp} (93%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/get_dequantization_function.hpp => ov_helpers/ov_lpt_models/include/ov_lpt_models/get_dequantization.hpp} (90%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/group_convolution_function.hpp => ov_helpers/ov_lpt_models/include/ov_lpt_models/group_convolution.hpp} (94%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/interpolate_function.hpp => ov_helpers/ov_lpt_models/include/ov_lpt_models/interpolate.hpp} (97%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/markup_avg_pool_precisions_function.hpp => ov_helpers/ov_lpt_models/include/ov_lpt_models/markup_avg_pool_precisions.hpp} (100%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/markup_bias_function.hpp => ov_helpers/ov_lpt_models/include/ov_lpt_models/markup_bias.hpp} (100%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/mat_mul_function.hpp => ov_helpers/ov_lpt_models/include/ov_lpt_models/mat_mul.hpp} (92%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/mat_mul_with_optimized_constant_fake_quantize_function.hpp => ov_helpers/ov_lpt_models/include/ov_lpt_models/mat_mul_with_optimized_constant_fake_quantize.hpp} (90%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/max_pool_function.hpp => ov_helpers/ov_lpt_models/include/ov_lpt_models/max_pool.hpp} (93%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/move_dequantization_after_function.hpp => ov_helpers/ov_lpt_models/include/ov_lpt_models/move_dequantization_after.hpp} (88%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/move_fake_quantize_function.hpp => ov_helpers/ov_lpt_models/include/ov_lpt_models/move_fake_quantize.hpp} (100%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/mul_add_to_scaleshift_or_power_function.hpp => ov_helpers/ov_lpt_models/include/ov_lpt_models/mul_add_to_scaleshift_or_power.hpp} (100%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/multiply_function.hpp => ov_helpers/ov_lpt_models/include/ov_lpt_models/multiply.hpp} (92%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions => ov_helpers/ov_lpt_models/include/ov_lpt_models}/multiply_partial_function.hpp (91%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/multiply_to_group_convolution_function.hpp => ov_helpers/ov_lpt_models/include/ov_lpt_models/multiply_to_group_convolution.hpp} (88%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/multiply_with_one_parent_function.hpp => ov_helpers/ov_lpt_models/include/ov_lpt_models/multiply_with_one_parent.hpp} 
(89%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/mvn_function.hpp => ov_helpers/ov_lpt_models/include/ov_lpt_models/mvn.hpp} (95%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/normalize_dequantization_function.hpp => ov_helpers/ov_lpt_models/include/ov_lpt_models/normalize_dequantization.hpp} (89%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/normalize_l2_function.hpp => ov_helpers/ov_lpt_models/include/ov_lpt_models/normalize_l2.hpp} (95%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/pad_function.hpp => ov_helpers/ov_lpt_models/include/ov_lpt_models/pad.hpp} (89%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/precision_propagation_function.hpp => ov_helpers/ov_lpt_models/include/ov_lpt_models/precision_propagation.hpp} (100%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/prelu_function.hpp => ov_helpers/ov_lpt_models/include/ov_lpt_models/prelu.hpp} (94%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/recurrent_cell_function.hpp => ov_helpers/ov_lpt_models/include/ov_lpt_models/recurrent_cell.hpp} (100%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/reduce_function.hpp => ov_helpers/ov_lpt_models/include/ov_lpt_models/reduce.hpp} (97%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/relu_function.hpp => ov_helpers/ov_lpt_models/include/ov_lpt_models/relu.hpp} (94%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/reshape_function.hpp => ov_helpers/ov_lpt_models/include/ov_lpt_models/reshape.hpp} (91%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/round_function.hpp => ov_helpers/ov_lpt_models/include/ov_lpt_models/round.hpp} (86%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/shuffle_channels_function.hpp => ov_helpers/ov_lpt_models/include/ov_lpt_models/shuffle_channels.hpp} (92%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/space_to_batch_function.hpp => ov_helpers/ov_lpt_models/include/ov_lpt_models/space_to_batch.hpp} (92%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/split_function.hpp => ov_helpers/ov_lpt_models/include/ov_lpt_models/split.hpp} (91%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/squeeze_function.hpp => ov_helpers/ov_lpt_models/include/ov_lpt_models/squeeze.hpp} (95%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/strided_slice_function.hpp => ov_helpers/ov_lpt_models/include/ov_lpt_models/strided_slice.hpp} (95%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/subtract_function.hpp => ov_helpers/ov_lpt_models/include/ov_lpt_models/subtract.hpp} (73%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/subtract_multiply_to_multiply_add_function.hpp => ov_helpers/ov_lpt_models/include/ov_lpt_models/subtract_multiply_to_multiply_add.hpp} (85%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/transformations_after_split_function.hpp => ov_helpers/ov_lpt_models/include/ov_lpt_models/transformations_after_split.hpp} (100%) rename 
src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/transpose_function.hpp => ov_helpers/ov_lpt_models/include/ov_lpt_models/transpose.hpp} (91%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/transpose_after_mat_mul_function.hpp => ov_helpers/ov_lpt_models/include/ov_lpt_models/transpose_after_mat_mul.hpp} (74%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/unsqueeze_function.hpp => ov_helpers/ov_lpt_models/include/ov_lpt_models/unsqueeze.hpp} (95%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/variadic_split_function.hpp => ov_helpers/ov_lpt_models/include/ov_lpt_models/variadic_split.hpp} (91%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/src/add_function.cpp => ov_helpers/ov_lpt_models/src/add.cpp} (98%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/src/align_concat_quantization_parameters_function.cpp => ov_helpers/ov_lpt_models/src/align_concat_quantization_parameters.cpp} (98%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/src/assign_and_read_value_function.cpp => ov_helpers/ov_lpt_models/src/assign_and_read_value.cpp} (98%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/src/avg_pool_function.cpp => ov_helpers/ov_lpt_models/src/avg_pool.cpp} (97%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/src/batch_to_space_function.cpp => ov_helpers/ov_lpt_models/src/batch_to_space.cpp} (96%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/src/clamp_function.cpp => ov_helpers/ov_lpt_models/src/clamp.cpp} (96%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions => ov_helpers/ov_lpt_models}/src/common/add.cpp (96%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions => ov_helpers/ov_lpt_models}/src/common/builders.cpp (99%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions => ov_helpers/ov_lpt_models}/src/common/constant.cpp (95%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions => ov_helpers/ov_lpt_models}/src/common/convolution.cpp (93%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions => ov_helpers/ov_lpt_models}/src/common/dequantization_operations.cpp (98%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions => ov_helpers/ov_lpt_models}/src/common/fake_quantize_on_data.cpp (97%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions => ov_helpers/ov_lpt_models}/src/common/fake_quantize_on_weights.cpp (93%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions => ov_helpers/ov_lpt_models}/src/common/multiply.cpp (96%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions => ov_helpers/ov_lpt_models}/src/common/reshape.cpp (90%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions => ov_helpers/ov_lpt_models}/src/common/transpose.cpp (89%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/src/compose_fake_quantize_function.cpp => ov_helpers/ov_lpt_models/src/compose_fake_quantize.cpp} (91%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/src/concat_function.cpp => ov_helpers/ov_lpt_models/src/concat.cpp} (99%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/src/convolution_function.cpp => ov_helpers/ov_lpt_models/src/convolution.cpp} (98%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/src/convolution_backprop_data_function.cpp => ov_helpers/ov_lpt_models/src/convolution_backprop_data.cpp} (95%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/src/depth_to_space_function.cpp => ov_helpers/ov_lpt_models/src/depth_to_space.cpp} (95%) rename 
src/tests/{ngraph_helpers/lpt_ngraph_functions/src/elementwise_function.cpp => ov_helpers/ov_lpt_models/src/elementwise.cpp} (97%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/src/elementwise_with_multi_parent_dequantization_function.cpp => ov_helpers/ov_lpt_models/src/elementwise_with_multi_parent_dequantization.cpp} (94%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/src/fake_quantize_function.cpp => ov_helpers/ov_lpt_models/src/fake_quantize.cpp} (97%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/src/fake_quantize_and_convolution_function.cpp => ov_helpers/ov_lpt_models/src/fake_quantize_and_convolution.cpp} (98%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/src/fake_quantize_and_two_output_branches_with_convolution_function.cpp => ov_helpers/ov_lpt_models/src/fake_quantize_and_two_output_branches_with_convolution.cpp} (96%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/src/fake_quantize_on_weights_and_unsupported_child_function.cpp => ov_helpers/ov_lpt_models/src/fake_quantize_on_weights_and_unsupported_child.cpp} (87%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/src/fake_quantize_precision_selection_function.cpp => ov_helpers/ov_lpt_models/src/fake_quantize_precision_selection.cpp} (96%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/src/fold_fake_quantize_function.cpp => ov_helpers/ov_lpt_models/src/fold_fake_quantize.cpp} (93%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/src/fuse_convert_function.cpp => ov_helpers/ov_lpt_models/src/fuse_convert.cpp} (95%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/src/fuse_fake_quantize_function.cpp => ov_helpers/ov_lpt_models/src/fuse_fake_quantize.cpp} (96%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/src/fuse_fake_quantize_and_scale_shift_function.cpp => ov_helpers/ov_lpt_models/src/fuse_fake_quantize_and_scale_shift.cpp} (93%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/src/fuse_multiply_to_fake_quantize_function.cpp => ov_helpers/ov_lpt_models/src/fuse_multiply_to_fake_quantize.cpp} (78%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/src/fuse_subtract_to_fake_quantize_function.cpp => ov_helpers/ov_lpt_models/src/fuse_subtract_to_fake_quantize.cpp} (89%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/src/gather_function.cpp => ov_helpers/ov_lpt_models/src/gather.cpp} (98%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/src/get_dequantization_function.cpp => ov_helpers/ov_lpt_models/src/get_dequantization.cpp} (97%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/src/group_convolution_function.cpp => ov_helpers/ov_lpt_models/src/group_convolution.cpp} (97%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/src/interpolate_function.cpp => ov_helpers/ov_lpt_models/src/interpolate.cpp} (97%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/src/markup_avg_pool_precisions_function.cpp => ov_helpers/ov_lpt_models/src/markup_avg_pool_precisions.cpp} (98%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/src/markup_bias_function.cpp => ov_helpers/ov_lpt_models/src/markup_bias.cpp} (97%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/src/mat_mul_function.cpp => ov_helpers/ov_lpt_models/src/mat_mul.cpp} (98%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/src/mat_mul_with_optimized_constant_fake_quantize_function.cpp => ov_helpers/ov_lpt_models/src/mat_mul_with_optimized_constant_fake_quantize.cpp} (95%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/src/max_pool_function.cpp => 
ov_helpers/ov_lpt_models/src/max_pool.cpp} (94%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/src/move_dequantization_after_function.cpp => ov_helpers/ov_lpt_models/src/move_dequantization_after.cpp} (94%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/src/move_fake_quantize_function.cpp => ov_helpers/ov_lpt_models/src/move_fake_quantize.cpp} (95%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/src/multiply_function.cpp => ov_helpers/ov_lpt_models/src/multiply.cpp} (93%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions => ov_helpers/ov_lpt_models}/src/multiply_partial_function.cpp (96%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/src/multiply_to_group_convolution_function.cpp => ov_helpers/ov_lpt_models/src/multiply_to_group_convolution.cpp} (96%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/src/multiply_with_one_parent_function.cpp => ov_helpers/ov_lpt_models/src/multiply_with_one_parent.cpp} (91%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/src/mvn_function.cpp => ov_helpers/ov_lpt_models/src/mvn.cpp} (96%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/src/normalize_dequantization_function.cpp => ov_helpers/ov_lpt_models/src/normalize_dequantization.cpp} (89%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/src/normalize_l2_function.cpp => ov_helpers/ov_lpt_models/src/normalize_l2.cpp} (97%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/src/pad_function.cpp => ov_helpers/ov_lpt_models/src/pad.cpp} (95%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/src/precision_propagation_function.cpp => ov_helpers/ov_lpt_models/src/precision_propagation.cpp} (97%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions => ov_helpers/ov_lpt_models}/src/precomp.hpp (100%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/src/prelu_function.cpp => ov_helpers/ov_lpt_models/src/prelu.cpp} (95%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/src/recurrent_cell_function.cpp => ov_helpers/ov_lpt_models/src/recurrent_cell.cpp} (95%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/src/relu_function.cpp => ov_helpers/ov_lpt_models/src/relu.cpp} (97%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/src/reshape_function.cpp => ov_helpers/ov_lpt_models/src/reshape.cpp} (97%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/src/round_function.cpp => ov_helpers/ov_lpt_models/src/round.cpp} (92%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/src/shuffle_channels_function.cpp => ov_helpers/ov_lpt_models/src/shuffle_channels.cpp} (95%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/src/space_to_batch_function.cpp => ov_helpers/ov_lpt_models/src/space_to_batch.cpp} (96%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/src/split_function.cpp => ov_helpers/ov_lpt_models/src/split.cpp} (95%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/src/squeeze_function.cpp => ov_helpers/ov_lpt_models/src/squeeze.cpp} (95%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/src/strided_slice_function.cpp => ov_helpers/ov_lpt_models/src/strided_slice.cpp} (97%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/src/subtract_function.cpp => ov_helpers/ov_lpt_models/src/subtract.cpp} (92%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/src/subtract_multiply_to_multiply_add_function.cpp => ov_helpers/ov_lpt_models/src/subtract_multiply_to_multiply_add.cpp} (96%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/src/transformations_after_split_function.cpp => 
ov_helpers/ov_lpt_models/src/transformations_after_split.cpp} (97%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/src/transpose_function.cpp => ov_helpers/ov_lpt_models/src/transpose.cpp} (97%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/src/transpose_after_mat_mul_function.cpp => ov_helpers/ov_lpt_models/src/transpose_after_mat_mul.cpp} (92%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/src/unsqueeze_function.cpp => ov_helpers/ov_lpt_models/src/unsqueeze.cpp} (96%) rename src/tests/{ngraph_helpers/lpt_ngraph_functions/src/variadic_split_function.cpp => ov_helpers/ov_lpt_models/src/variadic_split.cpp} (95%) rename src/tests/{ngraph_helpers/ngraph_functions => ov_helpers/ov_models}/CMakeLists.txt (95%) rename src/tests/{ngraph_helpers/ngraph_functions/include/ngraph_functions => ov_helpers/ov_models/include/ov_models}/builders.hpp (99%) rename src/tests/{ngraph_helpers/ngraph_functions/include/ngraph_functions => ov_helpers/ov_models/include/ov_models}/pass/convert_prc.hpp (100%) rename src/tests/{ngraph_helpers/ngraph_functions/include/ngraph_functions => ov_helpers/ov_models/include/ov_models}/preprocess/preprocess_builders.hpp (100%) rename src/tests/{ngraph_helpers/ngraph_functions/include/ngraph_functions => ov_helpers/ov_models/include/ov_models}/subgraph_builders.hpp (99%) rename src/tests/{ngraph_helpers/ngraph_functions/include/ngraph_functions => ov_helpers/ov_models/include/ov_models}/utils/data_utils.hpp (100%) rename src/tests/{ngraph_helpers/ngraph_functions/include/ngraph_functions/utils/ngraph_helpers.hpp => ov_helpers/ov_models/include/ov_models/utils/ov_helpers.hpp} (100%) rename src/tests/{ngraph_helpers/ngraph_functions => ov_helpers/ov_models}/src/activation.cpp (100%) rename src/tests/{ngraph_helpers/ngraph_functions => ov_helpers/ov_models}/src/augru_cell.cpp (99%) rename src/tests/{ngraph_helpers/ngraph_functions => ov_helpers/ov_models}/src/batch_norm.cpp (96%) rename src/tests/{ngraph_helpers/ngraph_functions => ov_helpers/ov_models}/src/batch_to_space.cpp (96%) rename src/tests/{ngraph_helpers/ngraph_functions => ov_helpers/ov_models}/src/binary_convolution.cpp (95%) rename src/tests/{ngraph_helpers/ngraph_functions => ov_helpers/ov_models}/src/broadcast.cpp (95%) rename src/tests/{ngraph_helpers/ngraph_functions => ov_helpers/ov_models}/src/comparison.cpp (96%) rename src/tests/{ngraph_helpers/ngraph_functions => ov_helpers/ov_models}/src/concat.cpp (90%) rename src/tests/{ngraph_helpers/ngraph_functions => ov_helpers/ov_models}/src/conversion.cpp (100%) rename src/tests/{ngraph_helpers/ngraph_functions => ov_helpers/ov_models}/src/convolution.cpp (98%) rename src/tests/{ngraph_helpers/ngraph_functions => ov_helpers/ov_models}/src/convolution_backprop_data.cpp (99%) rename src/tests/{ngraph_helpers/ngraph_functions => ov_helpers/ov_models}/src/ctc_greedy_decoder.cpp (96%) rename src/tests/{ngraph_helpers/ngraph_functions => ov_helpers/ov_models}/src/ctc_greedy_decoder_seq_len.cpp (98%) rename src/tests/{ngraph_helpers/ngraph_functions => ov_helpers/ov_models}/src/ctc_loss.cpp (98%) rename src/tests/{ngraph_helpers/ngraph_functions => ov_helpers/ov_models}/src/cum_sum.cpp (92%) rename src/tests/{ngraph_helpers/ngraph_functions => ov_helpers/ov_models}/src/depth_to_space.cpp (92%) rename src/tests/{ngraph_helpers/ngraph_functions => ov_helpers/ov_models}/src/detection_output.cpp (96%) rename src/tests/{ngraph_helpers/ngraph_functions => ov_helpers/ov_models}/src/dft.cpp (97%) rename src/tests/{ngraph_helpers/ngraph_functions => 
ov_helpers/ov_models}/src/einsum.cpp (91%) rename src/tests/{ngraph_helpers/ngraph_functions => ov_helpers/ov_models}/src/eltwise.cpp (96%) rename src/tests/{ngraph_helpers/ngraph_functions => ov_helpers/ov_models}/src/embedding_bag_offsets_sum.cpp (98%) rename src/tests/{ngraph_helpers/ngraph_functions => ov_helpers/ov_models}/src/embedding_bag_packed_sum.cpp (97%) rename src/tests/{ngraph_helpers/ngraph_functions => ov_helpers/ov_models}/src/embedding_segments_sum.cpp (98%) rename src/tests/{ngraph_helpers/ngraph_functions => ov_helpers/ov_models}/src/fake_quantize.cpp (99%) rename src/tests/{ngraph_helpers/ngraph_functions => ov_helpers/ov_models}/src/fully_connected.cpp (97%) rename src/tests/{ngraph_helpers/ngraph_functions => ov_helpers/ov_models}/src/gather_elements.cpp (96%) rename src/tests/{ngraph_helpers/ngraph_functions => ov_helpers/ov_models}/src/gather_nd.cpp (98%) rename src/tests/{ngraph_helpers/ngraph_functions => ov_helpers/ov_models}/src/group_convolution.cpp (98%) rename src/tests/{ngraph_helpers/ngraph_functions => ov_helpers/ov_models}/src/group_convolution_backprop_data.cpp (99%) rename src/tests/{ngraph_helpers/ngraph_functions => ov_helpers/ov_models}/src/gru_cell.cpp (99%) rename src/tests/{ngraph_helpers/ngraph_functions => ov_helpers/ov_models}/src/input_layer.cpp (94%) rename src/tests/{ngraph_helpers/ngraph_functions => ov_helpers/ov_models}/src/logical.cpp (97%) rename src/tests/{ngraph_helpers/ngraph_functions => ov_helpers/ov_models}/src/lstm_cell.cpp (99%) rename src/tests/{ngraph_helpers/ngraph_functions => ov_helpers/ov_models}/src/mat_mul.cpp (90%) rename src/tests/{ngraph_helpers/ngraph_functions => ov_helpers/ov_models}/src/minimum_maximum.cpp (95%) rename src/tests/{ngraph_helpers/ngraph_functions => ov_helpers/ov_models}/src/mvn.cpp (97%) rename src/tests/{ngraph_helpers/ngraph_functions => ov_helpers/ov_models}/src/non_max_suppression.cpp (99%) rename src/tests/{ngraph_helpers/ngraph_functions => ov_helpers/ov_models}/src/normalize_l2.cpp (94%) rename src/tests/{ngraph_helpers/ngraph_functions => ov_helpers/ov_models}/src/one_hot.cpp (96%) rename src/tests/{ngraph_helpers/ngraph_functions => ov_helpers/ov_models}/src/pad.cpp (98%) rename src/tests/{ngraph_helpers/ngraph_functions => ov_helpers/ov_models}/src/pooling.cpp (98%) rename src/tests/{ngraph_helpers/ngraph_functions => ov_helpers/ov_models}/src/precomp.hpp (100%) rename src/tests/{ngraph_helpers/ngraph_functions => ov_helpers/ov_models}/src/proposal.cpp (98%) rename src/tests/{ngraph_helpers/ngraph_functions => ov_helpers/ov_models}/src/rdft.cpp (100%) rename src/tests/{ngraph_helpers/ngraph_functions => ov_helpers/ov_models}/src/reduce.cpp (100%) rename src/tests/{ngraph_helpers/ngraph_functions => ov_helpers/ov_models}/src/rnn_cell.cpp (99%) rename src/tests/{ngraph_helpers/ngraph_functions => ov_helpers/ov_models}/src/roi_pooling.cpp (100%) rename src/tests/{ngraph_helpers/ngraph_functions => ov_helpers/ov_models}/src/roll.cpp (100%) rename src/tests/{ngraph_helpers/ngraph_functions => ov_helpers/ov_models}/src/scatter_ND_update.cpp (100%) rename src/tests/{ngraph_helpers/ngraph_functions => ov_helpers/ov_models}/src/scatter_elements_update.cpp (96%) rename src/tests/{ngraph_helpers/ngraph_functions => ov_helpers/ov_models}/src/scatter_update.cpp (96%) rename src/tests/{ngraph_helpers/ngraph_functions => ov_helpers/ov_models}/src/select.cpp (92%) rename src/tests/{ngraph_helpers/ngraph_functions => ov_helpers/ov_models}/src/shuffle_channels.cpp (91%) rename 
src/tests/{ngraph_helpers/ngraph_functions => ov_helpers/ov_models}/src/space_to_batch.cpp (96%) rename src/tests/{ngraph_helpers/ngraph_functions => ov_helpers/ov_models}/src/space_to_depth.cpp (92%) rename src/tests/{ngraph_helpers/ngraph_functions => ov_helpers/ov_models}/src/split.cpp (94%) rename src/tests/{ngraph_helpers/ngraph_functions => ov_helpers/ov_models}/src/squeeze_unsqueeze.cpp (96%) rename src/tests/{ngraph_helpers/ngraph_functions => ov_helpers/ov_models}/src/strided_slice.cpp (99%) rename src/tests/{ngraph_helpers/ngraph_functions => ov_helpers/ov_models}/src/subgraph_builders.cpp (99%) rename src/tests/{ngraph_helpers/ngraph_functions => ov_helpers/ov_models}/src/tile.cpp (92%) rename src/tests/{ngraph_helpers/ngraph_functions/src/utils/ngraph_helpers.cpp => ov_helpers/ov_models/src/utils/ov_helpers.cpp} (99%) rename src/tests/{ngraph_helpers/ngraph_functions => ov_helpers/ov_models}/src/variadic_split.cpp (95%) rename src/tests/{ngraph_helpers/snippets_ngraph_functions => ov_helpers/ov_snippets_models}/CMakeLists.txt (92%) rename src/tests/{ngraph_helpers/snippets_ngraph_functions/include/fake_quantize_function.hpp => ov_helpers/ov_snippets_models/include/fake_quantize_helper.hpp} (100%) rename src/tests/{ngraph_helpers/snippets_ngraph_functions => ov_helpers/ov_snippets_models}/include/function_helper.hpp (100%) rename src/tests/{ngraph_helpers/snippets_ngraph_functions/include/precision_propagation_function.hpp => ov_helpers/ov_snippets_models/include/precision_propagation.hpp} (100%) rename src/tests/{ngraph_helpers/snippets_ngraph_functions/include/precision_propagation_convertion_function.hpp => ov_helpers/ov_snippets_models/include/precision_propagation_convertion.hpp} (100%) rename src/tests/{ngraph_helpers/snippets_ngraph_functions => ov_helpers/ov_snippets_models}/include/snippets_helpers.hpp (100%) rename src/tests/{ngraph_helpers/snippets_ngraph_functions => ov_helpers/ov_snippets_models}/include/subgraph_converts.hpp (100%) rename src/tests/{ngraph_helpers/snippets_ngraph_functions => ov_helpers/ov_snippets_models}/include/subgraph_customizable.hpp (100%) rename src/tests/{ngraph_helpers/snippets_ngraph_functions => ov_helpers/ov_snippets_models}/include/subgraph_fq.hpp (100%) rename src/tests/{ngraph_helpers/snippets_ngraph_functions => ov_helpers/ov_snippets_models}/include/subgraph_lowered.hpp (100%) rename src/tests/{ngraph_helpers/snippets_ngraph_functions => ov_helpers/ov_snippets_models}/include/subgraph_matmul.hpp (100%) rename src/tests/{ngraph_helpers/snippets_ngraph_functions => ov_helpers/ov_snippets_models}/include/subgraph_mha.hpp (100%) rename src/tests/{ngraph_helpers/snippets_ngraph_functions => ov_helpers/ov_snippets_models}/include/subgraph_roll_matmul_roll.hpp (100%) rename src/tests/{ngraph_helpers/snippets_ngraph_functions => ov_helpers/ov_snippets_models}/include/subgraph_simple.hpp (100%) rename src/tests/{ngraph_helpers/snippets_ngraph_functions => ov_helpers/ov_snippets_models}/include/subgraph_softmax.hpp (100%) rename src/tests/{ngraph_helpers/snippets_ngraph_functions => ov_helpers/ov_snippets_models}/include/subgraph_transpose.hpp (100%) rename src/tests/{ngraph_helpers/snippets_ngraph_functions/include/two_binary_ops_function.hpp => ov_helpers/ov_snippets_models/include/two_binary_ops.hpp} (100%) rename src/tests/{ngraph_helpers/snippets_ngraph_functions/src/fake_quantize_function.cpp => ov_helpers/ov_snippets_models/src/fake_quantize_helper.cpp} (99%) rename src/tests/{ngraph_helpers/snippets_ngraph_functions => 
ov_helpers/ov_snippets_models}/src/function_helper.cpp (98%)
 rename src/tests/{ngraph_helpers/snippets_ngraph_functions/src/precision_propagation_function.cpp => ov_helpers/ov_snippets_models/src/precision_propagation.cpp} (98%)
 rename src/tests/{ngraph_helpers/snippets_ngraph_functions/src/precision_propagation_convertion_function.cpp => ov_helpers/ov_snippets_models/src/precision_propagation_convertion.cpp} (98%)
 rename src/tests/{ngraph_helpers/snippets_ngraph_functions => ov_helpers/ov_snippets_models}/src/precomp.hpp (100%)
 rename src/tests/{ngraph_helpers/snippets_ngraph_functions => ov_helpers/ov_snippets_models}/src/snippets_helpers.cpp (100%)
 rename src/tests/{ngraph_helpers/snippets_ngraph_functions => ov_helpers/ov_snippets_models}/src/subgraph_convert.cpp (100%)
 rename src/tests/{ngraph_helpers/snippets_ngraph_functions => ov_helpers/ov_snippets_models}/src/subgraph_customizable.cpp (100%)
 rename src/tests/{ngraph_helpers/snippets_ngraph_functions => ov_helpers/ov_snippets_models}/src/subgraph_fq.cpp (99%)
 rename src/tests/{ngraph_helpers/snippets_ngraph_functions => ov_helpers/ov_snippets_models}/src/subgraph_lowered.cpp (99%)
 rename src/tests/{ngraph_helpers/snippets_ngraph_functions => ov_helpers/ov_snippets_models}/src/subgraph_matmul.cpp (99%)
 rename src/tests/{ngraph_helpers/snippets_ngraph_functions => ov_helpers/ov_snippets_models}/src/subgraph_mha.cpp (99%)
 rename src/tests/{ngraph_helpers/snippets_ngraph_functions => ov_helpers/ov_snippets_models}/src/subgraph_roll_matmul_roll.cpp (100%)
 rename src/tests/{ngraph_helpers/snippets_ngraph_functions => ov_helpers/ov_snippets_models}/src/subgraph_simple.cpp (100%)
 rename src/tests/{ngraph_helpers/snippets_ngraph_functions => ov_helpers/ov_snippets_models}/src/subgraph_softmax.cpp (98%)
 rename src/tests/{ngraph_helpers/snippets_ngraph_functions => ov_helpers/ov_snippets_models}/src/subgraph_transpose.cpp (100%)
 rename src/tests/{ngraph_helpers/snippets_ngraph_functions/src/two_binary_ops_function.cpp => ov_helpers/ov_snippets_models/src/two_binary_ops.cpp} (99%)

diff --git a/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/Building.md b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/Building.md
index 1925ab3d291cc2..17e930f29a3b6f 100644
--- a/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/Building.md
+++ b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/Building.md
@@ -39,7 +39,7 @@ Once the commands above are executed, the OpenVINO Developer Package is generate
  * ``openvino::common_test_utils`` - static library with common tests utilities
  * ``openvino::func_test_utils`` - static library with functional tests utilities
  * ``openvino::unit_test_utils`` - static library with unit tests utilities
- * ``openvino::ngraphFunctions`` - static library with the set of ``ov::Model`` builders
+ * ``openvino::ov_models`` - static library with the set of ``ov::Model`` builders
  * ``openvino::funcSharedTests`` - static library with common functional tests
  * ``openvino::ngraph_reference`` - static library with operation reference implementations.
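For context on the ``openvino::ov_models`` target mentioned above: a minimal, hypothetical sketch of the kind of small ``ov::Model`` builder this library collects. The helper name is invented for illustration and only the public OpenVINO core API is used; it is not code from this patch.

    // Illustrative sketch: a tiny ov::Model builder in the style provided by
    // openvino::ov_models. The function name make_small_relu_model is hypothetical.
    #include <memory>

    #include "openvino/core/model.hpp"
    #include "openvino/core/shape.hpp"
    #include "openvino/op/parameter.hpp"
    #include "openvino/op/relu.hpp"
    #include "openvino/op/result.hpp"

    std::shared_ptr<ov::Model> make_small_relu_model(const ov::Shape& shape) {
        // Single f32 input -> ReLU -> output: small enough to reuse across sub-graph tests.
        auto param  = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, shape);
        auto relu   = std::make_shared<ov::op::v0::Relu>(param);
        auto result = std::make_shared<ov::op::v0::Result>(relu);
        return std::make_shared<ov::Model>(ov::ResultVector{result},
                                           ov::ParameterVector{param},
                                           "SmallRelu");
    }

Such a builder returns a ready-to-use ``ov::Model`` that plugin and transformation tests can compile or transform directly, which is why the tests in this patch only need to switch their include paths.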
diff --git a/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/PluginTesting.md b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/PluginTesting.md index c5920c34b1ca08..df2ef74a320204 100644 --- a/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/PluginTesting.md +++ b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/PluginTesting.md @@ -41,7 +41,7 @@ Test definitions are split into tests class declaration (see ``src/tests/functio .. note:: - Such sub-graphs or patterns for sub-graph tests should be added to ``openvino::ngraphFunctions`` library first (this library is a pre-defined set of small ``ov::Model``) and re-used in sub-graph tests after. + Such sub-graphs or patterns for sub-graph tests should be added to ``openvino::ov_models`` library first (this library is a pre-defined set of small ``ov::Model``) and re-used in sub-graph tests after. 4. **HETERO tests** (``subgraph_tests`` sub-folder) contains tests for ``HETERO`` scenario (manual or automatic affinities settings, tests for ``query_model``). diff --git a/src/bindings/c/tests/test_model_repo.hpp b/src/bindings/c/tests/test_model_repo.hpp index b6f016f27ed3c2..8be0a84528012f 100644 --- a/src/bindings/c/tests/test_model_repo.hpp +++ b/src/bindings/c/tests/test_model_repo.hpp @@ -7,11 +7,11 @@ #include #include -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/subgraph_builders.hpp" #include "openvino/core/visibility.hpp" #include "openvino/pass/manager.hpp" #include "openvino/util/file_util.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/subgraph_builders.hpp" namespace TestDataHelpers { diff --git a/src/common/low_precision_transformations/tests/CMakeLists.txt b/src/common/low_precision_transformations/tests/CMakeLists.txt index 6a3d094d4360f4..aef7ed1391d87a 100644 --- a/src/common/low_precision_transformations/tests/CMakeLists.txt +++ b/src/common/low_precision_transformations/tests/CMakeLists.txt @@ -13,7 +13,7 @@ ov_add_test_target( gtest_main openvino::runtime::dev common_test_utils - lptNgraphFunctions + ov_lpt_models gmock INCLUDES ${CMAKE_CURRENT_SOURCE_DIR} LABELS diff --git a/src/common/low_precision_transformations/tests/add_transformation.cpp b/src/common/low_precision_transformations/tests/add_transformation.cpp index c6b7272103ec37..4fb84c286acc44 100644 --- a/src/common/low_precision_transformations/tests/add_transformation.cpp +++ b/src/common/low_precision_transformations/tests/add_transformation.cpp @@ -14,8 +14,8 @@ #include "common_test_utils/ov_test_utils.hpp" #include "layer_transformation.hpp" -#include "lpt_ngraph_functions/add_function.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_lpt_models/add.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" #include "simple_low_precision_transformer.hpp" namespace { diff --git a/src/common/low_precision_transformations/tests/align_concat_quantization_parameters_transformation.cpp b/src/common/low_precision_transformations/tests/align_concat_quantization_parameters_transformation.cpp index 9e2c283340fee5..772fa303ae3d7b 100644 --- a/src/common/low_precision_transformations/tests/align_concat_quantization_parameters_transformation.cpp +++ b/src/common/low_precision_transformations/tests/align_concat_quantization_parameters_transformation.cpp @@ -15,8 +15,8 @@ #include "common_test_utils/ov_test_utils.hpp" #include "layer_transformation.hpp" -#include 
"lpt_ngraph_functions/align_concat_quantization_parameters_function.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_lpt_models/align_concat_quantization_parameters.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" #include "simple_low_precision_transformer.hpp" using namespace testing; diff --git a/src/common/low_precision_transformations/tests/assign_and_read_value_transformation.cpp b/src/common/low_precision_transformations/tests/assign_and_read_value_transformation.cpp index 7bffc814378dae..d9af66849c07c7 100644 --- a/src/common/low_precision_transformations/tests/assign_and_read_value_transformation.cpp +++ b/src/common/low_precision_transformations/tests/assign_and_read_value_transformation.cpp @@ -12,8 +12,8 @@ #include "low_precision/assign_and_read_value.hpp" #include "common_test_utils/ov_test_utils.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" -#include "lpt_ngraph_functions/assign_and_read_value_function.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" +#include "ov_lpt_models/assign_and_read_value.hpp" #include "simple_low_precision_transformer.hpp" #include "low_precision/layer_transformation.hpp" diff --git a/src/common/low_precision_transformations/tests/avg_pool_transformation.cpp b/src/common/low_precision_transformations/tests/avg_pool_transformation.cpp index 9c9aad5f7bd3db..c78a43b6b70c3f 100644 --- a/src/common/low_precision_transformations/tests/avg_pool_transformation.cpp +++ b/src/common/low_precision_transformations/tests/avg_pool_transformation.cpp @@ -13,8 +13,8 @@ #include "common_test_utils/ov_test_utils.hpp" #include "layer_transformation.hpp" -#include "lpt_ngraph_functions/avg_pool_function.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_lpt_models/avg_pool.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" #include "simple_low_precision_transformer.hpp" using namespace testing; diff --git a/src/common/low_precision_transformations/tests/avg_pool_with_child_transformation.cpp b/src/common/low_precision_transformations/tests/avg_pool_with_child_transformation.cpp index f10bc3bd8a64c9..60e27db9aab9ce 100644 --- a/src/common/low_precision_transformations/tests/avg_pool_with_child_transformation.cpp +++ b/src/common/low_precision_transformations/tests/avg_pool_with_child_transformation.cpp @@ -13,8 +13,8 @@ #include "common_test_utils/ov_test_utils.hpp" #include "layer_transformation.hpp" -#include "lpt_ngraph_functions/avg_pool_function.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_lpt_models/avg_pool.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" #include "simple_low_precision_transformer.hpp" using namespace testing; diff --git a/src/common/low_precision_transformations/tests/batch_to_space_transformation.cpp b/src/common/low_precision_transformations/tests/batch_to_space_transformation.cpp index ad9ff08a304cc1..7015ede358c775 100644 --- a/src/common/low_precision_transformations/tests/batch_to_space_transformation.cpp +++ b/src/common/low_precision_transformations/tests/batch_to_space_transformation.cpp @@ -16,8 +16,8 @@ #include "common_test_utils/ov_test_utils.hpp" #include "simple_low_precision_transformer.hpp" -#include "lpt_ngraph_functions/batch_to_space_function.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_lpt_models/batch_to_space.hpp" +#include 
"ov_lpt_models/common/dequantization_operations.hpp" using namespace testing; diff --git a/src/common/low_precision_transformations/tests/clamp_transformation.cpp b/src/common/low_precision_transformations/tests/clamp_transformation.cpp index 410a9e52953819..a46eb18096c706 100644 --- a/src/common/low_precision_transformations/tests/clamp_transformation.cpp +++ b/src/common/low_precision_transformations/tests/clamp_transformation.cpp @@ -11,8 +11,8 @@ #include "common_test_utils/ov_test_utils.hpp" #include "layer_transformation.hpp" -#include "lpt_ngraph_functions/clamp_function.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_lpt_models/clamp.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" #include "simple_low_precision_transformer.hpp" namespace { diff --git a/src/common/low_precision_transformations/tests/compose_fake_quantize_transformation.cpp b/src/common/low_precision_transformations/tests/compose_fake_quantize_transformation.cpp index e7e9686d6deb5e..be87c9954fbcc3 100644 --- a/src/common/low_precision_transformations/tests/compose_fake_quantize_transformation.cpp +++ b/src/common/low_precision_transformations/tests/compose_fake_quantize_transformation.cpp @@ -15,9 +15,9 @@ #include "low_precision/network_helper.hpp" #include "common_test_utils/ov_test_utils.hpp" -#include "lpt_ngraph_functions/compose_fake_quantize_function.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/compose_fake_quantize.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" using namespace testing; using namespace ov::pass; diff --git a/src/common/low_precision_transformations/tests/concat_selection_with_intermediate_transformation.cpp b/src/common/low_precision_transformations/tests/concat_selection_with_intermediate_transformation.cpp index 6d2f7dce814bbd..31bc46fb802dc6 100644 --- a/src/common/low_precision_transformations/tests/concat_selection_with_intermediate_transformation.cpp +++ b/src/common/low_precision_transformations/tests/concat_selection_with_intermediate_transformation.cpp @@ -17,8 +17,8 @@ #include "low_precision/max_pool.hpp" #include "common_test_utils/ov_test_utils.hpp" -#include "lpt_ngraph_functions/concat_function.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/concat.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" #include "simple_low_precision_transformer.hpp" using namespace testing; diff --git a/src/common/low_precision_transformations/tests/concat_transformation.cpp b/src/common/low_precision_transformations/tests/concat_transformation.cpp index 75d8c1c0f88f7d..0732d060f773a5 100644 --- a/src/common/low_precision_transformations/tests/concat_transformation.cpp +++ b/src/common/low_precision_transformations/tests/concat_transformation.cpp @@ -12,8 +12,8 @@ #include "low_precision/concat.hpp" #include "common_test_utils/ov_test_utils.hpp" -#include "lpt_ngraph_functions/concat_function.hpp" -#include "lpt_ngraph_functions/common/builders.hpp" +#include "ov_lpt_models/concat.hpp" +#include "ov_lpt_models/common/builders.hpp" #include "simple_low_precision_transformer.hpp" using namespace testing; diff --git a/src/common/low_precision_transformations/tests/concat_with_different_precision_on_children.cpp 
b/src/common/low_precision_transformations/tests/concat_with_different_precision_on_children.cpp index 68ad0ed11950b0..081863dd5859d7 100644 --- a/src/common/low_precision_transformations/tests/concat_with_different_precision_on_children.cpp +++ b/src/common/low_precision_transformations/tests/concat_with_different_precision_on_children.cpp @@ -16,8 +16,8 @@ #include "low_precision/max_pool.hpp" #include "common_test_utils/ov_test_utils.hpp" -#include "lpt_ngraph_functions/concat_function.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/concat.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" #include "simple_low_precision_transformer.hpp" #include "low_precision/common/quantization_granularity_restriction.hpp" diff --git a/src/common/low_precision_transformations/tests/concat_with_fq_tranformation.cpp b/src/common/low_precision_transformations/tests/concat_with_fq_tranformation.cpp index 6e06247a682d3a..63df9d73df3d2a 100644 --- a/src/common/low_precision_transformations/tests/concat_with_fq_tranformation.cpp +++ b/src/common/low_precision_transformations/tests/concat_with_fq_tranformation.cpp @@ -19,9 +19,9 @@ #include "common_test_utils/ov_test_utils.hpp" #include "layer_transformation.hpp" -#include "lpt_ngraph_functions/common/builders.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" -#include "lpt_ngraph_functions/concat_function.hpp" +#include "ov_lpt_models/common/builders.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/concat.hpp" #include "simple_low_precision_transformer.hpp" using namespace testing; diff --git a/src/common/low_precision_transformations/tests/concat_with_intermediate_precision_selection_transformation.cpp b/src/common/low_precision_transformations/tests/concat_with_intermediate_precision_selection_transformation.cpp index 6f1d635c7179e2..8d177c89343d6c 100644 --- a/src/common/low_precision_transformations/tests/concat_with_intermediate_precision_selection_transformation.cpp +++ b/src/common/low_precision_transformations/tests/concat_with_intermediate_precision_selection_transformation.cpp @@ -18,8 +18,8 @@ #include "low_precision/fake_quantize_decomposition.hpp" #include "common_test_utils/ov_test_utils.hpp" -#include "lpt_ngraph_functions/concat_function.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/concat.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" #include "simple_low_precision_transformer.hpp" using namespace testing; diff --git a/src/common/low_precision_transformations/tests/concat_with_intermediate_reshape_transformation.cpp b/src/common/low_precision_transformations/tests/concat_with_intermediate_reshape_transformation.cpp index 39a45101028bd9..e1adfd37974ffa 100644 --- a/src/common/low_precision_transformations/tests/concat_with_intermediate_reshape_transformation.cpp +++ b/src/common/low_precision_transformations/tests/concat_with_intermediate_reshape_transformation.cpp @@ -16,8 +16,8 @@ #include "low_precision/concat.hpp" #include "common_test_utils/ov_test_utils.hpp" -#include "lpt_ngraph_functions/concat_function.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/concat.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" #include "simple_low_precision_transformer.hpp" using namespace testing; diff --git a/src/common/low_precision_transformations/tests/concat_with_intermediate_transformation.cpp 
b/src/common/low_precision_transformations/tests/concat_with_intermediate_transformation.cpp index 169a4c07be5018..4e370569d7e2cc 100644 --- a/src/common/low_precision_transformations/tests/concat_with_intermediate_transformation.cpp +++ b/src/common/low_precision_transformations/tests/concat_with_intermediate_transformation.cpp @@ -17,8 +17,8 @@ #include "low_precision/max_pool.hpp" #include "common_test_utils/ov_test_utils.hpp" -#include "lpt_ngraph_functions/concat_function.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/concat.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" #include "simple_low_precision_transformer.hpp" using namespace testing; diff --git a/src/common/low_precision_transformations/tests/concat_with_intermediate_with_constant_transformation.cpp b/src/common/low_precision_transformations/tests/concat_with_intermediate_with_constant_transformation.cpp index 8bb318c78f59cd..9edd23d98f6fbe 100644 --- a/src/common/low_precision_transformations/tests/concat_with_intermediate_with_constant_transformation.cpp +++ b/src/common/low_precision_transformations/tests/concat_with_intermediate_with_constant_transformation.cpp @@ -18,8 +18,8 @@ #include "low_precision/interpolate.hpp" #include "common_test_utils/ov_test_utils.hpp" -#include "lpt_ngraph_functions/concat_function.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/concat.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" #include "simple_low_precision_transformer.hpp" using namespace testing; diff --git a/src/common/low_precision_transformations/tests/concat_with_neighbors_transformation.cpp b/src/common/low_precision_transformations/tests/concat_with_neighbors_transformation.cpp index 9387f9905566b8..4f10c33a33d9ce 100644 --- a/src/common/low_precision_transformations/tests/concat_with_neighbors_transformation.cpp +++ b/src/common/low_precision_transformations/tests/concat_with_neighbors_transformation.cpp @@ -24,8 +24,8 @@ #include "low_precision/fake_quantize_decomposition.hpp" #include "common_test_utils/ov_test_utils.hpp" -#include "lpt_ngraph_functions/concat_function.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/concat.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" #include "simple_low_precision_transformer.hpp" using namespace testing; diff --git a/src/common/low_precision_transformations/tests/concat_with_neighbors_transformation_with_convolution.cpp b/src/common/low_precision_transformations/tests/concat_with_neighbors_transformation_with_convolution.cpp index 24627e0c2a7c7b..45804187a71af8 100644 --- a/src/common/low_precision_transformations/tests/concat_with_neighbors_transformation_with_convolution.cpp +++ b/src/common/low_precision_transformations/tests/concat_with_neighbors_transformation_with_convolution.cpp @@ -14,9 +14,9 @@ #include "low_precision/fake_quantize_decomposition.hpp" #include "low_precision/max_pool.hpp" -#include "lpt_ngraph_functions/precision_propagation_function.hpp" -#include "lpt_ngraph_functions/common/builders.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/precision_propagation.hpp" +#include "ov_lpt_models/common/builders.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" #include "simple_low_precision_transformer.hpp" using namespace testing; diff --git 
a/src/common/low_precision_transformations/tests/concat_with_not_quantized_parent_transformation.cpp b/src/common/low_precision_transformations/tests/concat_with_not_quantized_parent_transformation.cpp index 7ae982f2eb2203..2982975b807d20 100644 --- a/src/common/low_precision_transformations/tests/concat_with_not_quantized_parent_transformation.cpp +++ b/src/common/low_precision_transformations/tests/concat_with_not_quantized_parent_transformation.cpp @@ -25,9 +25,9 @@ #include "low_precision/markup_quantization_granularity.hpp" #include "common_test_utils/ov_test_utils.hpp" -#include "lpt_ngraph_functions/concat_function.hpp" -#include "lpt_ngraph_functions/common/builders.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/concat.hpp" +#include "ov_lpt_models/common/builders.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" using namespace testing; using namespace ov; diff --git a/src/common/low_precision_transformations/tests/concat_with_reshape_at_the_end_transformation.cpp b/src/common/low_precision_transformations/tests/concat_with_reshape_at_the_end_transformation.cpp index 0d90390f987d66..4b52b21b665c1e 100644 --- a/src/common/low_precision_transformations/tests/concat_with_reshape_at_the_end_transformation.cpp +++ b/src/common/low_precision_transformations/tests/concat_with_reshape_at_the_end_transformation.cpp @@ -18,8 +18,8 @@ #include "low_precision/reshape.hpp" #include "common_test_utils/ov_test_utils.hpp" -#include "lpt_ngraph_functions/concat_function.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/concat.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" #include "simple_low_precision_transformer.hpp" using namespace testing; diff --git a/src/common/low_precision_transformations/tests/concat_with_split_transformation.cpp b/src/common/low_precision_transformations/tests/concat_with_split_transformation.cpp index 0249ece3df6933..4326445a3b2724 100644 --- a/src/common/low_precision_transformations/tests/concat_with_split_transformation.cpp +++ b/src/common/low_precision_transformations/tests/concat_with_split_transformation.cpp @@ -24,8 +24,8 @@ #include "low_precision/common/precisions_restriction.hpp" #include "common_test_utils/ov_test_utils.hpp" -#include "lpt_ngraph_functions/concat_function.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/concat.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" #include "simple_low_precision_transformer.hpp" diff --git a/src/common/low_precision_transformations/tests/concat_with_strided_slice_transformation.cpp b/src/common/low_precision_transformations/tests/concat_with_strided_slice_transformation.cpp index 3b49f43a555633..292357a4270434 100644 --- a/src/common/low_precision_transformations/tests/concat_with_strided_slice_transformation.cpp +++ b/src/common/low_precision_transformations/tests/concat_with_strided_slice_transformation.cpp @@ -18,8 +18,8 @@ #include "low_precision/strided_slice.hpp" #include "common_test_utils/ov_test_utils.hpp" -#include "lpt_ngraph_functions/concat_function.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/concat.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" #include "simple_low_precision_transformer.hpp" using namespace testing; diff --git a/src/common/low_precision_transformations/tests/convert_subtract_constant_transformation.cpp 
b/src/common/low_precision_transformations/tests/convert_subtract_constant_transformation.cpp index 1d56bc17d39cb3..46d9dbaff521dc 100644 --- a/src/common/low_precision_transformations/tests/convert_subtract_constant_transformation.cpp +++ b/src/common/low_precision_transformations/tests/convert_subtract_constant_transformation.cpp @@ -15,7 +15,7 @@ #include "common_test_utils/ov_test_utils.hpp" #include "simple_low_precision_transformer.hpp" -#include "lpt_ngraph_functions/fake_quantize_and_convolution_function.hpp" +#include "ov_lpt_models/fake_quantize_and_convolution.hpp" using namespace testing; using namespace ov; diff --git a/src/common/low_precision_transformations/tests/convolution_backprop_data_transformation.cpp b/src/common/low_precision_transformations/tests/convolution_backprop_data_transformation.cpp index cefd04bc163575..c2cc9f8d52feef 100644 --- a/src/common/low_precision_transformations/tests/convolution_backprop_data_transformation.cpp +++ b/src/common/low_precision_transformations/tests/convolution_backprop_data_transformation.cpp @@ -17,7 +17,7 @@ #include "common_test_utils/ov_test_utils.hpp" #include "simple_low_precision_transformer.hpp" -#include "lpt_ngraph_functions/convolution_backprop_data_function.hpp" +#include "ov_lpt_models/convolution_backprop_data.hpp" namespace { using namespace testing; diff --git a/src/common/low_precision_transformations/tests/convolution_qdq_transformation.cpp b/src/common/low_precision_transformations/tests/convolution_qdq_transformation.cpp index 0b11a020fb0d30..7abcc64a15f40d 100644 --- a/src/common/low_precision_transformations/tests/convolution_qdq_transformation.cpp +++ b/src/common/low_precision_transformations/tests/convolution_qdq_transformation.cpp @@ -16,7 +16,7 @@ #include "common_test_utils/ov_test_utils.hpp" #include "simple_low_precision_transformer.hpp" -#include "lpt_ngraph_functions/fake_quantize_and_convolution_function.hpp" +#include "ov_lpt_models/fake_quantize_and_convolution.hpp" using namespace testing; using namespace ov; diff --git a/src/common/low_precision_transformations/tests/convolution_transformation.cpp b/src/common/low_precision_transformations/tests/convolution_transformation.cpp index 03902f3f443610..87ac3856aa699a 100644 --- a/src/common/low_precision_transformations/tests/convolution_transformation.cpp +++ b/src/common/low_precision_transformations/tests/convolution_transformation.cpp @@ -16,7 +16,7 @@ #include "common_test_utils/ov_test_utils.hpp" #include "simple_low_precision_transformer.hpp" -#include "lpt_ngraph_functions/convolution_function.hpp" +#include "ov_lpt_models/convolution.hpp" using namespace testing; using namespace ov; diff --git a/src/common/low_precision_transformations/tests/convolution_with_incorrect_weights.cpp b/src/common/low_precision_transformations/tests/convolution_with_incorrect_weights.cpp index 56469c41702b13..1ca255bed8f8b7 100644 --- a/src/common/low_precision_transformations/tests/convolution_with_incorrect_weights.cpp +++ b/src/common/low_precision_transformations/tests/convolution_with_incorrect_weights.cpp @@ -14,10 +14,10 @@ #include "common_test_utils/ov_test_utils.hpp" #include "layer_transformation.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_weights.hpp" -#include "lpt_ngraph_functions/convolution_function.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" +#include 
"ov_lpt_models/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/fake_quantize_on_weights.hpp" +#include "ov_lpt_models/convolution.hpp" #include "simple_low_precision_transformer.hpp" namespace { diff --git a/src/common/low_precision_transformations/tests/depth_to_space_transformation.cpp b/src/common/low_precision_transformations/tests/depth_to_space_transformation.cpp index 3d5c0f877023b0..4330e8c4d45c3a 100644 --- a/src/common/low_precision_transformations/tests/depth_to_space_transformation.cpp +++ b/src/common/low_precision_transformations/tests/depth_to_space_transformation.cpp @@ -16,7 +16,7 @@ #include "common_test_utils/ov_test_utils.hpp" #include "simple_low_precision_transformer.hpp" -#include "lpt_ngraph_functions/depth_to_space_function.hpp" +#include "ov_lpt_models/depth_to_space.hpp" namespace { using namespace ov::pass; diff --git a/src/common/low_precision_transformations/tests/elementwise_with_multi_parent_dequantization_transformation.cpp b/src/common/low_precision_transformations/tests/elementwise_with_multi_parent_dequantization_transformation.cpp index 231ec42f968ae5..74fa55212dc20c 100644 --- a/src/common/low_precision_transformations/tests/elementwise_with_multi_parent_dequantization_transformation.cpp +++ b/src/common/low_precision_transformations/tests/elementwise_with_multi_parent_dequantization_transformation.cpp @@ -18,8 +18,8 @@ #include "simple_low_precision_transformer.hpp" #include "low_precision/add.hpp" -#include "lpt_ngraph_functions/elementwise_with_multi_parent_dequantization_function.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_lpt_models/elementwise_with_multi_parent_dequantization.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" using namespace testing; using namespace ov::pass; diff --git a/src/common/low_precision_transformations/tests/eliminate_fake_quantize_transformation.cpp b/src/common/low_precision_transformations/tests/eliminate_fake_quantize_transformation.cpp index 359143bc449cc2..c467a3aff67317 100644 --- a/src/common/low_precision_transformations/tests/eliminate_fake_quantize_transformation.cpp +++ b/src/common/low_precision_transformations/tests/eliminate_fake_quantize_transformation.cpp @@ -11,8 +11,8 @@ #include "low_precision/fake_quantize.hpp" #include "low_precision/fake_quantize_decomposition.hpp" #include "low_precision/max_pool.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" -#include "lpt_ngraph_functions/fuse_fake_quantize_function.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" +#include "ov_lpt_models/fuse_fake_quantize.hpp" #include "simple_low_precision_transformer.hpp" namespace { diff --git a/src/common/low_precision_transformations/tests/fake_quantize_and_two_output_branches_with_convolution.cpp b/src/common/low_precision_transformations/tests/fake_quantize_and_two_output_branches_with_convolution.cpp index 9347ac54519d58..8617dd04fd5aaa 100644 --- a/src/common/low_precision_transformations/tests/fake_quantize_and_two_output_branches_with_convolution.cpp +++ b/src/common/low_precision_transformations/tests/fake_quantize_and_two_output_branches_with_convolution.cpp @@ -16,10 +16,10 @@ #include "common_test_utils/ov_test_utils.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_weights.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" -#include 
"lpt_ngraph_functions/fake_quantize_and_two_output_branches_with_convolution_function.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/fake_quantize_on_weights.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" +#include "ov_lpt_models/fake_quantize_and_two_output_branches_with_convolution.hpp" #include "simple_low_precision_transformer.hpp" diff --git a/src/common/low_precision_transformations/tests/fake_quantize_on_weights_with_unsupported_child.cpp b/src/common/low_precision_transformations/tests/fake_quantize_on_weights_with_unsupported_child.cpp index db0141f21b1ac7..81e84aaf4132b2 100644 --- a/src/common/low_precision_transformations/tests/fake_quantize_on_weights_with_unsupported_child.cpp +++ b/src/common/low_precision_transformations/tests/fake_quantize_on_weights_with_unsupported_child.cpp @@ -17,7 +17,7 @@ #include "common_test_utils/ov_test_utils.hpp" #include "simple_low_precision_transformer.hpp" -#include "lpt_ngraph_functions/fake_quantize_on_weights_and_unsupported_child_function.hpp" +#include "ov_lpt_models/fake_quantize_on_weights_and_unsupported_child.hpp" using namespace testing; using namespace ov; diff --git a/src/common/low_precision_transformations/tests/fake_quantize_precision_selection_transformation.cpp b/src/common/low_precision_transformations/tests/fake_quantize_precision_selection_transformation.cpp index e600403b06721d..1d9a25cc5d87a1 100644 --- a/src/common/low_precision_transformations/tests/fake_quantize_precision_selection_transformation.cpp +++ b/src/common/low_precision_transformations/tests/fake_quantize_precision_selection_transformation.cpp @@ -16,7 +16,7 @@ #include "low_precision/max_pool.hpp" #include "common_test_utils/ov_test_utils.hpp" -#include "lpt_ngraph_functions/fake_quantize_precision_selection_function.hpp" +#include "ov_lpt_models/fake_quantize_precision_selection.hpp" #include "simple_low_precision_transformer.hpp" using namespace testing; diff --git a/src/common/low_precision_transformations/tests/fake_quantize_transformation.cpp b/src/common/low_precision_transformations/tests/fake_quantize_transformation.cpp index 896f4cad41cae3..80363c85e234b8 100644 --- a/src/common/low_precision_transformations/tests/fake_quantize_transformation.cpp +++ b/src/common/low_precision_transformations/tests/fake_quantize_transformation.cpp @@ -15,8 +15,8 @@ #include "common_test_utils/ov_test_utils.hpp" #include "layer_transformation.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" -#include "lpt_ngraph_functions/fake_quantize_function.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" +#include "ov_lpt_models/fake_quantize.hpp" #include "simple_low_precision_transformer.hpp" using namespace testing; diff --git a/src/common/low_precision_transformations/tests/fake_quantize_with_dq_not_optimal_transformation.cpp b/src/common/low_precision_transformations/tests/fake_quantize_with_dq_not_optimal_transformation.cpp index 205b48122dee60..edbadea9c6bfc4 100644 --- a/src/common/low_precision_transformations/tests/fake_quantize_with_dq_not_optimal_transformation.cpp +++ b/src/common/low_precision_transformations/tests/fake_quantize_with_dq_not_optimal_transformation.cpp @@ -17,11 +17,11 @@ #include "common_test_utils/ov_test_utils.hpp" #include "simple_low_precision_transformer.hpp" -#include "lpt_ngraph_functions/fake_quantize_and_convolution_function.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" -#include 
"lpt_ngraph_functions/common/constant.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_weights.hpp" +#include "ov_lpt_models/fake_quantize_and_convolution.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/constant.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/fake_quantize_on_weights.hpp" using namespace testing; using namespace ov; diff --git a/src/common/low_precision_transformations/tests/fold_convert_transformation.cpp b/src/common/low_precision_transformations/tests/fold_convert_transformation.cpp index bde3e6577b2535..0b01123792d20c 100644 --- a/src/common/low_precision_transformations/tests/fold_convert_transformation.cpp +++ b/src/common/low_precision_transformations/tests/fold_convert_transformation.cpp @@ -18,8 +18,8 @@ #include "simple_low_precision_transformer.hpp" #include "low_precision/fold_convert.hpp" -#include "lpt_ngraph_functions/common/builders.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/builders.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" namespace { using namespace testing; diff --git a/src/common/low_precision_transformations/tests/fold_fake_quantize_in_transformations.cpp b/src/common/low_precision_transformations/tests/fold_fake_quantize_in_transformations.cpp index 1fcb322d2c1586..91d101559c6b36 100644 --- a/src/common/low_precision_transformations/tests/fold_fake_quantize_in_transformations.cpp +++ b/src/common/low_precision_transformations/tests/fold_fake_quantize_in_transformations.cpp @@ -13,9 +13,9 @@ #include "common_test_utils/ov_test_utils.hpp" #include "layer_transformation.hpp" #include "low_precision/network_helper.hpp" -#include "lpt_ngraph_functions/common/builders.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" -#include "lpt_ngraph_functions/fold_fake_quantize_function.hpp" +#include "ov_lpt_models/common/builders.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" +#include "ov_lpt_models/fold_fake_quantize.hpp" #include "simple_low_precision_transformer.hpp" using namespace testing; diff --git a/src/common/low_precision_transformations/tests/fuse_convert_transformation.cpp b/src/common/low_precision_transformations/tests/fuse_convert_transformation.cpp index 63f16a0dd3635b..bb578fc07e9048 100644 --- a/src/common/low_precision_transformations/tests/fuse_convert_transformation.cpp +++ b/src/common/low_precision_transformations/tests/fuse_convert_transformation.cpp @@ -14,9 +14,9 @@ #include "low_precision/fuse_convert.hpp" #include "common_test_utils/ov_test_utils.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" #include "simple_low_precision_transformer.hpp" -#include "lpt_ngraph_functions/fuse_convert_function.hpp" +#include "ov_lpt_models/fuse_convert.hpp" namespace { using namespace testing; diff --git a/src/common/low_precision_transformations/tests/fuse_dequantize_to_fake_quantize_transformation.cpp b/src/common/low_precision_transformations/tests/fuse_dequantize_to_fake_quantize_transformation.cpp index 348bc65cf85a12..407ea5e0e384bb 100644 --- a/src/common/low_precision_transformations/tests/fuse_dequantize_to_fake_quantize_transformation.cpp +++ b/src/common/low_precision_transformations/tests/fuse_dequantize_to_fake_quantize_transformation.cpp @@ 
-14,10 +14,10 @@ #include "common_test_utils/ov_test_utils.hpp" #include "layer_transformation.hpp" -#include "lpt_ngraph_functions/common/add.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" -#include "lpt_ngraph_functions/fuse_fake_quantize_function.hpp" +#include "ov_lpt_models/common/add.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/fuse_fake_quantize.hpp" #include "simple_low_precision_transformer.hpp" namespace { diff --git a/src/common/low_precision_transformations/tests/fuse_fake_quantize_with_multi_inputs_transformation.cpp b/src/common/low_precision_transformations/tests/fuse_fake_quantize_with_multi_inputs_transformation.cpp index ba6ea956d6bf4d..d49a48ef73abc0 100644 --- a/src/common/low_precision_transformations/tests/fuse_fake_quantize_with_multi_inputs_transformation.cpp +++ b/src/common/low_precision_transformations/tests/fuse_fake_quantize_with_multi_inputs_transformation.cpp @@ -13,11 +13,11 @@ #include "transformations/utils/utils.hpp" #include "transformations/init_node_info.hpp" #include "low_precision/fake_quantize.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" #include "common_test_utils/ov_test_utils.hpp" -#include "lpt_ngraph_functions/fuse_fake_quantize_function.hpp" +#include "ov_lpt_models/fuse_fake_quantize.hpp" #include "simple_low_precision_transformer.hpp" diff --git a/src/common/low_precision_transformations/tests/fuse_multiply_to_fake_quantize_transformation.cpp b/src/common/low_precision_transformations/tests/fuse_multiply_to_fake_quantize_transformation.cpp index 192d27b8970875..a927acc84c04ed 100644 --- a/src/common/low_precision_transformations/tests/fuse_multiply_to_fake_quantize_transformation.cpp +++ b/src/common/low_precision_transformations/tests/fuse_multiply_to_fake_quantize_transformation.cpp @@ -10,11 +10,11 @@ #include #include "low_precision/fuse_multiply_to_fake_quantize.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" #include "common_test_utils/ov_test_utils.hpp" -#include "lpt_ngraph_functions/fuse_multiply_to_fake_quantize_function.hpp" +#include "ov_lpt_models/fuse_multiply_to_fake_quantize.hpp" #include "simple_low_precision_transformer.hpp" diff --git a/src/common/low_precision_transformations/tests/fuse_subtract_to_fake_quantize_transformation.cpp b/src/common/low_precision_transformations/tests/fuse_subtract_to_fake_quantize_transformation.cpp index 5bacc56c86158c..9896df2c58674f 100644 --- a/src/common/low_precision_transformations/tests/fuse_subtract_to_fake_quantize_transformation.cpp +++ b/src/common/low_precision_transformations/tests/fuse_subtract_to_fake_quantize_transformation.cpp @@ -10,11 +10,11 @@ #include #include "low_precision/fuse_subtract_to_fake_quantize.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" +#include 
"ov_lpt_models/common/dequantization_operations.hpp" #include "common_test_utils/ov_test_utils.hpp" -#include "lpt_ngraph_functions/fuse_subtract_to_fake_quantize_function.hpp" +#include "ov_lpt_models/fuse_subtract_to_fake_quantize.hpp" #include "simple_low_precision_transformer.hpp" diff --git a/src/common/low_precision_transformations/tests/gather_transformation.cpp b/src/common/low_precision_transformations/tests/gather_transformation.cpp index f5ac1b0359ea09..300c5215878026 100644 --- a/src/common/low_precision_transformations/tests/gather_transformation.cpp +++ b/src/common/low_precision_transformations/tests/gather_transformation.cpp @@ -13,8 +13,8 @@ #include "common_test_utils/ov_test_utils.hpp" #include "layer_transformation.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" -#include "lpt_ngraph_functions/gather_function.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" +#include "ov_lpt_models/gather.hpp" #include "simple_low_precision_transformer.hpp" namespace { diff --git a/src/common/low_precision_transformations/tests/get_dequantization_below_transformation.cpp b/src/common/low_precision_transformations/tests/get_dequantization_below_transformation.cpp index 764f14a7b9f43c..253543d4ba1e4f 100644 --- a/src/common/low_precision_transformations/tests/get_dequantization_below_transformation.cpp +++ b/src/common/low_precision_transformations/tests/get_dequantization_below_transformation.cpp @@ -14,7 +14,7 @@ #include "low_precision/fake_quantize_decomposition.hpp" #include "common_test_utils/ov_test_utils.hpp" -#include "lpt_ngraph_functions/get_dequantization_function.hpp" +#include "ov_lpt_models/get_dequantization.hpp" #include "low_precision/common/fake_quantize_dequantization.hpp" #include "low_precision/network_helper.hpp" diff --git a/src/common/low_precision_transformations/tests/get_dequantization_test.cpp b/src/common/low_precision_transformations/tests/get_dequantization_test.cpp index 8be933bd723241..02afa62dbb6797 100644 --- a/src/common/low_precision_transformations/tests/get_dequantization_test.cpp +++ b/src/common/low_precision_transformations/tests/get_dequantization_test.cpp @@ -11,8 +11,8 @@ #include "common_test_utils/ov_test_utils.hpp" #include "layer_transformation.hpp" #include "low_precision/network_helper.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" -#include "lpt_ngraph_functions/get_dequantization_function.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" +#include "ov_lpt_models/get_dequantization.hpp" namespace { using namespace testing; diff --git a/src/common/low_precision_transformations/tests/get_dequantization_transformation.cpp b/src/common/low_precision_transformations/tests/get_dequantization_transformation.cpp index 7880acd578c6fc..0d5bad38902cb4 100644 --- a/src/common/low_precision_transformations/tests/get_dequantization_transformation.cpp +++ b/src/common/low_precision_transformations/tests/get_dequantization_transformation.cpp @@ -14,7 +14,7 @@ #include "low_precision/fake_quantize_decomposition.hpp" #include "common_test_utils/ov_test_utils.hpp" -#include "lpt_ngraph_functions/get_dequantization_function.hpp" +#include "ov_lpt_models/get_dequantization.hpp" #include "low_precision/common/fake_quantize_dequantization.hpp" #include "low_precision/network_helper.hpp" diff --git a/src/common/low_precision_transformations/tests/group_convolution_transformation.cpp b/src/common/low_precision_transformations/tests/group_convolution_transformation.cpp 
index 9ab7fa04a7d2d4..ee977f566c8bb5 100644 --- a/src/common/low_precision_transformations/tests/group_convolution_transformation.cpp +++ b/src/common/low_precision_transformations/tests/group_convolution_transformation.cpp @@ -13,9 +13,9 @@ #include "common_test_utils/ov_test_utils.hpp" #include "layer_transformation.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_weights.hpp" -#include "lpt_ngraph_functions/group_convolution_function.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/fake_quantize_on_weights.hpp" +#include "ov_lpt_models/group_convolution.hpp" #include "simple_low_precision_transformer.hpp" using namespace testing; diff --git a/src/common/low_precision_transformations/tests/interpolate_transformation.cpp b/src/common/low_precision_transformations/tests/interpolate_transformation.cpp index d27eb81e767edb..112231d870af69 100644 --- a/src/common/low_precision_transformations/tests/interpolate_transformation.cpp +++ b/src/common/low_precision_transformations/tests/interpolate_transformation.cpp @@ -16,9 +16,9 @@ #include "low_precision/interpolate.hpp" #include "common_test_utils/ov_test_utils.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" #include "simple_low_precision_transformer.hpp" -#include "lpt_ngraph_functions/interpolate_function.hpp" +#include "ov_lpt_models/interpolate.hpp" using namespace testing; using namespace ov::pass; diff --git a/src/common/low_precision_transformations/tests/is_asymmetric_on_weights_dequantization.cpp b/src/common/low_precision_transformations/tests/is_asymmetric_on_weights_dequantization.cpp index 92ad788740d146..7aa5cc54009465 100644 --- a/src/common/low_precision_transformations/tests/is_asymmetric_on_weights_dequantization.cpp +++ b/src/common/low_precision_transformations/tests/is_asymmetric_on_weights_dequantization.cpp @@ -12,7 +12,7 @@ #include #include "layer_transformation.hpp" -#include "lpt_ngraph_functions/convolution_function.hpp" +#include "ov_lpt_models/convolution.hpp" using namespace testing; using namespace ov; diff --git a/src/common/low_precision_transformations/tests/is_asymmetric_on_weights_fq.cpp b/src/common/low_precision_transformations/tests/is_asymmetric_on_weights_fq.cpp index c46f7a7cd43ac1..7e5022375de4de 100644 --- a/src/common/low_precision_transformations/tests/is_asymmetric_on_weights_fq.cpp +++ b/src/common/low_precision_transformations/tests/is_asymmetric_on_weights_fq.cpp @@ -13,7 +13,7 @@ #include "transformations/utils/utils.hpp" #include "transformations/init_node_info.hpp" #include "low_precision/weightable_layer_transformation.hpp" -#include "lpt_ngraph_functions/convolution_function.hpp" +#include "ov_lpt_models/convolution.hpp" using namespace testing; using namespace ov; diff --git a/src/common/low_precision_transformations/tests/is_constant_path_transformation.cpp b/src/common/low_precision_transformations/tests/is_constant_path_transformation.cpp index 090bdffb06f5f8..7c89c647bcdcf8 100644 --- a/src/common/low_precision_transformations/tests/is_constant_path_transformation.cpp +++ b/src/common/low_precision_transformations/tests/is_constant_path_transformation.cpp @@ -5,13 +5,13 @@ #include #include -#include "ngraph_functions/subgraph_builders.hpp" +#include "ov_models/subgraph_builders.hpp" #include "low_precision/network_helper.hpp" -#include 
"lpt_ngraph_functions/common/builders.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_weights.hpp" +#include "ov_lpt_models/common/builders.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/fake_quantize_on_weights.hpp" using namespace testing; using namespace ov::pass; diff --git a/src/common/low_precision_transformations/tests/is_function_quantized_transformation.cpp b/src/common/low_precision_transformations/tests/is_function_quantized_transformation.cpp index ea6f7c7a6adac3..9d07dc94bed16a 100644 --- a/src/common/low_precision_transformations/tests/is_function_quantized_transformation.cpp +++ b/src/common/low_precision_transformations/tests/is_function_quantized_transformation.cpp @@ -11,7 +11,7 @@ #include "low_precision/low_precision.hpp" #include -#include "lpt_ngraph_functions/common/builders.hpp" +#include "ov_lpt_models/common/builders.hpp" using namespace testing; using namespace ov; diff --git a/src/common/low_precision_transformations/tests/layer_transformation.hpp b/src/common/low_precision_transformations/tests/layer_transformation.hpp index a060fc5bfd7c4a..2371000f956ca1 100644 --- a/src/common/low_precision_transformations/tests/layer_transformation.hpp +++ b/src/common/low_precision_transformations/tests/layer_transformation.hpp @@ -10,7 +10,7 @@ #include "low_precision/layer_transformation.hpp" #include "low_precision/transformation_context.hpp" #include "low_precision/network_helper.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" using namespace ov; diff --git a/src/common/low_precision_transformations/tests/lpt_avoid_shapeof_propagation_test.cpp b/src/common/low_precision_transformations/tests/lpt_avoid_shapeof_propagation_test.cpp index f2459620019351..35a80285def102 100644 --- a/src/common/low_precision_transformations/tests/lpt_avoid_shapeof_propagation_test.cpp +++ b/src/common/low_precision_transformations/tests/lpt_avoid_shapeof_propagation_test.cpp @@ -41,7 +41,7 @@ #include "low_precision/transpose.hpp" #include "low_precision/unsqueeze.hpp" #include "low_precision/variadic_split.hpp" -#include "lpt_ngraph_functions/common/builders.hpp" +#include "ov_lpt_models/common/builders.hpp" using namespace testing; using namespace ov; diff --git a/src/common/low_precision_transformations/tests/markup_avg_pool_precisions_transformation.cpp b/src/common/low_precision_transformations/tests/markup_avg_pool_precisions_transformation.cpp index 11af9c70361540..dca3fd2973ccaa 100644 --- a/src/common/low_precision_transformations/tests/markup_avg_pool_precisions_transformation.cpp +++ b/src/common/low_precision_transformations/tests/markup_avg_pool_precisions_transformation.cpp @@ -19,8 +19,8 @@ #include "transformations/utils/utils.hpp" #include "layer_transformation.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" -#include "lpt_ngraph_functions/markup_avg_pool_precisions_function.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" +#include "ov_lpt_models/markup_avg_pool_precisions.hpp" #include "simple_low_precision_transformer.hpp" using namespace testing; diff --git a/src/common/low_precision_transformations/tests/markup_bias_transformation.cpp 
b/src/common/low_precision_transformations/tests/markup_bias_transformation.cpp index 95ee3c41790f9e..fa5815a61ff4af 100644 --- a/src/common/low_precision_transformations/tests/markup_bias_transformation.cpp +++ b/src/common/low_precision_transformations/tests/markup_bias_transformation.cpp @@ -11,7 +11,7 @@ #include "common_test_utils/ov_test_utils.hpp" #include "layer_transformation.hpp" -#include "lpt_ngraph_functions/markup_bias_function.hpp" +#include "ov_lpt_models/markup_bias.hpp" #include "simple_low_precision_transformer.hpp" using namespace testing; diff --git a/src/common/low_precision_transformations/tests/mat_mul_transformation.cpp b/src/common/low_precision_transformations/tests/mat_mul_transformation.cpp index 839cf3abe2ffc9..d2f89888b85db3 100644 --- a/src/common/low_precision_transformations/tests/mat_mul_transformation.cpp +++ b/src/common/low_precision_transformations/tests/mat_mul_transformation.cpp @@ -13,9 +13,9 @@ #include "common_test_utils/ov_test_utils.hpp" #include "layer_transformation.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" -#include "lpt_ngraph_functions/mat_mul_function.hpp" -#include "ngraph_functions/subgraph_builders.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" +#include "ov_lpt_models/mat_mul.hpp" +#include "ov_models/subgraph_builders.hpp" #include "simple_low_precision_transformer.hpp" namespace { diff --git a/src/common/low_precision_transformations/tests/mat_mul_with_constant_transformation.cpp b/src/common/low_precision_transformations/tests/mat_mul_with_constant_transformation.cpp index 142c0cf8862e7f..9b471fcc40a129 100644 --- a/src/common/low_precision_transformations/tests/mat_mul_with_constant_transformation.cpp +++ b/src/common/low_precision_transformations/tests/mat_mul_with_constant_transformation.cpp @@ -12,10 +12,10 @@ #include "common_test_utils/ov_test_utils.hpp" #include "layer_transformation.hpp" -#include "lpt_ngraph_functions/common/constant.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" -#include "lpt_ngraph_functions/mat_mul_function.hpp" -#include "ngraph_functions/subgraph_builders.hpp" +#include "ov_lpt_models/common/constant.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" +#include "ov_lpt_models/mat_mul.hpp" +#include "ov_models/subgraph_builders.hpp" #include "simple_low_precision_transformer.hpp" namespace { diff --git a/src/common/low_precision_transformations/tests/max_pool_transformation.cpp b/src/common/low_precision_transformations/tests/max_pool_transformation.cpp index ca69859e232327..bc946f012aeace 100644 --- a/src/common/low_precision_transformations/tests/max_pool_transformation.cpp +++ b/src/common/low_precision_transformations/tests/max_pool_transformation.cpp @@ -15,8 +15,8 @@ #include "common_test_utils/ov_test_utils.hpp" #include "simple_low_precision_transformer.hpp" -#include "lpt_ngraph_functions/max_pool_function.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_lpt_models/max_pool.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" using namespace testing; diff --git a/src/common/low_precision_transformations/tests/move_dequantization_after_transformation.cpp b/src/common/low_precision_transformations/tests/move_dequantization_after_transformation.cpp index fe45e0480df6f8..fff60b05816324 100644 --- a/src/common/low_precision_transformations/tests/move_dequantization_after_transformation.cpp +++ 
b/src/common/low_precision_transformations/tests/move_dequantization_after_transformation.cpp @@ -16,8 +16,8 @@ #include "low_precision/network_helper.hpp" #include "common_test_utils/ov_test_utils.hpp" -#include "lpt_ngraph_functions/move_dequantization_after_function.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_lpt_models/move_dequantization_after.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" using namespace testing; using namespace ov::pass; diff --git a/src/common/low_precision_transformations/tests/move_fake_quantize_transformation.cpp b/src/common/low_precision_transformations/tests/move_fake_quantize_transformation.cpp index 2a71b357f70634..d2e276f0e5e51c 100644 --- a/src/common/low_precision_transformations/tests/move_fake_quantize_transformation.cpp +++ b/src/common/low_precision_transformations/tests/move_fake_quantize_transformation.cpp @@ -18,10 +18,10 @@ #include "common_test_utils/ov_test_utils.hpp" #include "layer_transformation.hpp" #include "low_precision/move_fake_quantize.hpp" -#include "lpt_ngraph_functions/common/builders.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" -#include "lpt_ngraph_functions/move_fake_quantize_function.hpp" -#include "lpt_ngraph_functions/relu_function.hpp" +#include "ov_lpt_models/common/builders.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/move_fake_quantize.hpp" +#include "ov_lpt_models/relu.hpp" #include "simple_low_precision_transformer.hpp" using namespace testing; diff --git a/src/common/low_precision_transformations/tests/multiply_partial_transformation.cpp b/src/common/low_precision_transformations/tests/multiply_partial_transformation.cpp index 1e556df70bc31b..58084d7688e7a7 100644 --- a/src/common/low_precision_transformations/tests/multiply_partial_transformation.cpp +++ b/src/common/low_precision_transformations/tests/multiply_partial_transformation.cpp @@ -14,11 +14,11 @@ #include #include #include "low_precision/multiply_partial.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" #include "common_test_utils/ov_test_utils.hpp" #include "simple_low_precision_transformer.hpp" -#include "lpt_ngraph_functions/multiply_partial_function.hpp" +#include "ov_lpt_models/multiply_partial_function.hpp" namespace { using namespace testing; diff --git a/src/common/low_precision_transformations/tests/multiply_to_group_convolution_transformation.cpp b/src/common/low_precision_transformations/tests/multiply_to_group_convolution_transformation.cpp index 606cdea804185c..2a7215c8454755 100644 --- a/src/common/low_precision_transformations/tests/multiply_to_group_convolution_transformation.cpp +++ b/src/common/low_precision_transformations/tests/multiply_to_group_convolution_transformation.cpp @@ -15,9 +15,9 @@ #include "low_precision/multiply_to_group_convolution.hpp" #include "common_test_utils/ov_test_utils.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" #include "simple_low_precision_transformer.hpp" -#include "lpt_ngraph_functions/multiply_to_group_convolution_function.hpp" +#include "ov_lpt_models/multiply_to_group_convolution.hpp" using namespace testing; using namespace ov; diff --git a/src/common/low_precision_transformations/tests/multiply_transformation.cpp b/src/common/low_precision_transformations/tests/multiply_transformation.cpp 
index 3ea4563f62ba62..5fe7dec67e146f 100644 --- a/src/common/low_precision_transformations/tests/multiply_transformation.cpp +++ b/src/common/low_precision_transformations/tests/multiply_transformation.cpp @@ -7,19 +7,17 @@ #include #include #include - #include - #include + #include "transformations/utils/utils.hpp" #include "transformations/init_node_info.hpp" #include "low_precision/multiply.hpp" #include "low_precision/multiply_to_group_convolution.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" - +#include "ov_lpt_models/common/dequantization_operations.hpp" #include "common_test_utils/ov_test_utils.hpp" #include "simple_low_precision_transformer.hpp" -#include "lpt_ngraph_functions/multiply_function.hpp" +#include "ov_lpt_models/multiply.hpp" namespace { using namespace testing; diff --git a/src/common/low_precision_transformations/tests/mvn_transformation.cpp b/src/common/low_precision_transformations/tests/mvn_transformation.cpp index 61813994435ceb..1dbafbb445d10c 100644 --- a/src/common/low_precision_transformations/tests/mvn_transformation.cpp +++ b/src/common/low_precision_transformations/tests/mvn_transformation.cpp @@ -15,9 +15,9 @@ #include "low_precision/mvn.hpp" #include "common_test_utils/ov_test_utils.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" #include "simple_low_precision_transformer.hpp" -#include "lpt_ngraph_functions/mvn_function.hpp" +#include "ov_lpt_models/mvn.hpp" namespace { using namespace testing; diff --git a/src/common/low_precision_transformations/tests/normalize_dequantization_transformation.cpp b/src/common/low_precision_transformations/tests/normalize_dequantization_transformation.cpp index 9afc867971a65d..3c14e9a5728e97 100644 --- a/src/common/low_precision_transformations/tests/normalize_dequantization_transformation.cpp +++ b/src/common/low_precision_transformations/tests/normalize_dequantization_transformation.cpp @@ -16,7 +16,7 @@ #include "common_test_utils/ov_test_utils.hpp" #include "simple_low_precision_transformer.hpp" -#include "lpt_ngraph_functions/normalize_dequantization_function.hpp" +#include "ov_lpt_models/normalize_dequantization.hpp" using namespace testing; using namespace ov::pass; diff --git a/src/common/low_precision_transformations/tests/normalize_l2_transformation.cpp b/src/common/low_precision_transformations/tests/normalize_l2_transformation.cpp index f4e429ca44557a..78a2fb21e6a111 100644 --- a/src/common/low_precision_transformations/tests/normalize_l2_transformation.cpp +++ b/src/common/low_precision_transformations/tests/normalize_l2_transformation.cpp @@ -16,8 +16,8 @@ #include "common_test_utils/ov_test_utils.hpp" -#include "lpt_ngraph_functions/normalize_l2_function.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_lpt_models/normalize_l2.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" namespace { using namespace testing; diff --git a/src/common/low_precision_transformations/tests/pad_transformation.cpp b/src/common/low_precision_transformations/tests/pad_transformation.cpp index 3de1482771cea3..15de9d76d8564d 100644 --- a/src/common/low_precision_transformations/tests/pad_transformation.cpp +++ b/src/common/low_precision_transformations/tests/pad_transformation.cpp @@ -12,8 +12,8 @@ #include "low_precision/pad.hpp" #include "common_test_utils/ov_test_utils.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" -#include 
"lpt_ngraph_functions/pad_function.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" +#include "ov_lpt_models/pad.hpp" #include "simple_low_precision_transformer.hpp" namespace { diff --git a/src/common/low_precision_transformations/tests/prelu_transformation.cpp b/src/common/low_precision_transformations/tests/prelu_transformation.cpp index 2315b94e269fdb..efe9b1c06c7e24 100644 --- a/src/common/low_precision_transformations/tests/prelu_transformation.cpp +++ b/src/common/low_precision_transformations/tests/prelu_transformation.cpp @@ -13,8 +13,8 @@ #include "common_test_utils/ov_test_utils.hpp" #include "layer_transformation.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" -#include "lpt_ngraph_functions/prelu_function.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" +#include "ov_lpt_models/prelu.hpp" #include "simple_low_precision_transformer.hpp" namespace { diff --git a/src/common/low_precision_transformations/tests/pull_reshape_through_dequantization_transformation.cpp b/src/common/low_precision_transformations/tests/pull_reshape_through_dequantization_transformation.cpp index efc757c75575c6..60760ac35f759b 100644 --- a/src/common/low_precision_transformations/tests/pull_reshape_through_dequantization_transformation.cpp +++ b/src/common/low_precision_transformations/tests/pull_reshape_through_dequantization_transformation.cpp @@ -15,7 +15,7 @@ #include "common_test_utils/ov_test_utils.hpp" #include "layer_transformation.hpp" -#include "lpt_ngraph_functions/fake_quantize_and_convolution_function.hpp" +#include "ov_lpt_models/fake_quantize_and_convolution.hpp" #include "simple_low_precision_transformer.hpp" using namespace testing; diff --git a/src/common/low_precision_transformations/tests/pull_transpose_through_dequantization_transformation.cpp b/src/common/low_precision_transformations/tests/pull_transpose_through_dequantization_transformation.cpp index 722023f7b15ad5..ec7f9fbd2655b8 100644 --- a/src/common/low_precision_transformations/tests/pull_transpose_through_dequantization_transformation.cpp +++ b/src/common/low_precision_transformations/tests/pull_transpose_through_dequantization_transformation.cpp @@ -15,7 +15,7 @@ #include "common_test_utils/ov_test_utils.hpp" #include "layer_transformation.hpp" -#include "lpt_ngraph_functions/fake_quantize_and_convolution_function.hpp" +#include "ov_lpt_models/fake_quantize_and_convolution.hpp" #include "simple_low_precision_transformer.hpp" using namespace testing; diff --git a/src/common/low_precision_transformations/tests/quantization_granularity_restriction_test.cpp b/src/common/low_precision_transformations/tests/quantization_granularity_restriction_test.cpp index 70ac76d08c43e2..4993e7f88f6720 100644 --- a/src/common/low_precision_transformations/tests/quantization_granularity_restriction_test.cpp +++ b/src/common/low_precision_transformations/tests/quantization_granularity_restriction_test.cpp @@ -15,7 +15,7 @@ #include "low_precision/markup_quantization_granularity.hpp" #include "common_test_utils/ov_test_utils.hpp" -#include "lpt_ngraph_functions/convolution_function.hpp" +#include "ov_lpt_models/convolution.hpp" using namespace testing; using namespace ov; diff --git a/src/common/low_precision_transformations/tests/recurrent_cell_transformation.cpp b/src/common/low_precision_transformations/tests/recurrent_cell_transformation.cpp index d225a7b0bc9a50..aebb4bb623bf97 100644 --- a/src/common/low_precision_transformations/tests/recurrent_cell_transformation.cpp +++ 
b/src/common/low_precision_transformations/tests/recurrent_cell_transformation.cpp @@ -19,9 +19,9 @@ #include "common_test_utils/ov_test_utils.hpp" #include "layer_transformation.hpp" -#include "lpt_ngraph_functions/common/builders.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" -#include "lpt_ngraph_functions/recurrent_cell_function.hpp" +#include "ov_lpt_models/common/builders.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/recurrent_cell.hpp" #include "simple_low_precision_transformer.hpp" #include "openvino/opsets/opset5.hpp" diff --git a/src/common/low_precision_transformations/tests/reduce_max_transformation.cpp b/src/common/low_precision_transformations/tests/reduce_max_transformation.cpp index 5dcbdb88a3565b..47b6083efa6b3c 100644 --- a/src/common/low_precision_transformations/tests/reduce_max_transformation.cpp +++ b/src/common/low_precision_transformations/tests/reduce_max_transformation.cpp @@ -17,9 +17,9 @@ #include "simple_low_precision_transformer.hpp" #include "low_precision/reduce_max.hpp" -#include "lpt_ngraph_functions/reduce_function.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" -#include "lpt_ngraph_functions/common/constant.hpp" +#include "ov_lpt_models/reduce.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/constant.hpp" namespace { using namespace testing; diff --git a/src/common/low_precision_transformations/tests/reduce_mean_transformation.cpp b/src/common/low_precision_transformations/tests/reduce_mean_transformation.cpp index a1443c3e26bacd..aecd438a60a110 100644 --- a/src/common/low_precision_transformations/tests/reduce_mean_transformation.cpp +++ b/src/common/low_precision_transformations/tests/reduce_mean_transformation.cpp @@ -17,9 +17,9 @@ #include "simple_low_precision_transformer.hpp" #include "low_precision/reduce_mean.hpp" -#include "lpt_ngraph_functions/reduce_function.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" -#include "lpt_ngraph_functions/common/constant.hpp" +#include "ov_lpt_models/reduce.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/constant.hpp" namespace { using namespace testing; diff --git a/src/common/low_precision_transformations/tests/reduce_min_transformation.cpp b/src/common/low_precision_transformations/tests/reduce_min_transformation.cpp index f31305e96f7e14..7dba2740ab2de5 100644 --- a/src/common/low_precision_transformations/tests/reduce_min_transformation.cpp +++ b/src/common/low_precision_transformations/tests/reduce_min_transformation.cpp @@ -17,9 +17,9 @@ #include "simple_low_precision_transformer.hpp" #include "low_precision/reduce_min.hpp" -#include "lpt_ngraph_functions/reduce_function.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" -#include "lpt_ngraph_functions/common/constant.hpp" +#include "ov_lpt_models/reduce.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/constant.hpp" namespace { using namespace testing; diff --git a/src/common/low_precision_transformations/tests/reduce_sum_transformation.cpp b/src/common/low_precision_transformations/tests/reduce_sum_transformation.cpp index cfe745016b79ce..9366fe8ace37e3 100644 --- a/src/common/low_precision_transformations/tests/reduce_sum_transformation.cpp +++ b/src/common/low_precision_transformations/tests/reduce_sum_transformation.cpp @@ -17,9 +17,9 @@ #include 
"simple_low_precision_transformer.hpp" #include "low_precision/reduce_sum.hpp" -#include "lpt_ngraph_functions/reduce_function.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" -#include "lpt_ngraph_functions/common/constant.hpp" +#include "ov_lpt_models/reduce.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/constant.hpp" namespace { using namespace testing; diff --git a/src/common/low_precision_transformations/tests/reduce_transformation.hpp b/src/common/low_precision_transformations/tests/reduce_transformation.hpp index f81c06e9d19735..da866ee328a767 100644 --- a/src/common/low_precision_transformations/tests/reduce_transformation.hpp +++ b/src/common/low_precision_transformations/tests/reduce_transformation.hpp @@ -14,9 +14,9 @@ #include "transformations/utils/utils.hpp" #include "common_test_utils/ov_test_utils.hpp" -#include "lpt_ngraph_functions/reduce_function.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" -#include "lpt_ngraph_functions/common/constant.hpp" +#include "ov_lpt_models/reduce.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/constant.hpp" using namespace testing; using namespace ov; diff --git a/src/common/low_precision_transformations/tests/relu_transformation.cpp b/src/common/low_precision_transformations/tests/relu_transformation.cpp index 79645125093d54..c300e5fd25c84d 100644 --- a/src/common/low_precision_transformations/tests/relu_transformation.cpp +++ b/src/common/low_precision_transformations/tests/relu_transformation.cpp @@ -15,8 +15,8 @@ #include "low_precision/relu.hpp" #include "common_test_utils/ov_test_utils.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" -#include "lpt_ngraph_functions/relu_function.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" +#include "ov_lpt_models/relu.hpp" #include "simple_low_precision_transformer.hpp" namespace { diff --git a/src/common/low_precision_transformations/tests/reshape_transformation.cpp b/src/common/low_precision_transformations/tests/reshape_transformation.cpp index 5224d9e4fd09ed..6e5a2ca3de3494 100644 --- a/src/common/low_precision_transformations/tests/reshape_transformation.cpp +++ b/src/common/low_precision_transformations/tests/reshape_transformation.cpp @@ -13,8 +13,8 @@ #include "common_test_utils/ov_test_utils.hpp" #include "layer_transformation.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" -#include "lpt_ngraph_functions/reshape_function.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" +#include "ov_lpt_models/reshape.hpp" #include "simple_low_precision_transformer.hpp" namespace { diff --git a/src/common/low_precision_transformations/tests/round_transformation.cpp b/src/common/low_precision_transformations/tests/round_transformation.cpp index ec72c7d5d409a3..49467d1b3d54a9 100644 --- a/src/common/low_precision_transformations/tests/round_transformation.cpp +++ b/src/common/low_precision_transformations/tests/round_transformation.cpp @@ -10,8 +10,8 @@ #include "common_test_utils/ov_test_utils.hpp" #include "layer_transformation.hpp" #include "low_precision/network_helper.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" -#include "lpt_ngraph_functions/round_function.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" +#include "ov_lpt_models/round.hpp" namespace { using namespace testing; diff --git 
a/src/common/low_precision_transformations/tests/separate_in_standalone_branch_transformation.cpp b/src/common/low_precision_transformations/tests/separate_in_standalone_branch_transformation.cpp index 16190cd958449b..a530f76ecef052 100644 --- a/src/common/low_precision_transformations/tests/separate_in_standalone_branch_transformation.cpp +++ b/src/common/low_precision_transformations/tests/separate_in_standalone_branch_transformation.cpp @@ -15,13 +15,13 @@ #include "low_precision/mat_mul.hpp" #include "common_test_utils/ov_test_utils.hpp" -#include "lpt_ngraph_functions/common/builders.hpp" -#include "lpt_ngraph_functions/mat_mul_function.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/builders.hpp" +#include "ov_lpt_models/mat_mul.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" -#include "ngraph_functions/subgraph_builders.hpp" +#include "ov_models/subgraph_builders.hpp" #include "simple_low_precision_transformer.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" namespace { diff --git a/src/common/low_precision_transformations/tests/shuffle_channels_transformation.cpp b/src/common/low_precision_transformations/tests/shuffle_channels_transformation.cpp index 285d84cac3024e..afa181ad62b8f7 100644 --- a/src/common/low_precision_transformations/tests/shuffle_channels_transformation.cpp +++ b/src/common/low_precision_transformations/tests/shuffle_channels_transformation.cpp @@ -14,8 +14,8 @@ #include "common_test_utils/ov_test_utils.hpp" #include "simple_low_precision_transformer.hpp" -#include "lpt_ngraph_functions/shuffle_channels_function.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_lpt_models/shuffle_channels.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" namespace { using namespace testing; diff --git a/src/common/low_precision_transformations/tests/space_to_batch_transformation.cpp b/src/common/low_precision_transformations/tests/space_to_batch_transformation.cpp index 602c034d51aa4f..ed81d3b46dc9e3 100644 --- a/src/common/low_precision_transformations/tests/space_to_batch_transformation.cpp +++ b/src/common/low_precision_transformations/tests/space_to_batch_transformation.cpp @@ -16,8 +16,8 @@ #include "common_test_utils/ov_test_utils.hpp" #include "simple_low_precision_transformer.hpp" -#include "lpt_ngraph_functions/space_to_batch_function.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_lpt_models/space_to_batch.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" using namespace testing; diff --git a/src/common/low_precision_transformations/tests/split_transformation.cpp b/src/common/low_precision_transformations/tests/split_transformation.cpp index 39ac426d204f6a..4646cc78318fdf 100644 --- a/src/common/low_precision_transformations/tests/split_transformation.cpp +++ b/src/common/low_precision_transformations/tests/split_transformation.cpp @@ -11,8 +11,8 @@ #include "common_test_utils/ov_test_utils.hpp" #include "layer_transformation.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" -#include "lpt_ngraph_functions/split_function.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" +#include "ov_lpt_models/split.hpp" #include "simple_low_precision_transformer.hpp" namespace { diff --git 
a/src/common/low_precision_transformations/tests/squeeze_transformation.cpp b/src/common/low_precision_transformations/tests/squeeze_transformation.cpp index 91f0bff5381a5d..9d47afb9d72540 100644 --- a/src/common/low_precision_transformations/tests/squeeze_transformation.cpp +++ b/src/common/low_precision_transformations/tests/squeeze_transformation.cpp @@ -15,7 +15,7 @@ #include "common_test_utils/ov_test_utils.hpp" #include "simple_low_precision_transformer.hpp" -#include "lpt_ngraph_functions/squeeze_function.hpp" +#include "ov_lpt_models/squeeze.hpp" namespace { using namespace testing; diff --git a/src/common/low_precision_transformations/tests/strided_slice_transformation.cpp b/src/common/low_precision_transformations/tests/strided_slice_transformation.cpp index c91987fd37d36b..425e2d52f3e218 100644 --- a/src/common/low_precision_transformations/tests/strided_slice_transformation.cpp +++ b/src/common/low_precision_transformations/tests/strided_slice_transformation.cpp @@ -17,8 +17,8 @@ #include "simple_low_precision_transformer.hpp" #include "low_precision/strided_slice.hpp" -#include "lpt_ngraph_functions/strided_slice_function.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_lpt_models/strided_slice.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" namespace { using namespace testing; diff --git a/src/common/low_precision_transformations/tests/subgraph/src/fq_decomposition_with_shared_constants.cpp b/src/common/low_precision_transformations/tests/subgraph/src/fq_decomposition_with_shared_constants.cpp index a2f590542e2943..8856566de29462 100644 --- a/src/common/low_precision_transformations/tests/subgraph/src/fq_decomposition_with_shared_constants.cpp +++ b/src/common/low_precision_transformations/tests/subgraph/src/fq_decomposition_with_shared_constants.cpp @@ -13,9 +13,9 @@ #include "common_test_utils/ov_test_utils.hpp" #include "layer_transformation.hpp" -#include "lpt_ngraph_functions/common/builders.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/builders.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" #include "simple_low_precision_transformer.hpp" using namespace testing; diff --git a/src/common/low_precision_transformations/tests/transformations_after_split_transformation.cpp b/src/common/low_precision_transformations/tests/transformations_after_split_transformation.cpp index 4f1a6b3e68eb14..43e214d52f0e2d 100644 --- a/src/common/low_precision_transformations/tests/transformations_after_split_transformation.cpp +++ b/src/common/low_precision_transformations/tests/transformations_after_split_transformation.cpp @@ -39,7 +39,7 @@ #include "low_precision/fuse_multiply_to_fake_quantize.hpp" #include "low_precision/multiply_to_group_convolution.hpp" -#include "lpt_ngraph_functions/transformations_after_split_function.hpp" +#include "ov_lpt_models/transformations_after_split.hpp" #include "common_test_utils/ov_test_utils.hpp" #include "simple_low_precision_transformer.hpp" diff --git a/src/common/low_precision_transformations/tests/transformer_is_function_quantized.cpp b/src/common/low_precision_transformations/tests/transformer_is_function_quantized.cpp index 940a89c027f69d..a71f50afb68468 100644 --- a/src/common/low_precision_transformations/tests/transformer_is_function_quantized.cpp +++ 
b/src/common/low_precision_transformations/tests/transformer_is_function_quantized.cpp @@ -14,9 +14,9 @@ #include "low_precision/low_precision.hpp" #include "common_test_utils/ov_test_utils.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_weights.hpp" -#include "lpt_ngraph_functions/convolution_function.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/fake_quantize_on_weights.hpp" +#include "ov_lpt_models/convolution.hpp" #include "simple_low_precision_transformer.hpp" using namespace testing; diff --git a/src/common/low_precision_transformations/tests/transpose_transformation.cpp b/src/common/low_precision_transformations/tests/transpose_transformation.cpp index 05f6e25facb56c..0500cd7671877b 100644 --- a/src/common/low_precision_transformations/tests/transpose_transformation.cpp +++ b/src/common/low_precision_transformations/tests/transpose_transformation.cpp @@ -13,8 +13,8 @@ #include "common_test_utils/ov_test_utils.hpp" #include "layer_transformation.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" -#include "lpt_ngraph_functions/transpose_function.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" +#include "ov_lpt_models/transpose.hpp" #include "simple_low_precision_transformer.hpp" namespace { diff --git a/src/common/low_precision_transformations/tests/unit/data_precision_check.cpp b/src/common/low_precision_transformations/tests/unit/data_precision_check.cpp index a6ca04227d792b..cc027d47fff9f4 100644 --- a/src/common/low_precision_transformations/tests/unit/data_precision_check.cpp +++ b/src/common/low_precision_transformations/tests/unit/data_precision_check.cpp @@ -7,7 +7,7 @@ #include #include "low_precision/layer_transformation.hpp" #include "low_precision/network_helper.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" using namespace ov; diff --git a/src/common/low_precision_transformations/tests/unit/layer_transformation_get_data_precision.cpp b/src/common/low_precision_transformations/tests/unit/layer_transformation_get_data_precision.cpp index b7082782a30ce9..d2eef5dc1e0ae7 100644 --- a/src/common/low_precision_transformations/tests/unit/layer_transformation_get_data_precision.cpp +++ b/src/common/low_precision_transformations/tests/unit/layer_transformation_get_data_precision.cpp @@ -7,7 +7,7 @@ #include #include "low_precision/layer_transformation.hpp" #include "low_precision/network_helper.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" using namespace ov; diff --git a/src/common/low_precision_transformations/tests/unsqueeze_transformation.cpp b/src/common/low_precision_transformations/tests/unsqueeze_transformation.cpp index ea074dbea0c92d..322ac3fc883304 100644 --- a/src/common/low_precision_transformations/tests/unsqueeze_transformation.cpp +++ b/src/common/low_precision_transformations/tests/unsqueeze_transformation.cpp @@ -15,7 +15,7 @@ #include "common_test_utils/ov_test_utils.hpp" #include "simple_low_precision_transformer.hpp" -#include "lpt_ngraph_functions/unsqueeze_function.hpp" +#include "ov_lpt_models/unsqueeze.hpp" namespace { using namespace testing; diff --git a/src/common/low_precision_transformations/tests/variadic_split_transformation.cpp b/src/common/low_precision_transformations/tests/variadic_split_transformation.cpp index 1d8b1badfb5a22..62120530105748 100644 --- 
a/src/common/low_precision_transformations/tests/variadic_split_transformation.cpp +++ b/src/common/low_precision_transformations/tests/variadic_split_transformation.cpp @@ -11,8 +11,8 @@ #include "common_test_utils/ov_test_utils.hpp" #include "layer_transformation.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" -#include "lpt_ngraph_functions/variadic_split_function.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" +#include "ov_lpt_models/variadic_split.hpp" #include "simple_low_precision_transformer.hpp" namespace { diff --git a/src/common/snippets/tests/CMakeLists.txt b/src/common/snippets/tests/CMakeLists.txt index 64e99a3642ef51..350b87e583f3e0 100644 --- a/src/common/snippets/tests/CMakeLists.txt +++ b/src/common/snippets/tests/CMakeLists.txt @@ -13,7 +13,7 @@ addIeTargetTest( LINK_LIBRARIES openvino::runtime::dev common_test_utils - snippetsNgraphFunctions + ov_snippets_models ADD_CPPLINT LABELS IE OV SNIPPETS @@ -30,4 +30,4 @@ ov_build_target_faster(${TARGET_NAME} add_library(snippets_test_utils STATIC ${CMAKE_CURRENT_SOURCE_DIR}/include/lowering_utils.hpp ${CMAKE_CURRENT_SOURCE_DIR}/src/lowering_utils.cpp) target_include_directories(snippets_test_utils PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/include) -target_link_libraries(snippets_test_utils PRIVATE common_test_utils snippetsNgraphFunctions) +target_link_libraries(snippets_test_utils PRIVATE common_test_utils ov_snippets_models) diff --git a/src/common/snippets/tests/src/pass/fake_quantize_decomposition_test.cpp b/src/common/snippets/tests/src/pass/fake_quantize_decomposition_test.cpp index 05e3c2586a69c0..a075a598299234 100644 --- a/src/common/snippets/tests/src/pass/fake_quantize_decomposition_test.cpp +++ b/src/common/snippets/tests/src/pass/fake_quantize_decomposition_test.cpp @@ -7,7 +7,7 @@ #include "common_test_utils/ov_test_utils.hpp" #include "snippets/pass/common_optimizations.hpp" #include "snippets/op/subgraph.hpp" -#include "fake_quantize_function.hpp" +#include "fake_quantize_helper.hpp" #include "function_helper.hpp" namespace ov { diff --git a/src/common/snippets/tests/src/pass/precision_propagation.cpp b/src/common/snippets/tests/src/pass/precision_propagation.cpp index 142c5561f7c8c5..aa75a0fd9ec828 100644 --- a/src/common/snippets/tests/src/pass/precision_propagation.cpp +++ b/src/common/snippets/tests/src/pass/precision_propagation.cpp @@ -10,7 +10,7 @@ #include "snippets/pass/propagate_precision.hpp" #include "snippets/op/convert_saturation.hpp" #include "common_test_utils/common_utils.hpp" -#include "precision_propagation_function.hpp" +#include "precision_propagation.hpp" namespace ov { namespace test { diff --git a/src/common/transformations/tests/CMakeLists.txt b/src/common/transformations/tests/CMakeLists.txt index b66bcd9314c1f1..296a198a666f1a 100644 --- a/src/common/transformations/tests/CMakeLists.txt +++ b/src/common/transformations/tests/CMakeLists.txt @@ -17,7 +17,7 @@ ov_add_test_target( func_test_utils offline_transformations sharedTestClasses - lptNgraphFunctions + ov_lpt_models ADD_CLANG_FORMAT INCLUDES $/src diff --git a/src/common/transformations/tests/common_optimizations/dimension_tracking.cpp b/src/common/transformations/tests/common_optimizations/dimension_tracking.cpp index 31967c71ac76a4..88f6a1c303f951 100644 --- a/src/common/transformations/tests/common_optimizations/dimension_tracking.cpp +++ b/src/common/transformations/tests/common_optimizations/dimension_tracking.cpp @@ -11,11 +11,11 @@ #include #include 
"common_test_utils/ov_test_utils.hpp" -#include "ngraph_functions/subgraph_builders.hpp" #include "openvino/core/dimension_tracker.hpp" #include "openvino/core/model.hpp" #include "openvino/opsets/opset1.hpp" #include "openvino/pass/manager.hpp" +#include "ov_models/subgraph_builders.hpp" #include "transformations/common_optimizations/divide_fusion.hpp" #include "transformations/init_node_info.hpp" #include "transformations/utils/utils.hpp" diff --git a/src/common/transformations/tests/common_optimizations/eliminate_unsqueeze_gather.cpp b/src/common/transformations/tests/common_optimizations/eliminate_unsqueeze_gather.cpp index de0657c6bce788..92cbfcdb686359 100644 --- a/src/common/transformations/tests/common_optimizations/eliminate_unsqueeze_gather.cpp +++ b/src/common/transformations/tests/common_optimizations/eliminate_unsqueeze_gather.cpp @@ -6,9 +6,9 @@ #include "common_test_utils/ov_test_utils.hpp" #include "common_test_utils/test_common.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" #include "openvino/op/parameter.hpp" #include "openvino/pass/manager.hpp" +#include "ov_models/utils/ov_helpers.hpp" #include "transformations/common_optimizations/shared_ops_optimization.hpp" #include "transformations/common_optimizations/simplify_shape_of_sub_graph.hpp" diff --git a/src/common/transformations/tests/common_optimizations/nop_elimination.cpp b/src/common/transformations/tests/common_optimizations/nop_elimination.cpp index 0513db08d82839..1fd58d3a6a636b 100644 --- a/src/common/transformations/tests/common_optimizations/nop_elimination.cpp +++ b/src/common/transformations/tests/common_optimizations/nop_elimination.cpp @@ -14,14 +14,14 @@ #include "common_test_utils/common_utils.hpp" #include "common_test_utils/ov_test_utils.hpp" #include "common_test_utils/test_common.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" #include "openvino/core/model.hpp" #include "openvino/opsets/opset1.hpp" #include "openvino/opsets/opset10.hpp" #include "openvino/opsets/opset9.hpp" #include "openvino/pass/constant_folding.hpp" #include "openvino/pass/manager.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" #include "transformations/init_node_info.hpp" #include "transformations/rt_info/fused_names_attribute.hpp" #include "transformations/utils/utils.hpp" diff --git a/src/common/transformations/tests/common_optimizations/preprocessing_fusion_tests.cpp b/src/common/transformations/tests/common_optimizations/preprocessing_fusion_tests.cpp index 20801c2a182704..b5e4c05af96465 100644 --- a/src/common/transformations/tests/common_optimizations/preprocessing_fusion_tests.cpp +++ b/src/common/transformations/tests/common_optimizations/preprocessing_fusion_tests.cpp @@ -8,13 +8,13 @@ #include #include "common_test_utils/ov_test_utils.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" #include "openvino/core/model.hpp" #include "openvino/core/preprocess/pre_post_process.hpp" #include "openvino/opsets/opset12.hpp" #include "openvino/opsets/opset8.hpp" #include "openvino/pass/constant_folding.hpp" #include "openvino/pass/serialize.hpp" +#include "ov_models/utils/ov_helpers.hpp" #include "transformations/common_optimizations/moc_transformations.hpp" #include "transformations/common_optimizations/ric_fusion.hpp" #include "transformations/common_optimizations/transpose_sinking.hpp" diff --git a/src/common/transformations/tests/common_optimizations/transpose_sinking_test.cpp 
b/src/common/transformations/tests/common_optimizations/transpose_sinking_test.cpp index 0aa8b8b06e50c0..6188628246a0e4 100644 --- a/src/common/transformations/tests/common_optimizations/transpose_sinking_test.cpp +++ b/src/common/transformations/tests/common_optimizations/transpose_sinking_test.cpp @@ -10,11 +10,11 @@ #include #include "common_test_utils/ov_test_utils.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" #include "openvino/core/model.hpp" #include "openvino/core/preprocess/pre_post_process.hpp" #include "openvino/opsets/opset6.hpp" #include "openvino/pass/manager.hpp" +#include "ov_models/utils/ov_helpers.hpp" #include "transformations/init_node_info.hpp" using namespace testing; diff --git a/src/core/tests/type_relaxed_copy.cpp b/src/core/tests/type_relaxed_copy.cpp index 7edd54433afd96..4e8271043dab0b 100644 --- a/src/core/tests/type_relaxed_copy.cpp +++ b/src/core/tests/type_relaxed_copy.cpp @@ -8,8 +8,8 @@ #include #include "ie_common.h" -#include "ngraph_functions/builders.hpp" #include "openvino/op/matmul.hpp" +#include "ov_models/builders.hpp" #include "ov_ops/type_relaxed.hpp" using namespace ov; diff --git a/src/inference/tests/functional/caching_test.cpp b/src/inference/tests/functional/caching_test.cpp index fb3cd83b61cd0d..8a313792b87af5 100644 --- a/src/inference/tests/functional/caching_test.cpp +++ b/src/inference/tests/functional/caching_test.cpp @@ -15,7 +15,6 @@ #include "common_test_utils/file_utils.hpp" #include "ie_plugin_config.hpp" -#include "ngraph_functions/subgraph_builders.hpp" #include "openvino/core/any.hpp" #include "openvino/core/except.hpp" #include "openvino/core/layout.hpp" @@ -30,6 +29,7 @@ #include "openvino/runtime/iplugin.hpp" #include "openvino/runtime/iremote_context.hpp" #include "openvino/runtime/properties.hpp" +#include "ov_models/subgraph_builders.hpp" #include "unit_test_utils/mocks/openvino/runtime/mock_iasync_infer_request.hpp" #include "unit_test_utils/mocks/openvino/runtime/mock_icompiled_model.hpp" #include "unit_test_utils/mocks/openvino/runtime/mock_iplugin.hpp" diff --git a/src/plugins/auto/tests/unit/CMakeLists.txt b/src/plugins/auto/tests/unit/CMakeLists.txt index 5088ce0d400f49..ca149e48b33349 100644 --- a/src/plugins/auto/tests/unit/CMakeLists.txt +++ b/src/plugins/auto/tests/unit/CMakeLists.txt @@ -18,13 +18,13 @@ ov_add_test_target( ${CMAKE_CURRENT_SOURCE_DIR} ${OpenVINO_SOURCE_DIR}/src/plugins/auto/src LINK_LIBRARIES - ngraphFunctions + ov_models unit_test_utils ADD_CPPLINT DEPENDENCIES template_extension mock_engine - ngraphFunctions + ov_models LABELS Multi Auto diff --git a/src/plugins/auto_batch/tests/functional/behavior/plugin/auto_batching_tests.hpp b/src/plugins/auto_batch/tests/functional/behavior/plugin/auto_batching_tests.hpp index b794a90b9cc524..91635a2c4b7294 100644 --- a/src/plugins/auto_batch/tests/functional/behavior/plugin/auto_batching_tests.hpp +++ b/src/plugins/auto_batch/tests/functional/behavior/plugin/auto_batching_tests.hpp @@ -11,7 +11,7 @@ #include "base/behavior_test_utils.hpp" #include "common_test_utils/test_common.hpp" #include "functional_test_utils/blob_utils.hpp" -#include "ngraph_functions/subgraph_builders.hpp" +#include "ov_models/subgraph_builders.hpp" using namespace ::testing; using namespace InferenceEngine; diff --git a/src/plugins/auto_batch/tests/unit/CMakeLists.txt b/src/plugins/auto_batch/tests/unit/CMakeLists.txt index 4b44fc47fa9055..14b2181060eeb8 100644 --- a/src/plugins/auto_batch/tests/unit/CMakeLists.txt +++ 
b/src/plugins/auto_batch/tests/unit/CMakeLists.txt @@ -21,10 +21,10 @@ addIeTargetTest( ${SHARED_HEADERS_DIR} LINK_LIBRARIES unit_test_utils - ngraphFunctions + ov_models DEPENDENCIES mock_engine - ngraphFunctions + ov_models ADD_CPPLINT LABELS Auto_Batch diff --git a/src/plugins/auto_batch/tests/unit/async_infer_request_test.cpp b/src/plugins/auto_batch/tests/unit/async_infer_request_test.cpp index 2ff0b39f9c5d49..e4d03c4e832a93 100644 --- a/src/plugins/auto_batch/tests/unit/async_infer_request_test.cpp +++ b/src/plugins/auto_batch/tests/unit/async_infer_request_test.cpp @@ -7,7 +7,7 @@ #include #include "mock_common.hpp" -#include "ngraph_functions/subgraph_builders.hpp" +#include "ov_models/subgraph_builders.hpp" #include "openvino/core/dimension_tracker.hpp" #include "openvino/core/type/element_type.hpp" #include "openvino/runtime/threading/immediate_executor.hpp" diff --git a/src/plugins/auto_batch/tests/unit/compile_model_create_infer_request_test.cpp b/src/plugins/auto_batch/tests/unit/compile_model_create_infer_request_test.cpp index 3f9b938d4c802d..30b06945c4dfd9 100644 --- a/src/plugins/auto_batch/tests/unit/compile_model_create_infer_request_test.cpp +++ b/src/plugins/auto_batch/tests/unit/compile_model_create_infer_request_test.cpp @@ -6,7 +6,7 @@ #include #include "mock_common.hpp" -#include "ngraph_functions/subgraph_builders.hpp" +#include "ov_models/subgraph_builders.hpp" #include "openvino/core/dimension_tracker.hpp" #include "openvino/runtime/threading/immediate_executor.hpp" #include "unit_test_utils/mocks/openvino/runtime/mock_icore.hpp" diff --git a/src/plugins/auto_batch/tests/unit/compile_model_get_property_test.cpp b/src/plugins/auto_batch/tests/unit/compile_model_get_property_test.cpp index 67c0016469916a..805e2f5e672df8 100644 --- a/src/plugins/auto_batch/tests/unit/compile_model_get_property_test.cpp +++ b/src/plugins/auto_batch/tests/unit/compile_model_get_property_test.cpp @@ -6,7 +6,7 @@ #include #include "mock_common.hpp" -#include "ngraph_functions/subgraph_builders.hpp" +#include "ov_models/subgraph_builders.hpp" #include "openvino/core/dimension_tracker.hpp" #include "unit_test_utils/mocks/openvino/runtime/mock_icore.hpp" diff --git a/src/plugins/auto_batch/tests/unit/compile_model_get_runtime_model_test.cpp b/src/plugins/auto_batch/tests/unit/compile_model_get_runtime_model_test.cpp index 5d921ecd499457..bbe0b1fb18a9a1 100644 --- a/src/plugins/auto_batch/tests/unit/compile_model_get_runtime_model_test.cpp +++ b/src/plugins/auto_batch/tests/unit/compile_model_get_runtime_model_test.cpp @@ -6,7 +6,7 @@ #include #include "mock_common.hpp" -#include "ngraph_functions/subgraph_builders.hpp" +#include "ov_models/subgraph_builders.hpp" #include "openvino/core/dimension_tracker.hpp" #include "unit_test_utils/mocks/openvino/runtime/mock_icore.hpp" diff --git a/src/plugins/auto_batch/tests/unit/compile_model_set_property_test.cpp b/src/plugins/auto_batch/tests/unit/compile_model_set_property_test.cpp index c3d8057f1831bf..a801b619566e94 100644 --- a/src/plugins/auto_batch/tests/unit/compile_model_set_property_test.cpp +++ b/src/plugins/auto_batch/tests/unit/compile_model_set_property_test.cpp @@ -6,7 +6,7 @@ #include #include "mock_common.hpp" -#include "ngraph_functions/subgraph_builders.hpp" +#include "ov_models/subgraph_builders.hpp" #include "openvino/core/dimension_tracker.hpp" #include "unit_test_utils/mocks/openvino/runtime/mock_icore.hpp" diff --git a/src/plugins/auto_batch/tests/unit/plugin_compile_model_test.cpp 
b/src/plugins/auto_batch/tests/unit/plugin_compile_model_test.cpp index d69335582528a1..8f06a44cff06a2 100644 --- a/src/plugins/auto_batch/tests/unit/plugin_compile_model_test.cpp +++ b/src/plugins/auto_batch/tests/unit/plugin_compile_model_test.cpp @@ -6,7 +6,7 @@ #include #include "mock_common.hpp" -#include "ngraph_functions/subgraph_builders.hpp" +#include "ov_models/subgraph_builders.hpp" #include "openvino/core/dimension_tracker.hpp" #include "unit_test_utils/mocks/openvino/runtime/mock_icore.hpp" diff --git a/src/plugins/auto_batch/tests/unit/plugin_query_model_test.cpp b/src/plugins/auto_batch/tests/unit/plugin_query_model_test.cpp index e04a6e897cfe33..e15ee5121f0611 100644 --- a/src/plugins/auto_batch/tests/unit/plugin_query_model_test.cpp +++ b/src/plugins/auto_batch/tests/unit/plugin_query_model_test.cpp @@ -6,7 +6,7 @@ #include #include "mock_common.hpp" -#include "ngraph_functions/subgraph_builders.hpp" +#include "ov_models/subgraph_builders.hpp" #include "unit_test_utils/mocks/openvino/runtime/mock_icore.hpp" using ::testing::_; diff --git a/src/plugins/auto_batch/tests/unit/sync_infer_request_test.cpp b/src/plugins/auto_batch/tests/unit/sync_infer_request_test.cpp index 6cdef3fb0bf172..d05cc53ceb05e6 100644 --- a/src/plugins/auto_batch/tests/unit/sync_infer_request_test.cpp +++ b/src/plugins/auto_batch/tests/unit/sync_infer_request_test.cpp @@ -7,7 +7,7 @@ #include #include "mock_common.hpp" -#include "ngraph_functions/subgraph_builders.hpp" +#include "ov_models/subgraph_builders.hpp" #include "openvino/core/dimension_tracker.hpp" #include "openvino/core/type/element_type.hpp" #include "openvino/runtime/threading/immediate_executor.hpp" diff --git a/src/plugins/hetero/tests/unit/CMakeLists.txt b/src/plugins/hetero/tests/unit/CMakeLists.txt index 42056e8a0cd440..654f491220c6bb 100644 --- a/src/plugins/hetero/tests/unit/CMakeLists.txt +++ b/src/plugins/hetero/tests/unit/CMakeLists.txt @@ -21,10 +21,10 @@ ov_add_test_target( ${OBJ_LIB} LINK_LIBRARIES unit_test_utils - ngraphFunctions + ov_models DEPENDENCIES mock_engine - ngraphFunctions + ov_models ADD_CLANG_FORMAT LABELS HETERO diff --git a/src/plugins/intel_cpu/tests/functional/CMakeLists.txt b/src/plugins/intel_cpu/tests/functional/CMakeLists.txt index f0127f0bb71099..d012589f5b0c1e 100644 --- a/src/plugins/intel_cpu/tests/functional/CMakeLists.txt +++ b/src/plugins/intel_cpu/tests/functional/CMakeLists.txt @@ -11,7 +11,7 @@ target_link_libraries(cpuSpecificRtInfo PRIVATE openvino::runtime) set(INCLUDES ${CMAKE_CURRENT_SOURCE_DIR} $/src) set(DEPENDENCIES openvino_intel_cpu_plugin template_extension) -set(LINK_LIBRARIES funcSharedTests cpuSpecificRtInfo openvino::snippets snippetsNgraphFunctions) +set(LINK_LIBRARIES funcSharedTests cpuSpecificRtInfo openvino::snippets ov_snippets_models) if(ENABLE_OV_ONNX_FRONTEND) list(APPEND DEFINES TEST_MODELS="${TEST_MODEL_ZOO}") diff --git a/src/plugins/intel_cpu/tests/functional/behavior/export_import.cpp b/src/plugins/intel_cpu/tests/functional/behavior/export_import.cpp index e3c8a540ab16a9..f593fe7a8e72cb 100644 --- a/src/plugins/intel_cpu/tests/functional/behavior/export_import.cpp +++ b/src/plugins/intel_cpu/tests/functional/behavior/export_import.cpp @@ -7,7 +7,7 @@ #include "openvino/runtime/compiled_model.hpp" #include "openvino/runtime/properties.hpp" #include "common_test_utils/test_common.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include diff --git a/src/plugins/intel_cpu/tests/functional/behavior/ov_executable_network/properties.cpp 
b/src/plugins/intel_cpu/tests/functional/behavior/ov_executable_network/properties.cpp index b4436bf73a7e5e..be296b5d53c559 100644 --- a/src/plugins/intel_cpu/tests/functional/behavior/ov_executable_network/properties.cpp +++ b/src/plugins/intel_cpu/tests/functional/behavior/ov_executable_network/properties.cpp @@ -7,7 +7,7 @@ #include "test_utils/properties_test.hpp" #include #include "ie_system_conf.h" -#include "ngraph_functions/subgraph_builders.hpp" +#include "ov_models/subgraph_builders.hpp" #include "openvino/runtime/core.hpp" #include "openvino/runtime/compiled_model.hpp" #include "openvino/runtime/properties.hpp" diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/infer_request/memory_states.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/infer_request/memory_states.cpp index 817b9741b21b77..e409ad7a866935 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/infer_request/memory_states.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/infer_request/memory_states.cpp @@ -5,7 +5,7 @@ #include #include "behavior/infer_request/memory_states.hpp" #include "functional_test_utils/plugin_cache.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" using namespace BehaviorTestsDefinitions; diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/hetero_synthetic.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/hetero_synthetic.cpp index cada399989e8d7..e767a26ca072ba 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/hetero_synthetic.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/hetero_synthetic.cpp @@ -5,8 +5,8 @@ #include #include "behavior/plugin/hetero_synthetic.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/subgraph_builders.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/subgraph_builders.hpp" // defined in plugin_name.cpp extern const char * cpu_plugin_file_name; diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/execution_graph_tests/add_output.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/execution_graph_tests/add_output.cpp index adf13bfb52bcb6..7f3e8cbea3a67d 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/execution_graph_tests/add_output.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/execution_graph_tests/add_output.cpp @@ -7,7 +7,7 @@ #include "functional_test_utils/plugin_cache.hpp" #include "ngraph/op/multiply.hpp" #include "ngraph/op/sigmoid.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" using namespace ngraph; diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_and_avg_pool_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_and_avg_pool_transformation.cpp index 119cf19785174f..42e3b27fbbd71c 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_and_avg_pool_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_and_avg_pool_transformation.cpp @@ -5,7 +5,7 @@ #include #include "low_precision_transformations/fake_quantize_and_avg_pool_transformation.hpp" -#include 
"lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" #include "common_test_utils/test_constants.hpp" using namespace LayerTestsDefinitions; diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_and_max_pool_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_and_max_pool_transformation.cpp index 226c7aa1861f9e..4116af8a5cca86 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_and_max_pool_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_and_max_pool_transformation.cpp @@ -5,7 +5,7 @@ #include #include "low_precision_transformations/fake_quantize_and_max_pool_transformation.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" #include "common_test_utils/test_constants.hpp" using namespace LayerTestsDefinitions; diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_precision_selection_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_precision_selection_transformation.cpp index bd6036edbe30d0..80d790854d7b36 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_precision_selection_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_precision_selection_transformation.cpp @@ -6,7 +6,7 @@ #include "low_precision_transformations/fake_quantize_precision_selection_transformation.hpp" #include "common_test_utils/test_constants.hpp" -#include "lpt_ngraph_functions/fake_quantize_function.hpp" +#include "ov_lpt_models/fake_quantize.hpp" using namespace LayerTestsDefinitions; using namespace ov::pass::low_precision; diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_transformation.cpp index 1541d67dc8e6a5..cccd6250dac624 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_transformation.cpp @@ -6,7 +6,7 @@ #include "low_precision_transformations/fake_quantize_transformation.hpp" #include "common_test_utils/test_constants.hpp" -#include "lpt_ngraph_functions/fake_quantize_function.hpp" +#include "ov_lpt_models/fake_quantize.hpp" using namespace LayerTestsDefinitions; using namespace ov::pass::low_precision; diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_with_dq_not_optimal_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_with_dq_not_optimal_transformation.cpp index 6e974a62302c38..b3b0dbc853cbb5 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_with_dq_not_optimal_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_with_dq_not_optimal_transformation.cpp @@ -6,7 +6,7 @@ #include 
"low_precision_transformations/fake_quantize_with_dq_not_optimal_transformation.hpp" #include "common_test_utils/test_constants.hpp" -#include "lpt_ngraph_functions/fake_quantize_function.hpp" +#include "ov_lpt_models/fake_quantize.hpp" using namespace LayerTestsDefinitions; using namespace ov::pass::low_precision; diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_fq_and_scale_shift_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_fq_and_scale_shift_transformation.cpp index ea6f3c3d817fbe..8c23420484e2ad 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_fq_and_scale_shift_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_fq_and_scale_shift_transformation.cpp @@ -6,7 +6,7 @@ #include "low_precision_transformations/fuse_fake_quantize_and_scale_shift_transformation.hpp" #include "common_test_utils/test_constants.hpp" -#include "lpt_ngraph_functions/fuse_fake_quantize_and_scale_shift_function.hpp" +#include "ov_lpt_models/fuse_fake_quantize_and_scale_shift.hpp" using namespace LayerTestsDefinitions; using namespace ov::pass::low_precision; diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/concat_resize_concat.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/concat_resize_concat.cpp index 41a5c8fa5197c8..775ba66028f8a6 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/concat_resize_concat.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/concat_resize_concat.cpp @@ -6,7 +6,7 @@ #include #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include #include diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/adaptive_pooling.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/adaptive_pooling.cpp index 37e8afb7e14f61..1efb20ffc1d8d2 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/adaptive_pooling.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/adaptive_pooling.cpp @@ -5,8 +5,8 @@ #include #include "shared_test_classes/base/ov_subgraph.hpp" #include "test_utils/cpu_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" using namespace InferenceEngine; using namespace CPUTestUtils; diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/augru_cell.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/augru_cell.cpp index ed1f72d434e81b..1666eb68bc5435 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/augru_cell.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/augru_cell.cpp @@ -3,7 +3,7 @@ // #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "test_utils/cpu_test_utils.hpp" using namespace CPUTestUtils; diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/augru_sequence.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/augru_sequence.cpp index f54605b78a2558..610fa5e6d9bc29 100644 --- 
a/src/plugins/intel_cpu/tests/functional/single_layer_tests/augru_sequence.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/augru_sequence.cpp @@ -3,7 +3,7 @@ // #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "test_utils/cpu_test_utils.hpp" #include "transformations/op_conversions/bidirectional_sequences_decomposition.hpp" #include "transformations/op_conversions/convert_sequences_to_tensor_iterator.hpp" diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/batch_to_space.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/batch_to_space.cpp index cf86d8759b8f86..836931ec465669 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/batch_to_space.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/batch_to_space.cpp @@ -4,7 +4,7 @@ #include #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "test_utils/cpu_test_utils.hpp" using namespace InferenceEngine; diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/broadcast.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/broadcast.cpp index 57e3b0ae4bece8..a22f29bba6c45f 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/broadcast.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/broadcast.cpp @@ -3,7 +3,7 @@ // #include "test_utils/cpu_test_utils.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/bucketize.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/bucketize.cpp index 236d14229d06a8..6c345226e4f69f 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/bucketize.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/bucketize.cpp @@ -3,7 +3,7 @@ // #include -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "test_utils/cpu_test_utils.hpp" diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/conversion.hpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/conversion.hpp index 8418a919a6e4d9..4cac9c96a2501c 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/conversion.hpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/conversion.hpp @@ -5,7 +5,7 @@ #pragma once #include "shared_test_classes/single_layer/activation.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include #include "test_utils/cpu_test_utils.hpp" diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/mvn.hpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/mvn.hpp index 6cda3e59c9d70a..51e7306247e5d6 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/mvn.hpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/mvn.hpp @@ -5,7 +5,7 @@ #pragma once #include "shared_test_classes/single_layer/mvn.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include #include "test_utils/fusing_test_utils.hpp" diff --git 
a/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/reduce.hpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/reduce.hpp index e301a0a1417fb1..31022372a8234e 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/reduce.hpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/reduce.hpp @@ -5,7 +5,7 @@ #pragma once #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "test_utils/cpu_test_utils.hpp" #include #include "test_utils/fusing_test_utils.hpp" diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/softmax.hpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/softmax.hpp index 5465d1c06859c0..06e03d79a493a1 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/softmax.hpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/softmax.hpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include +#include #include "shared_test_classes/base/ov_subgraph.hpp" #include "test_utils/cpu_test_utils.hpp" diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/transpose.hpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/transpose.hpp index 6d07d4a0d22943..5b8300106c83df 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/transpose.hpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/transpose.hpp @@ -5,7 +5,7 @@ #pragma once #include "shared_test_classes/single_layer/transpose.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "test_utils/cpu_test_utils.hpp" #include "gtest/gtest.h" diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/concat.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/concat.cpp index 25be06d1818c1b..3a0da3008ffd66 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/concat.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/concat.cpp @@ -3,7 +3,7 @@ // #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "test_utils/cpu_test_utils.hpp" using namespace ov::test; diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/convert_to_plugin_specific_node.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/convert_to_plugin_specific_node.cpp index a23383913dba60..ecd779302b59ad 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/convert_to_plugin_specific_node.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/convert_to_plugin_specific_node.cpp @@ -3,7 +3,7 @@ // #include "test_utils/cpu_test_utils.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" using namespace ngraph; using namespace InferenceEngine; diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/convolution.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/convolution.cpp index a8bf5c83a2e4cf..a7a863b5782868 100755 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/convolution.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/convolution.cpp @@ -6,8 +6,8 @@ #include "test_utils/convolution_params.hpp" #include "test_utils/fusing_test_utils.hpp" #include 
"shared_test_classes/base/ov_subgraph.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" +#include "ov_models/builders.hpp" #include "openvino/core/visibility.hpp" #include #include "utils/general_utils.h" diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/convolution_backprop_data.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/convolution_backprop_data.cpp index 5b0496e6ca7cdc..455c78a8c09ed0 100755 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/convolution_backprop_data.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/convolution_backprop_data.cpp @@ -6,7 +6,7 @@ #include #include "cpu_shape.h" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "openvino/core/preprocess/pre_post_process.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "test_utils/convolution_params.hpp" diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/ctc_greedy_decoder.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/ctc_greedy_decoder.cpp index fc3bbd7d248889..185ca4d29cc438 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/ctc_greedy_decoder.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/ctc_greedy_decoder.cpp @@ -5,7 +5,7 @@ #include #include -#include +#include #include #include #include diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/ctc_greedy_decoder_seq_len.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/ctc_greedy_decoder_seq_len.cpp index f605dfd39bd398..46757e4d63d4e4 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/ctc_greedy_decoder_seq_len.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/ctc_greedy_decoder_seq_len.cpp @@ -5,7 +5,7 @@ #include #include -#include +#include #include #include #include diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/ctc_loss.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/ctc_loss.cpp index 777e54f9a93aa3..8a310a82ec5254 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/ctc_loss.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/ctc_loss.cpp @@ -5,7 +5,7 @@ #include #include -#include +#include #include #include #include diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/cum_sum.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/cum_sum.cpp index 7b55be22f224e1..232ddd839f6a00 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/cum_sum.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/cum_sum.cpp @@ -4,7 +4,7 @@ #include "test_utils/cpu_test_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" using namespace ngraph; using namespace InferenceEngine; diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/custom_op_internal_dyn.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/custom_op_internal_dyn.cpp index fb5ce680f5ff63..56e2b9ededea63 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/custom_op_internal_dyn.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/custom_op_internal_dyn.cpp @@ -3,11 +3,10 @@ // #include -#include #include #include - -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include 
"ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" using namespace ov::test; diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/deformable_convolution.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/deformable_convolution.cpp index 38284cc576922c..04e96fed6bc478 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/deformable_convolution.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/deformable_convolution.cpp @@ -3,8 +3,8 @@ // #include "test_utils/cpu_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" #include #include "shared_test_classes/base/ov_subgraph.hpp" diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/detection_output.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/detection_output.cpp index 29be49f621a309..814ff07a29c746 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/detection_output.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/detection_output.cpp @@ -4,7 +4,7 @@ #include "shared_test_classes/single_layer/detection_output.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include #include "test_utils/cpu_test_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/embedding_bag_offsets_sum.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/embedding_bag_offsets_sum.cpp index 01fac925ab7a89..8227678c0188f1 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/embedding_bag_offsets_sum.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/embedding_bag_offsets_sum.cpp @@ -7,7 +7,7 @@ #include #include -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "test_utils/cpu_test_utils.hpp" diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/embedding_bag_packed_sum.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/embedding_bag_packed_sum.cpp index 814e0a61d3beb2..17512416b655d1 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/embedding_bag_packed_sum.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/embedding_bag_packed_sum.cpp @@ -7,7 +7,7 @@ #include #include -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "test_utils/cpu_test_utils.hpp" diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/embedding_segments_sum.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/embedding_segments_sum.cpp index c8912f145d709b..36bd193bbd08da 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/embedding_segments_sum.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/embedding_segments_sum.cpp @@ -7,7 +7,7 @@ #include #include -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "test_utils/cpu_test_utils.hpp" diff --git 
a/src/plugins/intel_cpu/tests/functional/single_layer_tests/extract_image_patches.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/extract_image_patches.cpp index 86b943b0d84a54..fecd504bf6ba31 100755 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/extract_image_patches.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/extract_image_patches.cpp @@ -5,7 +5,7 @@ #include "shared_test_classes/base/ov_subgraph.hpp" #include "test_utils/cpu_test_utils.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" using namespace CPUTestUtils; using namespace ov::test; diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/eye.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/eye.cpp index d984aee2152b00..22b8cb7a0de857 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/eye.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/eye.cpp @@ -5,7 +5,7 @@ #include #include #include "test_utils/cpu_test_utils.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" using namespace InferenceEngine; diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/fake_quantize.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/fake_quantize.cpp index 85872a673b3882..e82f26d774a692 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/fake_quantize.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/fake_quantize.cpp @@ -3,7 +3,7 @@ // #include "test_utils/cpu_test_utils.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/gather.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/gather.cpp index 9bfd1673cfc1f1..480247b44ea5f1 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/gather.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/gather.cpp @@ -3,7 +3,7 @@ // #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "test_utils/cpu_test_utils.hpp" #include diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/gather_elements.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/gather_elements.cpp index c583c8c380c803..1edc0146f2328f 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/gather_elements.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/gather_elements.cpp @@ -3,7 +3,7 @@ // #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include #include "test_utils/cpu_test_utils.hpp" diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/gather_nd.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/gather_nd.cpp index 9a526db7982dbf..01ba342722e2e6 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/gather_nd.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/gather_nd.cpp @@ -4,7 +4,7 @@ #include #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" using namespace InferenceEngine; using namespace ov; diff --git 
a/src/plugins/intel_cpu/tests/functional/single_layer_tests/gather_tree.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/gather_tree.cpp index 6ab22cba1ba6f4..735b6032221809 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/gather_tree.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/gather_tree.cpp @@ -3,8 +3,8 @@ // #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" #include "common_test_utils/ov_tensor_utils.hpp" #include "test_utils/cpu_test_utils.hpp" diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/grid_sample.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/grid_sample.cpp index f177ad4248a023..7d7c1d82b8d182 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/grid_sample.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/grid_sample.cpp @@ -3,7 +3,7 @@ // #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "test_utils/cpu_test_utils.hpp" #include diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/grn.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/grn.cpp index 89d4b65feef7b5..583bd4535b76da 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/grn.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/grn.cpp @@ -3,7 +3,7 @@ // #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "test_utils/cpu_test_utils.hpp" using namespace CPUTestUtils; diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/group_convolution_backprop_data.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/group_convolution_backprop_data.cpp index e053dfab9f5e1d..03f1f707254bc5 100755 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/group_convolution_backprop_data.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/group_convolution_backprop_data.cpp @@ -7,7 +7,7 @@ #include "test_utils/fusing_test_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include #include "openvino/core/preprocess/pre_post_process.hpp" diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/gru_cell.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/gru_cell.cpp index 3beb4415d28eb6..be63ddbc5dd984 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/gru_cell.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/gru_cell.cpp @@ -3,7 +3,7 @@ // #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "test_utils/cpu_test_utils.hpp" using namespace CPUTestUtils; diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/gru_sequence.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/gru_sequence.cpp index bde807ca63722e..265566c51f02f5 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/gru_sequence.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/gru_sequence.cpp @@ -3,7 +3,7 @@ // #include "shared_test_classes/base/ov_subgraph.hpp" -#include 
"ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "test_utils/cpu_test_utils.hpp" #include "transformations/op_conversions/bidirectional_sequences_decomposition.hpp" #include "transformations/op_conversions/convert_sequences_to_tensor_iterator.hpp" diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/eltwise.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/eltwise.cpp index 47a4dd77b2d556..5196736a3c9b3f 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/eltwise.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/eltwise.cpp @@ -6,7 +6,7 @@ #include "shared_test_classes/single_layer/eltwise.hpp" #include "test_utils/cpu_test_utils.hpp" #include "test_utils/fusing_test_utils.hpp" -#include +#include #include using namespace InferenceEngine; diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/mvn.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/mvn.cpp index f4d9263cc9bffc..7e45470c395d48 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/mvn.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/mvn.cpp @@ -6,7 +6,7 @@ #include "shared_test_classes/single_layer/mvn.hpp" #include "test_utils/cpu_test_utils.hpp" #include "test_utils/fusing_test_utils.hpp" -#include +#include #include using namespace InferenceEngine; diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/reduce.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/reduce.cpp index e267c938f54b1f..4291894af8e68b 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/reduce.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/reduce.cpp @@ -6,7 +6,7 @@ #include "shared_test_classes/single_layer/reduce_ops.hpp" #include "test_utils/cpu_test_utils.hpp" #include "test_utils/fusing_test_utils.hpp" -#include "lpt_ngraph_functions/common/builders.hpp" +#include "ov_lpt_models/common/builders.hpp" using namespace InferenceEngine; using namespace CPUTestUtils; diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/interpolate.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/interpolate.cpp index 3256a232233566..3f981101463acc 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/interpolate.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/interpolate.cpp @@ -3,7 +3,7 @@ // #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "test_utils/cpu_test_utils.hpp" #include "test_utils/fusing_test_utils.hpp" #include diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/log_softmax.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/log_softmax.cpp index f7ec400c3db237..c0c23db047cc8d 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/log_softmax.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/log_softmax.cpp @@ -3,7 +3,7 @@ // #include "test_utils/cpu_test_utils.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" using namespace ngraph; diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/logical.cpp 
b/src/plugins/intel_cpu/tests/functional/single_layer_tests/logical.cpp index b5089f85a158a7..a477295140c3ab 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/logical.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/logical.cpp @@ -3,7 +3,7 @@ // #include -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "test_utils/cpu_test_utils.hpp" using namespace InferenceEngine; diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/loop.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/loop.cpp index 3070c4f3f918aa..b92646e458130e 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/loop.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/loop.cpp @@ -4,7 +4,7 @@ #include #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include using namespace InferenceEngine; diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/lrn.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/lrn.cpp index b6cf98a2e2841a..2d47fe19f18a8d 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/lrn.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/lrn.cpp @@ -6,7 +6,7 @@ #include "test_utils/cpu_test_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" using namespace ngraph; using namespace CPUTestUtils; diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/lstm_cell.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/lstm_cell.cpp index 99ec9750d6f64c..ae934271631ec2 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/lstm_cell.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/lstm_cell.cpp @@ -3,7 +3,7 @@ // #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "test_utils/cpu_test_utils.hpp" using namespace CPUTestUtils; diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/lstm_sequence.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/lstm_sequence.cpp index ea36f3744ac578..218a45c1661076 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/lstm_sequence.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/lstm_sequence.cpp @@ -4,7 +4,7 @@ #include #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "test_utils/cpu_test_utils.hpp" #include "transformations/op_conversions/bidirectional_sequences_decomposition.hpp" #include "transformations/op_conversions/convert_sequences_to_tensor_iterator.hpp" diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/matmul.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/matmul.cpp index 824c72ac1d3d5d..c5508e28f052c8 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/matmul.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/matmul.cpp @@ -6,7 +6,7 @@ #include "shared_test_classes/base/ov_subgraph.hpp" #include "ie_precision.hpp" #include "test_utils/fusing_test_utils.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include using namespace ngraph; diff --git 
a/src/plugins/intel_cpu/tests/functional/single_layer_tests/matmul_sparse.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/matmul_sparse.cpp index 79e5546b1360ef..209efbe1864819 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/matmul_sparse.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/matmul_sparse.cpp @@ -6,7 +6,7 @@ #include "shared_test_classes/base/ov_subgraph.hpp" #include "ie_precision.hpp" #include "test_utils/fusing_test_utils.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include #include #include "shared_test_classes/base/utils/generate_inputs.hpp" diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/non_max_suppression.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/non_max_suppression.cpp index 09ffa26e16cc76..072b481dd3c3da 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/non_max_suppression.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/non_max_suppression.cpp @@ -6,7 +6,7 @@ #include #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include #include "test_utils/cpu_test_utils.hpp" #include "shared_test_classes/base/utils/ranges.hpp" diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/nonzero.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/nonzero.cpp index 6e23e4025daf97..ecce08dedb2690 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/nonzero.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/nonzero.cpp @@ -5,8 +5,8 @@ #include "shared_test_classes/base/ov_subgraph.hpp" #include "test_utils/cpu_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" #include using namespace InferenceEngine; diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/normalize.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/normalize.cpp index e91dfbcc5b7cc7..6e7277ce2e343f 100755 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/normalize.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/normalize.cpp @@ -4,7 +4,7 @@ #include "shared_test_classes/single_layer/normalize_l2.hpp" #include "test_utils/fusing_test_utils.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/one_hot.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/one_hot.cpp index 18713351eb1566..8eda5f4221e77e 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/one_hot.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/one_hot.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include +#include #include #include "test_utils/cpu_test_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/pooling.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/pooling.cpp index 6cb669f8af1e7d..c6a76f7fee9fad 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/pooling.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/pooling.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // 
-#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "test_utils/cpu_test_utils.hpp" #include "test_utils/fusing_test_utils.hpp" #include "shared_test_classes/single_layer/pooling.hpp" diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/prior_box.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/prior_box.cpp index f865451a112b14..13142ae4f395f1 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/prior_box.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/prior_box.cpp @@ -7,7 +7,7 @@ #include #include -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "test_utils/cpu_test_utils.hpp" diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/prior_box_clustered.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/prior_box_clustered.cpp index 2200f3754f583b..06fe498f62b551 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/prior_box_clustered.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/prior_box_clustered.cpp @@ -7,7 +7,7 @@ #include #include -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "test_utils/cpu_test_utils.hpp" diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/proposal.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/proposal.cpp index 2e770ad041aca2..7bb8cae14153fb 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/proposal.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/proposal.cpp @@ -4,7 +4,7 @@ #include #include "test_utils/cpu_test_utils.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" using namespace ngraph; diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/psroi_pooling.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/psroi_pooling.cpp index fa86ed17159aa5..9d0841f15fb6f0 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/psroi_pooling.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/psroi_pooling.cpp @@ -4,8 +4,8 @@ #include "test_utils/cpu_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" using namespace InferenceEngine; using namespace CPUTestUtils; diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/range.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/range.cpp index b2c49f30ef4527..2596f18b459550 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/range.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/range.cpp @@ -4,8 +4,8 @@ // //#include "test_utils/cpu_test_utils.hpp" // -//#include "ngraph_functions/builders.hpp" -//#include "ngraph_functions/utils/ngraph_helpers.hpp" +//#include "ov_models/builders.hpp" +//#include "ov_models/utils/ov_helpers.hpp" // //using namespace InferenceEngine; //using namespace CPUTestUtils; diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/rdft.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/rdft.cpp 
index 774ff1b4e5aaa3..b80dfc66cf691c 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/rdft.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/rdft.cpp @@ -3,7 +3,7 @@ // #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "test_utils/cpu_test_utils.hpp" #include #include diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/region_yolo.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/region_yolo.cpp index eae24f1a1eb919..2dc172b24f0d7d 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/region_yolo.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/region_yolo.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "test_utils/cpu_test_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/reorg_yolo.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/reorg_yolo.cpp index 8f53fc21718a85..ae6d5e5b7e85ab 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/reorg_yolo.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/reorg_yolo.cpp @@ -3,7 +3,7 @@ // #include -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "test_utils/cpu_test_utils.hpp" diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/reverse_sequence.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/reverse_sequence.cpp index 2bca3ce329d557..4ff9eeda2ec279 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/reverse_sequence.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/reverse_sequence.cpp @@ -3,7 +3,7 @@ // #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "test_utils/cpu_test_utils.hpp" #include "common_test_utils/ov_tensor_utils.hpp" diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/rnn_cell.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/rnn_cell.cpp index fff37ec8498c09..98b5e2b73bb203 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/rnn_cell.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/rnn_cell.cpp @@ -3,7 +3,7 @@ // #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "test_utils/cpu_test_utils.hpp" using namespace CPUTestUtils; diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/rnn_sequence.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/rnn_sequence.cpp index 9dfff36079e9e8..1fdd66344f95fc 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/rnn_sequence.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/rnn_sequence.cpp @@ -3,7 +3,7 @@ // #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "test_utils/cpu_test_utils.hpp" #include "transformations/op_conversions/bidirectional_sequences_decomposition.hpp" #include "transformations/op_conversions/convert_sequences_to_tensor_iterator.hpp" diff --git 
a/src/plugins/intel_cpu/tests/functional/single_layer_tests/roialign.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/roialign.cpp index f764f79a896eeb..6168dd375d35d8 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/roialign.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/roialign.cpp @@ -6,8 +6,8 @@ #include #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" using namespace InferenceEngine; using namespace CPUTestUtils; diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/roll.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/roll.cpp index 17a47a969c50df..60a1645ddca039 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/roll.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/roll.cpp @@ -3,7 +3,7 @@ // #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "test_utils/cpu_test_utils.hpp" using namespace CPUTestUtils; diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/scatter_ND_update.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/scatter_ND_update.cpp index 79b3258b0c2ecd..c832486be74c6c 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/scatter_ND_update.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/scatter_ND_update.cpp @@ -5,7 +5,7 @@ #include #include "test_utils/cpu_test_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" using namespace ngraph; using namespace InferenceEngine; diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/scatter_elements_update.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/scatter_elements_update.cpp index a63edb957f14f6..89be0e2aeae8ec 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/scatter_elements_update.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/scatter_elements_update.cpp @@ -5,7 +5,7 @@ #include "test_utils/cpu_test_utils.hpp" #include #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" using namespace ngraph; using namespace InferenceEngine; diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/scatter_update.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/scatter_update.cpp index 2217ecb260b80e..858a27a94109c1 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/scatter_update.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/scatter_update.cpp @@ -4,7 +4,7 @@ #include "test_utils/cpu_test_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" using namespace ngraph; using namespace InferenceEngine; diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/select.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/select.cpp index ec1c3289a6c732..9cacb90f9704c9 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/select.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/select.cpp @@ -5,7 +5,7 @@ #include #include 
"shared_test_classes/base/ov_subgraph.hpp" #include "test_utils/fusing_test_utils.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" using namespace InferenceEngine; using namespace CPUTestUtils; diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/shape_ops.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/shape_ops.cpp index 3b09dad3c7bd98..a4be376f857f41 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/shape_ops.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/shape_ops.cpp @@ -4,7 +4,7 @@ #include "shared_test_classes/base/ov_subgraph.hpp" #include "test_utils/cpu_test_utils.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include using namespace ngraph; diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/shapeof.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/shapeof.cpp index c5ad306400043b..457a2bdfa63b22 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/shapeof.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/shapeof.cpp @@ -4,8 +4,8 @@ #include "test_utils/cpu_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" using namespace InferenceEngine; diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/shuffle_channels.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/shuffle_channels.cpp index d182943e7723d5..4063156bbc28ac 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/shuffle_channels.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/shuffle_channels.cpp @@ -4,8 +4,8 @@ #include "shared_test_classes/single_layer/shuffle_channels.hpp" #include "test_utils/cpu_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" using namespace InferenceEngine; diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/slice.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/slice.cpp index 4068e5702cc36b..58f9adb8a5119a 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/slice.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/slice.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "test_utils/cpu_test_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/space_to_batch.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/space_to_batch.cpp index 82f4e23cf75af8..bd0493d19af7f4 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/space_to_batch.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/space_to_batch.cpp @@ -4,7 +4,7 @@ #include #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "test_utils/cpu_test_utils.hpp" using namespace InferenceEngine; diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/split.cpp 
b/src/plugins/intel_cpu/tests/functional/single_layer_tests/split.cpp index 4842902d0450f5..ff423786fd4234 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/split.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/split.cpp @@ -3,7 +3,7 @@ // #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "test_utils/cpu_test_utils.hpp" using namespace ov::test; diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/strided_slice.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/strided_slice.cpp index 47d8b529e8a996..a6131a937562d9 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/strided_slice.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/strided_slice.cpp @@ -3,7 +3,7 @@ // #include -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "test_utils/cpu_test_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/tensor_iterator.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/tensor_iterator.cpp index 6452fb86579224..1b9e89095b0fcc 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/tensor_iterator.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/tensor_iterator.cpp @@ -4,7 +4,7 @@ #include #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include using namespace InferenceEngine; diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/tile.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/tile.cpp index 15edd4d011d698..32bb1ebc9376cf 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/tile.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/tile.cpp @@ -3,7 +3,7 @@ // #include "test_utils/cpu_test_utils.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/topk.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/topk.cpp index f829ec6db4db7b..4d52c4f24d5dd7 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/topk.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/topk.cpp @@ -5,7 +5,7 @@ #include #include "test_utils/cpu_test_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" using namespace InferenceEngine; using namespace CPUTestUtils; diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/unique.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/unique.cpp index 1a61880e727295..277a799ba7b40e 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/unique.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/unique.cpp @@ -3,7 +3,7 @@ // #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "test_utils/cpu_test_utils.hpp" #include diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/variadic_split.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/variadic_split.cpp index 21d239eccf546e..9ccd6bab8feeba 100644 --- 
a/src/plugins/intel_cpu/tests/functional/single_layer_tests/variadic_split.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/variadic_split.cpp @@ -6,7 +6,7 @@ #include "shared_test_classes/base/ov_subgraph.hpp" #include -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "test_utils/cpu_test_utils.hpp" using namespace ov::test; diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/include/conv_concat.hpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/include/conv_concat.hpp index 2d835883a0a1d4..0078fafd7aab76 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/include/conv_concat.hpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/include/conv_concat.hpp @@ -10,8 +10,8 @@ #include "test_utils/cpu_test_utils.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" +#include "ov_models/builders.hpp" using namespace CPUTestUtils; diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/include/conv_with_zero_point_fuse.hpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/include/conv_with_zero_point_fuse.hpp index a91d572a00076f..0059a9c21cec56 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/include/conv_with_zero_point_fuse.hpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/include/conv_with_zero_point_fuse.hpp @@ -10,8 +10,8 @@ #include "test_utils/cpu_test_utils.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" +#include "ov_models/builders.hpp" using namespace CPUTestUtils; diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/include/fuse_transpose_reorder.hpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/include/fuse_transpose_reorder.hpp index 2555d426a6bd17..0b2b63b7fe9dcc 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/include/fuse_transpose_reorder.hpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/include/fuse_transpose_reorder.hpp @@ -10,8 +10,8 @@ #include "test_utils/cpu_test_utils.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" +#include "ov_models/builders.hpp" using namespace CPUTestUtils; diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/add_convert_to_reorder.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/add_convert_to_reorder.cpp index e363140d1f0e0f..8be11f4a277fb7 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/add_convert_to_reorder.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/add_convert_to_reorder.cpp @@ -4,8 +4,8 @@ #include "test_utils/cpu_test_utils.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" +#include "ov_models/builders.hpp" using namespace InferenceEngine; using namespace CPUTestUtils; diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/align_matmul_input_ranks.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/align_matmul_input_ranks.cpp index 
fa324d4d8490a3..c5cca71accb4a4 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/align_matmul_input_ranks.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/align_matmul_input_ranks.cpp @@ -4,7 +4,7 @@ #include "test_utils/cpu_test_utils.hpp" #include "test_utils/fusing_test_utils.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "common_test_utils/common_utils.hpp" #include diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/arm/convert_group_conv.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/arm/convert_group_conv.cpp index 057b29e1fa1fd0..763251cf5e5e7d 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/arm/convert_group_conv.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/arm/convert_group_conv.cpp @@ -8,7 +8,7 @@ #include #include #include -#include +#include #include "common_test_utils/common_utils.hpp" #include #include "functional_test_utils/skip_tests_config.hpp" diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/arm/convert_group_conv1d.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/arm/convert_group_conv1d.cpp index 2b68d838946623..e498ce930741d1 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/arm/convert_group_conv1d.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/arm/convert_group_conv1d.cpp @@ -8,7 +8,7 @@ #include #include #include -#include +#include #include "common_test_utils/common_utils.hpp" #include #include "functional_test_utils/skip_tests_config.hpp" diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/arm/convert_reduce_multi_axis.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/arm/convert_reduce_multi_axis.cpp index 8273d5dcd2a6d4..5d6424d15f611f 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/arm/convert_reduce_multi_axis.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/arm/convert_reduce_multi_axis.cpp @@ -8,7 +8,7 @@ #include #include #include -#include +#include #include "common_test_utils/common_utils.hpp" #include #include "functional_test_utils/skip_tests_config.hpp" diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/broadcast_eltwise.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/broadcast_eltwise.cpp index 8eca244cc02c30..b3a6c80097d39d 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/broadcast_eltwise.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/broadcast_eltwise.cpp @@ -5,8 +5,8 @@ #include #include -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "test_utils/cpu_test_utils.hpp" diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/concat_const_inplace.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/concat_const_inplace.cpp index 50cf8d1e9aa39d..368b3d3b77b472 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/concat_const_inplace.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/concat_const_inplace.cpp @@ -4,8 +4,8 @@ #include "test_utils/cpu_test_utils.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" -#include 
"ngraph_functions/utils/ngraph_helpers.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" +#include "ov_models/builders.hpp" using namespace CPUTestUtils; using namespace InferenceEngine; diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/concat_conv_sum_inplace.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/concat_conv_sum_inplace.cpp index bdffc0b7d15366..4f8cd4a28d42b7 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/concat_conv_sum_inplace.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/concat_conv_sum_inplace.cpp @@ -4,8 +4,8 @@ #include "test_utils/cpu_test_utils.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" +#include "ov_models/builders.hpp" using namespace CPUTestUtils; using namespace InferenceEngine; diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/concat_reorder_inplace.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/concat_reorder_inplace.cpp index d9bb08452b097f..cf532a8e5bad1c 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/concat_reorder_inplace.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/concat_reorder_inplace.cpp @@ -4,8 +4,8 @@ #include -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" #include "test_utils/cpu_test_utils.hpp" diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/concat_reshape_concat.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/concat_reshape_concat.cpp index 57f75c12388216..fa0c73c2d7381c 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/concat_reshape_concat.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/concat_reshape_concat.cpp @@ -3,8 +3,8 @@ // #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" +#include "ov_models/builders.hpp" /*This test runs the following subgraph: diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/conv3d_reshape.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/conv3d_reshape.cpp index 4e217087499c17..e7b1feb6b41844 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/conv3d_reshape.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/conv3d_reshape.cpp @@ -3,7 +3,7 @@ // #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "test_utils/cpu_test_utils.hpp" using namespace ngraph; diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/conv_dw_conv.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/conv_dw_conv.cpp index b13b3c0a440279..e0f3473794b772 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/conv_dw_conv.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/conv_dw_conv.cpp @@ -2,8 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" 
+#include "ov_models/utils/ov_helpers.hpp" #include "test_utils/cpu_test_utils.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/conv_maxpool_activ.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/conv_maxpool_activ.cpp index b3916d6af23eee..cca2a8ceb69f05 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/conv_maxpool_activ.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/conv_maxpool_activ.cpp @@ -3,7 +3,7 @@ // #include "test_utils/fusing_test_utils.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" using namespace ngraph; using namespace InferenceEngine; diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/conv_sum_broadcast.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/conv_sum_broadcast.cpp index 8dde75cd890584..ca4821c2ad643d 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/conv_sum_broadcast.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/conv_sum_broadcast.cpp @@ -6,8 +6,8 @@ #include "test_utils/fusing_test_utils.hpp" #include "test_utils/convolution_params.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" +#include "ov_models/builders.hpp" #include "cpp_interfaces/interface/ie_internal_plugin_config.hpp" using namespace CPUTestUtils; diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/convert_fq_rnn_to_quantized_rnn.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/convert_fq_rnn_to_quantized_rnn.cpp index 0c0832c486e0f4..030cb879234608 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/convert_fq_rnn_to_quantized_rnn.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/convert_fq_rnn_to_quantized_rnn.cpp @@ -12,7 +12,7 @@ #include "test_utils/cpu_test_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "test_utils/fusing_test_utils.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "common_test_utils/common_utils.hpp" #include diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/convert_range.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/convert_range.cpp index 76728e7b1f2fdf..86a0e2323dbfc4 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/convert_range.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/convert_range.cpp @@ -3,8 +3,8 @@ // #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" +#include "ov_models/builders.hpp" #include "test_utils/cpu_test_utils.hpp" #include diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/convs_and_sums.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/convs_and_sums.cpp index d63ea22d7bebfe..fa84673b89da93 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/convs_and_sums.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/convs_and_sums.cpp @@ -2,8 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include 
"ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" #include "test_utils/cpu_test_utils.hpp" using namespace ngraph; diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/custom_op_insert_convert_i64.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/custom_op_insert_convert_i64.cpp index c04b079b19445b..845b68932b42c1 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/custom_op_insert_convert_i64.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/custom_op_insert_convert_i64.cpp @@ -3,10 +3,9 @@ // #include -#include #include #include - +#include #include "test_utils/cpu_test_utils.hpp" using namespace ov::test; diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/denormal_check.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/denormal_check.cpp index 1e70b5a0277bb2..5b9e5ae550c236 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/denormal_check.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/denormal_check.cpp @@ -4,8 +4,8 @@ #include "shared_test_classes/base/layer_test_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" +#include "ov_models/builders.hpp" #include "ngraph/runtime/aligned_buffer.hpp" using namespace InferenceEngine; diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/eltwise_caching.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/eltwise_caching.cpp index 5843cb2d40464d..fdfa3bdc84915f 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/eltwise_caching.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/eltwise_caching.cpp @@ -38,7 +38,7 @@ #include #include #include -#include +#include #include "common_test_utils/common_utils.hpp" #include #include "functional_test_utils/skip_tests_config.hpp" diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/eltwise_chain.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/eltwise_chain.cpp index a40023a15928a3..ef20c4ffd0518d 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/eltwise_chain.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/eltwise_chain.cpp @@ -8,7 +8,7 @@ #include #include #include -#include +#include #include "common_test_utils/common_utils.hpp" #include #include "functional_test_utils/skip_tests_config.hpp" diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/fq_caching.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/fq_caching.cpp index 2dbd2443e3d670..225dffcfc1268d 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/fq_caching.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/fq_caching.cpp @@ -28,7 +28,7 @@ // -------- #include -#include +#include #include #include "test_utils/cpu_test_utils.hpp" #include "cpp_interfaces/interface/ie_internal_plugin_config.hpp" diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/fq_fused_with_ss.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/fq_fused_with_ss.cpp index b0f7137bb76a0b..0e92f4b4d6ef1a 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/fq_fused_with_ss.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/fq_fused_with_ss.cpp @@ -4,7 +4,7 @@ #include 
"cpp_interfaces/interface/ie_internal_plugin_config.hpp" #include "test_utils/cpu_test_utils.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" using namespace ngraph; using namespace InferenceEngine; diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/fq_layer_dq_bias.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/fq_layer_dq_bias.cpp index 3bbdfeb4c99d02..149e3a4d9c683d 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/fq_layer_dq_bias.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/fq_layer_dq_bias.cpp @@ -2,9 +2,9 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "lpt_ngraph_functions/markup_bias_function.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_lpt_models/markup_bias.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "test_utils/cpu_test_utils.hpp" diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/fullyconnected_strided_inputs_outputs.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/fullyconnected_strided_inputs_outputs.cpp index 218933ff6fc188..070ab78d7821d2 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/fullyconnected_strided_inputs_outputs.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/fullyconnected_strided_inputs_outputs.cpp @@ -4,7 +4,7 @@ #include "openvino/core/partial_shape.hpp" #include "test_utils/cpu_test_utils.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" using namespace ngraph; using namespace InferenceEngine; diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/fuse_conv_fq_with_shared_constants.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/fuse_conv_fq_with_shared_constants.cpp index 35daec2605d6da..68dc6230dbd322 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/fuse_conv_fq_with_shared_constants.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/fuse_conv_fq_with_shared_constants.cpp @@ -6,8 +6,8 @@ #include "test_utils/fusing_test_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" +#include "ov_models/builders.hpp" using namespace ngraph; using namespace ov::test; diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/fuse_muladd_ewsimple.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/fuse_muladd_ewsimple.cpp index d0c47f9c767a1c..99e7ab59000e03 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/fuse_muladd_ewsimple.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/fuse_muladd_ewsimple.cpp @@ -3,7 +3,7 @@ // #include "subgraph_tests/include/fuse_muladd_ewsimple.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" using namespace InferenceEngine; using namespace CPUTestUtils; diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/fuse_non0_output_port.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/fuse_non0_output_port.cpp index cedb22cf68b266..580b400cdf7c41 100644 --- 
a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/fuse_non0_output_port.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/fuse_non0_output_port.cpp @@ -4,8 +4,8 @@ #include "shared_test_classes/base/layer_test_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" +#include "ov_models/builders.hpp" #include "ngraph/runtime/aligned_buffer.hpp" #include diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/fuse_scaleshift_and_fakequantize.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/fuse_scaleshift_and_fakequantize.cpp index 72ef33c883370c..26ff11fb6daa88 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/fuse_scaleshift_and_fakequantize.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/fuse_scaleshift_and_fakequantize.cpp @@ -4,7 +4,7 @@ #include "shared_test_classes/base/layer_test_utils.hpp" #include "test_utils/cpu_test_utils.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" using namespace ngraph; using FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc; diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/fuse_split_concat_pair_to_interpolate.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/fuse_split_concat_pair_to_interpolate.cpp index 764a6310b7305b..33f050bc6501f1 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/fuse_split_concat_pair_to_interpolate.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/fuse_split_concat_pair_to_interpolate.cpp @@ -4,7 +4,7 @@ #include "shared_test_classes/base/layer_test_utils.hpp" #include "test_utils/cpu_test_utils.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" using namespace ngraph; using FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc; diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/fuse_transpose_reorder.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/fuse_transpose_reorder.cpp index 8568638e24afba..24756d0086553d 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/fuse_transpose_reorder.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/fuse_transpose_reorder.cpp @@ -3,7 +3,7 @@ // #include "subgraph_tests/include/fuse_transpose_reorder.hpp" -#include +#include #include using namespace InferenceEngine; diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/input_noreorder_eltwise_bf16.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/input_noreorder_eltwise_bf16.cpp index 9b0f308b30db04..9553caca5ec4c1 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/input_noreorder_eltwise_bf16.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/input_noreorder_eltwise_bf16.cpp @@ -2,9 +2,9 @@ // SPDX-License-Identifier: Apache-2.0 // -#include +#include #include "ie_common.h" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/utils/ov_helpers.hpp" #include "test_utils/cpu_test_utils.hpp" using namespace InferenceEngine; diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/input_output_tensor_reuse.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/input_output_tensor_reuse.cpp index f26ed8d1f8d38b..612006be75dc2b 100644 --- 
a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/input_output_tensor_reuse.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/input_output_tensor_reuse.cpp @@ -3,8 +3,8 @@ // #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" +#include "ov_models/builders.hpp" using namespace InferenceEngine; using namespace ov::test; diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/input_tensor_roi.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/input_tensor_roi.cpp index e4c5ea8353b8f6..02f5c3e9b0e292 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/input_tensor_roi.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/input_tensor_roi.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "test_utils/cpu_test_utils.hpp" #include "functional_test_utils/ov_plugin_cache.hpp" diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/interaction.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/interaction.cpp index e2303c421c7294..1a3874378536ef 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/interaction.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/interaction.cpp @@ -12,7 +12,7 @@ #include #include #include -#include +#include #include "common_test_utils/common_utils.hpp" #include #include "functional_test_utils/skip_tests_config.hpp" diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/matmul_decompress_convert.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/matmul_decompress_convert.cpp index 7688dd403c1e6b..06e4624ad778f4 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/matmul_decompress_convert.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/matmul_decompress_convert.cpp @@ -3,7 +3,7 @@ // #include "test_utils/fusing_test_utils.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "transformations/rt_info/decompression.hpp" diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/matmul_quantized_subgraph.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/matmul_quantized_subgraph.cpp index c09990531d84ab..d897f737b9989c 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/matmul_quantized_subgraph.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/matmul_quantized_subgraph.cpp @@ -4,7 +4,7 @@ #include "test_utils/cpu_test_utils.hpp" #include "test_utils/fusing_test_utils.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "common_test_utils/common_utils.hpp" #include diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/matmul_strided_inputs_outputs.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/matmul_strided_inputs_outputs.cpp index 09b7756bc0b041..c5c1a43fbd8971 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/matmul_strided_inputs_outputs.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/matmul_strided_inputs_outputs.cpp @@ -3,7 +3,7 @@ // #include "test_utils/cpu_test_utils.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" using 
namespace ngraph; using namespace InferenceEngine; diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/matmul_weights_decompression.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/matmul_weights_decompression.cpp index a7f52069b980e6..9fcd4d58689399 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/matmul_weights_decompression.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/matmul_weights_decompression.cpp @@ -3,7 +3,7 @@ // #include "test_utils/fusing_test_utils.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "transformations/rt_info/decompression.hpp" diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/memory_sharing_test.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/memory_sharing_test.cpp index 54756f8133dbcb..ccd09e50da2e47 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/memory_sharing_test.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/memory_sharing_test.cpp @@ -4,7 +4,7 @@ #include "openvino/openvino.hpp" #include "test_utils/cpu_test_utils.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "test_utils/convolution_params.hpp" using namespace CPUTestUtils; diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/mha.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/mha.cpp index bbf9c146d30911..068585b2ca9029 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/mha.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/mha.cpp @@ -8,7 +8,7 @@ #include #include #include -#include +#include #include "common_test_utils/common_utils.hpp" #include #include "functional_test_utils/skip_tests_config.hpp" diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/ngram.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/ngram.cpp index 4d8fe623c4e50f..2173c392c55711 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/ngram.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/ngram.cpp @@ -8,7 +8,7 @@ #include #include #include -#include +#include #include "common_test_utils/common_utils.hpp" #include #include "test_utils/cpu_test_utils.hpp" diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/not_fused_conv_simple_op.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/not_fused_conv_simple_op.cpp index ac84de48396b78..8cc7569233aab7 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/not_fused_conv_simple_op.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/not_fused_conv_simple_op.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "test_utils/cpu_test_utils.hpp" using namespace ngraph; diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/remove_convert.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/remove_convert.cpp index 3d7b72d31a302b..51e500073ced08 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/remove_convert.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/remove_convert.cpp @@ -2,8 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include 
"ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "test_utils/cpu_test_utils.hpp" diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/reshape_chain.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/reshape_chain.cpp index daaeb933649ae1..a2cb19c8e1da3f 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/reshape_chain.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/reshape_chain.cpp @@ -3,7 +3,7 @@ // #include -#include +#include #include #include "common_test_utils/common_utils.hpp" #include "functional_test_utils/skip_tests_config.hpp" diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/reshape_fc.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/reshape_fc.cpp index 4653de36314e50..74f743d92c4837 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/reshape_fc.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/reshape_fc.cpp @@ -4,7 +4,7 @@ #include "shared_test_classes/base/ov_subgraph.hpp" #include -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "test_utils/fusing_test_utils.hpp" using namespace CPUTestUtils; diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/reshape_inplace.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/reshape_inplace.cpp index e979d2524c9bbb..bcdc28c75de99a 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/reshape_inplace.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/reshape_inplace.cpp @@ -4,8 +4,8 @@ #include #include "ngraph/runtime/aligned_buffer.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/seq_native_order.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/seq_native_order.cpp index e0310b4937c280..0e5b57dfb6b85c 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/seq_native_order.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/seq_native_order.cpp @@ -3,7 +3,7 @@ // #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "test_utils/cpu_test_utils.hpp" #include "transformations/op_conversions/bidirectional_sequences_decomposition.hpp" #include "transformations/op_conversions/convert_sequences_to_tensor_iterator.hpp" diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/split_concat_add.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/split_concat_add.cpp index 7f7b2d6eb24ebb..d817ef74f25db4 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/split_concat_add.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/split_concat_add.cpp @@ -3,8 +3,8 @@ // #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" +#include "ov_models/builders.hpp" /*This test runs the following subgraph: diff --git 
a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/split_matmul_concat.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/split_matmul_concat.cpp index 215eb6c8839f5f..2a57e1cc4133c6 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/split_matmul_concat.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/split_matmul_concat.cpp @@ -3,7 +3,7 @@ // #include "test_utils/fusing_test_utils.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" using namespace ngraph; diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/static_zero_dims.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/static_zero_dims.cpp index 6bfbef66a2501b..1072890e51774b 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/static_zero_dims.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/static_zero_dims.cpp @@ -3,7 +3,7 @@ // #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include #include "functional_test_utils/skip_tests_config.hpp" diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/strided_slice_zero_dims.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/strided_slice_zero_dims.cpp index 5b9a2f5b1f7e56..0ae91ab3645dbd 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/strided_slice_zero_dims.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/strided_slice_zero_dims.cpp @@ -3,8 +3,8 @@ // #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" +#include "ov_models/builders.hpp" using namespace InferenceEngine; using namespace ov::test; diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/subgraph_serialize.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/subgraph_serialize.cpp index cb7f60458fa6b2..b3e2912855dbe3 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/subgraph_serialize.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/subgraph_serialize.cpp @@ -5,7 +5,7 @@ #include "openvino/openvino.hpp" #include "openvino/opsets/opset9.hpp" #include "test_utils/cpu_test_utils.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "test_utils/convolution_params.hpp" #include "snippets/op/subgraph.hpp" diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/subgraph_with_blocked_format.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/subgraph_with_blocked_format.cpp index e2331f3e947b98..807e458ee4deb6 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/subgraph_with_blocked_format.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/subgraph_with_blocked_format.cpp @@ -3,7 +3,7 @@ // #include "test_utils/cpu_test_utils.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include using namespace ngraph; diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/tile_with_two_output_edges.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/tile_with_two_output_edges.cpp index 051c59788b2799..8fdfbff08273e1 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/tile_with_two_output_edges.cpp +++ 
b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/tile_with_two_output_edges.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "test_utils/cpu_test_utils.hpp" using namespace ngraph; diff --git a/src/plugins/intel_cpu/tests/functional/test_utils/properties_test.hpp b/src/plugins/intel_cpu/tests/functional/test_utils/properties_test.hpp index 4aa60ac0393158..e8abbfe879d872 100644 --- a/src/plugins/intel_cpu/tests/functional/test_utils/properties_test.hpp +++ b/src/plugins/intel_cpu/tests/functional/test_utils/properties_test.hpp @@ -7,7 +7,7 @@ #include "openvino/runtime/core.hpp" #include "openvino/runtime/compiled_model.hpp" #include "functional_test_utils/skip_tests_config.hpp" -#include "ngraph_functions/subgraph_builders.hpp" +#include "ov_models/subgraph_builders.hpp" class OVClassConfigTestCPU : public ::testing::Test { public: diff --git a/src/plugins/intel_cpu/tests/unit/CMakeLists.txt b/src/plugins/intel_cpu/tests/unit/CMakeLists.txt index 6c986d2d8c79f4..a85f63c0f38df0 100644 --- a/src/plugins/intel_cpu/tests/unit/CMakeLists.txt +++ b/src/plugins/intel_cpu/tests/unit/CMakeLists.txt @@ -58,8 +58,8 @@ addIeTargetTest( openvino::shape_inference inference_engine_s unit_test_utils - ngraphFunctions - snippetsNgraphFunctions + ov_models + ov_snippets_models snippets_test_utils ${MLAS_LIBRARY} ADD_CPPLINT diff --git a/src/plugins/intel_cpu/tests/unit/generate_add.cpp b/src/plugins/intel_cpu/tests/unit/generate_add.cpp index 9e043381808b6f..f2eb0c27cc5115 100644 --- a/src/plugins/intel_cpu/tests/unit/generate_add.cpp +++ b/src/plugins/intel_cpu/tests/unit/generate_add.cpp @@ -27,7 +27,7 @@ #include #include -#include +#include using namespace testing; diff --git a/src/plugins/intel_cpu/tests/unit/snippets_transformations/enforce_precision.cpp b/src/plugins/intel_cpu/tests/unit/snippets_transformations/enforce_precision.cpp index c370f2f007634d..9a8ffe5a7a1840 100644 --- a/src/plugins/intel_cpu/tests/unit/snippets_transformations/enforce_precision.cpp +++ b/src/plugins/intel_cpu/tests/unit/snippets_transformations/enforce_precision.cpp @@ -12,7 +12,7 @@ #include "openvino/core/type/element_type.hpp" #include "transformations/snippets/x64/pass/enforce_precision.hpp" #include "common_test_utils/common_utils.hpp" -#include "two_binary_ops_function.hpp" +#include "two_binary_ops.hpp" namespace ov { namespace test { diff --git a/src/plugins/intel_cpu/tests/unit/snippets_transformations/fake_quantize_tokenization_test.cpp b/src/plugins/intel_cpu/tests/unit/snippets_transformations/fake_quantize_tokenization_test.cpp index 559295a9ec3c93..a146b108a2e164 100644 --- a/src/plugins/intel_cpu/tests/unit/snippets_transformations/fake_quantize_tokenization_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/snippets_transformations/fake_quantize_tokenization_test.cpp @@ -8,10 +8,11 @@ #include "snippets/pass/fq_decomposition.hpp" #include "snippets/pass/tokenization.hpp" #include "snippets/pass/collapse_subgraph.hpp" -#include "fake_quantize_function.hpp" +#include "fake_quantize_helper.hpp" #include "snippets/op/subgraph.hpp" #include "transformations/snippets/x64/pass/snippets_mark_skipped.hpp" #include "function_helper.hpp" + namespace ov { namespace test { namespace snippets { diff --git a/src/plugins/intel_gna/legacy/tests/CMakeLists.txt b/src/plugins/intel_gna/legacy/tests/CMakeLists.txt index 7952934405e790..be69212ba81b82 100644 --- a/src/plugins/intel_gna/legacy/tests/CMakeLists.txt +++ 
b/src/plugins/intel_gna/legacy/tests/CMakeLists.txt @@ -17,7 +17,7 @@ ov_add_test_target( gmock func_test_utils sharedTestClasses - lptNgraphFunctions + ov_lpt_models inference_engine_legacy ADD_CLANG_FORMAT INCLUDES diff --git a/src/plugins/intel_gna/legacy/tests/keep_constant_inputs_tests.cpp b/src/plugins/intel_gna/legacy/tests/keep_constant_inputs_tests.cpp index d341b832cac0f9..1dcdfd17d32900 100644 --- a/src/plugins/intel_gna/legacy/tests/keep_constant_inputs_tests.cpp +++ b/src/plugins/intel_gna/legacy/tests/keep_constant_inputs_tests.cpp @@ -21,7 +21,7 @@ #include #include -#include "ngraph_functions/subgraph_builders.hpp" +#include "ov_models/subgraph_builders.hpp" #include "shared_test_classes/base/low_precision_transformations/layer_transformation.hpp" using namespace testing; diff --git a/src/plugins/intel_gna/legacy/tests/mul_add_conversion_test.cpp b/src/plugins/intel_gna/legacy/tests/mul_add_conversion_test.cpp index ddd3affdb80313..ba38b703807453 100644 --- a/src/plugins/intel_gna/legacy/tests/mul_add_conversion_test.cpp +++ b/src/plugins/intel_gna/legacy/tests/mul_add_conversion_test.cpp @@ -26,7 +26,7 @@ #include "common_test_utils/ov_test_utils.hpp" #include "common_test_utils/test_common.hpp" -#include "lpt_ngraph_functions/common/builders.hpp" +#include "ov_lpt_models/common/builders.hpp" using namespace testing; diff --git a/src/plugins/intel_gna/tests/deprecated/unit/engines/gna/i16_quantisation_test.cpp b/src/plugins/intel_gna/tests/deprecated/unit/engines/gna/i16_quantisation_test.cpp index 3187b27fba4ef1..64645325513d93 100644 --- a/src/plugins/intel_gna/tests/deprecated/unit/engines/gna/i16_quantisation_test.cpp +++ b/src/plugins/intel_gna/tests/deprecated/unit/engines/gna/i16_quantisation_test.cpp @@ -13,7 +13,7 @@ #include "frontend/layer_quantizer.hpp" #include "frontend/model_quantizer.hpp" #include "gna_matcher.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" using namespace InferenceEngine; using namespace ov::intel_gna::limitations; diff --git a/src/plugins/intel_gna/tests/functional/Import_export_tests/backward_compatibility/backward_compatibility.cpp b/src/plugins/intel_gna/tests/functional/Import_export_tests/backward_compatibility/backward_compatibility.cpp index 3be9673a9c05f0..87cc38e55bf763 100644 --- a/src/plugins/intel_gna/tests/functional/Import_export_tests/backward_compatibility/backward_compatibility.cpp +++ b/src/plugins/intel_gna/tests/functional/Import_export_tests/backward_compatibility/backward_compatibility.cpp @@ -8,11 +8,11 @@ #include #include "helpers/test_model_repo.hpp" -#include "ngraph_functions/builders.hpp" #include "openvino/core/model.hpp" #include "openvino/core/shape.hpp" #include "openvino/core/type.hpp" #include "openvino/opsets/opset10.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" using namespace ov::opset10; diff --git a/src/plugins/intel_gna/tests/functional/Import_export_tests/import_export_act_conv_act.cpp b/src/plugins/intel_gna/tests/functional/Import_export_tests/import_export_act_conv_act.cpp index 909f8e823db179..ca671b4b759c59 100644 --- a/src/plugins/intel_gna/tests/functional/Import_export_tests/import_export_act_conv_act.cpp +++ b/src/plugins/intel_gna/tests/functional/Import_export_tests/import_export_act_conv_act.cpp @@ -12,8 +12,8 @@ #include #include "functional_test_utils/blob_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" 
+#include "ov_models/utils/ov_helpers.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" typedef std::tuple, // Input shape diff --git a/src/plugins/intel_gna/tests/functional/Import_export_tests/import_export_batch_size.cpp b/src/plugins/intel_gna/tests/functional/Import_export_tests/import_export_batch_size.cpp index 23246570e983d7..6af98b35808808 100644 --- a/src/plugins/intel_gna/tests/functional/Import_export_tests/import_export_batch_size.cpp +++ b/src/plugins/intel_gna/tests/functional/Import_export_tests/import_export_batch_size.cpp @@ -12,7 +12,7 @@ #include #include "base/import_export_base.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace LayerTestsDefinitions { diff --git a/src/plugins/intel_gna/tests/functional/Import_export_tests/import_export_memory_layer.cpp b/src/plugins/intel_gna/tests/functional/Import_export_tests/import_export_memory_layer.cpp index 648a72934cd959..9bfeb3a9f61953 100644 --- a/src/plugins/intel_gna/tests/functional/Import_export_tests/import_export_memory_layer.cpp +++ b/src/plugins/intel_gna/tests/functional/Import_export_tests/import_export_memory_layer.cpp @@ -12,8 +12,8 @@ #include #include "functional_test_utils/blob_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" typedef std::tuple #include "base/import_export_base.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace LayerTestsDefinitions { diff --git a/src/plugins/intel_gna/tests/functional/Import_export_tests/import_reshape_permute_conv.cpp b/src/plugins/intel_gna/tests/functional/Import_export_tests/import_reshape_permute_conv.cpp index 6ef967004763be..b57cbb822950e9 100644 --- a/src/plugins/intel_gna/tests/functional/Import_export_tests/import_reshape_permute_conv.cpp +++ b/src/plugins/intel_gna/tests/functional/Import_export_tests/import_reshape_permute_conv.cpp @@ -3,7 +3,7 @@ // #include "base/import_export_base.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace LayerTestsDefinitions { diff --git a/src/plugins/intel_gna/tests/functional/limitations/layers_limit.cpp b/src/plugins/intel_gna/tests/functional/limitations/layers_limit.cpp index f5da3d60b73e6d..9c54ae4f64c85c 100644 --- a/src/plugins/intel_gna/tests/functional/limitations/layers_limit.cpp +++ b/src/plugins/intel_gna/tests/functional/limitations/layers_limit.cpp @@ -3,9 +3,9 @@ // #include "common_test_utils/common_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" #include "openvino/opsets/opset8.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" using namespace InferenceEngine; diff --git a/src/plugins/intel_gna/tests/functional/pass_tests/4d_eltwise.cpp b/src/plugins/intel_gna/tests/functional/pass_tests/4d_eltwise.cpp index 4358fdd3777dda..5fb77c679a3451 100644 --- a/src/plugins/intel_gna/tests/functional/pass_tests/4d_eltwise.cpp +++ b/src/plugins/intel_gna/tests/functional/pass_tests/4d_eltwise.cpp @@ -11,9 +11,9 @@ #include "common_test_utils/common_utils.hpp" #include "functional_test_utils/blob_utils.hpp" #include "functional_test_utils/plugin_cache.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/pass/convert_prc.hpp" -#include 
"ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/pass/convert_prc.hpp" +#include "ov_models/utils/ov_helpers.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" typedef std::tuple activationNames = { diff --git a/src/plugins/intel_gna/tests/functional/pass_tests/broadcast_const_with_fq.cpp b/src/plugins/intel_gna/tests/functional/pass_tests/broadcast_const_with_fq.cpp index cd5e2f263f2ef8..b28926831e5657 100644 --- a/src/plugins/intel_gna/tests/functional/pass_tests/broadcast_const_with_fq.cpp +++ b/src/plugins/intel_gna/tests/functional/pass_tests/broadcast_const_with_fq.cpp @@ -10,9 +10,9 @@ #include "common_test_utils/common_utils.hpp" #include "functional_test_utils/blob_utils.hpp" #include "functional_test_utils/plugin_cache.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/pass/convert_prc.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/pass/convert_prc.hpp" +#include "ov_models/utils/ov_helpers.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" using BroadcastConstWithFqParamsTuple = typename std::tuple #include "common_test_utils/common_utils.hpp" -#include "ngraph_functions/builders.hpp" #include "openvino/opsets/opset12.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" diff --git a/src/plugins/intel_gna/tests/functional/pass_tests/conv_with_padding.cpp b/src/plugins/intel_gna/tests/functional/pass_tests/conv_with_padding.cpp index 3b57d822fdcc22..999dd3574126f7 100644 --- a/src/plugins/intel_gna/tests/functional/pass_tests/conv_with_padding.cpp +++ b/src/plugins/intel_gna/tests/functional/pass_tests/conv_with_padding.cpp @@ -4,7 +4,7 @@ #include "../shared_tests_instances/skip_tests_check.hpp" #include "common_test_utils/type_prop.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" typedef std::tuple #include "common_test_utils/test_common.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" #include "transformations/init_node_info.hpp" diff --git a/src/plugins/intel_gna/tests/functional/pass_tests/convert_matmul_to_fullyconnected.cpp b/src/plugins/intel_gna/tests/functional/pass_tests/convert_matmul_to_fullyconnected.cpp index 2478d2954b85c1..65db77afeaa207 100644 --- a/src/plugins/intel_gna/tests/functional/pass_tests/convert_matmul_to_fullyconnected.cpp +++ b/src/plugins/intel_gna/tests/functional/pass_tests/convert_matmul_to_fullyconnected.cpp @@ -9,8 +9,8 @@ #include #include "functional_test_utils/blob_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" typedef std::tuple>, // Input shape diff --git a/src/plugins/intel_gna/tests/functional/pass_tests/convert_matmul_to_pointwise_conv.cpp b/src/plugins/intel_gna/tests/functional/pass_tests/convert_matmul_to_pointwise_conv.cpp index 9bdb3528bfece2..5782c09ecb678c 100644 --- a/src/plugins/intel_gna/tests/functional/pass_tests/convert_matmul_to_pointwise_conv.cpp +++ b/src/plugins/intel_gna/tests/functional/pass_tests/convert_matmul_to_pointwise_conv.cpp @@ -13,9 +13,9 @@ #include 
"common_test_utils/common_utils.hpp" #include "functional_test_utils/blob_utils.hpp" #include "functional_test_utils/plugin_cache.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/pass/convert_prc.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/pass/convert_prc.hpp" +#include "ov_models/utils/ov_helpers.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" typedef std::tuple #include "common_test_utils/test_common.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" #include "transformations/init_node_info.hpp" diff --git a/src/plugins/intel_gna/tests/functional/pass_tests/diagonal_insertion_test.cpp b/src/plugins/intel_gna/tests/functional/pass_tests/diagonal_insertion_test.cpp index f296d50d8a491d..2d9382c125fe86 100644 --- a/src/plugins/intel_gna/tests/functional/pass_tests/diagonal_insertion_test.cpp +++ b/src/plugins/intel_gna/tests/functional/pass_tests/diagonal_insertion_test.cpp @@ -13,9 +13,9 @@ #include "common_test_utils/common_utils.hpp" #include "functional_test_utils/blob_utils.hpp" #include "functional_test_utils/plugin_cache.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/pass/convert_prc.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/pass/convert_prc.hpp" +#include "ov_models/utils/ov_helpers.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" namespace DiagonalInsertionTestNs { diff --git a/src/plugins/intel_gna/tests/functional/pass_tests/eltwise_split_over_channels_pass.cpp b/src/plugins/intel_gna/tests/functional/pass_tests/eltwise_split_over_channels_pass.cpp index 9092241c338d84..e11e1a82ccf879 100644 --- a/src/plugins/intel_gna/tests/functional/pass_tests/eltwise_split_over_channels_pass.cpp +++ b/src/plugins/intel_gna/tests/functional/pass_tests/eltwise_split_over_channels_pass.cpp @@ -9,8 +9,8 @@ #include #include "functional_test_utils/blob_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" typedef std::tuple #include "common_test_utils/test_constants.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" namespace LayerTestsDefinitions { diff --git a/src/plugins/intel_gna/tests/functional/pass_tests/fq_maxpool_reordering.cpp b/src/plugins/intel_gna/tests/functional/pass_tests/fq_maxpool_reordering.cpp index 79af0c841d4cf6..0d587a71f84f8b 100644 --- a/src/plugins/intel_gna/tests/functional/pass_tests/fq_maxpool_reordering.cpp +++ b/src/plugins/intel_gna/tests/functional/pass_tests/fq_maxpool_reordering.cpp @@ -11,9 +11,9 @@ #include "common_test_utils/common_utils.hpp" #include "functional_test_utils/blob_utils.hpp" #include "functional_test_utils/plugin_cache.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/pass/convert_prc.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/pass/convert_prc.hpp" +#include "ov_models/utils/ov_helpers.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" typedef std::tuple #include "common_test_utils/common_utils.hpp" -#include "ngraph_functions/builders.hpp" #include 
"openvino/opsets/opset10.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" diff --git a/src/plugins/intel_gna/tests/functional/preprocess_tests/gather_transpose_merge.cpp b/src/plugins/intel_gna/tests/functional/preprocess_tests/gather_transpose_merge.cpp index 88d6c07272b262..746c5cafa64acd 100644 --- a/src/plugins/intel_gna/tests/functional/preprocess_tests/gather_transpose_merge.cpp +++ b/src/plugins/intel_gna/tests/functional/preprocess_tests/gather_transpose_merge.cpp @@ -7,8 +7,8 @@ #include #include "common_test_utils/ov_test_utils.hpp" -#include "ngraph_functions/builders.hpp" #include "openvino/opsets/opset10.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" diff --git a/src/plugins/intel_gna/tests/functional/preprocess_tests/precision_convert.cpp b/src/plugins/intel_gna/tests/functional/preprocess_tests/precision_convert.cpp index 7221fd751ec791..f1ddaaa3446495 100644 --- a/src/plugins/intel_gna/tests/functional/preprocess_tests/precision_convert.cpp +++ b/src/plugins/intel_gna/tests/functional/preprocess_tests/precision_convert.cpp @@ -3,8 +3,8 @@ // #include "common_test_utils/common_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" using namespace InferenceEngine; diff --git a/src/plugins/intel_gna/tests/functional/scale_factors_tests/add_overload_correction.cpp b/src/plugins/intel_gna/tests/functional/scale_factors_tests/add_overload_correction.cpp index 13cced6c37d1ec..6a11f56b7b308f 100644 --- a/src/plugins/intel_gna/tests/functional/scale_factors_tests/add_overload_correction.cpp +++ b/src/plugins/intel_gna/tests/functional/scale_factors_tests/add_overload_correction.cpp @@ -11,9 +11,9 @@ #include "common_test_utils/common_utils.hpp" #include "functional_test_utils/blob_utils.hpp" #include "functional_test_utils/plugin_cache.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/pass/convert_prc.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/pass/convert_prc.hpp" +#include "ov_models/utils/ov_helpers.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" typedef std::tuple activationNames = { diff --git a/src/plugins/intel_gna/tests/functional/scale_factors_tests/matmul_overload_correction.cpp b/src/plugins/intel_gna/tests/functional/scale_factors_tests/matmul_overload_correction.cpp index bec919d2a2a801..fa9e728f084d3d 100644 --- a/src/plugins/intel_gna/tests/functional/scale_factors_tests/matmul_overload_correction.cpp +++ b/src/plugins/intel_gna/tests/functional/scale_factors_tests/matmul_overload_correction.cpp @@ -13,9 +13,9 @@ #include "common_test_utils/common_utils.hpp" #include "functional_test_utils/blob_utils.hpp" #include "functional_test_utils/plugin_cache.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/pass/convert_prc.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/pass/convert_prc.hpp" +#include "ov_models/utils/ov_helpers.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" typedef std::tuple #include -#include "ngraph_functions/builders.hpp" -#include 
"ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/plugins/intel_gna/tests/functional/shared_tests_instances/behavior/infer_request/memory_states.cpp b/src/plugins/intel_gna/tests/functional/shared_tests_instances/behavior/infer_request/memory_states.cpp index 8ee56ae81d3fba..43d7c2eca869ad 100644 --- a/src/plugins/intel_gna/tests/functional/shared_tests_instances/behavior/infer_request/memory_states.cpp +++ b/src/plugins/intel_gna/tests/functional/shared_tests_instances/behavior/infer_request/memory_states.cpp @@ -7,7 +7,7 @@ #include #include "functional_test_utils/plugin_cache.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" using namespace BehaviorTestsDefinitions; diff --git a/src/plugins/intel_gna/tests/functional/shared_tests_instances/execution_graph_tests/add_output.cpp b/src/plugins/intel_gna/tests/functional/shared_tests_instances/execution_graph_tests/add_output.cpp index b50e289b42ac67..c56b77a0db892f 100644 --- a/src/plugins/intel_gna/tests/functional/shared_tests_instances/execution_graph_tests/add_output.cpp +++ b/src/plugins/intel_gna/tests/functional/shared_tests_instances/execution_graph_tests/add_output.cpp @@ -7,7 +7,7 @@ #include #include "functional_test_utils/plugin_cache.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" InferenceEngine::CNNNetwork getTargetNetwork() { ngraph::Shape shape = {1, 200}; diff --git a/src/plugins/intel_gna/tests/functional/shared_tests_instances/single_layer_tests/conv_low_precision.cpp b/src/plugins/intel_gna/tests/functional/shared_tests_instances/single_layer_tests/conv_low_precision.cpp index 14619aec73871c..752d1a6bd2d237 100644 --- a/src/plugins/intel_gna/tests/functional/shared_tests_instances/single_layer_tests/conv_low_precision.cpp +++ b/src/plugins/intel_gna/tests/functional/shared_tests_instances/single_layer_tests/conv_low_precision.cpp @@ -14,10 +14,10 @@ #include "common_test_utils/common_utils.hpp" #include "functional_test_utils/blob_utils.hpp" #include "functional_test_utils/plugin_cache.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/pass/convert_prc.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" #include "openvino/opsets/opset12.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/pass/convert_prc.hpp" +#include "ov_models/utils/ov_helpers.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" namespace ConvLowPrecicionTestNs { diff --git a/src/plugins/intel_gna/tests/functional/shared_tests_instances/single_layer_tests/convolution.cpp b/src/plugins/intel_gna/tests/functional/shared_tests_instances/single_layer_tests/convolution.cpp index 3a333ed8d53924..e6b35e835657d5 100644 --- a/src/plugins/intel_gna/tests/functional/shared_tests_instances/single_layer_tests/convolution.cpp +++ b/src/plugins/intel_gna/tests/functional/shared_tests_instances/single_layer_tests/convolution.cpp @@ -9,9 +9,9 @@ #include "../skip_tests_check.hpp" #include "common_test_utils/test_constants.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" #include "openvino/opsets/opset11.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" using namespace ov; diff --git 
a/src/plugins/intel_gna/tests/functional/shared_tests_instances/single_layer_tests/memory.cpp b/src/plugins/intel_gna/tests/functional/shared_tests_instances/single_layer_tests/memory.cpp index 3417a541783f70..9caceecafed292 100644 --- a/src/plugins/intel_gna/tests/functional/shared_tests_instances/single_layer_tests/memory.cpp +++ b/src/plugins/intel_gna/tests/functional/shared_tests_instances/single_layer_tests/memory.cpp @@ -6,9 +6,9 @@ #include -#include "ngraph_functions/builders.hpp" #include "openvino/op/util/variable.hpp" #include "openvino/opsets/opset11.hpp" +#include "ov_models/builders.hpp" using namespace LayerTestsDefinitions; using namespace ngraph; diff --git a/src/plugins/intel_gna/tests/functional/subgraph_tests/add_transpose_detection.cpp b/src/plugins/intel_gna/tests/functional/subgraph_tests/add_transpose_detection.cpp index ffecc27d970886..cffa490d0af120 100644 --- a/src/plugins/intel_gna/tests/functional/subgraph_tests/add_transpose_detection.cpp +++ b/src/plugins/intel_gna/tests/functional/subgraph_tests/add_transpose_detection.cpp @@ -2,8 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/builders.hpp" #include "openvino/opsets/opset9.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" using namespace ov::opset9; diff --git a/src/plugins/intel_gna/tests/unit/CMakeLists.txt b/src/plugins/intel_gna/tests/unit/CMakeLists.txt index 912d28d728b41d..c4d49f7b4b0c55 100644 --- a/src/plugins/intel_gna/tests/unit/CMakeLists.txt +++ b/src/plugins/intel_gna/tests/unit/CMakeLists.txt @@ -23,7 +23,7 @@ addIeTargetTest( ${exclude_path} LINK_LIBRARIES PRIVATE - ngraphFunctions + ov_models gmock common_test_utils_s openvino_intel_gna_plugin_test_static diff --git a/src/plugins/intel_gna/tests/unit/gna_executable_network_metrics_test.cpp b/src/plugins/intel_gna/tests/unit/gna_executable_network_metrics_test.cpp index 12b0f5429880b1..838fa4b31b9ee9 100644 --- a/src/plugins/intel_gna/tests/unit/gna_executable_network_metrics_test.cpp +++ b/src/plugins/intel_gna/tests/unit/gna_executable_network_metrics_test.cpp @@ -9,7 +9,7 @@ #include "gna_executable_network.hpp" #include "gna_plugin.hpp" #include "memory/gna_memory.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" using namespace ov::intel_gna; using namespace InferenceEngine; diff --git a/src/plugins/intel_gna/tests/unit/gna_export_import_test.cpp b/src/plugins/intel_gna/tests/unit/gna_export_import_test.cpp index bed9fa4eb0e29f..bc3fffde1edc2a 100644 --- a/src/plugins/intel_gna/tests/unit/gna_export_import_test.cpp +++ b/src/plugins/intel_gna/tests/unit/gna_export_import_test.cpp @@ -13,7 +13,7 @@ #include "common_test_utils/ov_test_utils.hpp" #include "gna_mock_api.hpp" #include "gna_plugin.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" using namespace ::testing; using ov::intel_gna::GNAPlugin; diff --git a/src/plugins/intel_gna/tests/unit/gna_extra_pwl_segments_tests.cpp b/src/plugins/intel_gna/tests/unit/gna_extra_pwl_segments_tests.cpp index b05ba43b623c6a..52a68757679ef9 100644 --- a/src/plugins/intel_gna/tests/unit/gna_extra_pwl_segments_tests.cpp +++ b/src/plugins/intel_gna/tests/unit/gna_extra_pwl_segments_tests.cpp @@ -9,8 +9,8 @@ #include "gna_infer_request.hpp" #include "gna_mock_api.hpp" #include "gna_plugin.hpp" -#include "ngraph_functions/builders.hpp" #include "openvino/opsets/opset11.hpp" +#include "ov_models/builders.hpp" using ov::intel_gna::GNAPlugin; using namespace ov::op; 
diff --git a/src/plugins/intel_gna/tests/unit/gna_hw_precision_test.cpp b/src/plugins/intel_gna/tests/unit/gna_hw_precision_test.cpp index 49956874998817..3cca0989543e25 100644 --- a/src/plugins/intel_gna/tests/unit/gna_hw_precision_test.cpp +++ b/src/plugins/intel_gna/tests/unit/gna_hw_precision_test.cpp @@ -8,7 +8,7 @@ #include "gna_data_types.hpp" #include "gna_plugin.hpp" #include "memory/gna_memory.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" using namespace InferenceEngine; namespace testing { diff --git a/src/plugins/intel_gna/tests/unit/gna_infer_request_test.cpp b/src/plugins/intel_gna/tests/unit/gna_infer_request_test.cpp index abd7efeeef400b..c20c37b4f1f0fb 100644 --- a/src/plugins/intel_gna/tests/unit/gna_infer_request_test.cpp +++ b/src/plugins/intel_gna/tests/unit/gna_infer_request_test.cpp @@ -14,7 +14,7 @@ #include "common_test_utils/ov_test_utils.hpp" #include "gna_mock_api.hpp" #include "gna_plugin.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" using namespace ::testing; using namespace InferenceEngine; diff --git a/src/plugins/intel_gna/tests/unit/gna_memory_alignment.cpp b/src/plugins/intel_gna/tests/unit/gna_memory_alignment.cpp index 0275e0fd42539c..1c1d8046a0d081 100644 --- a/src/plugins/intel_gna/tests/unit/gna_memory_alignment.cpp +++ b/src/plugins/intel_gna/tests/unit/gna_memory_alignment.cpp @@ -10,7 +10,7 @@ #include "gna_data_types.hpp" #include "gna_plugin.hpp" #include "memory/gna_memory.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" using namespace InferenceEngine; using namespace ov::intel_gna::target; diff --git a/src/plugins/intel_gna/tests/unit/gna_memory_compact_test.cpp b/src/plugins/intel_gna/tests/unit/gna_memory_compact_test.cpp index b86274cbd6eecd..10ec76c3f6ac9e 100644 --- a/src/plugins/intel_gna/tests/unit/gna_memory_compact_test.cpp +++ b/src/plugins/intel_gna/tests/unit/gna_memory_compact_test.cpp @@ -14,7 +14,7 @@ #include "gna_fused_iterator.hpp" #include "gna_plugin.hpp" #include "memory/gna_memory.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" using namespace InferenceEngine; using namespace memory; diff --git a/src/plugins/intel_gna/tests/unit/gna_plugin_load_network_test.cpp b/src/plugins/intel_gna/tests/unit/gna_plugin_load_network_test.cpp index 03a7327de9faf5..24d6a674104cfe 100644 --- a/src/plugins/intel_gna/tests/unit/gna_plugin_load_network_test.cpp +++ b/src/plugins/intel_gna/tests/unit/gna_plugin_load_network_test.cpp @@ -7,7 +7,7 @@ #include "common/gna_target.hpp" #include "gna_mock_api_initializer.hpp" #include "gna_plugin.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace { typedef struct { diff --git a/src/plugins/intel_gna/tests/unit/transformations/gather_transpose_merge.cpp b/src/plugins/intel_gna/tests/unit/transformations/gather_transpose_merge.cpp index 6b07a8f698b6b2..c73e99eb1a56b0 100644 --- a/src/plugins/intel_gna/tests/unit/transformations/gather_transpose_merge.cpp +++ b/src/plugins/intel_gna/tests/unit/transformations/gather_transpose_merge.cpp @@ -7,9 +7,9 @@ #include "common/graph_utils.hpp" #include "common_test_utils/common_utils.hpp" #include "common_test_utils/ov_test_utils.hpp" -#include "ngraph_functions/builders.hpp" #include "openvino/opsets/opset10.hpp" #include "openvino/pass/manager.hpp" +#include "ov_models/builders.hpp" #include "transformations/gather_sinking_transpose.hpp" #include 
"transformations/init_node_info.hpp" diff --git a/src/plugins/intel_gna/tests/unit/transformations/gna_convert_matmul_to_pointwise_convolution.cpp b/src/plugins/intel_gna/tests/unit/transformations/gna_convert_matmul_to_pointwise_convolution.cpp index 1985ac716909e4..4c8dc70f452cb5 100644 --- a/src/plugins/intel_gna/tests/unit/transformations/gna_convert_matmul_to_pointwise_convolution.cpp +++ b/src/plugins/intel_gna/tests/unit/transformations/gna_convert_matmul_to_pointwise_convolution.cpp @@ -12,7 +12,7 @@ #include #include "common_test_utils/ov_test_utils.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "transformations/convert_matmul_to_pointwise_convolution.hpp" namespace testing { diff --git a/src/plugins/intel_gna/tests/unit/transformations/gna_insert_copy_layer.cpp b/src/plugins/intel_gna/tests/unit/transformations/gna_insert_copy_layer.cpp index c5b4d05c674195..04c1410af61068 100644 --- a/src/plugins/intel_gna/tests/unit/transformations/gna_insert_copy_layer.cpp +++ b/src/plugins/intel_gna/tests/unit/transformations/gna_insert_copy_layer.cpp @@ -14,8 +14,8 @@ #include "backend/gna_limitations.hpp" #include "common/gna_target.hpp" #include "common_test_utils/ov_test_utils.hpp" -#include "ngraph_functions/builders.hpp" #include "ops/copy.hpp" +#include "ov_models/builders.hpp" #include "transformations/insert_copy_layer.hpp" using namespace ov::intel_gna::limitations; diff --git a/src/plugins/intel_gna/tests/unit/transformations/gna_insert_identity_layer.cpp b/src/plugins/intel_gna/tests/unit/transformations/gna_insert_identity_layer.cpp index 111d609ab314aa..045c6f5ed8eec4 100644 --- a/src/plugins/intel_gna/tests/unit/transformations/gna_insert_identity_layer.cpp +++ b/src/plugins/intel_gna/tests/unit/transformations/gna_insert_identity_layer.cpp @@ -8,7 +8,7 @@ #include #include #include -#include +#include #include #include diff --git a/src/plugins/intel_gna/tests/unit/transformations/gna_remove_convert.cpp b/src/plugins/intel_gna/tests/unit/transformations/gna_remove_convert.cpp index da1434da634a0a..77ecaaf4b9a0a8 100644 --- a/src/plugins/intel_gna/tests/unit/transformations/gna_remove_convert.cpp +++ b/src/plugins/intel_gna/tests/unit/transformations/gna_remove_convert.cpp @@ -10,7 +10,7 @@ #include #include "common_test_utils/ov_test_utils.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "transformations/remove_converts.hpp" namespace testing { diff --git a/src/plugins/intel_gpu/tests/functional/behavior/infer_request.cpp b/src/plugins/intel_gpu/tests/functional/behavior/infer_request.cpp index b59e80ffed5bc1..1d64aa74232d7c 100644 --- a/src/plugins/intel_gpu/tests/functional/behavior/infer_request.cpp +++ b/src/plugins/intel_gpu/tests/functional/behavior/infer_request.cpp @@ -10,7 +10,7 @@ #include "openvino/runtime/core.hpp" #include -#include "ngraph_functions/subgraph_builders.hpp" +#include "ov_models/subgraph_builders.hpp" #include "functional_test_utils/blob_utils.hpp" #include "openvino/core/preprocess/pre_post_process.hpp" #include "transformations/utils/utils.hpp" diff --git a/src/plugins/intel_gpu/tests/functional/behavior/memory_dyn_batch.cpp b/src/plugins/intel_gpu/tests/functional/behavior/memory_dyn_batch.cpp index a91b114809a879..057c404fde0010 100644 --- a/src/plugins/intel_gpu/tests/functional/behavior/memory_dyn_batch.cpp +++ b/src/plugins/intel_gpu/tests/functional/behavior/memory_dyn_batch.cpp @@ -3,7 +3,7 @@ // #include "ngraph/opsets/opset8.hpp" -#include 
"ngraph_functions/subgraph_builders.hpp" +#include "ov_models/subgraph_builders.hpp" #include "openvino/runtime/core.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include diff --git a/src/plugins/intel_gpu/tests/functional/concurrency/gpu_concurrency_tests.cpp b/src/plugins/intel_gpu/tests/functional/concurrency/gpu_concurrency_tests.cpp index a50e50c354f1bb..29b80e9759df2a 100644 --- a/src/plugins/intel_gpu/tests/functional/concurrency/gpu_concurrency_tests.cpp +++ b/src/plugins/intel_gpu/tests/functional/concurrency/gpu_concurrency_tests.cpp @@ -12,7 +12,7 @@ #include #include #include -#include "ngraph_functions/subgraph_builders.hpp" +#include "ov_models/subgraph_builders.hpp" #include "functional_test_utils/blob_utils.hpp" #include "openvino/core/preprocess/pre_post_process.hpp" #include "transformations/utils/utils.hpp" diff --git a/src/plugins/intel_gpu/tests/functional/dynamic_tests/gpu_dyn_batch_shape_tests.cpp b/src/plugins/intel_gpu/tests/functional/dynamic_tests/gpu_dyn_batch_shape_tests.cpp index c5b63803db16a8..964e6dcad3dcc5 100644 --- a/src/plugins/intel_gpu/tests/functional/dynamic_tests/gpu_dyn_batch_shape_tests.cpp +++ b/src/plugins/intel_gpu/tests/functional/dynamic_tests/gpu_dyn_batch_shape_tests.cpp @@ -7,7 +7,7 @@ #include "common_test_utils/common_utils.hpp" #include "common_test_utils/file_utils.hpp" #include "functional_test_utils/skip_tests_config.hpp" -#include "ngraph_functions/subgraph_builders.hpp" +#include "ov_models/subgraph_builders.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" using namespace ::testing; diff --git a/src/plugins/intel_gpu/tests/functional/remote_blob_tests/cldnn_remote_blob_tests.cpp b/src/plugins/intel_gpu/tests/functional/remote_blob_tests/cldnn_remote_blob_tests.cpp index 8a06f2c138af66..ee3f484927f48b 100644 --- a/src/plugins/intel_gpu/tests/functional/remote_blob_tests/cldnn_remote_blob_tests.cpp +++ b/src/plugins/intel_gpu/tests/functional/remote_blob_tests/cldnn_remote_blob_tests.cpp @@ -16,7 +16,7 @@ #include #include "base/ov_behavior_test_utils.hpp" -#include "ngraph_functions/subgraph_builders.hpp" +#include "ov_models/subgraph_builders.hpp" #include "functional_test_utils/blob_utils.hpp" using namespace ::testing; diff --git a/src/plugins/intel_gpu/tests/functional/remote_blob_tests/dx11_remote_ctx_test.cpp b/src/plugins/intel_gpu/tests/functional/remote_blob_tests/dx11_remote_ctx_test.cpp index 4492b6b5a95bba..00761773bba2d6 100644 --- a/src/plugins/intel_gpu/tests/functional/remote_blob_tests/dx11_remote_ctx_test.cpp +++ b/src/plugins/intel_gpu/tests/functional/remote_blob_tests/dx11_remote_ctx_test.cpp @@ -14,7 +14,7 @@ #include #include #include "common_test_utils/file_utils.hpp" -#include "ngraph_functions/subgraph_builders.hpp" +#include "ov_models/subgraph_builders.hpp" #include #ifdef _WIN32 diff --git a/src/plugins/intel_gpu/tests/functional/remote_blob_tests/gpu_remote_tensor_tests.cpp b/src/plugins/intel_gpu/tests/functional/remote_blob_tests/gpu_remote_tensor_tests.cpp index 8df502e7bb3487..f3bffc6e6cebde 100644 --- a/src/plugins/intel_gpu/tests/functional/remote_blob_tests/gpu_remote_tensor_tests.cpp +++ b/src/plugins/intel_gpu/tests/functional/remote_blob_tests/gpu_remote_tensor_tests.cpp @@ -17,7 +17,7 @@ #include #include #include "base/ov_behavior_test_utils.hpp" -#include "ngraph_functions/subgraph_builders.hpp" +#include "ov_models/subgraph_builders.hpp" #include "functional_test_utils/blob_utils.hpp" #include "openvino/core/preprocess/pre_post_process.hpp" #include 
"transformations/utils/utils.hpp" diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/memory_states.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/memory_states.cpp index f584f13baaa40d..60e9d3542d41c3 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/memory_states.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/memory_states.cpp @@ -5,7 +5,7 @@ #include #include "behavior/infer_request/memory_states.hpp" #include "functional_test_utils/plugin_cache.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" using namespace BehaviorTestsDefinitions; diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_and_avg_pool_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_and_avg_pool_transformation.cpp index 4f0eb7992f7b38..7b29143e4bc6d8 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_and_avg_pool_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_and_avg_pool_transformation.cpp @@ -5,7 +5,7 @@ #include #include "low_precision_transformations/fake_quantize_and_avg_pool_transformation.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" #include "common_test_utils/test_constants.hpp" using namespace LayerTestsDefinitions; diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_and_max_pool_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_and_max_pool_transformation.cpp index d283299b6933e2..2a83be0a515026 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_and_max_pool_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_and_max_pool_transformation.cpp @@ -5,7 +5,7 @@ #include #include "low_precision_transformations/fake_quantize_and_max_pool_transformation.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" #include "common_test_utils/test_constants.hpp" using namespace LayerTestsDefinitions; diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_precision_selection_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_precision_selection_transformation.cpp index 4048550b9f917d..853719aabd3854 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_precision_selection_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_precision_selection_transformation.cpp @@ -6,7 +6,7 @@ #include "low_precision_transformations/fake_quantize_precision_selection_transformation.hpp" #include "common_test_utils/test_constants.hpp" -#include "lpt_ngraph_functions/fake_quantize_function.hpp" +#include "ov_lpt_models/fake_quantize.hpp" using namespace LayerTestsDefinitions; using namespace ov::pass::low_precision; diff --git 
a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_transformation.cpp index 939617d59eb68f..560cbf0629b30c 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_transformation.cpp @@ -6,7 +6,7 @@ #include "low_precision_transformations/fake_quantize_transformation.hpp" #include "common_test_utils/test_constants.hpp" -#include "lpt_ngraph_functions/fake_quantize_function.hpp" +#include "ov_lpt_models/fake_quantize.hpp" using namespace LayerTestsDefinitions; using namespace ov::pass::low_precision; diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_with_dq_not_optimal_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_with_dq_not_optimal_transformation.cpp index 0c7ea8e229727d..a771177d4bd71b 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_with_dq_not_optimal_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fq_with_dq_not_optimal_transformation.cpp @@ -6,7 +6,7 @@ #include "low_precision_transformations/fake_quantize_with_dq_not_optimal_transformation.hpp" #include "common_test_utils/test_constants.hpp" -#include "lpt_ngraph_functions/fake_quantize_function.hpp" +#include "ov_lpt_models/fake_quantize.hpp" using namespace LayerTestsDefinitions; using namespace ov::pass::low_precision; diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_fq_and_scale_shift_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_fq_and_scale_shift_transformation.cpp index 1fd986206e6ffe..9b663476240a41 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_fq_and_scale_shift_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_fq_and_scale_shift_transformation.cpp @@ -6,7 +6,7 @@ #include "low_precision_transformations/fuse_fake_quantize_and_scale_shift_transformation.hpp" #include "common_test_utils/test_constants.hpp" -#include "lpt_ngraph_functions/fuse_fake_quantize_and_scale_shift_function.hpp" +#include "ov_lpt_models/fuse_fake_quantize_and_scale_shift.hpp" using namespace LayerTestsDefinitions; using namespace ov::pass::low_precision; diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/topk.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/topk.cpp index 93c37834da6762..100c33bb788a21 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/topk.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/topk.cpp @@ -6,7 +6,7 @@ #include #include -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" namespace GPULayerTestsDefinitions { diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/convolution.cpp 
b/src/plugins/intel_gpu/tests/functional/single_layer_tests/convolution.cpp index 85ad9b8f047b84..de10fba3cfc13f 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/convolution.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/convolution.cpp @@ -5,8 +5,8 @@ #include #include #include -#include "ngraph_functions/utils/ngraph_helpers.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "shared_test_classes/single_layer/convolution.hpp" #include "common_test_utils/test_constants.hpp" diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/batch_to_space.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/batch_to_space.cpp index e327f14389bf78..9c1b6c5796aef9 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/batch_to_space.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/batch_to_space.cpp @@ -4,7 +4,7 @@ #include "shared_test_classes/single_layer/batch_to_space.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "common_test_utils/test_constants.hpp" #include "common_test_utils/ov_tensor_utils.hpp" diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/broadcast.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/broadcast.cpp index 71e37d4c28e8a7..580a67383f3aba 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/broadcast.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/broadcast.cpp @@ -5,7 +5,7 @@ #include "shared_test_classes/single_layer/broadcast.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "ie_precision.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include #include diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/convolution.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/convolution.cpp index ca8f024c95fad8..44101b10c8e172 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/convolution.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/convolution.cpp @@ -5,8 +5,8 @@ #include #include #include -#include "ngraph_functions/utils/ngraph_helpers.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "shared_test_classes/single_layer/convolution.hpp" #include "common_test_utils/test_constants.hpp" diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/convolution_backprop_data.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/convolution_backprop_data.cpp index ab535def809055..d394eb7d05de17 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/convolution_backprop_data.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/convolution_backprop_data.cpp @@ -6,8 +6,8 @@ #include #include #include -#include "ngraph_functions/utils/ngraph_helpers.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include 
"shared_test_classes/single_layer/convolution_backprop_data.hpp" #include "common_test_utils/test_constants.hpp" diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/cum_sum.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/cum_sum.cpp index 70e03b27df496c..db4bea2f89ee5e 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/cum_sum.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/cum_sum.cpp @@ -5,7 +5,7 @@ #include "shared_test_classes/single_layer/cum_sum.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "ie_precision.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include using namespace ngraph; diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/detection_output.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/detection_output.cpp index 2ae2dab356877a..a5dfc13c5c8dcb 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/detection_output.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/detection_output.cpp @@ -5,7 +5,7 @@ #include "shared_test_classes/single_layer/detection_output.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "ie_precision.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "common_test_utils/ov_tensor_utils.hpp" #include diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/deth_to_space.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/deth_to_space.cpp index d568c2ec10c67f..d39008127d8fa2 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/deth_to_space.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/deth_to_space.cpp @@ -5,7 +5,7 @@ #include "shared_test_classes/single_layer/depth_to_space.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "ie_precision.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "common_test_utils/ov_tensor_utils.hpp" #include diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/gather.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/gather.cpp index c92f376e8246a8..b97dd9927002e8 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/gather.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/gather.cpp @@ -5,7 +5,7 @@ #include "shared_test_classes/single_layer/gather.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "ie_precision.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "common_test_utils/ov_tensor_utils.hpp" #include diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/gather_elements.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/gather_elements.cpp index fd64e29cfde884..680474953110ca 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/gather_elements.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/gather_elements.cpp @@ -4,7 +4,7 @@ #include "shared_test_classes/base/ov_subgraph.hpp" #include "ie_precision.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "common_test_utils/ov_tensor_utils.hpp" #include diff --git 
a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/gather_nd.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/gather_nd.cpp index 90d0a29687f3a4..cdd9ace992225f 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/gather_nd.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/gather_nd.cpp @@ -5,7 +5,7 @@ #include "shared_test_classes/single_layer/gather_nd.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "ie_precision.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "common_test_utils/ov_tensor_utils.hpp" #include diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/gather_tree.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/gather_tree.cpp index 72635ae748ea29..7c004ed42c2743 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/gather_tree.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/gather_tree.cpp @@ -5,7 +5,7 @@ #include "shared_test_classes/single_layer/gather_tree.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "ie_precision.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "common_test_utils/ov_tensor_utils.hpp" #include diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/grid_sample.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/grid_sample.cpp index 96f93e1ac052cf..fb9c60318be8b2 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/grid_sample.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/grid_sample.cpp @@ -5,7 +5,7 @@ #include "shared_test_classes/single_layer/select.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "ie_precision.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include #include diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/group_convolution_backprop_data.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/group_convolution_backprop_data.cpp index 6da5efea12a2d0..fb955a63a837de 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/group_convolution_backprop_data.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/group_convolution_backprop_data.cpp @@ -6,8 +6,8 @@ #include #include #include -#include "ngraph_functions/utils/ngraph_helpers.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "shared_test_classes/single_layer/group_convolution_backprop_data.hpp" #include "common_test_utils/test_constants.hpp" diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/groupconvolution.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/groupconvolution.cpp index c3cfa122bba4ce..e47866f1f5709a 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/groupconvolution.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/groupconvolution.cpp @@ -5,8 +5,8 @@ #include #include #include -#include "ngraph_functions/utils/ngraph_helpers.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" +#include "ov_models/builders.hpp" #include 
"shared_test_classes/base/ov_subgraph.hpp" #include "shared_test_classes/single_layer/group_convolution.hpp" #include "common_test_utils/test_constants.hpp" diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/interpolate.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/interpolate.cpp index 99d3266e947eb7..5bfe9bb5612205 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/interpolate.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/interpolate.cpp @@ -4,7 +4,7 @@ #include "shared_test_classes/single_layer/interpolate.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include #include "openvino/core/preprocess/pre_post_process.hpp" diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/matmul.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/matmul.cpp index 1d14e01fc162b1..8965a42ee35dc9 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/matmul.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/matmul.cpp @@ -5,7 +5,7 @@ #include "shared_test_classes/single_layer/mat_mul.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "ie_precision.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include using namespace ngraph; diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/mvn.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/mvn.cpp index a4470087b65544..2dee03efde16dd 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/mvn.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/mvn.cpp @@ -3,7 +3,7 @@ // #include -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" using namespace InferenceEngine; diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/non_max_suppression.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/non_max_suppression.cpp index 40efd14936e84b..4708b2f0ffb5a4 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/non_max_suppression.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/non_max_suppression.cpp @@ -6,8 +6,8 @@ #include #include #include -#include "ngraph_functions/utils/ngraph_helpers.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "shared_test_classes/single_layer/non_max_suppression.hpp" #include "common_test_utils/test_constants.hpp" diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/normalize_l2.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/normalize_l2.cpp index 7e86c934a6c3b3..7d4adb660fe18f 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/normalize_l2.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/normalize_l2.cpp @@ -3,7 +3,7 @@ // #include -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" using namespace InferenceEngine; diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/pad.cpp 
b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/pad.cpp index ac90f0dcabe915..d219b693016587 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/pad.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/pad.cpp @@ -5,7 +5,7 @@ #include "shared_test_classes/single_layer/pad.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "ie_precision.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include #include diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/pooling.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/pooling.cpp index 8721875569d1bc..21d0e584def3e3 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/pooling.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/pooling.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/single_layer/pooling.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/prior_box.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/prior_box.cpp index d1839c7438f560..9c22ee657eedde 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/prior_box.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/prior_box.cpp @@ -8,7 +8,7 @@ #include "shared_test_classes/single_layer/prior_box_clustered.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "ie_precision.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include #include diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/random_uniform.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/random_uniform.cpp index 5344e3ebb1f090..440924fd0a541e 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/random_uniform.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/random_uniform.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/range.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/range.cpp index ba5e2d3c51515b..f171bfebb4814a 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/range.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/range.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/reduce.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/reduce.cpp index d091e0ae049928..1f13d7998a6f20 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/reduce.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/reduce.cpp @@ -5,7 +5,7 @@ #include "shared_test_classes/single_layer/reduce_ops.hpp" #include 
"shared_test_classes/base/ov_subgraph.hpp" #include "ie_precision.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include using namespace ngraph; diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/region_yolo.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/region_yolo.cpp index 7996d4b81b7ee6..9ced7435d6b77e 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/region_yolo.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/region_yolo.cpp @@ -5,7 +5,7 @@ #include "shared_test_classes/single_layer/region_yolo.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "ie_precision.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "common_test_utils/ov_tensor_utils.hpp" #include diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/roi_pooling.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/roi_pooling.cpp index 69cd8c973da8d8..eee9451c206413 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/roi_pooling.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/roi_pooling.cpp @@ -5,7 +5,7 @@ #include "shared_test_classes/single_layer/roi_pooling.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "ie_precision.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "common_test_utils/ov_tensor_utils.hpp" #include diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/scatter_nd_update.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/scatter_nd_update.cpp index 49f73670e0cbe7..81e87ec1eb69e0 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/scatter_nd_update.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/scatter_nd_update.cpp @@ -5,7 +5,7 @@ #include "shared_test_classes/single_layer/scatter_ND_update.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "ie_precision.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include #include diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/select.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/select.cpp index 262734f441a55e..ed65d68d70a798 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/select.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/select.cpp @@ -5,7 +5,7 @@ #include "shared_test_classes/single_layer/select.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "ie_precision.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include using namespace ngraph; diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/shapeof.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/shapeof.cpp index f9244790e9fc7a..c1217d68744640 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/shapeof.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/shapeof.cpp @@ -5,7 +5,7 @@ #include "shared_test_classes/single_layer/shape_of.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "ie_precision.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include using namespace ngraph; diff --git 
a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/softmax.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/softmax.cpp index 76edfe4d339d2e..a8cc19ea0cbd89 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/softmax.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/softmax.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include +#include #include "shared_test_classes/single_layer/shape_of.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/space_to_batch.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/space_to_batch.cpp index 15d13e67bb4036..e66f099bf3e0bc 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/space_to_batch.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/space_to_batch.cpp @@ -4,7 +4,7 @@ #include "shared_test_classes/single_layer/space_to_batch.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "common_test_utils/test_constants.hpp" #include "common_test_utils/ov_tensor_utils.hpp" diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/split.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/split.cpp index 7a0b3fda62bcfe..472960b7574d71 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/split.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/split.cpp @@ -5,7 +5,7 @@ #include "shared_test_classes/single_layer/select.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "ie_precision.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "common_test_utils/ov_tensor_utils.hpp" #include diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/strided_slice.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/strided_slice.cpp index 052d6574d682d7..c6cd7e65258f34 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/strided_slice.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/strided_slice.cpp @@ -4,7 +4,7 @@ #include "shared_test_classes/single_layer/strided_slice.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "common_test_utils/test_constants.hpp" #include "common_test_utils/ov_tensor_utils.hpp" diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/tile.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/tile.cpp index e323f1f791e801..3f3e0734eb2905 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/tile.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/tile.cpp @@ -6,8 +6,8 @@ #include #include #include -#include "ngraph_functions/utils/ngraph_helpers.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "shared_test_classes/single_layer/tile.hpp" #include "common_test_utils/test_constants.hpp" diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/top_k.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/top_k.cpp 
index 8e21b463961f7d..7ff69b3db00f3e 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/top_k.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/top_k.cpp @@ -6,8 +6,8 @@ #include #include #include -#include "ngraph_functions/utils/ngraph_helpers.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "shared_test_classes/single_layer/topk.hpp" #include "common_test_utils/test_constants.hpp" diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/unique.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/unique.cpp index 0110b049fcbcb5..a8bf9bc51735b6 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/unique.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/unique.cpp @@ -3,7 +3,7 @@ // #include "common_test_utils/ov_tensor_utils.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/tensor_iterator.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/tensor_iterator.cpp index b2ea9a47d56896..4bd41f87cd51a6 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/tensor_iterator.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/tensor_iterator.cpp @@ -14,8 +14,8 @@ #include "common_test_utils/test_constants.hpp" #include "ie_api.h" #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" using namespace InferenceEngine; using Config = std::pair>; diff --git a/src/plugins/intel_gpu/tests/functional/subgraph_tests/condition.cpp b/src/plugins/intel_gpu/tests/functional/subgraph_tests/condition.cpp index 67cada5dbf661e..a8be347616b33a 100644 --- a/src/plugins/intel_gpu/tests/functional/subgraph_tests/condition.cpp +++ b/src/plugins/intel_gpu/tests/functional/subgraph_tests/condition.cpp @@ -6,9 +6,9 @@ #include #include #include -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/utils/ov_helpers.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "common_test_utils/test_constants.hpp" #include "shared_test_classes/base/utils/ranges.hpp" diff --git a/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/broadcast_eltwise.cpp b/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/broadcast_eltwise.cpp index fa581718c99937..9030ea1129313d 100644 --- a/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/broadcast_eltwise.cpp +++ b/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/broadcast_eltwise.cpp @@ -5,8 +5,8 @@ #include #include -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "openvino/pass/serialize.hpp" diff --git 
a/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/dynamic_model_static_split_layer.cpp b/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/dynamic_model_static_split_layer.cpp index 5e7393fffce95d..0f38bf1787bd2c 100644 --- a/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/dynamic_model_static_split_layer.cpp +++ b/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/dynamic_model_static_split_layer.cpp @@ -5,8 +5,8 @@ #include #include #include -#include "ngraph_functions/utils/ngraph_helpers.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "shared_test_classes/single_layer/split.hpp" #include diff --git a/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/dynamic_smoke_test_gen_impl_key.cpp b/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/dynamic_smoke_test_gen_impl_key.cpp index 77ce615cdc73e1..06aa4a11817e75 100644 --- a/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/dynamic_smoke_test_gen_impl_key.cpp +++ b/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/dynamic_smoke_test_gen_impl_key.cpp @@ -5,8 +5,8 @@ #include #include #include -#include "ngraph_functions/utils/ngraph_helpers.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "shared_test_classes/single_layer/shape_of.hpp" #include "shared_test_classes/single_layer/strided_slice.hpp" diff --git a/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/dynamic_smoke_test_reduce_deconvolution_concat.cpp b/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/dynamic_smoke_test_reduce_deconvolution_concat.cpp index fce1c81bcca3a2..f09470c91a8442 100644 --- a/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/dynamic_smoke_test_reduce_deconvolution_concat.cpp +++ b/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/dynamic_smoke_test_reduce_deconvolution_concat.cpp @@ -5,8 +5,8 @@ #include #include #include -#include "ngraph_functions/utils/ngraph_helpers.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "shared_test_classes/single_layer/reduce_ops.hpp" #include "shared_test_classes/single_layer/convolution_backprop_data.hpp" diff --git a/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/dynamic_smoke_test_shape_of_activation.cpp b/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/dynamic_smoke_test_shape_of_activation.cpp index bd67b3e055b933..4967c716b14d6a 100644 --- a/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/dynamic_smoke_test_shape_of_activation.cpp +++ b/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/dynamic_smoke_test_shape_of_activation.cpp @@ -5,8 +5,8 @@ #include #include #include -#include "ngraph_functions/utils/ngraph_helpers.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "shared_test_classes/single_layer/shape_of.hpp" #include "shared_test_classes/single_layer/reshape.hpp" diff --git 
a/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/dynamic_smoke_test_shape_of_reduce_reshape.cpp b/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/dynamic_smoke_test_shape_of_reduce_reshape.cpp index fd6db3767919fe..107cb0f2bcd569 100644 --- a/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/dynamic_smoke_test_shape_of_reduce_reshape.cpp +++ b/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/dynamic_smoke_test_shape_of_reduce_reshape.cpp @@ -5,8 +5,8 @@ #include #include #include -#include "ngraph_functions/utils/ngraph_helpers.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "shared_test_classes/single_layer/shape_of.hpp" #include "shared_test_classes/single_layer/strided_slice.hpp" diff --git a/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/dynamic_smoke_test_with_empty_tensor.cpp b/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/dynamic_smoke_test_with_empty_tensor.cpp index bdb6a20b59bcbd..448c629d1a5579 100644 --- a/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/dynamic_smoke_test_with_empty_tensor.cpp +++ b/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/dynamic_smoke_test_with_empty_tensor.cpp @@ -5,8 +5,8 @@ #include #include #include -#include "ngraph_functions/utils/ngraph_helpers.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "shared_test_classes/single_layer/shape_of.hpp" #include "shared_test_classes/single_layer/strided_slice.hpp" diff --git a/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/matmul_weights_decompression.cpp b/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/matmul_weights_decompression.cpp index 5eaa69d4e627c2..313015da3406ba 100644 --- a/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/matmul_weights_decompression.cpp +++ b/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/matmul_weights_decompression.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "transformations/rt_info/decompression.hpp" diff --git a/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/read_value_assign.cpp b/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/read_value_assign.cpp index 07bc8ed7c07838..7112af8d1710b0 100644 --- a/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/read_value_assign.cpp +++ b/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/read_value_assign.cpp @@ -5,8 +5,8 @@ #include #include -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" diff --git a/src/plugins/template/tests/functional/shared_tests_instances/behavior/plugin/synthetic.cpp b/src/plugins/template/tests/functional/shared_tests_instances/behavior/plugin/synthetic.cpp index 96444bd1dda303..c81cdb24b53ce2 100644 --- 
a/src/plugins/template/tests/functional/shared_tests_instances/behavior/plugin/synthetic.cpp +++ b/src/plugins/template/tests/functional/shared_tests_instances/behavior/plugin/synthetic.cpp @@ -5,8 +5,8 @@ #include #include "behavior/plugin/hetero_synthetic.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/subgraph_builders.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/subgraph_builders.hpp" namespace { using namespace HeteroTests; diff --git a/src/plugins/template/tests/functional/subgraph_reference/preprocess_opencv.cpp b/src/plugins/template/tests/functional/subgraph_reference/preprocess_opencv.cpp index 099cea661b5f6c..32b4097eb58550 100644 --- a/src/plugins/template/tests/functional/subgraph_reference/preprocess_opencv.cpp +++ b/src/plugins/template/tests/functional/subgraph_reference/preprocess_opencv.cpp @@ -14,7 +14,7 @@ # include # include "base_reference_test.hpp" -# include "ngraph_functions/builders.hpp" +# include "ov_models/builders.hpp" using namespace ov; using namespace ov::preprocess; diff --git a/src/tests/CMakeLists.txt b/src/tests/CMakeLists.txt index b86c0d8bfba630..944fc8d0d52054 100644 --- a/src/tests/CMakeLists.txt +++ b/src/tests/CMakeLists.txt @@ -29,7 +29,7 @@ endif() ov_deprecated_no_errors() -add_subdirectory(ngraph_helpers) +add_subdirectory(ov_helpers) add_subdirectory(test_utils) if(ENABLE_FUNCTIONAL_TESTS) diff --git a/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/include/op_impl_check/single_op_graph.hpp b/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/include/op_impl_check/single_op_graph.hpp index e9ed04a7cfb7eb..e586d485954af0 100644 --- a/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/include/op_impl_check/single_op_graph.hpp +++ b/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/include/op_impl_check/single_op_graph.hpp @@ -5,7 +5,7 @@ #pragma once #include -#include +#include namespace ov { namespace test { diff --git a/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/src/read_ir/read_ir.cpp b/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/src/read_ir/read_ir.cpp index 5f411e5aa37b6c..308d733574a38f 100644 --- a/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/src/read_ir/read_ir.cpp +++ b/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/src/read_ir/read_ir.cpp @@ -11,7 +11,7 @@ #include "shared_test_classes/base/utils/ranges.hpp" #include "shared_test_classes/base/utils/generate_inputs.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "common_test_utils/file_utils.hpp" #include "common_test_utils/data_utils.hpp" #include "common_test_utils/ov_tensor_utils.hpp" diff --git a/src/tests/functional/plugin/shared/CMakeLists.txt b/src/tests/functional/plugin/shared/CMakeLists.txt index 30892b44ec0287..23f279a8dca3c6 100644 --- a/src/tests/functional/plugin/shared/CMakeLists.txt +++ b/src/tests/functional/plugin/shared/CMakeLists.txt @@ -40,7 +40,7 @@ else() endif() if (TARGET openvino::snippets) - list(APPEND LINK_LIBRARIES_PRIVATE snippetsNgraphFunctions) + list(APPEND LINK_LIBRARIES_PRIVATE ov_snippets_models) else() list(APPEND EXCLUDED_SOURCE_PATHS ${CMAKE_CURRENT_SOURCE_DIR}/src/snippets) endif() @@ -64,8 +64,8 @@ addIeTarget( openvino::pugixml common_test_utils func_test_utils - ngraphFunctions - lptNgraphFunctions + ov_models + ov_lpt_models sharedTestClasses 
PRIVATE ${LINK_LIBRARIES_PRIVATE} diff --git a/src/tests/functional/plugin/shared/include/base/multi/multi_helpers.hpp b/src/tests/functional/plugin/shared/include/base/multi/multi_helpers.hpp index 5044cec6776224..86c0d2fff6e855 100644 --- a/src/tests/functional/plugin/shared/include/base/multi/multi_helpers.hpp +++ b/src/tests/functional/plugin/shared/include/base/multi/multi_helpers.hpp @@ -8,7 +8,7 @@ #include "base/ov_behavior_test_utils.hpp" #include "common_test_utils/test_common.hpp" #include "common_test_utils/test_constants.hpp" -#include "ngraph_functions/subgraph_builders.hpp" +#include "ov_models/subgraph_builders.hpp" #include "openvino/util/common_util.hpp" using namespace ::testing; diff --git a/src/tests/functional/plugin/shared/include/base/ov_behavior_test_utils.hpp b/src/tests/functional/plugin/shared/include/base/ov_behavior_test_utils.hpp index 1e24f1be47bcdd..347660d0aa3252 100644 --- a/src/tests/functional/plugin/shared/include/base/ov_behavior_test_utils.hpp +++ b/src/tests/functional/plugin/shared/include/base/ov_behavior_test_utils.hpp @@ -13,7 +13,7 @@ #include -#include "ngraph_functions/subgraph_builders.hpp" +#include "ov_models/subgraph_builders.hpp" #include "common_test_utils/test_common.hpp" #include "common_test_utils/test_constants.hpp" diff --git a/src/tests/functional/plugin/shared/include/behavior/executable_network/exec_graph_info.hpp b/src/tests/functional/plugin/shared/include/behavior/executable_network/exec_graph_info.hpp index b65f94ad37ee06..f3519a57638cf0 100644 --- a/src/tests/functional/plugin/shared/include/behavior/executable_network/exec_graph_info.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/executable_network/exec_graph_info.hpp @@ -11,7 +11,7 @@ #include "shared_test_classes/base/layer_test_utils.hpp" #include "base/behavior_test_utils.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "pugixml.hpp" diff --git a/src/tests/functional/plugin/shared/include/behavior/executable_network/locale.hpp b/src/tests/functional/plugin/shared/include/behavior/executable_network/locale.hpp index c48a36190853a8..a8af80e4101fc3 100644 --- a/src/tests/functional/plugin/shared/include/behavior/executable_network/locale.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/executable_network/locale.hpp @@ -7,7 +7,7 @@ #include "common_test_utils/test_common.hpp" #include "common_test_utils/file_utils.hpp" #include "functional_test_utils/plugin_cache.hpp" -#include "ngraph_functions/subgraph_builders.hpp" +#include "ov_models/subgraph_builders.hpp" #include "base/behavior_test_utils.hpp" diff --git a/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/infer_request_dynamic.hpp b/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/infer_request_dynamic.hpp index e8db2a531337c9..a1d1fc71c2e72e 100644 --- a/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/infer_request_dynamic.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/infer_request_dynamic.hpp @@ -16,8 +16,8 @@ #include #include "openvino/core/shape.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" +#include "ov_models/builders.hpp" #include "transformations/utils/utils.hpp" #include #include @@ -26,7 +26,7 @@ #include "common_test_utils/common_utils.hpp" #include "functional_test_utils/plugin_cache.hpp" 
#include "functional_test_utils/blob_utils.hpp" -#include "ngraph_functions/subgraph_builders.hpp" +#include "ov_models/subgraph_builders.hpp" #include "shared_test_classes/subgraph/basic_lstm.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" diff --git a/src/tests/functional/plugin/shared/include/behavior/ov_plugin/caching_tests.hpp b/src/tests/functional/plugin/shared/include/behavior/ov_plugin/caching_tests.hpp index 74180ca5c86f22..07f8616bcbbb47 100644 --- a/src/tests/functional/plugin/shared/include/behavior/ov_plugin/caching_tests.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/ov_plugin/caching_tests.hpp @@ -9,7 +9,7 @@ #include "shared_test_classes/base/ov_subgraph.hpp" #include "ngraph/function.hpp" -#include "ngraph_functions/subgraph_builders.hpp" +#include "ov_models/subgraph_builders.hpp" #include "functional_test_utils/plugin_cache.hpp" #include "common_test_utils/unicode_utils.hpp" #include "openvino/util/common_util.hpp" diff --git a/src/tests/functional/plugin/shared/include/behavior/plugin/auto_batching_tests.hpp b/src/tests/functional/plugin/shared/include/behavior/plugin/auto_batching_tests.hpp index 7b7b404c40377a..8ecb1440571873 100644 --- a/src/tests/functional/plugin/shared/include/behavior/plugin/auto_batching_tests.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/plugin/auto_batching_tests.hpp @@ -11,7 +11,7 @@ #include #include -#include "ngraph_functions/subgraph_builders.hpp" +#include "ov_models/subgraph_builders.hpp" #include "functional_test_utils/blob_utils.hpp" #include "base/behavior_test_utils.hpp" diff --git a/src/tests/functional/plugin/shared/include/behavior/plugin/caching_tests.hpp b/src/tests/functional/plugin/shared/include/behavior/plugin/caching_tests.hpp index 8ecf5ba9e32982..20d923061467e0 100644 --- a/src/tests/functional/plugin/shared/include/behavior/plugin/caching_tests.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/plugin/caching_tests.hpp @@ -9,7 +9,7 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" #include "ngraph/function.hpp" -#include "ngraph_functions/subgraph_builders.hpp" +#include "ov_models/subgraph_builders.hpp" #include "functional_test_utils/plugin_cache.hpp" #include "common_test_utils/unicode_utils.hpp" #include "openvino/util/common_util.hpp" diff --git a/src/tests/functional/plugin/shared/include/behavior/plugin/configuration_tests.hpp b/src/tests/functional/plugin/shared/include/behavior/plugin/configuration_tests.hpp index 96bc975b8c9f5b..1524ab7bc6d855 100644 --- a/src/tests/functional/plugin/shared/include/behavior/plugin/configuration_tests.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/plugin/configuration_tests.hpp @@ -12,7 +12,7 @@ #include #include #include -#include +#include #include "common_test_utils/common_utils.hpp" #include "common_test_utils/test_common.hpp" diff --git a/src/tests/functional/plugin/shared/include/behavior/plugin/core_threading.hpp b/src/tests/functional/plugin/shared/include/behavior/plugin/core_threading.hpp index f5b8973c0788d2..34bc6b593e307e 100644 --- a/src/tests/functional/plugin/shared/include/behavior/plugin/core_threading.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/plugin/core_threading.hpp @@ -10,7 +10,7 @@ #include #include -#include +#include #include #include #include diff --git a/src/tests/functional/plugin/shared/include/behavior/plugin/hetero_synthetic.hpp b/src/tests/functional/plugin/shared/include/behavior/plugin/hetero_synthetic.hpp index 
c208cbf3b9df4a..c6d3afef908471 100644 --- a/src/tests/functional/plugin/shared/include/behavior/plugin/hetero_synthetic.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/plugin/hetero_synthetic.hpp @@ -9,7 +9,7 @@ #include #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/utils/ov_helpers.hpp" namespace HeteroTests { diff --git a/src/tests/functional/plugin/shared/include/execution_graph_tests/num_inputs_fusing_bin_conv.hpp b/src/tests/functional/plugin/shared/include/execution_graph_tests/num_inputs_fusing_bin_conv.hpp index fdaa9eeb9ae4c7..169f7464765cd3 100644 --- a/src/tests/functional/plugin/shared/include/execution_graph_tests/num_inputs_fusing_bin_conv.hpp +++ b/src/tests/functional/plugin/shared/include/execution_graph_tests/num_inputs_fusing_bin_conv.hpp @@ -6,7 +6,7 @@ #include -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" namespace ExecutionGraphTests { diff --git a/src/tests/functional/plugin/shared/include/execution_graph_tests/runtime_precision.hpp b/src/tests/functional/plugin/shared/include/execution_graph_tests/runtime_precision.hpp index f826223d23e91c..b7379f1479b1d6 100644 --- a/src/tests/functional/plugin/shared/include/execution_graph_tests/runtime_precision.hpp +++ b/src/tests/functional/plugin/shared/include/execution_graph_tests/runtime_precision.hpp @@ -10,7 +10,7 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace ExecutionGraphTests { diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/add_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/add_transformation.hpp index 32e2a0dd1864d7..b0466f52aa47b3 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/add_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/add_transformation.hpp @@ -7,7 +7,7 @@ #include #include #include "shared_test_classes/base/low_precision_transformations/layer_transformation.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/assign_and_read_value_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/assign_and_read_value_transformation.hpp index a7b26ab515a56b..be011e89aaead3 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/assign_and_read_value_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/assign_and_read_value_transformation.hpp @@ -5,8 +5,8 @@ #pragma once #include "shared_test_classes/base/low_precision_transformations/layer_transformation.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" namespace LayerTestsDefinitions { class AssignAndReadValueTransformationParam { diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/batch_to_space_transformation.hpp 
b/src/tests/functional/plugin/shared/include/low_precision_transformations/batch_to_space_transformation.hpp index a263b8831db131..88e6eb9b90f051 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/batch_to_space_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/batch_to_space_transformation.hpp @@ -8,7 +8,7 @@ #include #include "shared_test_classes/base/low_precision_transformations/layer_transformation.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/clamp_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/clamp_transformation.hpp index 0748f5d0f9c23b..50d687b1bd88aa 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/clamp_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/clamp_transformation.hpp @@ -5,8 +5,8 @@ #pragma once #include "shared_test_classes/base/low_precision_transformations/layer_transformation.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" namespace LayerTestsDefinitions { class ClampTransformationParam { diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/concat_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/concat_transformation.hpp index 9546880498dc02..48c3ba7d2b8353 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/concat_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/concat_transformation.hpp @@ -8,8 +8,8 @@ #include #include "shared_test_classes/base/low_precision_transformations/layer_transformation.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/concat_with_child_and_output.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/concat_with_child_and_output.hpp index 0b676d831468ec..96d57afe7ad62d 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/concat_with_child_and_output.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/concat_with_child_and_output.hpp @@ -8,7 +8,7 @@ #include #include "shared_test_classes/base/low_precision_transformations/layer_transformation.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" namespace LayerTestsDefinitions { class ConcatWithChildAndOutputTransformationParam { diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/concat_with_different_precision_on_children.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/concat_with_different_precision_on_children.hpp index 209500c9d56ef3..4baadf52c0a62d 
100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/concat_with_different_precision_on_children.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/concat_with_different_precision_on_children.hpp @@ -8,7 +8,7 @@ #include #include "shared_test_classes/base/low_precision_transformations/layer_transformation.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" namespace LayerTestsDefinitions { class ConcatWithDifferentChildrenTransformationParam { diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/concat_with_split_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/concat_with_split_transformation.hpp index 2491549e26656d..43f0491dc89305 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/concat_with_split_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/concat_with_split_transformation.hpp @@ -8,7 +8,7 @@ #include #include "shared_test_classes/base/low_precision_transformations/layer_transformation.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" namespace LayerTestsDefinitions { class ConcatWithSplitTransformationParam { diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/convolution_backprop_data_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/convolution_backprop_data_transformation.hpp index 8ed8498b79a5d1..13aa0a4f26260c 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/convolution_backprop_data_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/convolution_backprop_data_transformation.hpp @@ -10,9 +10,9 @@ #include "shared_test_classes/base/low_precision_transformations/layer_transformation.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_weights.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/fake_quantize_on_weights.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/convolution_qdq_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/convolution_qdq_transformation.hpp index cd268238291279..8475b146af9b0f 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/convolution_qdq_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/convolution_qdq_transformation.hpp @@ -8,10 +8,10 @@ #include #include "shared_test_classes/base/low_precision_transformations/layer_transformation.hpp" -#include "lpt_ngraph_functions/common/constant.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_weights.hpp" +#include "ov_lpt_models/common/constant.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" +#include 
"ov_lpt_models/common/fake_quantize_on_weights.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/convolution_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/convolution_transformation.hpp index 5fa546c3096b4b..851c8f57cb4789 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/convolution_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/convolution_transformation.hpp @@ -8,8 +8,8 @@ #include #include "shared_test_classes/base/low_precision_transformations/layer_transformation.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_weights.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/fake_quantize_on_weights.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/convolution_with_incorrect_weights.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/convolution_with_incorrect_weights.hpp index 90d2d63d2609c6..9f919e5cdd5195 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/convolution_with_incorrect_weights.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/convolution_with_incorrect_weights.hpp @@ -8,8 +8,8 @@ #include #include "shared_test_classes/base/low_precision_transformations/layer_transformation.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_weights.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/fake_quantize_on_weights.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/elementwise_branch_selection_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/elementwise_branch_selection_transformation.hpp index aa9ceb958a954b..e98af3352dae83 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/elementwise_branch_selection_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/elementwise_branch_selection_transformation.hpp @@ -7,8 +7,8 @@ #include #include #include "shared_test_classes/base/low_precision_transformations/layer_transformation.hpp" -#include "lpt_ngraph_functions/common/convolution.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/convolution.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/eliminate_fake_quantize_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/eliminate_fake_quantize_transformation.hpp index 0758b7e3fa87d5..bc49cae1cb5755 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/eliminate_fake_quantize_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/eliminate_fake_quantize_transformation.hpp @@ -8,9 +8,9 @@ #include #include -#include "lpt_ngraph_functions/common/add.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" -#include 
"lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/add.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" #include "shared_test_classes/base/low_precision_transformations/layer_transformation.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_and_avg_pool_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_and_avg_pool_transformation.hpp index d93aca06192beb..27140d9bd07c54 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_and_avg_pool_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_and_avg_pool_transformation.hpp @@ -7,7 +7,7 @@ #include #include -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" #include "shared_test_classes/base/low_precision_transformations/layer_transformation.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_and_max_pool_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_and_max_pool_transformation.hpp index b2f1d56e137d73..499e4bd686b887 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_and_max_pool_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_and_max_pool_transformation.hpp @@ -7,7 +7,7 @@ #include #include -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" #include "shared_test_classes/base/low_precision_transformations/layer_transformation.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_and_two_output_branches_with_convolution.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_and_two_output_branches_with_convolution.hpp index 3ac87adf356ee9..e17076acb062c6 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_and_two_output_branches_with_convolution.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_and_two_output_branches_with_convolution.hpp @@ -8,9 +8,9 @@ #include #include "shared_test_classes/base/low_precision_transformations/layer_transformation.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_weights.hpp" -#include "lpt_ngraph_functions/fake_quantize_and_two_output_branches_with_convolution_function.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/fake_quantize_on_weights.hpp" +#include "ov_lpt_models/fake_quantize_and_two_output_branches_with_convolution.hpp" namespace LayerTestsDefinitions { class FakeQuantizeAndTwoOutputBranchesWithConvolution { diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_precision_selection_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_precision_selection_transformation.hpp index 
657669dfcf6e5e..fa4077ece6cd44 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_precision_selection_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_precision_selection_transformation.hpp @@ -6,11 +6,11 @@ #include #include -#include "lpt_ngraph_functions/fake_quantize_function.hpp" +#include "ov_lpt_models/fake_quantize.hpp" #include "shared_test_classes/base/low_precision_transformations/layer_transformation.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_weights.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/fake_quantize_on_weights.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_transformation.hpp index 44b89e0b053052..1e07645754f014 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_transformation.hpp @@ -6,7 +6,7 @@ #include #include -#include "lpt_ngraph_functions/fake_quantize_function.hpp" +#include "ov_lpt_models/fake_quantize.hpp" #include "shared_test_classes/base/low_precision_transformations/layer_transformation.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_with_dq_not_optimal_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_with_dq_not_optimal_transformation.hpp index 66736533664a1f..78557a195a84e2 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_with_dq_not_optimal_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_with_dq_not_optimal_transformation.hpp @@ -6,14 +6,14 @@ #include #include -#include "lpt_ngraph_functions/fake_quantize_function.hpp" +#include "ov_lpt_models/fake_quantize.hpp" #include "shared_test_classes/base/low_precision_transformations/layer_transformation.hpp" -#include "lpt_ngraph_functions/fake_quantize_and_convolution_function.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" -#include "lpt_ngraph_functions/common/constant.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_weights.hpp" +#include "ov_lpt_models/fake_quantize_and_convolution.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/constant.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/fake_quantize_on_weights.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/fuse_convert_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/fuse_convert_transformation.hpp index 35520e1382f4e9..8ddbfe92406ba1 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/fuse_convert_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/fuse_convert_transformation.hpp @@ -8,8 +8,8 @@ 
#include #include "shared_test_classes/base/low_precision_transformations/layer_transformation.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" using namespace ngraph; diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/fuse_dequantize_to_fake_quantize_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/fuse_dequantize_to_fake_quantize_transformation.hpp index ff488cb8a44e15..2c16fea5538e8c 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/fuse_dequantize_to_fake_quantize_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/fuse_dequantize_to_fake_quantize_transformation.hpp @@ -7,9 +7,9 @@ #include #include -#include "lpt_ngraph_functions/common/add.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/add.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" #include "shared_test_classes/base/low_precision_transformations/layer_transformation.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/fuse_fake_quantize_and_scale_shift_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/fuse_fake_quantize_and_scale_shift_transformation.hpp index 9d53eba2affea7..14e98de6a43844 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/fuse_fake_quantize_and_scale_shift_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/fuse_fake_quantize_and_scale_shift_transformation.hpp @@ -6,7 +6,7 @@ #include #include -#include "lpt_ngraph_functions/fuse_fake_quantize_and_scale_shift_function.hpp" +#include "ov_lpt_models/fuse_fake_quantize_and_scale_shift.hpp" #include "shared_test_classes/base/low_precision_transformations/layer_transformation.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/fuse_multiply_to_fake_quantize_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/fuse_multiply_to_fake_quantize_transformation.hpp index 6f9830a746aef4..9153c8cdf6d6f3 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/fuse_multiply_to_fake_quantize_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/fuse_multiply_to_fake_quantize_transformation.hpp @@ -7,9 +7,9 @@ #include #include -#include "lpt_ngraph_functions/common/add.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/add.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" #include "shared_test_classes/base/low_precision_transformations/layer_transformation.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/fuse_subtract_to_fake_quantize_transformation.hpp 
b/src/tests/functional/plugin/shared/include/low_precision_transformations/fuse_subtract_to_fake_quantize_transformation.hpp index 64398a3ea18506..e6128bb9ebf44f 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/fuse_subtract_to_fake_quantize_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/fuse_subtract_to_fake_quantize_transformation.hpp @@ -7,9 +7,9 @@ #include #include -#include "lpt_ngraph_functions/common/add.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/add.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" #include "shared_test_classes/base/low_precision_transformations/layer_transformation.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/gather_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/gather_transformation.hpp index ab4fe46b6e0b18..1dee4f8157940e 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/gather_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/gather_transformation.hpp @@ -8,7 +8,7 @@ #include #include "shared_test_classes/base/low_precision_transformations/layer_transformation.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/group_convolution_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/group_convolution_transformation.hpp index bb24d0ea0a4b3b..09e3d90210414f 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/group_convolution_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/group_convolution_transformation.hpp @@ -8,8 +8,8 @@ #include #include "shared_test_classes/base/low_precision_transformations/layer_transformation.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_weights.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/fake_quantize_on_weights.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/groupconvolution_qdq_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/groupconvolution_qdq_transformation.hpp index 93d0bad2305a93..f23036150c0ee5 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/groupconvolution_qdq_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/groupconvolution_qdq_transformation.hpp @@ -8,11 +8,11 @@ #include #include "shared_test_classes/base/low_precision_transformations/layer_transformation.hpp" -#include "lpt_ngraph_functions/common/constant.hpp" -#include "lpt_ngraph_functions/common/reshape.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_weights.hpp" 
+#include "ov_lpt_models/common/constant.hpp" +#include "ov_lpt_models/common/reshape.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/fake_quantize_on_weights.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/mat_mul_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/mat_mul_transformation.hpp index 327b868606a809..285c599591d7ca 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/mat_mul_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/mat_mul_transformation.hpp @@ -7,8 +7,8 @@ #include #include -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" -#include "lpt_ngraph_functions/mat_mul_function.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/mat_mul.hpp" #include "shared_test_classes/base/low_precision_transformations/layer_transformation.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/mat_mul_with_constant_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/mat_mul_with_constant_transformation.hpp index ce5e2d51650199..7eaab1f0d93f4b 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/mat_mul_with_constant_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/mat_mul_with_constant_transformation.hpp @@ -7,12 +7,12 @@ #include #include -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_weights.hpp" -#include "lpt_ngraph_functions/common/constant.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/fake_quantize_on_weights.hpp" +#include "ov_lpt_models/common/constant.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" -#include "lpt_ngraph_functions/mat_mul_function.hpp" +#include "ov_lpt_models/mat_mul.hpp" #include "shared_test_classes/base/low_precision_transformations/layer_transformation.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/mat_mul_with_optimized_constant_fq.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/mat_mul_with_optimized_constant_fq.hpp index 65cb01a3fa5dfa..55b5f35f2b101f 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/mat_mul_with_optimized_constant_fq.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/mat_mul_with_optimized_constant_fq.hpp @@ -8,7 +8,7 @@ #include #include "shared_test_classes/base/low_precision_transformations/layer_transformation.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/move_fake_quantize_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/move_fake_quantize_transformation.hpp index bbfa6999a029ac..7b0bdf2e5603bf 100644 --- 
a/src/tests/functional/plugin/shared/include/low_precision_transformations/move_fake_quantize_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/move_fake_quantize_transformation.hpp @@ -8,12 +8,12 @@ #include #include "shared_test_classes/base/low_precision_transformations/layer_transformation.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_weights.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/fake_quantize_on_weights.hpp" #include "low_precision/move_fake_quantize.hpp" -#include "lpt_ngraph_functions/move_fake_quantize_function.hpp" +#include "ov_lpt_models/move_fake_quantize.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/multiply_to_group_convolution_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/multiply_to_group_convolution_transformation.hpp index 3b4ecd65289e21..6bd801816daefa 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/multiply_to_group_convolution_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/multiply_to_group_convolution_transformation.hpp @@ -8,9 +8,9 @@ #include #include "shared_test_classes/base/low_precision_transformations/layer_transformation.hpp" -#include "lpt_ngraph_functions/common/constant.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/constant.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" using namespace ngraph; diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/multiply_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/multiply_transformation.hpp index 253cd5c7f9dbbb..06ac05ecb03b46 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/multiply_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/multiply_transformation.hpp @@ -8,7 +8,7 @@ #include #include "shared_test_classes/base/low_precision_transformations/layer_transformation.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/multiply_with_one_parent_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/multiply_with_one_parent_transformation.hpp index e43069f7c59db9..0f5bcc77779bcd 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/multiply_with_one_parent_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/multiply_with_one_parent_transformation.hpp @@ -8,7 +8,7 @@ #include #include "shared_test_classes/base/low_precision_transformations/layer_transformation.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/mvn_transformation.hpp 
b/src/tests/functional/plugin/shared/include/low_precision_transformations/mvn_transformation.hpp index b0db525a501e09..8111f6f05375ed 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/mvn_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/mvn_transformation.hpp @@ -8,7 +8,7 @@ #include #include "shared_test_classes/base/low_precision_transformations/layer_transformation.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" using namespace ngraph; diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/normalize_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/normalize_transformation.hpp index e8ccb19d5fad3f..3c3bd8dd29461e 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/normalize_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/normalize_transformation.hpp @@ -8,7 +8,7 @@ #include #include "shared_test_classes/base/low_precision_transformations/layer_transformation.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/pad_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/pad_transformation.hpp index 020490c6328faf..bfdf719f47224b 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/pad_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/pad_transformation.hpp @@ -5,7 +5,7 @@ #pragma once #include "shared_test_classes/base/low_precision_transformations/layer_transformation.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" namespace LayerTestsDefinitions { class PadTransformationParam { diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/prelu_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/prelu_transformation.hpp index 744e004b98cb5e..d0256c905685e2 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/prelu_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/prelu_transformation.hpp @@ -7,7 +7,7 @@ #include #include #include "shared_test_classes/base/low_precision_transformations/layer_transformation.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/pull_reshape_through_dequantization_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/pull_reshape_through_dequantization_transformation.hpp index e3be928bca65ba..abceb85983bc8c 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/pull_reshape_through_dequantization_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/pull_reshape_through_dequantization_transformation.hpp @@ -8,12 +8,12 @@ #include #include 
"shared_test_classes/base/low_precision_transformations/layer_transformation.hpp" -#include "lpt_ngraph_functions/common/constant.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_weights.hpp" -#include "lpt_ngraph_functions/common/reshape.hpp" -#include "lpt_ngraph_functions/common/transpose.hpp" +#include "ov_lpt_models/common/constant.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/fake_quantize_on_weights.hpp" +#include "ov_lpt_models/common/reshape.hpp" +#include "ov_lpt_models/common/transpose.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/recurrent_cell_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/recurrent_cell_transformation.hpp index 4e5e894e9223ea..6e9e9f375ff62e 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/recurrent_cell_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/recurrent_cell_transformation.hpp @@ -8,12 +8,12 @@ #include #include "shared_test_classes/base/low_precision_transformations/layer_transformation.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_weights.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/fake_quantize_on_weights.hpp" #include "low_precision/recurrent_cell.hpp" -#include "lpt_ngraph_functions/recurrent_cell_function.hpp" +#include "ov_lpt_models/recurrent_cell.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/reduce_max_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/reduce_max_transformation.hpp index cfac5f7ec3611a..9dabccfbdb65f0 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/reduce_max_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/reduce_max_transformation.hpp @@ -5,8 +5,8 @@ #pragma once #include "shared_test_classes/base/low_precision_transformations/layer_transformation.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" namespace LayerTestsDefinitions { class ReduceMaxTransformationParam { diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/reduce_mean_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/reduce_mean_transformation.hpp index a9394f7fe7c205..bef0cf1e823528 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/reduce_mean_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/reduce_mean_transformation.hpp @@ -5,8 +5,8 @@ #pragma once #include "shared_test_classes/base/low_precision_transformations/layer_transformation.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include 
"ov_lpt_models/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/reduce_min_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/reduce_min_transformation.hpp index 69e55744381def..14a6e3b043902f 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/reduce_min_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/reduce_min_transformation.hpp @@ -5,8 +5,8 @@ #pragma once #include "shared_test_classes/base/low_precision_transformations/layer_transformation.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" namespace LayerTestsDefinitions { class ReduceMinTransformationParam { diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/reduce_sum_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/reduce_sum_transformation.hpp index cad728ef357091..0da5e29777f79b 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/reduce_sum_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/reduce_sum_transformation.hpp @@ -5,8 +5,8 @@ #pragma once #include "shared_test_classes/base/low_precision_transformations/layer_transformation.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" namespace LayerTestsDefinitions { class ReduceSumTransformationParam { diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/relu_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/relu_transformation.hpp index 8238adca60f0f4..25219937997769 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/relu_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/relu_transformation.hpp @@ -7,7 +7,7 @@ #include #include #include "shared_test_classes/base/low_precision_transformations/layer_transformation.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/reshape_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/reshape_transformation.hpp index bf3f3693037c1f..02fa5b75ed802c 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/reshape_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/reshape_transformation.hpp @@ -8,7 +8,7 @@ #include #include "shared_test_classes/base/low_precision_transformations/layer_transformation.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" namespace LayerTestsDefinitions { diff --git 
a/src/tests/functional/plugin/shared/include/low_precision_transformations/shuffle_channels_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/shuffle_channels_transformation.hpp index 97536d3b4d5b8d..cd54ec38c3c122 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/shuffle_channels_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/shuffle_channels_transformation.hpp @@ -8,7 +8,7 @@ #include #include "shared_test_classes/base/low_precision_transformations/layer_transformation.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/space_to_batch_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/space_to_batch_transformation.hpp index d4065a52944f32..95884e9e8f1422 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/space_to_batch_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/space_to_batch_transformation.hpp @@ -8,7 +8,7 @@ #include #include "shared_test_classes/base/low_precision_transformations/layer_transformation.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/split_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/split_transformation.hpp index 4e55309730e8ce..5fede273161e71 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/split_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/split_transformation.hpp @@ -5,7 +5,7 @@ #pragma once #include "shared_test_classes/base/low_precision_transformations/layer_transformation.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" namespace LayerTestsDefinitions { class SplitTransformationParam { diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/squeeze_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/squeeze_transformation.hpp index 538a99b0a4b93f..0a5579d13d68bd 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/squeeze_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/squeeze_transformation.hpp @@ -8,7 +8,7 @@ #include #include "shared_test_classes/base/low_precision_transformations/layer_transformation.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/strided_slice_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/strided_slice_transformation.hpp index e6e1f9ef1f1690..13d255ef575e63 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/strided_slice_transformation.hpp +++ 
b/src/tests/functional/plugin/shared/include/low_precision_transformations/strided_slice_transformation.hpp @@ -5,8 +5,8 @@ #pragma once #include "shared_test_classes/base/low_precision_transformations/layer_transformation.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" namespace LayerTestsDefinitions { class StridedSliceTransformationParam { diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/subtract_multiply_to_multiply_add_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/subtract_multiply_to_multiply_add_transformation.hpp index 1c14478350ef8a..2ac54b9b98a7c3 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/subtract_multiply_to_multiply_add_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/subtract_multiply_to_multiply_add_transformation.hpp @@ -8,7 +8,7 @@ #include #include "shared_test_classes/base/low_precision_transformations/layer_transformation.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/transpose_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/transpose_transformation.hpp index 758a358e9f380e..52e164d05f96d4 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/transpose_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/transpose_transformation.hpp @@ -8,7 +8,7 @@ #include #include "shared_test_classes/base/low_precision_transformations/layer_transformation.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/unsqueeze_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/unsqueeze_transformation.hpp index bf96a3868f0bed..0d726ee4eda42b 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/unsqueeze_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/unsqueeze_transformation.hpp @@ -8,7 +8,7 @@ #include #include "shared_test_classes/base/low_precision_transformations/layer_transformation.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/variadic_split_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/variadic_split_transformation.hpp index 61b3f8b707abbf..11b0b67e1e9ed1 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/variadic_split_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/variadic_split_transformation.hpp @@ -5,7 +5,7 @@ #pragma once #include "shared_test_classes/base/low_precision_transformations/layer_transformation.hpp" 
-#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" namespace LayerTestsDefinitions { class VariadicSplitTransformationParam { diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/batch_norm.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/batch_norm.hpp index 8ffc21821bd617..bbe542c0bf42b3 100644 --- a/src/tests/functional/plugin/shared/include/single_layer_tests/batch_norm.hpp +++ b/src/tests/functional/plugin/shared/include/single_layer_tests/batch_norm.hpp @@ -5,7 +5,7 @@ #pragma once #include "shared_test_classes/single_layer/batch_norm.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/include/snippets/codegen_bert.hpp b/src/tests/functional/plugin/shared/include/snippets/codegen_bert.hpp index ddf823177fb0c9..b61cfbc506fe23 100644 --- a/src/tests/functional/plugin/shared/include/snippets/codegen_bert.hpp +++ b/src/tests/functional/plugin/shared/include/snippets/codegen_bert.hpp @@ -10,8 +10,8 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" +#include "ov_models/builders.hpp" // todo: Rewrite this test using Snippets test infrastructure. See add_convert or conv_eltwise for example namespace ov { diff --git a/src/tests/functional/plugin/shared/include/snippets/fake_quantize_decomposition_test.hpp b/src/tests/functional/plugin/shared/include/snippets/fake_quantize_decomposition_test.hpp index 5d6a7d9b123240..d1c684a6e08fdc 100644 --- a/src/tests/functional/plugin/shared/include/snippets/fake_quantize_decomposition_test.hpp +++ b/src/tests/functional/plugin/shared/include/snippets/fake_quantize_decomposition_test.hpp @@ -8,8 +8,8 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/base/snippets_test_utils.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/src/behavior/infer_request/set_io_blob_precision.cpp b/src/tests/functional/plugin/shared/src/behavior/infer_request/set_io_blob_precision.cpp index a439310721a213..212bfded16af0d 100644 --- a/src/tests/functional/plugin/shared/src/behavior/infer_request/set_io_blob_precision.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/infer_request/set_io_blob_precision.cpp @@ -3,7 +3,7 @@ // #include "behavior/infer_request/set_io_blob_precision.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" using namespace InferenceEngine; diff --git a/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/infer_correctness.cpp b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/infer_correctness.cpp index 50bf6d393ce348..32f6bd73db4748 100644 --- a/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/infer_correctness.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/infer_correctness.cpp @@ -3,7 +3,7 @@ // #include "behavior/ov_infer_request/infer_consistency.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include #include #include diff --git 
a/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/infer_request_dynamic.cpp b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/infer_request_dynamic.cpp index 28f7b408bb8f37..6540664e3f4b11 100644 --- a/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/infer_request_dynamic.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/infer_request_dynamic.cpp @@ -14,8 +14,8 @@ #include #include "openvino/core/shape.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" +#include "ov_models/builders.hpp" #include "transformations/utils/utils.hpp" #include #include @@ -24,7 +24,7 @@ #include "common_test_utils/common_utils.hpp" #include "functional_test_utils/plugin_cache.hpp" #include "functional_test_utils/blob_utils.hpp" -#include "ngraph_functions/subgraph_builders.hpp" +#include "ov_models/subgraph_builders.hpp" #include "shared_test_classes/subgraph/basic_lstm.hpp" #include "behavior/ov_infer_request/infer_request_dynamic.hpp" #include "base/ov_behavior_test_utils.hpp" diff --git a/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/inference_chaining.cpp b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/inference_chaining.cpp index 6c75adfdff676c..602e8bffbcad6c 100644 --- a/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/inference_chaining.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/inference_chaining.cpp @@ -20,7 +20,7 @@ #include "openvino/core/type/element_type_traits.hpp" #include "openvino/op/parameter.hpp" #include "openvino/core/model.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "openvino/runtime/infer_request.hpp" #include "openvino/runtime/tensor.hpp" #include "behavior/ov_infer_request/inference_chaining.hpp" diff --git a/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/iteration_chaining.cpp b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/iteration_chaining.cpp index 1bd310debe0ccb..92be239e2b6e04 100644 --- a/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/iteration_chaining.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/iteration_chaining.cpp @@ -20,7 +20,7 @@ #include "openvino/core/type/element_type_traits.hpp" #include "openvino/op/parameter.hpp" #include "openvino/core/model.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "openvino/runtime/infer_request.hpp" #include "openvino/runtime/tensor.hpp" #include "behavior/ov_infer_request/iteration_chaining.hpp" diff --git a/src/tests/functional/plugin/shared/src/behavior/ov_plugin/caching_tests.cpp b/src/tests/functional/plugin/shared/src/behavior/ov_plugin/caching_tests.cpp index 726d2340d9dbb2..ab8b5602224ca0 100644 --- a/src/tests/functional/plugin/shared/src/behavior/ov_plugin/caching_tests.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/ov_plugin/caching_tests.cpp @@ -14,8 +14,8 @@ #include "functional_test_utils/skip_tests_config.hpp" #include "functional_test_utils/summary/api_summary.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/subgraph_builders.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/subgraph_builders.hpp" #include "cpp_interfaces/interface/ie_internal_plugin_config.hpp" #include 
"openvino/core/node_vector.hpp" #include "openvino/op/parameter.hpp" diff --git a/src/tests/functional/plugin/shared/src/behavior/ov_plugin/life_time.cpp b/src/tests/functional/plugin/shared/src/behavior/ov_plugin/life_time.cpp index 27a9619b5a6c4d..681e017cc2841d 100644 --- a/src/tests/functional/plugin/shared/src/behavior/ov_plugin/life_time.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/ov_plugin/life_time.cpp @@ -4,7 +4,7 @@ #include -#include +#include #include #include "behavior/ov_plugin/life_time.hpp" diff --git a/src/tests/functional/plugin/shared/src/behavior/ov_plugin/remote.cpp b/src/tests/functional/plugin/shared/src/behavior/ov_plugin/remote.cpp index ea2e1af7e1bcfb..932a8163e825a2 100644 --- a/src/tests/functional/plugin/shared/src/behavior/ov_plugin/remote.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/ov_plugin/remote.cpp @@ -5,7 +5,7 @@ #include "behavior/ov_plugin/remote.hpp" #include "transformations/utils/utils.hpp" #include "functional_test_utils/skip_tests_config.hpp" -#include "ngraph_functions/subgraph_builders.hpp" +#include "ov_models/subgraph_builders.hpp" namespace ov { namespace test { diff --git a/src/tests/functional/plugin/shared/src/behavior/plugin/caching_tests.cpp b/src/tests/functional/plugin/shared/src/behavior/plugin/caching_tests.cpp index df64f4b633941e..3ca54eb5c80566 100644 --- a/src/tests/functional/plugin/shared/src/behavior/plugin/caching_tests.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/plugin/caching_tests.cpp @@ -8,8 +8,8 @@ #include "behavior/plugin/caching_tests.hpp" #include "common_test_utils/file_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/subgraph_builders.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/subgraph_builders.hpp" using namespace InferenceEngine::details; using namespace InferenceEngine; diff --git a/src/tests/functional/plugin/shared/src/behavior/plugin/hetero_synthetic.cpp b/src/tests/functional/plugin/shared/src/behavior/plugin/hetero_synthetic.cpp index 133db56d94e6be..005e71be01fc36 100644 --- a/src/tests/functional/plugin/shared/src/behavior/plugin/hetero_synthetic.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/plugin/hetero_synthetic.cpp @@ -4,8 +4,8 @@ #include "behavior/plugin/hetero_synthetic.hpp" #include -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/subgraph_builders.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/subgraph_builders.hpp" #include "common_test_utils/file_utils.hpp" #include "openvino/util/file_util.hpp" #include diff --git a/src/tests/functional/plugin/shared/src/behavior/plugin/life_time.cpp b/src/tests/functional/plugin/shared/src/behavior/plugin/life_time.cpp index 2017cf7ebd3481..af13b3bfe5107f 100644 --- a/src/tests/functional/plugin/shared/src/behavior/plugin/life_time.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/plugin/life_time.cpp @@ -4,7 +4,7 @@ #include -#include +#include #include #include "behavior/plugin/life_time.hpp" diff --git a/src/tests/functional/plugin/shared/src/behavior/plugin/stress_tests.cpp b/src/tests/functional/plugin/shared/src/behavior/plugin/stress_tests.cpp index b0a042991fc242..94a017ae58ecdc 100644 --- a/src/tests/functional/plugin/shared/src/behavior/plugin/stress_tests.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/plugin/stress_tests.cpp @@ -3,7 +3,7 @@ // #include "behavior/plugin/stress_tests.hpp" -#include "ngraph_functions/subgraph_builders.hpp" +#include "ov_models/subgraph_builders.hpp" namespace 
LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/add_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/add_transformation.cpp index 3109ee6bf3d50d..3faf532bdebfaa 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/add_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/add_transformation.cpp @@ -11,8 +11,8 @@ #include #include -#include "lpt_ngraph_functions/add_function.hpp" -#include "ngraph_functions/subgraph_builders.hpp" +#include "ov_lpt_models/add.hpp" +#include "ov_models/subgraph_builders.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/assign_and_read_value_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/assign_and_read_value_transformation.cpp index 054c7e081f0d42..2c55bd00187bd6 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/assign_and_read_value_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/assign_and_read_value_transformation.cpp @@ -8,7 +8,7 @@ #include #include -#include "lpt_ngraph_functions/assign_and_read_value_function.hpp" +#include "ov_lpt_models/assign_and_read_value.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/batch_to_space_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/batch_to_space_transformation.cpp index e5bb8d13bb586d..5b0e5997147a86 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/batch_to_space_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/batch_to_space_transformation.cpp @@ -9,7 +9,7 @@ #include #include -#include "lpt_ngraph_functions/batch_to_space_function.hpp" +#include "ov_lpt_models/batch_to_space.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/clamp_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/clamp_transformation.cpp index cb5bfa1c261e29..8f14935ee8ba95 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/clamp_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/clamp_transformation.cpp @@ -8,7 +8,7 @@ #include #include -#include "lpt_ngraph_functions/clamp_function.hpp" +#include "ov_lpt_models/clamp.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/concat_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/concat_transformation.cpp index a9283bfefdb77c..392b14b2416587 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/concat_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/concat_transformation.cpp @@ -11,8 +11,8 @@ #include #include -#include "ngraph_functions/subgraph_builders.hpp" -#include "lpt_ngraph_functions/concat_function.hpp" +#include "ov_models/subgraph_builders.hpp" +#include "ov_lpt_models/concat.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_child_and_output.cpp 
b/src/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_child_and_output.cpp index e588a9edeb963f..adbef1bcbf7aea 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_child_and_output.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_child_and_output.cpp @@ -11,8 +11,8 @@ #include #include -#include "ngraph_functions/builders.hpp" -#include "lpt_ngraph_functions/concat_function.hpp" +#include "ov_models/builders.hpp" +#include "ov_lpt_models/concat.hpp" using namespace InferenceEngine; using namespace InferenceEngine::details; diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_different_precision_on_children.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_different_precision_on_children.cpp index 91d15a73b637e4..0114a1ed06c1aa 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_different_precision_on_children.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_different_precision_on_children.cpp @@ -11,8 +11,8 @@ #include #include -#include "ngraph_functions/builders.hpp" -#include "lpt_ngraph_functions/concat_function.hpp" +#include "ov_models/builders.hpp" +#include "ov_lpt_models/concat.hpp" using namespace InferenceEngine; using namespace InferenceEngine::details; diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_intermediate_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_intermediate_transformation.cpp index cf9938ac4bb4d6..55d9b7b6b2577f 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_intermediate_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_intermediate_transformation.cpp @@ -11,8 +11,8 @@ #include #include -#include "ngraph_functions/builders.hpp" -#include "lpt_ngraph_functions/concat_function.hpp" +#include "ov_models/builders.hpp" +#include "ov_lpt_models/concat.hpp" using namespace InferenceEngine; using namespace InferenceEngine::details; diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_neighbors_graph_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_neighbors_graph_transformation.cpp index c28f137e13d62b..b81799f933e242 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_neighbors_graph_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_neighbors_graph_transformation.cpp @@ -11,8 +11,8 @@ #include #include -#include "ngraph_functions/builders.hpp" -#include "lpt_ngraph_functions/concat_function.hpp" +#include "ov_models/builders.hpp" +#include "ov_lpt_models/concat.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_split_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_split_transformation.cpp index 931433870e3a65..c12ba682644daf 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_split_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_split_transformation.cpp @@ -11,8 +11,8 @@ #include 
#include -#include "ngraph_functions/builders.hpp" -#include "lpt_ngraph_functions/concat_function.hpp" +#include "ov_models/builders.hpp" +#include "ov_lpt_models/concat.hpp" using namespace InferenceEngine; using namespace InferenceEngine::details; diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_backprop_data_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_backprop_data_transformation.cpp index 5a75d712c78114..fd4e9f4fdf1550 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_backprop_data_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_backprop_data_transformation.cpp @@ -8,7 +8,7 @@ #include #include -#include "lpt_ngraph_functions/convolution_backprop_data_function.hpp" +#include "ov_lpt_models/convolution_backprop_data.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_qdq_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_qdq_transformation.cpp index 43ee1c173e9e3f..202576fed9ecee 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_qdq_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_qdq_transformation.cpp @@ -15,8 +15,8 @@ #include "functional_test_utils/plugin_cache.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" #include "functional_test_utils/blob_utils.hpp" -#include "ngraph_functions/pass/convert_prc.hpp" -#include "lpt_ngraph_functions/fake_quantize_and_convolution_function.hpp" +#include "ov_models/pass/convert_prc.hpp" +#include "ov_lpt_models/fake_quantize_and_convolution.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_transformation.cpp index dc3bc4eccc90c1..8d524a19c8c5a9 100755 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_transformation.cpp @@ -15,8 +15,8 @@ #include "functional_test_utils/plugin_cache.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" #include "functional_test_utils/blob_utils.hpp" -#include "ngraph_functions/pass/convert_prc.hpp" -#include "lpt_ngraph_functions/fake_quantize_and_convolution_function.hpp" +#include "ov_models/pass/convert_prc.hpp" +#include "ov_lpt_models/fake_quantize_and_convolution.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_with_incorrect_weights.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_with_incorrect_weights.cpp index dda20a6afd3979..f4d1806ac47a57 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_with_incorrect_weights.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_with_incorrect_weights.cpp @@ -15,8 +15,8 @@ #include "functional_test_utils/plugin_cache.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" #include "functional_test_utils/blob_utils.hpp" -#include "ngraph_functions/pass/convert_prc.hpp" -#include 
"lpt_ngraph_functions/convolution_function.hpp" +#include "ov_models/pass/convert_prc.hpp" +#include "ov_lpt_models/convolution.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/depth_to_space_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/depth_to_space_transformation.cpp index 86de676830fdc7..d7336b247a3026 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/depth_to_space_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/depth_to_space_transformation.cpp @@ -15,8 +15,8 @@ #include "shared_test_classes/base/layer_test_utils.hpp" #include "functional_test_utils/blob_utils.hpp" -#include "ngraph_functions/pass/convert_prc.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/pass/convert_prc.hpp" +#include "ov_models/builders.hpp" #include #include @@ -26,7 +26,7 @@ #include #include -#include "lpt_ngraph_functions/depth_to_space_function.hpp" +#include "ov_lpt_models/depth_to_space.hpp" using namespace ngraph::opset1; diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/elementwise_branch_selection_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/elementwise_branch_selection_transformation.cpp index 520799b637af8e..86e2cb22672b9d 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/elementwise_branch_selection_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/elementwise_branch_selection_transformation.cpp @@ -8,7 +8,7 @@ #include #include -#include "lpt_ngraph_functions/add_function.hpp" +#include "ov_lpt_models/add.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/eliminate_fake_quantize_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/eliminate_fake_quantize_transformation.cpp index dd4edfa1d326f8..9b6b99a7cfd964 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/eliminate_fake_quantize_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/eliminate_fake_quantize_transformation.cpp @@ -11,7 +11,7 @@ #include #include "openvino/util/common_util.hpp" -#include "lpt_ngraph_functions/fuse_fake_quantize_function.hpp" +#include "ov_lpt_models/fuse_fake_quantize.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_avg_pool_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_avg_pool_transformation.cpp index bce876247ebefa..f8e69d84d4dec0 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_avg_pool_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_avg_pool_transformation.cpp @@ -11,8 +11,8 @@ //#include #include -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" -#include "lpt_ngraph_functions/avg_pool_function.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/avg_pool.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_max_pool_transformation.cpp 
b/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_max_pool_transformation.cpp index 8960c081f23926..74725c0c965ac9 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_max_pool_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_max_pool_transformation.cpp @@ -11,8 +11,8 @@ //#include #include -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" -#include "lpt_ngraph_functions/max_pool_function.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/max_pool.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_two_output_branches_with_convolution.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_two_output_branches_with_convolution.cpp index 36ae033af96f3c..f4949d770044d9 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_two_output_branches_with_convolution.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_two_output_branches_with_convolution.cpp @@ -15,7 +15,7 @@ #include "functional_test_utils/plugin_cache.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" #include "functional_test_utils/blob_utils.hpp" -#include "ngraph_functions/pass/convert_prc.hpp" +#include "ov_models/pass/convert_prc.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_precision_selection_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_precision_selection_transformation.cpp index 0d3ed87d9da588..8507c678cd9a9b 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_precision_selection_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_precision_selection_transformation.cpp @@ -3,7 +3,7 @@ // #include "low_precision_transformations/fake_quantize_precision_selection_transformation.hpp" -#include "lpt_ngraph_functions/fake_quantize_precision_selection_function.hpp" +#include "ov_lpt_models/fake_quantize_precision_selection.hpp" #include #include diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_with_dq_not_optimal_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_with_dq_not_optimal_transformation.cpp index b2ed6e3bef06c7..e2ce31b6ecbe47 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_with_dq_not_optimal_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_with_dq_not_optimal_transformation.cpp @@ -11,7 +11,7 @@ #include #include -#include "lpt_ngraph_functions/fake_quantize_and_convolution_function.hpp" +#include "ov_lpt_models/fake_quantize_and_convolution.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/fully_connected_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/fully_connected_transformation.cpp index 4cdee6b5d211cf..7beabbd522c785 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/fully_connected_transformation.cpp 
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/fully_connected_transformation.cpp @@ -15,9 +15,9 @@ #include "functional_test_utils/plugin_cache.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" #include "functional_test_utils/blob_utils.hpp" -#include "ngraph_functions/pass/convert_prc.hpp" -#include "ngraph_functions/builders.hpp" -#include "lpt_ngraph_functions/mat_mul_function.hpp" +#include "ov_models/pass/convert_prc.hpp" +#include "ov_models/builders.hpp" +#include "ov_lpt_models/mat_mul.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_convert_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_convert_transformation.cpp index a5af68d6043a54..595d8b06ebfa53 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_convert_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_convert_transformation.cpp @@ -16,8 +16,8 @@ #include "shared_test_classes/base/layer_test_utils.hpp" #include "functional_test_utils/blob_utils.hpp" -#include "ngraph_functions/pass/convert_prc.hpp" -#include "lpt_ngraph_functions/fuse_convert_function.hpp" +#include "ov_models/pass/convert_prc.hpp" +#include "ov_lpt_models/fuse_convert.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_dequantize_to_fake_quantize_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_dequantize_to_fake_quantize_transformation.cpp index f0b108849418cd..b814cb1ec4fecb 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_dequantize_to_fake_quantize_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_dequantize_to_fake_quantize_transformation.cpp @@ -10,7 +10,7 @@ #include #include -#include "lpt_ngraph_functions/fuse_fake_quantize_function.hpp" +#include "ov_lpt_models/fuse_fake_quantize.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_multiply_to_fake_quantize_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_multiply_to_fake_quantize_transformation.cpp index 9e7a5a4b43683d..150ddebadddd31 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_multiply_to_fake_quantize_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_multiply_to_fake_quantize_transformation.cpp @@ -10,7 +10,7 @@ #include #include -#include "lpt_ngraph_functions/fuse_multiply_to_fake_quantize_function.hpp" +#include "ov_lpt_models/fuse_multiply_to_fake_quantize.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_subtract_to_fake_quantize_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_subtract_to_fake_quantize_transformation.cpp index 083376eb9532a8..db9fc21b08d74d 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_subtract_to_fake_quantize_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_subtract_to_fake_quantize_transformation.cpp @@ -10,7 +10,7 @@ #include #include -#include 
"lpt_ngraph_functions/fuse_subtract_to_fake_quantize_function.hpp" +#include "ov_lpt_models/fuse_subtract_to_fake_quantize.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/gather_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/gather_transformation.cpp index 682ce15fad75e8..8858cbdf1f2980 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/gather_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/gather_transformation.cpp @@ -11,7 +11,7 @@ #include #include -#include "lpt_ngraph_functions/gather_function.hpp" +#include "ov_lpt_models/gather.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/gemm_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/gemm_transformation.cpp index dcb5e9af464750..9b6c8fbe3bb300 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/gemm_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/gemm_transformation.cpp @@ -15,9 +15,9 @@ #include "functional_test_utils/plugin_cache.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" #include "functional_test_utils/blob_utils.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" -#include "lpt_ngraph_functions/mat_mul_function.hpp" +#include "ov_lpt_models/mat_mul.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/group_convolution_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/group_convolution_transformation.cpp index 4461749510e036..e2857490ebaae8 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/group_convolution_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/group_convolution_transformation.cpp @@ -15,8 +15,8 @@ #include "functional_test_utils/plugin_cache.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" #include "functional_test_utils/blob_utils.hpp" -#include "ngraph_functions/pass/convert_prc.hpp" -#include "lpt_ngraph_functions/group_convolution_function.hpp" +#include "ov_models/pass/convert_prc.hpp" +#include "ov_lpt_models/group_convolution.hpp" namespace LayerTestsDefinitions { std::string GroupConvolutionTransformation::getTestCaseName(const testing::TestParamInfo& obj) { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/groupconvolution_qdq_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/groupconvolution_qdq_transformation.cpp index e771223c3f4690..1ffff8920255fa 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/groupconvolution_qdq_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/groupconvolution_qdq_transformation.cpp @@ -15,8 +15,8 @@ #include "functional_test_utils/plugin_cache.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" #include "functional_test_utils/blob_utils.hpp" -#include "ngraph_functions/pass/convert_prc.hpp" -#include "lpt_ngraph_functions/fake_quantize_and_convolution_function.hpp" +#include "ov_models/pass/convert_prc.hpp" +#include "ov_lpt_models/fake_quantize_and_convolution.hpp" namespace LayerTestsDefinitions { diff --git 
a/src/tests/functional/plugin/shared/src/low_precision_transformations/interpolate_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/interpolate_transformation.cpp index c0b2ebd8f36a98..56012b6d91289f 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/interpolate_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/interpolate_transformation.cpp @@ -11,7 +11,7 @@ #include #include -#include "lpt_ngraph_functions/interpolate_function.hpp" +#include "ov_lpt_models/interpolate.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_transformation.cpp index 2ad47f13fb1ceb..1c0c7ac7a7050a 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_transformation.cpp @@ -14,8 +14,8 @@ #include "ngraph/op/op.hpp" #include #include "low_precision_transformations/mat_mul_transformation.hpp" -#include "ngraph_functions/subgraph_builders.hpp" -#include "lpt_ngraph_functions/mat_mul_function.hpp" +#include "ov_models/subgraph_builders.hpp" +#include "ov_lpt_models/mat_mul.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_with_constant_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_with_constant_transformation.cpp index efa6852e0c43f5..5708daf3b43ca4 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_with_constant_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_with_constant_transformation.cpp @@ -14,8 +14,8 @@ #include "ngraph/op/op.hpp" #include #include "low_precision_transformations/mat_mul_transformation.hpp" -#include "ngraph_functions/subgraph_builders.hpp" -#include "lpt_ngraph_functions/mat_mul_function.hpp" +#include "ov_models/subgraph_builders.hpp" +#include "ov_lpt_models/mat_mul.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_with_optimized_constant_fq.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_with_optimized_constant_fq.cpp index 02e9a345be44df..f807061e96c622 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_with_optimized_constant_fq.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_with_optimized_constant_fq.cpp @@ -15,8 +15,8 @@ #include "functional_test_utils/plugin_cache.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" #include "functional_test_utils/blob_utils.hpp" -#include "ngraph_functions/pass/convert_prc.hpp" -#include "lpt_ngraph_functions/mat_mul_with_optimized_constant_fake_quantize_function.hpp" +#include "ov_models/pass/convert_prc.hpp" +#include "ov_lpt_models/mat_mul_with_optimized_constant_fake_quantize.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/move_fake_quantize_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/move_fake_quantize_transformation.cpp index 2bcfaf6e93b78d..4ea5038a026036 100644 --- 
a/src/tests/functional/plugin/shared/src/low_precision_transformations/move_fake_quantize_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/move_fake_quantize_transformation.cpp @@ -14,7 +14,7 @@ #include "common_test_utils/common_utils.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" #include "functional_test_utils/blob_utils.hpp" -#include "lpt_ngraph_functions/move_fake_quantize_function.hpp" +#include "ov_lpt_models/move_fake_quantize.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/multiply_to_group_convolution_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/multiply_to_group_convolution_transformation.cpp index 7a83c2daf5e049..ae2affdb42e4f4 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/multiply_to_group_convolution_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/multiply_to_group_convolution_transformation.cpp @@ -16,8 +16,8 @@ #include "shared_test_classes/base/layer_test_utils.hpp" #include "functional_test_utils/blob_utils.hpp" -#include "ngraph_functions/pass/convert_prc.hpp" -#include "lpt_ngraph_functions/multiply_to_group_convolution_function.hpp" +#include "ov_models/pass/convert_prc.hpp" +#include "ov_lpt_models/multiply_to_group_convolution.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/multiply_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/multiply_transformation.cpp index 2088d4db87696a..e913ae4d117ab4 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/multiply_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/multiply_transformation.cpp @@ -11,8 +11,8 @@ #include #include -#include "lpt_ngraph_functions/multiply_partial_function.hpp" -#include "ngraph_functions/subgraph_builders.hpp" +#include "ov_lpt_models/multiply_partial_function.hpp" +#include "ov_models/subgraph_builders.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/multiply_with_one_parent_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/multiply_with_one_parent_transformation.cpp index 8a13a66fc99b83..4c8b6020c8449d 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/multiply_with_one_parent_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/multiply_with_one_parent_transformation.cpp @@ -11,7 +11,7 @@ #include #include "common_test_utils/common_utils.hpp" -#include "lpt_ngraph_functions/multiply_with_one_parent_function.hpp" +#include "ov_lpt_models/multiply_with_one_parent.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/mvn_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/mvn_transformation.cpp index dec1318655b798..e28afed0f47de4 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/mvn_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/mvn_transformation.cpp @@ -16,8 +16,8 @@ #include "shared_test_classes/base/layer_test_utils.hpp" #include "functional_test_utils/blob_utils.hpp" -#include 
"ngraph_functions/pass/convert_prc.hpp" -#include "lpt_ngraph_functions/mvn_function.hpp" +#include "ov_models/pass/convert_prc.hpp" +#include "ov_lpt_models/mvn.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/normalize_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/normalize_transformation.cpp index 960e3209eb7441..fda8edf0d18513 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/normalize_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/normalize_transformation.cpp @@ -16,8 +16,8 @@ #include "shared_test_classes/base/layer_test_utils.hpp" #include "functional_test_utils/blob_utils.hpp" -#include "ngraph_functions/pass/convert_prc.hpp" -#include "lpt_ngraph_functions/normalize_l2_function.hpp" +#include "ov_models/pass/convert_prc.hpp" +#include "ov_lpt_models/normalize_l2.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_concat.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_concat.cpp index a06a2c46d890b0..f91f934de24c99 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_concat.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_concat.cpp @@ -16,8 +16,8 @@ #include "shared_test_classes/base/layer_test_utils.hpp" #include "functional_test_utils/blob_utils.hpp" -#include "ngraph_functions/pass/convert_prc.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/pass/convert_prc.hpp" +#include "ov_models/builders.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_concat_multi_channel.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_concat_multi_channel.cpp index 345c633f73abd1..bb82dbdaf77459 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_concat_multi_channel.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_concat_multi_channel.cpp @@ -16,8 +16,8 @@ #include "shared_test_classes/base/layer_test_utils.hpp" #include "functional_test_utils/blob_utils.hpp" -#include "ngraph_functions/pass/convert_prc.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/pass/convert_prc.hpp" +#include "ov_models/builders.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_handling_in_transformations.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_handling_in_transformations.cpp index 5e5d98354d4197..67ca66f65a63c3 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_handling_in_transformations.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_handling_in_transformations.cpp @@ -16,8 +16,8 @@ #include "shared_test_classes/base/layer_test_utils.hpp" #include "functional_test_utils/blob_utils.hpp" -#include "ngraph_functions/pass/convert_prc.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/pass/convert_prc.hpp" +#include "ov_models/builders.hpp" namespace LayerTestsDefinitions { diff --git 
a/src/tests/functional/plugin/shared/src/low_precision_transformations/pad_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/pad_transformation.cpp index 29933ae1bfec42..57a5dde6799bee 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/pad_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/pad_transformation.cpp @@ -8,7 +8,7 @@ #include #include -#include "lpt_ngraph_functions/pad_function.hpp" +#include "ov_lpt_models/pad.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/prelu_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/prelu_transformation.cpp index 873a0d6f95326f..eb28b5dbf5750c 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/prelu_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/prelu_transformation.cpp @@ -11,7 +11,7 @@ #include #include -#include "lpt_ngraph_functions/prelu_function.hpp" +#include "ov_lpt_models/prelu.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/pull_reshape_through_dequantization_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/pull_reshape_through_dequantization_transformation.cpp index c4739d402a8f37..4b8f6713f8839a 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/pull_reshape_through_dequantization_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/pull_reshape_through_dequantization_transformation.cpp @@ -15,8 +15,8 @@ #include "functional_test_utils/plugin_cache.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" #include "functional_test_utils/blob_utils.hpp" -#include "ngraph_functions/pass/convert_prc.hpp" -#include "lpt_ngraph_functions/fake_quantize_and_convolution_function.hpp" +#include "ov_models/pass/convert_prc.hpp" +#include "ov_lpt_models/fake_quantize_and_convolution.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/recurrent_cell_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/recurrent_cell_transformation.cpp index 85188cd1d52534..c377c370c76597 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/recurrent_cell_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/recurrent_cell_transformation.cpp @@ -14,7 +14,7 @@ #include "common_test_utils/common_utils.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" #include "functional_test_utils/blob_utils.hpp" -#include "lpt_ngraph_functions/recurrent_cell_function.hpp" +#include "ov_lpt_models/recurrent_cell.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_max_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_max_transformation.cpp index fef1c1bf5c6394..25c7ec5bee2de7 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_max_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_max_transformation.cpp @@ -8,7 +8,7 @@ #include #include -#include "lpt_ngraph_functions/reduce_function.hpp" +#include 
"ov_lpt_models/reduce.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_mean_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_mean_transformation.cpp index aed68685e38eb5..b5139f40821269 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_mean_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_mean_transformation.cpp @@ -8,7 +8,7 @@ #include #include -#include "lpt_ngraph_functions/reduce_function.hpp" +#include "ov_lpt_models/reduce.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_min_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_min_transformation.cpp index 77e09b06074647..4e4448dac1e1cf 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_min_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_min_transformation.cpp @@ -8,7 +8,7 @@ #include #include -#include "lpt_ngraph_functions/reduce_function.hpp" +#include "ov_lpt_models/reduce.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_sum_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_sum_transformation.cpp index bed234c41f66c4..f3536cdb67f5af 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_sum_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_sum_transformation.cpp @@ -8,7 +8,7 @@ #include #include -#include "lpt_ngraph_functions/reduce_function.hpp" +#include "ov_lpt_models/reduce.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/relu_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/relu_transformation.cpp index 0969a11221116c..b0acf0f885852d 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/relu_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/relu_transformation.cpp @@ -11,7 +11,7 @@ #include #include -#include "lpt_ngraph_functions/relu_function.hpp" +#include "ov_lpt_models/relu.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/reshape_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/reshape_transformation.cpp index 1ecd3dac9206c7..7a45da7385e478 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/reshape_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/reshape_transformation.cpp @@ -9,7 +9,7 @@ #include #include -#include "lpt_ngraph_functions/reshape_function.hpp" +#include "ov_lpt_models/reshape.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/shuffle_channels_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/shuffle_channels_transformation.cpp index 67a9f88182005a..b1405b805828e3 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/shuffle_channels_transformation.cpp +++ 
b/src/tests/functional/plugin/shared/src/low_precision_transformations/shuffle_channels_transformation.cpp @@ -14,7 +14,7 @@ #include "common_test_utils/common_utils.hpp" #include "functional_test_utils/plugin_cache.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" -#include "lpt_ngraph_functions/shuffle_channels_function.hpp" +#include "ov_lpt_models/shuffle_channels.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/space_to_batch_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/space_to_batch_transformation.cpp index 73da4553e323cb..e8887dcf4902da 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/space_to_batch_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/space_to_batch_transformation.cpp @@ -9,7 +9,7 @@ #include #include -#include "lpt_ngraph_functions/space_to_batch_function.hpp" +#include "ov_lpt_models/space_to_batch.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/split_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/split_transformation.cpp index 138aa984786afb..161db8402514fb 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/split_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/split_transformation.cpp @@ -12,7 +12,7 @@ #include #include "low_precision/split.hpp" -#include "lpt_ngraph_functions/split_function.hpp" +#include "ov_lpt_models/split.hpp" namespace LayerTestsDefinitions { std::string SplitTransformation::getTestCaseName(const testing::TestParamInfo& obj) { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/squeeze_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/squeeze_transformation.cpp index a3f8c75f780665..4a6a22d4fdcee4 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/squeeze_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/squeeze_transformation.cpp @@ -12,8 +12,8 @@ #include "ngraph/op/op.hpp" #include #include "low_precision_transformations/squeeze_transformation.hpp" -#include "ngraph_functions/subgraph_builders.hpp" -#include "lpt_ngraph_functions/squeeze_function.hpp" +#include "ov_models/subgraph_builders.hpp" +#include "ov_lpt_models/squeeze.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/strided_slice_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/strided_slice_transformation.cpp index c5aec0401f7eb4..7208df27695db4 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/strided_slice_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/strided_slice_transformation.cpp @@ -8,7 +8,7 @@ #include #include -#include "lpt_ngraph_functions/strided_slice_function.hpp" +#include "ov_lpt_models/strided_slice.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/subtract_multiply_to_multiply_add_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/subtract_multiply_to_multiply_add_transformation.cpp index 505d6d33fd3103..bfa10aa4b75b29 100644 --- 
a/src/tests/functional/plugin/shared/src/low_precision_transformations/subtract_multiply_to_multiply_add_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/subtract_multiply_to_multiply_add_transformation.cpp @@ -11,7 +11,7 @@ #include #include -#include "lpt_ngraph_functions/subtract_multiply_to_multiply_add_function.hpp" +#include "ov_lpt_models/subtract_multiply_to_multiply_add.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/subtract_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/subtract_transformation.cpp index 30c270146e1bb6..1866315bffb0f0 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/subtract_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/subtract_transformation.cpp @@ -11,7 +11,7 @@ #include #include -#include "lpt_ngraph_functions/subtract_function.hpp" +#include "ov_lpt_models/subtract.hpp" diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/transpose_after_matmul_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/transpose_after_matmul_transformation.cpp index adac1f49a5a2db..fa466880570f07 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/transpose_after_matmul_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/transpose_after_matmul_transformation.cpp @@ -16,7 +16,7 @@ #include "shared_test_classes/base/layer_test_utils.hpp" #include "functional_test_utils/blob_utils.hpp" -#include "lpt_ngraph_functions/transpose_after_mat_mul_function.hpp" +#include "ov_lpt_models/transpose_after_mat_mul.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/transpose_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/transpose_transformation.cpp index 2c3c3904262cbe..ef7fc3ba6f52f0 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/transpose_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/transpose_transformation.cpp @@ -11,7 +11,7 @@ #include #include -#include "lpt_ngraph_functions/transpose_function.hpp" +#include "ov_lpt_models/transpose.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/unsqueeze_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/unsqueeze_transformation.cpp index afe6fe82c1cbb3..3eb1626f23daf4 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/unsqueeze_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/unsqueeze_transformation.cpp @@ -12,8 +12,8 @@ #include "ngraph/op/op.hpp" #include #include "low_precision_transformations/unsqueeze_transformation.hpp" -#include "ngraph_functions/subgraph_builders.hpp" -#include "lpt_ngraph_functions/unsqueeze_function.hpp" +#include "ov_models/subgraph_builders.hpp" +#include "ov_lpt_models/unsqueeze.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/variadic_split_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/variadic_split_transformation.cpp index 67af8ec240a49a..4124c033807f4e 
100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/variadic_split_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/variadic_split_transformation.cpp @@ -12,7 +12,7 @@ #include #include "low_precision/variadic_split.hpp" -#include "lpt_ngraph_functions/variadic_split_function.hpp" +#include "ov_lpt_models/variadic_split.hpp" namespace LayerTestsDefinitions { std::string VariadicSplitTransformation::getTestCaseName(const testing::TestParamInfo& obj) { diff --git a/src/tests/functional/plugin/shared/src/snippets/add.cpp b/src/tests/functional/plugin/shared/src/snippets/add.cpp index e6ad44fb9e9786..bb4603f42b9ed0 100644 --- a/src/tests/functional/plugin/shared/src/snippets/add.cpp +++ b/src/tests/functional/plugin/shared/src/snippets/add.cpp @@ -5,7 +5,7 @@ #include "common_test_utils/common_utils.hpp" #include "snippets/add.hpp" #include "subgraph_simple.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "functional_test_utils/skip_tests_config.hpp" #include "cpp_interfaces/interface/ie_internal_plugin_config.hpp" diff --git a/src/tests/functional/plugin/shared/src/snippets/codegen_bert.cpp b/src/tests/functional/plugin/shared/src/snippets/codegen_bert.cpp index 62237e96bed01a..fa0d79e691fe63 100644 --- a/src/tests/functional/plugin/shared/src/snippets/codegen_bert.cpp +++ b/src/tests/functional/plugin/shared/src/snippets/codegen_bert.cpp @@ -15,7 +15,7 @@ #include "shared_test_classes/base/layer_test_utils.hpp" #include "functional_test_utils/blob_utils.hpp" -#include "ngraph_functions/pass/convert_prc.hpp" +#include "ov_models/pass/convert_prc.hpp" #include "snippets/codegen_bert.hpp" // todo: Rewrite this test using Snippets test infrastructure. 
See add_convert or conv_eltwise for example diff --git a/src/tests/functional/plugin/shared/src/snippets/codegen_gelu.cpp b/src/tests/functional/plugin/shared/src/snippets/codegen_gelu.cpp index 0a1fee40ef0bf1..4dc4d7964cdf55 100644 --- a/src/tests/functional/plugin/shared/src/snippets/codegen_gelu.cpp +++ b/src/tests/functional/plugin/shared/src/snippets/codegen_gelu.cpp @@ -9,7 +9,7 @@ #include "openvino/pass/constant_folding.hpp" #include "snippets/codegen_gelu.hpp" #include "subgraph_simple.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "functional_test_utils/skip_tests_config.hpp" #include "cpp_interfaces/interface/ie_internal_plugin_config.hpp" diff --git a/src/tests/functional/plugin/shared/src/snippets/edge_replace.cpp b/src/tests/functional/plugin/shared/src/snippets/edge_replace.cpp index db13eb656607d4..c51e3a9dadfb6b 100644 --- a/src/tests/functional/plugin/shared/src/snippets/edge_replace.cpp +++ b/src/tests/functional/plugin/shared/src/snippets/edge_replace.cpp @@ -5,7 +5,7 @@ #include "common_test_utils/common_utils.hpp" #include "snippets/edge_replace.hpp" #include "subgraph_simple.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "functional_test_utils/skip_tests_config.hpp" namespace ov { diff --git a/src/tests/functional/plugin/shared/src/snippets/fake_quantize_decomposition_test.cpp b/src/tests/functional/plugin/shared/src/snippets/fake_quantize_decomposition_test.cpp index 04d9adcb47322c..e5b3c55391ed44 100644 --- a/src/tests/functional/plugin/shared/src/snippets/fake_quantize_decomposition_test.cpp +++ b/src/tests/functional/plugin/shared/src/snippets/fake_quantize_decomposition_test.cpp @@ -12,7 +12,7 @@ #include #include "ov_ops/type_relaxed.hpp" -#include "fake_quantize_function.hpp" +#include "fake_quantize_helper.hpp" #include "function_helper.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/plugin/shared/src/snippets/precision_propagation_convertion.cpp b/src/tests/functional/plugin/shared/src/snippets/precision_propagation_convertion.cpp index 67a91386cf34a5..3cb78321089151 100644 --- a/src/tests/functional/plugin/shared/src/snippets/precision_propagation_convertion.cpp +++ b/src/tests/functional/plugin/shared/src/snippets/precision_propagation_convertion.cpp @@ -5,7 +5,7 @@ #include "snippets/precision_propagation_convertion.hpp" #include "common_test_utils/common_utils.hpp" -#include "precision_propagation_convertion_function.hpp" +#include "precision_propagation_convertion.hpp" #include "cpp_interfaces/interface/ie_internal_plugin_config.hpp" namespace ov { diff --git a/src/tests/functional/plugin/shared/src/snippets/softmax.cpp b/src/tests/functional/plugin/shared/src/snippets/softmax.cpp index 0a611cdf882703..57d27a4d4b3454 100644 --- a/src/tests/functional/plugin/shared/src/snippets/softmax.cpp +++ b/src/tests/functional/plugin/shared/src/snippets/softmax.cpp @@ -5,7 +5,7 @@ #include "common_test_utils/common_utils.hpp" #include "snippets/softmax.hpp" #include "subgraph_softmax.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "functional_test_utils/skip_tests_config.hpp" #include "cpp_interfaces/interface/ie_internal_plugin_config.hpp" diff --git a/src/tests/functional/plugin/shared/src/snippets/transpose_softmax.cpp b/src/tests/functional/plugin/shared/src/snippets/transpose_softmax.cpp index f86b0c2d6e206b..754230ef70a83a 100644 --- a/src/tests/functional/plugin/shared/src/snippets/transpose_softmax.cpp 
+++ b/src/tests/functional/plugin/shared/src/snippets/transpose_softmax.cpp
@@ -5,7 +5,7 @@
 #include "common_test_utils/common_utils.hpp"
 #include "snippets/transpose_softmax.hpp"
 #include "subgraph_softmax.hpp"
-#include "ngraph_functions/builders.hpp"
+#include "ov_models/builders.hpp"
 #include "functional_test_utils/skip_tests_config.hpp"
 #include "cpp_interfaces/interface/ie_internal_plugin_config.hpp"
diff --git a/src/tests/functional/shared_test_classes/CMakeLists.txt b/src/tests/functional/shared_test_classes/CMakeLists.txt
index 55f2f4e9a1ee24..f53f00ca135317 100644
--- a/src/tests/functional/shared_test_classes/CMakeLists.txt
+++ b/src/tests/functional/shared_test_classes/CMakeLists.txt
@@ -19,7 +19,7 @@ addIeTarget(
 LINK_LIBRARIES
 PRIVATE
 func_test_utils
- ngraphFunctions
+ ov_models
 )
 ov_build_target_faster(${TARGET_NAME}
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/base/layer_test_utils.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/base/layer_test_utils.hpp
index 918f63c3af9f54..8558504cbf6a14 100644
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/base/layer_test_utils.hpp
+++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/base/layer_test_utils.hpp
@@ -30,8 +30,8 @@
 #include "functional_test_utils/summary/op_summary.hpp"
 #include "functional_test_utils/summary/environment.hpp"
-#include "ngraph_functions/utils/ngraph_helpers.hpp"
-#include "ngraph_functions/pass/convert_prc.hpp"
+#include "ov_models/utils/ov_helpers.hpp"
+#include "ov_models/pass/convert_prc.hpp"
 namespace LayerTestsUtils {
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/activation.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/activation.hpp
index 1fc257a19cab79..5ad56aaf2f562d 100644
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/activation.hpp
+++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/activation.hpp
@@ -20,8 +20,8 @@
 #include "shared_test_classes/base/layer_test_utils.hpp"
 #include "common_test_utils/common_utils.hpp"
-#include "ngraph_functions/utils/ngraph_helpers.hpp"
-#include "ngraph_functions/builders.hpp"
+#include "ov_models/utils/ov_helpers.hpp"
+#include "ov_models/builders.hpp"
 namespace LayerTestsDefinitions {
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/batch_norm.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/batch_norm.hpp
index df68a6739a20f9..b261641ab1ea79 100644
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/batch_norm.hpp
+++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/batch_norm.hpp
@@ -5,7 +5,7 @@
 #pragma once
 #include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ngraph_functions/builders.hpp"
+#include "ov_models/builders.hpp"
 namespace LayerTestsDefinitions {
 typedef std::tuple<
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/binary_convolution.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/binary_convolution.hpp
index 7d5a043b520a24..48562853f67f73 100644
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/binary_convolution.hpp
+++
b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/binary_convolution.hpp
@@ -10,8 +10,8 @@
 #include
 #include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ngraph_functions/builders.hpp"
-#include "ngraph_functions/utils/ngraph_helpers.hpp"
+#include "ov_models/builders.hpp"
+#include "ov_models/utils/ov_helpers.hpp"
 namespace LayerTestsDefinitions {
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/bucketize.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/bucketize.hpp
index 3f9603a826fab6..169ce16bf892ed 100644
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/bucketize.hpp
+++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/bucketize.hpp
@@ -7,8 +7,8 @@
 #include
 #include
-#include "ngraph_functions/builders.hpp"
-#include "ngraph_functions/utils/ngraph_helpers.hpp"
+#include "ov_models/builders.hpp"
+#include "ov_models/utils/ov_helpers.hpp"
 #include "shared_test_classes/base/layer_test_utils.hpp"
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/clamp.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/clamp.hpp
index 09f646c8868754..5e2c3a0ab8a64b 100644
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/clamp.hpp
+++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/clamp.hpp
@@ -7,8 +7,8 @@
 #include
 #include
-#include "ngraph_functions/builders.hpp"
-#include "ngraph_functions/utils/ngraph_helpers.hpp"
+#include "ov_models/builders.hpp"
+#include "ov_models/utils/ov_helpers.hpp"
 #include "shared_test_classes/base/layer_test_utils.hpp"
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/concat.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/concat.hpp
index 26cd42839bb949..d07d45909d1644 100644
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/concat.hpp
+++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/concat.hpp
@@ -10,8 +10,8 @@
 #include
 #include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ngraph_functions/builders.hpp"
-#include "ngraph_functions/utils/ngraph_helpers.hpp"
+#include "ov_models/builders.hpp"
+#include "ov_models/utils/ov_helpers.hpp"
 namespace LayerTestsDefinitions {
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/constant.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/constant.hpp
index bca3a24ef5e6dc..8abc666eccb7e9 100644
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/constant.hpp
+++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/constant.hpp
@@ -8,8 +8,8 @@
 #include
 #include
-#include "ngraph_functions/builders.hpp"
-#include "ngraph_functions/utils/ngraph_helpers.hpp"
+#include "ov_models/builders.hpp"
+#include "ov_models/utils/ov_helpers.hpp"
 #include "shared_test_classes/base/layer_test_utils.hpp"
 namespace LayerTestsDefinitions {
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convert_color_i420.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convert_color_i420.hpp
index
628b1bcdcf502c..3a92968aaeec7d 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convert_color_i420.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convert_color_i420.hpp @@ -9,8 +9,8 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convert_color_nv12.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convert_color_nv12.hpp index 7f774b2aef817d..cb791dc6a3f3d9 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convert_color_nv12.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convert_color_nv12.hpp @@ -9,8 +9,8 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convolution.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convolution.hpp index 37f57fe419f195..6cdc9b5e323988 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convolution.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convolution.hpp @@ -10,8 +10,8 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convolution_backprop.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convolution_backprop.hpp index 636163053222f2..ef896760a42fe8 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convolution_backprop.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convolution_backprop.hpp @@ -10,8 +10,8 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convolution_backprop_data.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convolution_backprop_data.hpp index 4d4b4bfc492538..4ce4dc1decb687 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convolution_backprop_data.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convolution_backprop_data.hpp @@ -12,8 +12,8 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include 
"ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/cum_sum.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/cum_sum.hpp index 7ebf45de543012..0b3746b5406543 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/cum_sum.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/cum_sum.hpp @@ -8,7 +8,7 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/deformable_convolution.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/deformable_convolution.hpp index 52384808f3fca4..ad164b7e6d07ba 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/deformable_convolution.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/deformable_convolution.hpp @@ -10,8 +10,8 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/deformable_psroi_pooling.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/deformable_psroi_pooling.hpp index 89313be8baa118..9ad896f62bfa86 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/deformable_psroi_pooling.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/deformable_psroi_pooling.hpp @@ -9,8 +9,8 @@ #include #include -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/depth_to_space.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/depth_to_space.hpp index b5bf9529880e35..486a0ae7684ce5 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/depth_to_space.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/depth_to_space.hpp @@ -11,8 +11,8 @@ #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/dft.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/dft.hpp index 88fc518af2d565..1dee86fcad06fd 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/dft.hpp +++ 
b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/dft.hpp @@ -8,7 +8,7 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/eltwise.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/eltwise.hpp index 0cab1f7aa46464..8a3aaa6755bc22 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/eltwise.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/eltwise.hpp @@ -5,7 +5,7 @@ #pragma once -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/utils/ov_helpers.hpp" #include "common_test_utils/common_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/experimental_detectron_detection_output.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/experimental_detectron_detection_output.hpp index 83ab22ed60d1fd..100f73456da676 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/experimental_detectron_detection_output.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/experimental_detectron_detection_output.hpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/utils/ov_helpers.hpp" #include "common_test_utils/common_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/experimental_detectron_generate_proposals_single_image.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/experimental_detectron_generate_proposals_single_image.hpp index 1ce9aa0cffaf15..f1500f6e0891d3 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/experimental_detectron_generate_proposals_single_image.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/experimental_detectron_generate_proposals_single_image.hpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/utils/ov_helpers.hpp" #include "common_test_utils/common_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/experimental_detectron_prior_grid_generator.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/experimental_detectron_prior_grid_generator.hpp index bb0132c8a35ff9..0eeaee750a8255 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/experimental_detectron_prior_grid_generator.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/experimental_detectron_prior_grid_generator.hpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/utils/ov_helpers.hpp" #include "common_test_utils/common_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" diff --git 
a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/experimental_detectron_roifeatureextractor.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/experimental_detectron_roifeatureextractor.hpp index b9f59394d5fefc..38f480a0a6ae45 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/experimental_detectron_roifeatureextractor.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/experimental_detectron_roifeatureextractor.hpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/utils/ov_helpers.hpp" #include "common_test_utils/common_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/experimental_detectron_topkrois.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/experimental_detectron_topkrois.hpp index 173612e2eb497e..091c865a893bb6 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/experimental_detectron_topkrois.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/experimental_detectron_topkrois.hpp @@ -4,7 +4,7 @@ #pragma once -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/utils/ov_helpers.hpp" #include "common_test_utils/common_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/fake_quantize.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/fake_quantize.hpp index f97c67878344c1..241b9492e38868 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/fake_quantize.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/fake_quantize.hpp @@ -10,8 +10,8 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" // seed selected using current cloc time #define USE_CLOCK_TIME 1 diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/gather.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/gather.hpp index 5dcc649a415ab1..5f9e9473708969 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/gather.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/gather.hpp @@ -10,8 +10,8 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/gather_nd.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/gather_nd.hpp index 547122b4b685d2..9e9705009578af 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/gather_nd.hpp +++ 
b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/gather_nd.hpp @@ -9,8 +9,8 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" namespace LayerTestsDefinitions { using Config = std::map; diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/gather_tree.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/gather_tree.hpp index 53846209a3372b..41925413e75d89 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/gather_tree.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/gather_tree.hpp @@ -10,8 +10,8 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/generate_proposals.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/generate_proposals.hpp index 2371de2c379585..9f1484ce666e6c 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/generate_proposals.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/generate_proposals.hpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/utils/ov_helpers.hpp" #include "common_test_utils/common_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/grn.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/grn.hpp index 8c892b105b4522..585f08f0beaa96 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/grn.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/grn.hpp @@ -23,8 +23,8 @@ #include "shared_test_classes/base/layer_test_utils.hpp" #include "common_test_utils/common_utils.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" +#include "ov_models/builders.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/group_convolution.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/group_convolution.hpp index 3a093ba4eecbd4..add3010c44bac6 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/group_convolution.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/group_convolution.hpp @@ -10,8 +10,8 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" namespace LayerTestsDefinitions { typedef std::tuple< diff --git 
a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/group_convolution_backprop_data.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/group_convolution_backprop_data.hpp index 24e753c6fbe4dc..bb694c120e8e87 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/group_convolution_backprop_data.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/group_convolution_backprop_data.hpp @@ -10,8 +10,8 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/gru_cell.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/gru_cell.hpp index eca8261bd97868..ec6c7e61a462b5 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/gru_cell.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/gru_cell.hpp @@ -10,8 +10,8 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/gru_sequence.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/gru_sequence.hpp index 9c0df506243a5f..225b5d6c2f616a 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/gru_sequence.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/gru_sequence.hpp @@ -10,8 +10,8 @@ #include #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" #include "common_test_utils/test_enums.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/interpolate.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/interpolate.hpp index 5741393440e734..f2206ddf789ad0 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/interpolate.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/interpolate.hpp @@ -11,8 +11,8 @@ #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/log_softmax.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/log_softmax.hpp index 8808df166b95f6..7218309e776dc2 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/log_softmax.hpp +++ 
b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/log_softmax.hpp @@ -11,8 +11,8 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/loop.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/loop.hpp index bda29704ba7628..9d6137853eb652 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/loop.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/loop.hpp @@ -10,8 +10,8 @@ #include #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" namespace LayerTestsDefinitions { enum LOOP_IN_TYPE { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/low_precision.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/low_precision.hpp index d329ec1bb30d26..990394bcab167b 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/low_precision.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/low_precision.hpp @@ -10,8 +10,8 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" namespace LowPrecisionTestDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/lrn.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/lrn.hpp index b6ae20f6d40698..d4a3f69a707374 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/lrn.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/lrn.hpp @@ -9,8 +9,8 @@ #include #include -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/lstm_cell.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/lstm_cell.hpp index d1e72515995b68..1145c588794328 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/lstm_cell.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/lstm_cell.hpp @@ -11,8 +11,8 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/lstm_cell_basic.hpp 
b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/lstm_cell_basic.hpp index 2d830065b2920e..878da4062d3358 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/lstm_cell_basic.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/lstm_cell_basic.hpp @@ -11,8 +11,8 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/lstm_sequence.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/lstm_sequence.hpp index 591df22156c341..0f520392f88efd 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/lstm_sequence.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/lstm_sequence.hpp @@ -10,8 +10,8 @@ #include #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/matrix_nms.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/matrix_nms.hpp index 03e68e4a2c004c..586d39dfa3c1e7 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/matrix_nms.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/matrix_nms.hpp @@ -7,7 +7,7 @@ #include #include -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/utils/ov_helpers.hpp" #include "common_test_utils/common_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/minimum_maximum.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/minimum_maximum.hpp index 35c1bddf3d202b..bee0cde0cdfea5 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/minimum_maximum.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/minimum_maximum.hpp @@ -9,8 +9,8 @@ #include #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" #include "common_test_utils/test_constants.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/multiclass_nms.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/multiclass_nms.hpp index a425db3ced0e02..4c936209d72783 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/multiclass_nms.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/multiclass_nms.hpp @@ -7,7 +7,7 @@ #include #include -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/utils/ov_helpers.hpp" 
#include "common_test_utils/common_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/non_max_suppression.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/non_max_suppression.hpp index 1471eee1bf3156..54d2ea05f831f3 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/non_max_suppression.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/non_max_suppression.hpp @@ -7,7 +7,7 @@ #include #include -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" namespace testing { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/nonzero.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/nonzero.hpp index b6c5f401adf07c..e1eff18607cd60 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/nonzero.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/nonzero.hpp @@ -6,8 +6,8 @@ #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" #include #include diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/normalize_l2.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/normalize_l2.hpp index 9593cbcbaba3a5..3adc1ddc9dac77 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/normalize_l2.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/normalize_l2.hpp @@ -9,7 +9,7 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/one_hot.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/one_hot.hpp index 0f41b5df0b7cae..172127a92abd52 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/one_hot.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/one_hot.hpp @@ -10,7 +10,7 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace LayerTestsDefinitions { typedef std::tuple< ngraph::element::Type, // depth type (any integer type) diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/pad.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/pad.hpp index 613a0659b24314..0db4bc3e0e2719 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/pad.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/pad.hpp @@ -10,7 +10,7 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace LayerTestsDefinitions { typedef std::tuple< diff --git 
a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/pooling.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/pooling.hpp index 476b72aa809e70..82c28c09a9ed2b 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/pooling.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/pooling.hpp @@ -9,8 +9,8 @@ #include #include -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/power.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/power.hpp index 94e92fe9e9ec8c..1a22a296c8c57e 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/power.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/power.hpp @@ -9,7 +9,7 @@ #include #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "common_test_utils/test_constants.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/prior_box.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/prior_box.hpp index 4d3476d86ac0d3..b1e752c12bae59 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/prior_box.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/prior_box.hpp @@ -23,8 +23,8 @@ #include "shared_test_classes/base/layer_test_utils.hpp" #include "common_test_utils/common_utils.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" +#include "ov_models/builders.hpp" namespace LayerTestsDefinitions { using priorBoxSpecificParams = std::tuple< diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/prior_box_clustered.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/prior_box_clustered.hpp index 50b2d3d35ed571..b712b4c9a09b75 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/prior_box_clustered.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/prior_box_clustered.hpp @@ -23,8 +23,8 @@ #include "shared_test_classes/base/layer_test_utils.hpp" #include "common_test_utils/common_utils.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" +#include "ov_models/builders.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/proposal.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/proposal.hpp index e380464c58deb3..091d9c1c8a02f7 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/proposal.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/proposal.hpp @@ -10,8 +10,8 @@ #include #include 
"shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/psroi_pooling.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/psroi_pooling.hpp index 5f697342000b52..14f35e754b1787 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/psroi_pooling.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/psroi_pooling.hpp @@ -9,8 +9,8 @@ #include #include -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/range.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/range.hpp index 623340a5c9aa5f..b83fcee41fd1ec 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/range.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/range.hpp @@ -10,7 +10,7 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace LayerTestsDefinitions { typedef std::tuple< diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/rdft.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/rdft.hpp index 1cacce2db5dc6e..aeaf504a117f95 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/rdft.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/rdft.hpp @@ -8,7 +8,7 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/reduce_ops.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/reduce_ops.hpp index 08287750a6b861..cb63bffe99a234 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/reduce_ops.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/reduce_ops.hpp @@ -10,7 +10,7 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/region_yolo.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/region_yolo.hpp index 0e52c9472d269a..9ed7e38f0869a2 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/region_yolo.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/region_yolo.hpp @@ -9,8 +9,8 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include 
"ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/reorg_yolo.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/reorg_yolo.hpp index 99ef0025c8d514..bc9d1560e0073e 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/reorg_yolo.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/reorg_yolo.hpp @@ -9,8 +9,8 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/reshape.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/reshape.hpp index e3ce54a11d6f1e..814444bff712ed 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/reshape.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/reshape.hpp @@ -9,8 +9,8 @@ #include #include -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/result.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/result.hpp index 554fc17a75b339..71cdbdae6dad38 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/result.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/result.hpp @@ -10,8 +10,8 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/reverse_sequence.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/reverse_sequence.hpp index 387f5818d74edb..77049ac9c20c39 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/reverse_sequence.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/reverse_sequence.hpp @@ -10,8 +10,8 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/rnn_cell.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/rnn_cell.hpp index b65cebf6b19559..9af0ac37ee2877 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/rnn_cell.hpp +++ 
b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/rnn_cell.hpp @@ -10,8 +10,8 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/rnn_sequence.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/rnn_sequence.hpp index d637a6afca0385..a3788d9b1cb1da 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/rnn_sequence.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/rnn_sequence.hpp @@ -10,8 +10,8 @@ #include #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/roi_pooling.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/roi_pooling.hpp index c1616776cf4efe..9ca462fa1f9ad2 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/roi_pooling.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/roi_pooling.hpp @@ -9,8 +9,8 @@ #include #include -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/roll.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/roll.hpp index beace403874d98..da3748424f9fd6 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/roll.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/roll.hpp @@ -8,7 +8,7 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/select.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/select.hpp index af4906e7ccf468..e27f2853fc6186 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/select.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/select.hpp @@ -8,7 +8,7 @@ #include #include -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/shape_of.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/shape_of.hpp index 8657af7ab3dca1..2bd8ed1d917644 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/shape_of.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/shape_of.hpp @@ 
-8,8 +8,8 @@ #include #include #include -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/softmax.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/softmax.hpp index f169d5a9245c86..a3eeeb4e212e54 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/softmax.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/softmax.hpp @@ -4,7 +4,7 @@ #pragma once -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "common_test_utils/common_utils.hpp" diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/space_to_depth.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/space_to_depth.hpp index 8241034c04865e..665baa01dc7265 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/space_to_depth.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/space_to_depth.hpp @@ -11,8 +11,8 @@ #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/split.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/split.hpp index d38f688b1f06c1..ccb9a43b144758 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/split.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/split.hpp @@ -10,7 +10,7 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/squeeze_unsqueeze.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/squeeze_unsqueeze.hpp index f3b7343277ab96..995ca768b15b07 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/squeeze_unsqueeze.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/squeeze_unsqueeze.hpp @@ -10,7 +10,7 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace LayerTestsDefinitions { using ShapeAxesTuple = std::pair, std::vector>; diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/tensor_iterator.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/tensor_iterator.hpp index 87fa36cb800878..f7dbd4322aa3ea 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/tensor_iterator.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/tensor_iterator.hpp @@ -10,8 +10,8 @@ #include #include #include 
"shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/tile.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/tile.hpp index 3135469f6f1fa0..1ce46e8bf30878 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/tile.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/tile.hpp @@ -10,7 +10,7 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/topk.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/topk.hpp index ecd5510f2f1665..1892ad7f92bccd 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/topk.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/topk.hpp @@ -8,7 +8,7 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace LayerTestsDefinitions { typedef std::tuple< diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/transpose.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/transpose.hpp index 29626ce93b42de..b7547ac1aa8e47 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/transpose.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/transpose.hpp @@ -10,7 +10,7 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/variadic_split.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/variadic_split.hpp index 5b19b6453425ba..b041d368e8f073 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/variadic_split.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/variadic_split.hpp @@ -10,7 +10,7 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/comparison.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/comparison.hpp index d3eb67f907dcb7..d40f690de16971 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/comparison.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/comparison.hpp @@ -10,7 +10,7 @@ #include "gtest/gtest.h" #include "shared_test_classes/base/ov_subgraph.hpp" #include "common_test_utils/test_constants.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/utils/ov_helpers.hpp" namespace ov 
{ namespace test { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/basic_lstm.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/basic_lstm.hpp index 6db832fd1f4027..6d58656c0752ea 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/basic_lstm.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/basic_lstm.hpp @@ -10,8 +10,8 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/broadcast_power.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/broadcast_power.hpp index a0b06ccf5e72ed..e4f0530e2012dd 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/broadcast_power.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/broadcast_power.hpp @@ -10,8 +10,8 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" +#include "ov_models/builders.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/cascade_concat.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/cascade_concat.hpp index dd506eb5009edb..00534c53e72442 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/cascade_concat.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/cascade_concat.hpp @@ -8,7 +8,7 @@ #include #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/concat_conv.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/concat_conv.hpp index 22cee845d20d4d..37d0410ff5b374 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/concat_conv.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/concat_conv.hpp @@ -10,8 +10,8 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/concat_multi_input.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/concat_multi_input.hpp index b51b51e5a22b03..a63203d7a6465b 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/concat_multi_input.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/concat_multi_input.hpp @@ -10,8 +10,8 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" -#include 
"ngraph_functions/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" +#include "ov_models/builders.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/concat_quantization.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/concat_quantization.hpp index 8b14ffa54d2281..33f141b6e63dc2 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/concat_quantization.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/concat_quantization.hpp @@ -10,8 +10,8 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" +#include "ov_models/builders.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/concat_quantization_during_memory_requantization.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/concat_quantization_during_memory_requantization.hpp index f0028cf1ffd569..15bc230de6a304 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/concat_quantization_during_memory_requantization.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/concat_quantization_during_memory_requantization.hpp @@ -10,8 +10,8 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" +#include "ov_models/builders.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/connect_split_concat_concat.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/connect_split_concat_concat.hpp index 447ea2a957aeb1..682c401f397f1e 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/connect_split_concat_concat.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/connect_split_concat_concat.hpp @@ -9,8 +9,8 @@ #include #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" #include "common_test_utils/test_constants.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/const_conv_concat.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/const_conv_concat.hpp index ce1a9f24ffebed..faf192cb546ab7 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/const_conv_concat.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/const_conv_concat.hpp @@ -10,8 +10,8 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" namespace SubgraphTestsDefinitions { diff --git 
a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/const_strided_slice_concat.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/const_strided_slice_concat.hpp index 1756f60f6ddd73..f48915e31f21f0 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/const_strided_slice_concat.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/const_strided_slice_concat.hpp @@ -11,8 +11,8 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/conv_eltwise_fusion.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/conv_eltwise_fusion.hpp index e294a29997b87e..2e96d162821a39 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/conv_eltwise_fusion.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/conv_eltwise_fusion.hpp @@ -8,7 +8,7 @@ #include #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include #include diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/conv_fq_eltwise.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/conv_fq_eltwise.hpp index 25cf069a513eec..b9435dbced40c5 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/conv_fq_eltwise.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/conv_fq_eltwise.hpp @@ -10,8 +10,8 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/conv_fq_relu.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/conv_fq_relu.hpp index c8c996051f895b..6d87a5c58441e8 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/conv_fq_relu.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/conv_fq_relu.hpp @@ -10,8 +10,8 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/conv_strides_opt.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/conv_strides_opt.hpp index d7670c4f7369ee..9df042c72de523 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/conv_strides_opt.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/conv_strides_opt.hpp @@ -8,7 +8,7 @@ #include #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include 
"ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include #include diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/convert_pad_to_group_conv.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/convert_pad_to_group_conv.hpp index f50efc2a21818f..53bcd5d850e1c5 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/convert_pad_to_group_conv.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/convert_pad_to_group_conv.hpp @@ -8,7 +8,7 @@ #include #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include #include diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/convolution_relu_sequence.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/convolution_relu_sequence.hpp index e915b99a0d5581..3fb665128a7750 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/convolution_relu_sequence.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/convolution_relu_sequence.hpp @@ -10,8 +10,8 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/copy_before_squeeze.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/copy_before_squeeze.hpp index d560b8566687a0..fa84e2a1e05d4d 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/copy_before_squeeze.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/copy_before_squeeze.hpp @@ -10,8 +10,8 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" +#include "ov_models/builders.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/delayed_copy_layer.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/delayed_copy_layer.hpp index 6a794bf911bdc9..927fed5b9502be 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/delayed_copy_layer.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/delayed_copy_layer.hpp @@ -10,8 +10,8 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" +#include "ov_models/builders.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/eltwise_conv_eltwise.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/eltwise_conv_eltwise.hpp index ee45c79a8e43fb..17e997deb7a4d9 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/eltwise_conv_eltwise.hpp +++ 
b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/eltwise_conv_eltwise.hpp @@ -10,8 +10,8 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/fc_conv_fc.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/fc_conv_fc.hpp index 9f460024670501..221d7bf1cd9b2b 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/fc_conv_fc.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/fc_conv_fc.hpp @@ -10,8 +10,8 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/first_connect_input_concat.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/first_connect_input_concat.hpp index 299dd3e4b3987a..3cceae5dce2f0f 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/first_connect_input_concat.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/first_connect_input_concat.hpp @@ -9,7 +9,7 @@ #include #include -#include +#include #include diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/fq_conv_fq_affine.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/fq_conv_fq_affine.hpp index 94236ebf36d593..fee1b8f20ba5e6 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/fq_conv_fq_affine.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/fq_conv_fq_affine.hpp @@ -10,8 +10,8 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/fq_with_mixed_levels.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/fq_with_mixed_levels.hpp index 9ea01a2f033e6f..51fa5a38a173df 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/fq_with_mixed_levels.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/fq_with_mixed_levels.hpp @@ -11,8 +11,8 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/handling_orientation_conv.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/handling_orientation_conv.hpp index 12d056aab8f9a4..a319d54085cf49 100644 --- 
a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/handling_orientation_conv.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/handling_orientation_conv.hpp @@ -10,8 +10,8 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" +#include "ov_models/builders.hpp" namespace SubgraphTestsDefinitions { typedef std::tuple< diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/input_conv.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/input_conv.hpp index 309764cc451b6e..6ba2d763a59c83 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/input_conv.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/input_conv.hpp @@ -10,8 +10,8 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/input_split_concat.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/input_split_concat.hpp index 6f288b2271da2f..22815ed51621cb 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/input_split_concat.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/input_split_concat.hpp @@ -10,8 +10,8 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/matmul_act_add.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/matmul_act_add.hpp index c1f77841456794..36bf41e439d141 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/matmul_act_add.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/matmul_act_add.hpp @@ -9,8 +9,8 @@ #include #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" #include "common_test_utils/test_constants.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/matmul_squeeze_add.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/matmul_squeeze_add.hpp index a071193eeaed0f..2aa4039e0cd9a3 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/matmul_squeeze_add.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/matmul_squeeze_add.hpp @@ -10,8 +10,8 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" 
+#include "ov_models/utils/ov_helpers.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/memory_fq_concat_prelu.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/memory_fq_concat_prelu.hpp index b068dc8fd0ec4a..5bfe7222337ad2 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/memory_fq_concat_prelu.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/memory_fq_concat_prelu.hpp @@ -10,8 +10,8 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/multi_crops_to_concat.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/multi_crops_to_concat.hpp index 98728f7f7a0eaf..c3544435e8b0c6 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/multi_crops_to_concat.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/multi_crops_to_concat.hpp @@ -10,8 +10,8 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" +#include "ov_models/builders.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/multioutput_eltwise_squeeze_eltwise.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/multioutput_eltwise_squeeze_eltwise.hpp index c87db0150985f6..f01bbace577c59 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/multioutput_eltwise_squeeze_eltwise.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/multioutput_eltwise_squeeze_eltwise.hpp @@ -9,8 +9,8 @@ #include #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/multiple_connect_split_concat.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/multiple_connect_split_concat.hpp index baf25e860a0240..08eaae12a7c4fb 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/multiple_connect_split_concat.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/multiple_connect_split_concat.hpp @@ -9,8 +9,8 @@ #include #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" #include "common_test_utils/test_constants.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/multiply_add.hpp 
b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/multiply_add.hpp index 4e9321a9608bcd..1e016857d4728f 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/multiply_add.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/multiply_add.hpp @@ -9,8 +9,8 @@ #include #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" #include "common_test_utils/test_constants.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/mvn_multiply_add.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/mvn_multiply_add.hpp index fc590d970efd07..800fc2cbb0caa1 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/mvn_multiply_add.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/mvn_multiply_add.hpp @@ -9,7 +9,7 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/negative_memory_layer_offset.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/negative_memory_layer_offset.hpp index 3f98a753555b3e..c39a206ceb5cd4 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/negative_memory_layer_offset.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/negative_memory_layer_offset.hpp @@ -10,8 +10,8 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" +#include "ov_models/builders.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/parameter_reshape_result.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/parameter_reshape_result.hpp index df5dbe64c99e3f..d4c310dfd376dc 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/parameter_reshape_result.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/parameter_reshape_result.hpp @@ -9,7 +9,7 @@ #include #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "common_test_utils/test_constants.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/parameter_result.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/parameter_result.hpp index 9fb95dd62cbe89..7156036c6c05a1 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/parameter_result.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/parameter_result.hpp @@ -11,7 +11,7 @@ #include "shared_test_classes/base/layer_test_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ngraph_functions/builders.hpp" 
+#include "ov_models/builders.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/parameter_shapeof_result.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/parameter_shapeof_result.hpp index da2f10d2a08d8e..2cc80cfe99c639 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/parameter_shapeof_result.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/parameter_shapeof_result.hpp @@ -5,7 +5,7 @@ #pragma once #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include #include diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/perm_conv_perm_concat.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/perm_conv_perm_concat.hpp index 01835b27e908c4..b8be31f93271e7 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/perm_conv_perm_concat.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/perm_conv_perm_concat.hpp @@ -11,8 +11,8 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" +#include "ov_models/builders.hpp" namespace SubgraphTestsDefinitions { typedef std::tuple< diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/permute_concat_concat_permute.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/permute_concat_concat_permute.hpp index 8910c616d8b7ae..ed15880da7bdcc 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/permute_concat_concat_permute.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/permute_concat_concat_permute.hpp @@ -8,7 +8,7 @@ #include #include -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/preprocess.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/preprocess.hpp index 0b9b88f54326ed..dcc713ff27be8e 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/preprocess.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/preprocess.hpp @@ -10,9 +10,9 @@ #include #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/preprocess/preprocess_builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/preprocess/preprocess_builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/quantized_convolution_backprop_data.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/quantized_convolution_backprop_data.hpp index 29c04995c717e0..03fd861636c3b2 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/quantized_convolution_backprop_data.hpp +++ 
b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/quantized_convolution_backprop_data.hpp @@ -10,8 +10,8 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/quantized_group_convolution.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/quantized_group_convolution.hpp index 7cd3b927f339dd..946aa41702ac93 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/quantized_group_convolution.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/quantized_group_convolution.hpp @@ -10,8 +10,8 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/quantized_group_convolution_backprop_data.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/quantized_group_convolution_backprop_data.hpp index 5bd62449ab9a5d..d10e3fcdcf847a 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/quantized_group_convolution_backprop_data.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/quantized_group_convolution_backprop_data.hpp @@ -10,8 +10,8 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/range_add.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/range_add.hpp index 28e9c486702e48..18fe4775ebbea7 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/range_add.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/range_add.hpp @@ -10,7 +10,7 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/single_layer/range.hpp" diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/reduce_eltwise.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/reduce_eltwise.hpp index 4ec0d39d6e4b86..e0b305c7e54369 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/reduce_eltwise.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/reduce_eltwise.hpp @@ -9,8 +9,8 @@ #include #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" #include "common_test_utils/test_constants.hpp" namespace SubgraphTestsDefinitions { diff 
--git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/relu_shape_of.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/relu_shape_of.hpp index ca71c93a48e7ee..c84f0e2d8292e5 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/relu_shape_of.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/relu_shape_of.hpp @@ -12,7 +12,7 @@ #include "shared_test_classes/base/layer_test_utils.hpp" #include "shared_test_classes/single_layer/shape_of.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/relu_split_reshape.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/relu_split_reshape.hpp index e05da354d9ae5b..95c0b2f26104da 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/relu_split_reshape.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/relu_split_reshape.hpp @@ -9,7 +9,7 @@ #include #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "common_test_utils/test_constants.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/reshape_permute_conv_permute_reshape_act.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/reshape_permute_conv_permute_reshape_act.hpp index 563b8b8dc31623..320a73f954536e 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/reshape_permute_conv_permute_reshape_act.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/reshape_permute_conv_permute_reshape_act.hpp @@ -11,8 +11,8 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" +#include "ov_models/builders.hpp" namespace SubgraphTestsDefinitions { typedef std::tuple< diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/reshape_permute_reshape.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/reshape_permute_reshape.hpp index 899beed3dc5e6b..cc9b282a531782 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/reshape_permute_reshape.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/reshape_permute_reshape.hpp @@ -10,8 +10,8 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" +#include "ov_models/builders.hpp" namespace SubgraphTestsDefinitions { typedef std::tuple< diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/reshape_squeeze_reshape_relu.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/reshape_squeeze_reshape_relu.hpp index 23189569ea36f0..eb4811e08cf353 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/reshape_squeeze_reshape_relu.hpp +++ 
b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/reshape_squeeze_reshape_relu.hpp @@ -9,8 +9,8 @@ #include #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" namespace SubgraphTestsDefinitions { using ShapeAxesTuple = std::pair, std::vector>; diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/scaleshift_conv_scaleshift.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/scaleshift_conv_scaleshift.hpp index 605af442b31a6a..773555036b93c1 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/scaleshift_conv_scaleshift.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/scaleshift_conv_scaleshift.hpp @@ -10,8 +10,8 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/softsign.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/softsign.hpp index 687ff0db2100bf..792dd95f5259c8 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/softsign.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/softsign.hpp @@ -10,8 +10,8 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/split_concat_multi_inputs.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/split_concat_multi_inputs.hpp index 1eeb959c029d9b..2a2162301b8473 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/split_concat_multi_inputs.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/split_concat_multi_inputs.hpp @@ -10,8 +10,8 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" +#include "ov_models/builders.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/split_conv.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/split_conv.hpp index 26f522bc013d14..2c3fd302a19c6c 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/split_conv.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/split_conv.hpp @@ -10,8 +10,8 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" namespace SubgraphTestsDefinitions { diff --git 
a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/split_conv_concat.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/split_conv_concat.hpp index 4b3fddf459c3cb..a02822a50b9374 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/split_conv_concat.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/split_conv_concat.hpp @@ -10,8 +10,8 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" +#include "ov_models/builders.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/split_relu.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/split_relu.hpp index d47773fa9d28b2..9104775214863e 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/split_relu.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/split_relu.hpp @@ -9,7 +9,7 @@ #include #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "common_test_utils/test_constants.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/split_trivial_permute_concat.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/split_trivial_permute_concat.hpp index 888aad7e1c0269..38ee0a40cd5095 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/split_trivial_permute_concat.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/split_trivial_permute_concat.hpp @@ -10,8 +10,8 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" +#include "ov_models/builders.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/stridedslice_concat.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/stridedslice_concat.hpp index 49be793b33b3af..ecdf2869bde360 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/stridedslice_concat.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/stridedslice_concat.hpp @@ -10,8 +10,8 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/stridedslice_conv.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/stridedslice_conv.hpp index 5e632c890a14aa..1d890194221fa6 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/stridedslice_conv.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/stridedslice_conv.hpp @@ -10,8 +10,8 @@ #include #include 
"shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/tensor_names.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/tensor_names.hpp index 4b0da37a3512bf..ddfd40f5030ff7 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/tensor_names.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/tensor_names.hpp @@ -10,7 +10,7 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/transpose_add.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/transpose_add.hpp index 5f2c807ff17383..4a41805fe4a1a6 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/transpose_add.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/transpose_add.hpp @@ -10,8 +10,8 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" +#include "ov_models/builders.hpp" namespace SubgraphTestsDefinitions { typedef std::tuple< diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/transpose_conv_transpose_squeeze.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/transpose_conv_transpose_squeeze.hpp index 8945235fddab70..bf3a16f3b35fca 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/transpose_conv_transpose_squeeze.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/transpose_conv_transpose_squeeze.hpp @@ -10,8 +10,8 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/trivial_concat.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/trivial_concat.hpp index 510bfaff4412fd..5a6097cb626147 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/trivial_concat.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/trivial_concat.hpp @@ -10,8 +10,8 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" namespace SubgraphTestsDefinitions { using trivialConcatParamsTuple = typename std::tuple< diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/two_fake_quantize_to_fullyconnected.hpp 
b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/two_fake_quantize_to_fullyconnected.hpp index 66df9e653b83f0..94d70b23103e42 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/two_fake_quantize_to_fullyconnected.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/two_fake_quantize_to_fullyconnected.hpp @@ -10,8 +10,8 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/variadic_split_pad.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/variadic_split_pad.hpp index dff3ad2ce39ced..6cfbf94286902d 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/variadic_split_pad.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/variadic_split_pad.hpp @@ -9,7 +9,7 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/src/base/low_precision_transformations/layer_transformation.cpp b/src/tests/functional/shared_test_classes/src/base/low_precision_transformations/layer_transformation.cpp index 1380126e5a605a..e625cc2bdbfe6c 100644 --- a/src/tests/functional/shared_test_classes/src/base/low_precision_transformations/layer_transformation.cpp +++ b/src/tests/functional/shared_test_classes/src/base/low_precision_transformations/layer_transformation.cpp @@ -11,7 +11,7 @@ #include "cpp_interfaces/interface/ie_internal_plugin_config.hpp" #include "functional_test_utils/blob_utils.hpp" -#include "ngraph_functions/pass/convert_prc.hpp" +#include "ov_models/pass/convert_prc.hpp" using namespace InferenceEngine; using namespace ngraph; diff --git a/src/tests/functional/shared_test_classes/src/base/ov_subgraph.cpp b/src/tests/functional/shared_test_classes/src/base/ov_subgraph.cpp index c7059063158d07..016dc26cccdfc5 100644 --- a/src/tests/functional/shared_test_classes/src/base/ov_subgraph.cpp +++ b/src/tests/functional/shared_test_classes/src/base/ov_subgraph.cpp @@ -20,7 +20,7 @@ #include "common_test_utils/graph_comparator.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/utils/ov_helpers.hpp" #include "common_test_utils/file_utils.hpp" #include "common_test_utils/ov_tensor_utils.hpp" diff --git a/src/tests/functional/shared_test_classes/src/precomp.hpp b/src/tests/functional/shared_test_classes/src/precomp.hpp index 15abb69ee4d6e3..753eb06e3b9434 100644 --- a/src/tests/functional/shared_test_classes/src/precomp.hpp +++ b/src/tests/functional/shared_test_classes/src/precomp.hpp @@ -8,8 +8,8 @@ #include #include -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/subgraph_builders.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/subgraph_builders.hpp" #include #include diff --git a/src/tests/functional/shared_test_classes/src/single_layer/adaptive_pooling.cpp b/src/tests/functional/shared_test_classes/src/single_layer/adaptive_pooling.cpp index 47afa45924b63c..d9e6d3c7b23ade 100644 --- 
a/src/tests/functional/shared_test_classes/src/single_layer/adaptive_pooling.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/adaptive_pooling.cpp @@ -4,7 +4,7 @@ #include -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/single_layer/adaptive_pooling.hpp" using namespace InferenceEngine; diff --git a/src/tests/functional/shared_test_classes/src/single_layer/batch_to_space.cpp b/src/tests/functional/shared_test_classes/src/single_layer/batch_to_space.cpp index cab4ce86d3c146..f95cff80060304 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/batch_to_space.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/batch_to_space.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/single_layer/batch_to_space.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/src/single_layer/broadcast.cpp b/src/tests/functional/shared_test_classes/src/single_layer/broadcast.cpp index 36d41304aca48a..dc33278007facf 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/broadcast.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/broadcast.cpp @@ -4,7 +4,7 @@ #include "shared_test_classes/single_layer/broadcast.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace LayerTestsDefinitions { std::string BroadcastLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { diff --git a/src/tests/functional/shared_test_classes/src/single_layer/comparison.cpp b/src/tests/functional/shared_test_classes/src/single_layer/comparison.cpp index b526027e1025da..8d4b9b66a30c05 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/comparison.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/comparison.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/single_layer/comparison.hpp" using namespace LayerTestsDefinitions::ComparisonParams; diff --git a/src/tests/functional/shared_test_classes/src/single_layer/conversion.cpp b/src/tests/functional/shared_test_classes/src/single_layer/conversion.cpp index 38141ef4f0621c..ab39066c9c7c11 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/conversion.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/conversion.cpp @@ -4,7 +4,7 @@ #include "shared_test_classes/single_layer/conversion.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/src/single_layer/ctc_greedy_decoder.cpp b/src/tests/functional/shared_test_classes/src/single_layer/ctc_greedy_decoder.cpp index 53e25c6b708d9e..c92af89b30287b 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/ctc_greedy_decoder.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/ctc_greedy_decoder.cpp @@ -3,7 +3,7 @@ // #include "shared_test_classes/single_layer/ctc_greedy_decoder.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace LayerTestsDefinitions { std::string CTCGreedyDecoderLayerTest::getTestCaseName( diff --git a/src/tests/functional/shared_test_classes/src/single_layer/ctc_greedy_decoder_seq_len.cpp 
b/src/tests/functional/shared_test_classes/src/single_layer/ctc_greedy_decoder_seq_len.cpp index e408f16a733dc0..55309e52fd5c3f 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/ctc_greedy_decoder_seq_len.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/ctc_greedy_decoder_seq_len.cpp @@ -7,7 +7,7 @@ #include #include "shared_test_classes/single_layer/ctc_greedy_decoder_seq_len.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace LayerTestsDefinitions { std::string CTCGreedyDecoderSeqLenLayerTest::getTestCaseName( diff --git a/src/tests/functional/shared_test_classes/src/single_layer/ctc_loss.cpp b/src/tests/functional/shared_test_classes/src/single_layer/ctc_loss.cpp index 3eba58e5d61fd5..f8c3eefd345f5e 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/ctc_loss.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/ctc_loss.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/single_layer/ctc_loss.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/src/single_layer/depth_to_space.cpp b/src/tests/functional/shared_test_classes/src/single_layer/depth_to_space.cpp index 1e2105233e4ee0..fbff80b33cab7a 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/depth_to_space.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/depth_to_space.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/single_layer/depth_to_space.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/src/single_layer/detection_output.cpp b/src/tests/functional/shared_test_classes/src/single_layer/detection_output.cpp index 2be7ca2506cdfe..c9dfdaefa737c7 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/detection_output.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/detection_output.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/single_layer/detection_output.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/src/single_layer/einsum.cpp b/src/tests/functional/shared_test_classes/src/single_layer/einsum.cpp index 54bcccc16c6956..76beb95cbed2f7 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/einsum.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/einsum.cpp @@ -3,7 +3,7 @@ // #include "shared_test_classes/single_layer/einsum.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/src/single_layer/eltwise.cpp b/src/tests/functional/shared_test_classes/src/single_layer/eltwise.cpp index 685aa4517c473e..1f9d25df3c5710 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/eltwise.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/eltwise.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include #include "shared_test_classes/single_layer/eltwise.hpp" diff --git 
a/src/tests/functional/shared_test_classes/src/single_layer/embedding_bag_offsets_sum.cpp b/src/tests/functional/shared_test_classes/src/single_layer/embedding_bag_offsets_sum.cpp index 59ccd857915a70..574ee1e2b859f4 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/embedding_bag_offsets_sum.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/embedding_bag_offsets_sum.cpp @@ -3,7 +3,7 @@ // #include "shared_test_classes/single_layer/embedding_bag_offsets_sum.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/src/single_layer/embedding_bag_packed_sum.cpp b/src/tests/functional/shared_test_classes/src/single_layer/embedding_bag_packed_sum.cpp index 0016ff7c283e82..262ff84cbd67bd 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/embedding_bag_packed_sum.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/embedding_bag_packed_sum.cpp @@ -3,7 +3,7 @@ // #include "shared_test_classes/single_layer/embedding_bag_packed_sum.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/src/single_layer/embedding_segments_sum.cpp b/src/tests/functional/shared_test_classes/src/single_layer/embedding_segments_sum.cpp index 01576e8ddd3b21..59686188962528 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/embedding_segments_sum.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/embedding_segments_sum.cpp @@ -3,7 +3,7 @@ // #include "shared_test_classes/single_layer/embedding_segments_sum.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/src/single_layer/experimental_detectron_detection_output.cpp b/src/tests/functional/shared_test_classes/src/single_layer/experimental_detectron_detection_output.cpp index a3e4c6979f4ca1..3d3ce94024ce63 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/experimental_detectron_detection_output.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/experimental_detectron_detection_output.cpp @@ -3,7 +3,7 @@ // #include "shared_test_classes/single_layer/experimental_detectron_detection_output.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "common_test_utils/data_utils.hpp" #include diff --git a/src/tests/functional/shared_test_classes/src/single_layer/experimental_detectron_generate_proposals_single_image.cpp b/src/tests/functional/shared_test_classes/src/single_layer/experimental_detectron_generate_proposals_single_image.cpp index 9bab10c8d597c4..e2a46810a849b5 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/experimental_detectron_generate_proposals_single_image.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/experimental_detectron_generate_proposals_single_image.cpp @@ -3,7 +3,7 @@ // #include "shared_test_classes/single_layer/experimental_detectron_generate_proposals_single_image.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include namespace ov { diff --git a/src/tests/functional/shared_test_classes/src/single_layer/experimental_detectron_prior_grid_generator.cpp b/src/tests/functional/shared_test_classes/src/single_layer/experimental_detectron_prior_grid_generator.cpp 
index 80ab095e219a2f..5d2d952d4f5c59 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/experimental_detectron_prior_grid_generator.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/experimental_detectron_prior_grid_generator.cpp @@ -3,7 +3,7 @@ // #include "shared_test_classes/single_layer/experimental_detectron_prior_grid_generator.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "common_test_utils/data_utils.hpp" #include diff --git a/src/tests/functional/shared_test_classes/src/single_layer/experimental_detectron_roifeatureextractor.cpp b/src/tests/functional/shared_test_classes/src/single_layer/experimental_detectron_roifeatureextractor.cpp index 5d2a944cb86145..e789f18f90d792 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/experimental_detectron_roifeatureextractor.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/experimental_detectron_roifeatureextractor.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include #include "functional_test_utils/plugin_cache.hpp" #include "shared_test_classes/single_layer/experimental_detectron_roifeatureextractor.hpp" diff --git a/src/tests/functional/shared_test_classes/src/single_layer/experimental_detectron_topkrois.cpp b/src/tests/functional/shared_test_classes/src/single_layer/experimental_detectron_topkrois.cpp index cfdb4f7d9fc305..a659240e8d97da 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/experimental_detectron_topkrois.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/experimental_detectron_topkrois.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include #include "shared_test_classes/single_layer/experimental_detectron_topkrois.hpp" diff --git a/src/tests/functional/shared_test_classes/src/single_layer/extract_image_patches.cpp b/src/tests/functional/shared_test_classes/src/single_layer/extract_image_patches.cpp index 217baef3ab3f0b..436eaf0477c39c 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/extract_image_patches.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/extract_image_patches.cpp @@ -3,7 +3,7 @@ // #include "shared_test_classes/single_layer/extract_image_patches.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/src/single_layer/eye.cpp b/src/tests/functional/shared_test_classes/src/single_layer/eye.cpp index 1c00cf10a321ca..e6c560c2aa6739 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/eye.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/eye.cpp @@ -8,7 +8,7 @@ #include #include -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/src/single_layer/gather_elements.cpp b/src/tests/functional/shared_test_classes/src/single_layer/gather_elements.cpp index 7ce6ae16cff1da..3a1d19ddbe11e2 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/gather_elements.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/gather_elements.cpp @@ -7,7 +7,7 @@ #include #include -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include 
"shared_test_classes/single_layer/gather_elements.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/src/single_layer/generate_proposals.cpp b/src/tests/functional/shared_test_classes/src/single_layer/generate_proposals.cpp index 384806c17466ec..d4339568d56c47 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/generate_proposals.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/generate_proposals.cpp @@ -4,7 +4,7 @@ #include "shared_test_classes/single_layer/generate_proposals.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "common_test_utils/ov_tensor_utils.hpp" namespace ov { diff --git a/src/tests/functional/shared_test_classes/src/single_layer/interpolate.cpp b/src/tests/functional/shared_test_classes/src/single_layer/interpolate.cpp index e78bc87b183263..0382268acb8cc7 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/interpolate.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/interpolate.cpp @@ -4,8 +4,8 @@ #include #include "shared_test_classes/single_layer/interpolate.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" using ngraph::helpers::operator<<; diff --git a/src/tests/functional/shared_test_classes/src/single_layer/is_inf.cpp b/src/tests/functional/shared_test_classes/src/single_layer/is_inf.cpp index 4419b1ca0a6e48..2372892b6c8a3c 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/is_inf.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/is_inf.cpp @@ -3,7 +3,7 @@ // #include "shared_test_classes/single_layer/is_inf.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "common_test_utils/ov_tensor_utils.hpp" #include "ie_plugin_config.hpp" diff --git a/src/tests/functional/shared_test_classes/src/single_layer/logical.cpp b/src/tests/functional/shared_test_classes/src/single_layer/logical.cpp index 40b39066576ef9..7ee50d87c0869b 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/logical.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/logical.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/single_layer/logical.hpp" using namespace LayerTestsDefinitions::LogicalParams; diff --git a/src/tests/functional/shared_test_classes/src/single_layer/low_precision.cpp b/src/tests/functional/shared_test_classes/src/single_layer/low_precision.cpp index b49c840305f7c5..a68b292df93b43 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/low_precision.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/low_precision.cpp @@ -3,7 +3,7 @@ // #include "shared_test_classes/single_layer/low_precision.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace LowPrecisionTestDefinitions { diff --git a/src/tests/functional/shared_test_classes/src/single_layer/mat_mul.cpp b/src/tests/functional/shared_test_classes/src/single_layer/mat_mul.cpp index 4bce8340cae7e3..59a61c87cd071d 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/mat_mul.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/mat_mul.cpp @@ -2,7 +2,7 @@ // 
SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/single_layer/mat_mul.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/src/single_layer/matrix_nms.cpp b/src/tests/functional/shared_test_classes/src/single_layer/matrix_nms.cpp index d2bb754754c9ae..e5c70c114211e8 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/matrix_nms.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/matrix_nms.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include #include "shared_test_classes/single_layer/matrix_nms.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" diff --git a/src/tests/functional/shared_test_classes/src/single_layer/memory.cpp b/src/tests/functional/shared_test_classes/src/single_layer/memory.cpp index 7c0b7e1026c85d..323c2def8230c1 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/memory.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/memory.cpp @@ -7,7 +7,7 @@ #include #include #include "ngraph/opsets/opset7.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "ngraph/pass/low_latency.hpp" #include "openvino/op/util/variable_context.hpp" #include "shared_test_classes/single_layer/memory.hpp" diff --git a/src/tests/functional/shared_test_classes/src/single_layer/multiclass_nms.cpp b/src/tests/functional/shared_test_classes/src/single_layer/multiclass_nms.cpp index 21b0406f1b7fec..7e37b235da1a76 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/multiclass_nms.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/multiclass_nms.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include #include "shared_test_classes/single_layer/multiclass_nms.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" diff --git a/src/tests/functional/shared_test_classes/src/single_layer/mvn.cpp b/src/tests/functional/shared_test_classes/src/single_layer/mvn.cpp index bf07df1c627b4a..cd87ad4116d00f 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/mvn.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/mvn.cpp @@ -3,7 +3,7 @@ // #include "shared_test_classes/single_layer/mvn.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/src/single_layer/random_uniform.cpp b/src/tests/functional/shared_test_classes/src/single_layer/random_uniform.cpp index f4f97a53e3c8c0..d94885cb2ebd6a 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/random_uniform.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/random_uniform.cpp @@ -4,7 +4,7 @@ #include #include "shared_test_classes/single_layer/random_uniform.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/utils/ov_helpers.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/src/single_layer/reverse.cpp b/src/tests/functional/shared_test_classes/src/single_layer/reverse.cpp index 0d39713fb1fc46..9ab223e16c2b14 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/reverse.cpp +++ 
b/src/tests/functional/shared_test_classes/src/single_layer/reverse.cpp @@ -6,7 +6,7 @@ #include -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" using namespace InferenceEngine; using namespace FuncTestUtils::PrecisionUtils; diff --git a/src/tests/functional/shared_test_classes/src/single_layer/roi_align.cpp b/src/tests/functional/shared_test_classes/src/single_layer/roi_align.cpp index 136ae6ada0a796..b3a3d51cc7d569 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/roi_align.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/roi_align.cpp @@ -7,7 +7,7 @@ #include #include -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "openvino/core/enum_names.hpp" using namespace InferenceEngine; diff --git a/src/tests/functional/shared_test_classes/src/single_layer/scatter_ND_update.cpp b/src/tests/functional/shared_test_classes/src/single_layer/scatter_ND_update.cpp index 90288b50e87a16..c04acb446bf1f5 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/scatter_ND_update.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/scatter_ND_update.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/single_layer/scatter_ND_update.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/src/single_layer/scatter_elements_update.cpp b/src/tests/functional/shared_test_classes/src/single_layer/scatter_elements_update.cpp index 5809df124bb7d1..440e8ef543faa5 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/scatter_elements_update.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/scatter_elements_update.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/single_layer/scatter_elements_update.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/src/single_layer/scatter_update.cpp b/src/tests/functional/shared_test_classes/src/single_layer/scatter_update.cpp index e4e373f24945cf..81754190f3be81 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/scatter_update.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/scatter_update.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/single_layer/scatter_update.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/src/single_layer/shuffle_channels.cpp b/src/tests/functional/shared_test_classes/src/single_layer/shuffle_channels.cpp index f091973b685001..b3f5093ad25560 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/shuffle_channels.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/shuffle_channels.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/single_layer/shuffle_channels.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/src/single_layer/slice.cpp b/src/tests/functional/shared_test_classes/src/single_layer/slice.cpp index 176d154038ea59..080fb2cce63fec 100644 --- 
a/src/tests/functional/shared_test_classes/src/single_layer/slice.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/slice.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "ngraph/ngraph.hpp" #include "shared_test_classes/single_layer/slice.hpp" diff --git a/src/tests/functional/shared_test_classes/src/single_layer/space_to_batch.cpp b/src/tests/functional/shared_test_classes/src/single_layer/space_to_batch.cpp index 2db31d0aa0b30b..cc49d3d8615924 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/space_to_batch.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/space_to_batch.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/single_layer/space_to_batch.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/src/single_layer/space_to_depth.cpp b/src/tests/functional/shared_test_classes/src/single_layer/space_to_depth.cpp index 98f8041476394f..34b4e89fd1ad68 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/space_to_depth.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/space_to_depth.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/single_layer/space_to_depth.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/src/single_layer/strided_slice.cpp b/src/tests/functional/shared_test_classes/src/single_layer/strided_slice.cpp index 75f95aa1ac593c..4633baab271c6c 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/strided_slice.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/strided_slice.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/single_layer/strided_slice.hpp" diff --git a/src/tests/functional/shared_test_classes/src/single_op/comparison.cpp b/src/tests/functional/shared_test_classes/src/single_op/comparison.cpp index f7ea71c95bb4b3..4ab59383fe5eb8 100644 --- a/src/tests/functional/shared_test_classes/src/single_op/comparison.cpp +++ b/src/tests/functional/shared_test_classes/src/single_op/comparison.cpp @@ -4,7 +4,7 @@ #include "shared_test_classes/single_op/comparison.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "common_test_utils/ov_tensor_utils.hpp" namespace ov { diff --git a/src/tests/functional/shared_test_classes/src/single_op/convolution_backprop_data.cpp b/src/tests/functional/shared_test_classes/src/single_op/convolution_backprop_data.cpp index c858fac6a3e97f..578d2e83b99849 100644 --- a/src/tests/functional/shared_test_classes/src/single_op/convolution_backprop_data.cpp +++ b/src/tests/functional/shared_test_classes/src/single_op/convolution_backprop_data.cpp @@ -10,7 +10,7 @@ #include "openvino/op/constant.hpp" #include "openvino/op/result.hpp" #include "openvino/op/convolution.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace ov { namespace test { diff --git a/src/tests/functional/shared_test_classes/src/single_op/dft.cpp b/src/tests/functional/shared_test_classes/src/single_op/dft.cpp index 9d39ed7bea6c66..47ef96ded0047c 100644 
--- a/src/tests/functional/shared_test_classes/src/single_op/dft.cpp +++ b/src/tests/functional/shared_test_classes/src/single_op/dft.cpp @@ -4,7 +4,7 @@ #include "shared_test_classes/single_op/dft.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace ov { namespace test { diff --git a/src/tests/functional/shared_test_classes/src/single_op/eltwise.cpp b/src/tests/functional/shared_test_classes/src/single_op/eltwise.cpp index 1297379c267a7e..b05e8be0e44e1d 100644 --- a/src/tests/functional/shared_test_classes/src/single_op/eltwise.cpp +++ b/src/tests/functional/shared_test_classes/src/single_op/eltwise.cpp @@ -6,7 +6,7 @@ #include "shared_test_classes/single_op/eltwise.hpp" #include "common_test_utils/ov_tensor_utils.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace ov { namespace test { diff --git a/src/tests/functional/shared_test_classes/src/single_op/embedding_bag_offsets_sum.cpp b/src/tests/functional/shared_test_classes/src/single_op/embedding_bag_offsets_sum.cpp index ef1542e2a2bd23..acbcda3a108286 100644 --- a/src/tests/functional/shared_test_classes/src/single_op/embedding_bag_offsets_sum.cpp +++ b/src/tests/functional/shared_test_classes/src/single_op/embedding_bag_offsets_sum.cpp @@ -3,7 +3,7 @@ // #include "shared_test_classes/single_op/embedding_bag_offsets_sum.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace ov { namespace test { diff --git a/src/tests/functional/shared_test_classes/src/single_op/embedding_bag_packed_sum.cpp b/src/tests/functional/shared_test_classes/src/single_op/embedding_bag_packed_sum.cpp index 788271a4f4aefc..ab849f0f7d04f7 100644 --- a/src/tests/functional/shared_test_classes/src/single_op/embedding_bag_packed_sum.cpp +++ b/src/tests/functional/shared_test_classes/src/single_op/embedding_bag_packed_sum.cpp @@ -3,7 +3,7 @@ // #include "shared_test_classes/single_op/embedding_bag_packed_sum.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace ov { namespace test { diff --git a/src/tests/functional/shared_test_classes/src/single_op/embedding_segments_sum.cpp b/src/tests/functional/shared_test_classes/src/single_op/embedding_segments_sum.cpp index 71a80bd7082514..b3ded1f3d9af36 100644 --- a/src/tests/functional/shared_test_classes/src/single_op/embedding_segments_sum.cpp +++ b/src/tests/functional/shared_test_classes/src/single_op/embedding_segments_sum.cpp @@ -3,7 +3,7 @@ // #include "shared_test_classes/single_op/embedding_segments_sum.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace ov { diff --git a/src/tests/functional/shared_test_classes/src/single_op/fake_quantize.cpp b/src/tests/functional/shared_test_classes/src/single_op/fake_quantize.cpp index a6ac9f69f629bf..a5d439d9f0db78 100644 --- a/src/tests/functional/shared_test_classes/src/single_op/fake_quantize.cpp +++ b/src/tests/functional/shared_test_classes/src/single_op/fake_quantize.cpp @@ -4,7 +4,7 @@ #include "shared_test_classes/single_op/fake_quantize.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "openvino/op/parameter.hpp" #include "openvino/op/result.hpp" #include "openvino/op/constant.hpp" diff --git a/src/tests/functional/shared_test_classes/src/single_op/gather_nd.cpp b/src/tests/functional/shared_test_classes/src/single_op/gather_nd.cpp index 1c2dd1abbe045c..a0dc08847738e8 100644 --- 
a/src/tests/functional/shared_test_classes/src/single_op/gather_nd.cpp +++ b/src/tests/functional/shared_test_classes/src/single_op/gather_nd.cpp @@ -4,7 +4,7 @@ #include "shared_test_classes/single_op/gather_nd.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace ov { namespace test { diff --git a/src/tests/functional/shared_test_classes/src/single_op/group_convolution.cpp b/src/tests/functional/shared_test_classes/src/single_op/group_convolution.cpp index f3efcbb2055845..f86f0014826f4b 100644 --- a/src/tests/functional/shared_test_classes/src/single_op/group_convolution.cpp +++ b/src/tests/functional/shared_test_classes/src/single_op/group_convolution.cpp @@ -4,7 +4,7 @@ #include "shared_test_classes/single_op/group_convolution.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "openvino/op/parameter.hpp" #include "openvino/op/result.hpp" #include "openvino/op/constant.hpp" diff --git a/src/tests/functional/shared_test_classes/src/single_op/group_convolution_backprop_data.cpp b/src/tests/functional/shared_test_classes/src/single_op/group_convolution_backprop_data.cpp index 118bfc1ce7f977..a1db20a736ab6c 100644 --- a/src/tests/functional/shared_test_classes/src/single_op/group_convolution_backprop_data.cpp +++ b/src/tests/functional/shared_test_classes/src/single_op/group_convolution_backprop_data.cpp @@ -4,7 +4,7 @@ #include "shared_test_classes/single_op/group_convolution_backprop_data.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "openvino/op/parameter.hpp" #include "openvino/op/result.hpp" #include "openvino/op/constant.hpp" diff --git a/src/tests/functional/shared_test_classes/src/single_op/gru_sequence.cpp b/src/tests/functional/shared_test_classes/src/single_op/gru_sequence.cpp index e98b04c5aa8ffc..81ae8ceb1758d9 100644 --- a/src/tests/functional/shared_test_classes/src/single_op/gru_sequence.cpp +++ b/src/tests/functional/shared_test_classes/src/single_op/gru_sequence.cpp @@ -8,7 +8,7 @@ #include "transformations/op_conversions/convert_sequences_to_tensor_iterator.hpp" #include "openvino/pass/manager.hpp" #include "common_test_utils/ov_tensor_utils.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/utils/ov_helpers.hpp" namespace ov { namespace test { diff --git a/src/tests/functional/shared_test_classes/src/single_op/tensor_iterator.cpp b/src/tests/functional/shared_test_classes/src/single_op/tensor_iterator.cpp index 7cf3bd44972846..04c2d6a9b1469a 100644 --- a/src/tests/functional/shared_test_classes/src/single_op/tensor_iterator.cpp +++ b/src/tests/functional/shared_test_classes/src/single_op/tensor_iterator.cpp @@ -5,7 +5,7 @@ #include "transformations/control_flow/unroll_tensor_iterator.hpp" #include "shared_test_classes/single_op/tensor_iterator.hpp" #include "openvino/pass/manager.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace ov { namespace test { diff --git a/src/tests/functional/shared_test_classes/src/subgraph/activation_concats_eltwise.cpp b/src/tests/functional/shared_test_classes/src/subgraph/activation_concats_eltwise.cpp index 8419860e6950d9..0f3922ca3dc205 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/activation_concats_eltwise.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/activation_concats_eltwise.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/builders.hpp" +#include 
"ov_models/builders.hpp" #include "shared_test_classes/subgraph/activation_concats_eltwise.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/src/subgraph/activation_fq.cpp b/src/tests/functional/shared_test_classes/src/subgraph/activation_fq.cpp index 3ca6ac4dfe0120..eda78cc0525999 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/activation_fq.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/activation_fq.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include +#include #include "shared_test_classes/subgraph/activation_fq.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/src/subgraph/basic_lstm.cpp b/src/tests/functional/shared_test_classes/src/subgraph/basic_lstm.cpp index 65b017ddc39d8e..d9814b2869dc82 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/basic_lstm.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/basic_lstm.cpp @@ -5,7 +5,7 @@ #include #include #include "shared_test_classes/subgraph/basic_lstm.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/src/subgraph/clamp_fq.cpp b/src/tests/functional/shared_test_classes/src/subgraph/clamp_fq.cpp index 4dc9aba177f963..af9652a4c388d7 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/clamp_fq.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/clamp_fq.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include +#include #include "shared_test_classes/subgraph/clamp_fq.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/src/subgraph/concat_conv.cpp b/src/tests/functional/shared_test_classes/src/subgraph/concat_conv.cpp index a3962f8172bd2e..4b3fd59ea8a785 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/concat_conv.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/concat_conv.cpp @@ -3,7 +3,7 @@ // #include "shared_test_classes/subgraph/concat_conv.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/src/subgraph/const_conv_concat.cpp b/src/tests/functional/shared_test_classes/src/subgraph/const_conv_concat.cpp index c6a206e55c5999..2fe17c55ca7353 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/const_conv_concat.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/const_conv_concat.cpp @@ -3,7 +3,7 @@ // #include "shared_test_classes/subgraph/const_conv_concat.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/src/subgraph/const_strided_slice_concat.cpp b/src/tests/functional/shared_test_classes/src/subgraph/const_strided_slice_concat.cpp index 76b43cadb1a85b..8ba876499b3510 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/const_strided_slice_concat.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/const_strided_slice_concat.cpp @@ -3,7 +3,7 @@ // #include "shared_test_classes/subgraph/const_strided_slice_concat.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/src/subgraph/constant_result.cpp 
b/src/tests/functional/shared_test_classes/src/subgraph/constant_result.cpp index 9f2cb371469ec8..7e7261fe3eeaec 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/constant_result.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/constant_result.cpp @@ -4,7 +4,7 @@ #include "shared_test_classes/subgraph/constant_result.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "openvino/op/result.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" diff --git a/src/tests/functional/shared_test_classes/src/subgraph/eltwise_conv_eltwise.cpp b/src/tests/functional/shared_test_classes/src/subgraph/eltwise_conv_eltwise.cpp index 1f4ed7282bcd75..5b548fad75b645 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/eltwise_conv_eltwise.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/eltwise_conv_eltwise.cpp @@ -3,7 +3,7 @@ // #include "shared_test_classes/subgraph/eltwise_conv_eltwise.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/src/subgraph/eltwise_reshape_activation.cpp b/src/tests/functional/shared_test_classes/src/subgraph/eltwise_reshape_activation.cpp index 68a4e0bd899950..7e6e6f0147397f 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/eltwise_reshape_activation.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/eltwise_reshape_activation.cpp @@ -1,7 +1,7 @@ // Copyright (C) 2018-2023 Intel Corporation // SPDX-License-Identifier: Apache-2.0 -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/subgraph/eltwise_reshape_activation.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/src/subgraph/fc_conv_fc.cpp b/src/tests/functional/shared_test_classes/src/subgraph/fc_conv_fc.cpp index e855cb1617e42d..a6b5491ea28197 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/fc_conv_fc.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/fc_conv_fc.cpp @@ -3,7 +3,7 @@ // #include "shared_test_classes/subgraph/fc_conv_fc.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/src/subgraph/fq_with_mixed_levels.cpp b/src/tests/functional/shared_test_classes/src/subgraph/fq_with_mixed_levels.cpp index 9838229a10f685..014fbee5160357 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/fq_with_mixed_levels.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/fq_with_mixed_levels.cpp @@ -3,7 +3,7 @@ // #include "shared_test_classes/subgraph/fq_with_mixed_levels.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/src/subgraph/get_output_before_activation.cpp b/src/tests/functional/shared_test_classes/src/subgraph/get_output_before_activation.cpp index 499bd4c391a74f..8a84303f79acfb 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/get_output_before_activation.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/get_output_before_activation.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/subgraph/get_output_before_activation.hpp" 
namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/src/subgraph/input_conv.cpp b/src/tests/functional/shared_test_classes/src/subgraph/input_conv.cpp index 78f2952641aeaf..6ec9cb30bc7791 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/input_conv.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/input_conv.cpp @@ -3,7 +3,7 @@ // #include "shared_test_classes/subgraph/input_conv.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/src/subgraph/input_split_concat.cpp b/src/tests/functional/shared_test_classes/src/subgraph/input_split_concat.cpp index 41ead66143e7cb..81cbacfcdade9a 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/input_split_concat.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/input_split_concat.cpp @@ -3,7 +3,7 @@ // #include "shared_test_classes/subgraph/input_split_concat.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/src/subgraph/matmul_const_transposes_extraction.cpp b/src/tests/functional/shared_test_classes/src/subgraph/matmul_const_transposes_extraction.cpp index 0bbdd7642d14ac..59e5d4e397df17 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/matmul_const_transposes_extraction.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/matmul_const_transposes_extraction.cpp @@ -4,7 +4,7 @@ #include "transformations/common_optimizations/matmul_const_transposes_extraction.hpp" #include "shared_test_classes/subgraph/matmul_const_transposes_extraction.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/src/subgraph/matmul_multiply_fusion.cpp b/src/tests/functional/shared_test_classes/src/subgraph/matmul_multiply_fusion.cpp index ef8bf703cbb4ed..02252c96fdf4d1 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/matmul_multiply_fusion.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/matmul_multiply_fusion.cpp @@ -4,7 +4,7 @@ #include "transformations/common_optimizations/matmul_multiply_fusion.hpp" #include "shared_test_classes/subgraph/matmul_multiply_fusion.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/src/subgraph/matmul_squeeze_add.cpp b/src/tests/functional/shared_test_classes/src/subgraph/matmul_squeeze_add.cpp index 3508efc0b8750c..b91b50bd8a9457 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/matmul_squeeze_add.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/matmul_squeeze_add.cpp @@ -3,7 +3,7 @@ // #include "shared_test_classes/subgraph/matmul_squeeze_add.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/src/subgraph/memory_LSTMCell.cpp b/src/tests/functional/shared_test_classes/src/subgraph/memory_LSTMCell.cpp index 720624e4788ca3..483e862ac4583b 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/memory_LSTMCell.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/memory_LSTMCell.cpp @@ -6,7 +6,7 @@ #include #include 
"ngraph/pass/low_latency.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/subgraph/memory_LSTMCell.hpp" #include "functional_test_utils/core_config.hpp" diff --git a/src/tests/functional/shared_test_classes/src/subgraph/memory_eltwise_reshape_concat.cpp b/src/tests/functional/shared_test_classes/src/subgraph/memory_eltwise_reshape_concat.cpp index 9fa025c2ec3a99..eadb9d7b3eb01f 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/memory_eltwise_reshape_concat.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/memory_eltwise_reshape_concat.cpp @@ -4,7 +4,7 @@ #include -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/subgraph/memory_eltwise_reshape_concat.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/src/subgraph/mul_conv_fusion.cpp b/src/tests/functional/shared_test_classes/src/subgraph/mul_conv_fusion.cpp index 1bc3e834641746..d16090a2e1c819 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/mul_conv_fusion.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/mul_conv_fusion.cpp @@ -5,7 +5,7 @@ #include "transformations/common_optimizations/mul_conv_fusion.hpp" #include "ngraph/pass/constant_folding.hpp" #include "shared_test_classes/subgraph/mul_conv_fusion.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/src/subgraph/multi_crops_to_concat.cpp b/src/tests/functional/shared_test_classes/src/subgraph/multi_crops_to_concat.cpp index 409128a951892d..d7e271ea18f1d9 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/multi_crops_to_concat.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/multi_crops_to_concat.cpp @@ -3,7 +3,7 @@ // #include "shared_test_classes/subgraph/multi_crops_to_concat.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/src/subgraph/multi_input_scale.cpp b/src/tests/functional/shared_test_classes/src/subgraph/multi_input_scale.cpp index f4ba307a71dd12..e66bb94703f230 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/multi_input_scale.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/multi_input_scale.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/subgraph/multi_input_scale.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/src/subgraph/multiple_LSTMCell.cpp b/src/tests/functional/shared_test_classes/src/subgraph/multiple_LSTMCell.cpp index 090714711742fa..ff68410f207482 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/multiple_LSTMCell.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/multiple_LSTMCell.cpp @@ -7,8 +7,8 @@ #include "ngraph/op/util/variable_context.hpp" #include "ngraph/pass/low_latency.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" #include "shared_test_classes/subgraph/multiple_LSTMCell.hpp" diff --git a/src/tests/functional/shared_test_classes/src/subgraph/multiple_concat.cpp 
b/src/tests/functional/shared_test_classes/src/subgraph/multiple_concat.cpp index 49f7226b103c7e..c12bf94f5811df 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/multiple_concat.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/multiple_concat.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/subgraph/multiple_concat.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/src/subgraph/multiple_input_fq.cpp b/src/tests/functional/shared_test_classes/src/subgraph/multiple_input_fq.cpp index 9c39b4851bde63..cf1d06993e46f0 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/multiple_input_fq.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/multiple_input_fq.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/subgraph/multiple_input_fq.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/src/subgraph/permute_concat_permute.cpp b/src/tests/functional/shared_test_classes/src/subgraph/permute_concat_permute.cpp index e3b128a050ae44..a469c997608a0f 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/permute_concat_permute.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/permute_concat_permute.cpp @@ -10,7 +10,7 @@ #include #include -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace SubgraphTestsDefinitions { std::string PermuteConcatPermute::getTestCaseName(const testing::TestParamInfo& obj) { diff --git a/src/tests/functional/shared_test_classes/src/subgraph/preprocess.cpp b/src/tests/functional/shared_test_classes/src/subgraph/preprocess.cpp index 0d1206b5846f3d..a340c349584070 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/preprocess.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/preprocess.cpp @@ -3,7 +3,7 @@ // #include "shared_test_classes/subgraph/preprocess.hpp" -#include "ngraph_functions/preprocess/preprocess_builders.hpp" +#include "ov_models/preprocess/preprocess_builders.hpp" #include "openvino/core/preprocess/pre_post_process.hpp" using namespace ov; diff --git a/src/tests/functional/shared_test_classes/src/subgraph/quantized_convolution_batch_norm.cpp b/src/tests/functional/shared_test_classes/src/subgraph/quantized_convolution_batch_norm.cpp index 5f8c8a4fe59511..af934c01e014c3 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/quantized_convolution_batch_norm.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/quantized_convolution_batch_norm.cpp @@ -3,7 +3,7 @@ // #include "shared_test_classes/subgraph/quantized_convolution_batch_norm.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/src/subgraph/quantized_mat_mul.cpp b/src/tests/functional/shared_test_classes/src/subgraph/quantized_mat_mul.cpp index bf46ee7f41636f..d4dae9e9e86f97 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/quantized_mat_mul.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/quantized_mat_mul.cpp @@ -3,7 +3,7 @@ // #include "shared_test_classes/subgraph/quantized_mat_mul.hpp" -#include "ngraph_functions/builders.hpp" +#include 
"ov_models/builders.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/src/subgraph/reduce_eltwise.cpp b/src/tests/functional/shared_test_classes/src/subgraph/reduce_eltwise.cpp index 8647078756b448..99066cc445665c 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/reduce_eltwise.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/reduce_eltwise.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/subgraph/reduce_eltwise.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/src/subgraph/scale_shift.cpp b/src/tests/functional/shared_test_classes/src/subgraph/scale_shift.cpp index 830d601364fedf..45109a87d2349c 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/scale_shift.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/scale_shift.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/subgraph/scaleshift.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/src/subgraph/scaleshift_conv_scaleshift.cpp b/src/tests/functional/shared_test_classes/src/subgraph/scaleshift_conv_scaleshift.cpp index f44cde7928672c..a03023d57cfbe5 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/scaleshift_conv_scaleshift.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/scaleshift_conv_scaleshift.cpp @@ -3,7 +3,7 @@ // #include "shared_test_classes/subgraph/scaleshift_conv_scaleshift.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/src/subgraph/simple_if.cpp b/src/tests/functional/shared_test_classes/src/subgraph/simple_if.cpp index 84a70f263ff075..1f81363b54b7c0 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/simple_if.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/simple_if.cpp @@ -3,7 +3,7 @@ // #include "shared_test_classes/subgraph/simple_if.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/src/subgraph/softsign.cpp b/src/tests/functional/shared_test_classes/src/subgraph/softsign.cpp index 6c312c22178bdc..c24296d44dd017 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/softsign.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/softsign.cpp @@ -4,7 +4,7 @@ #include #include "shared_test_classes/subgraph/softsign.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/src/subgraph/split_concat_multi_inputs.cpp b/src/tests/functional/shared_test_classes/src/subgraph/split_concat_multi_inputs.cpp index 25da1f34908512..eff402af7e5d6b 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/split_concat_multi_inputs.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/split_concat_multi_inputs.cpp @@ -3,7 +3,7 @@ // #include "shared_test_classes/subgraph/split_concat_multi_inputs.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace SubgraphTestsDefinitions { diff --git 
a/src/tests/functional/shared_test_classes/src/subgraph/split_conv.cpp b/src/tests/functional/shared_test_classes/src/subgraph/split_conv.cpp index 7069a8b305ccd7..53ceb2b98b00aa 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/split_conv.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/split_conv.cpp @@ -3,7 +3,7 @@ // #include "shared_test_classes/subgraph/split_conv.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/src/subgraph/strided_slice.cpp b/src/tests/functional/shared_test_classes/src/subgraph/strided_slice.cpp index 1f8573abe9ade2..51593c0adff3ad 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/strided_slice.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/strided_slice.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/subgraph/strided_slice.hpp" diff --git a/src/tests/functional/shared_test_classes/src/subgraph/stridedslice_concat.cpp b/src/tests/functional/shared_test_classes/src/subgraph/stridedslice_concat.cpp index cd4fb6684e6fcd..325f93249ef4af 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/stridedslice_concat.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/stridedslice_concat.cpp @@ -3,7 +3,7 @@ // #include "shared_test_classes/subgraph/stridedslice_concat.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/src/subgraph/stridedslice_conv.cpp b/src/tests/functional/shared_test_classes/src/subgraph/stridedslice_conv.cpp index f7e7c5b7443ec2..5fefc6a9405229 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/stridedslice_conv.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/stridedslice_conv.cpp @@ -3,7 +3,7 @@ // #include "shared_test_classes/subgraph/stridedslice_conv.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace SubgraphTestsDefinitions { diff --git a/src/tests/ngraph_helpers/CMakeLists.txt b/src/tests/ov_helpers/CMakeLists.txt similarity index 50% rename from src/tests/ngraph_helpers/CMakeLists.txt rename to src/tests/ov_helpers/CMakeLists.txt index ee29f72a8ee580..871f457f63a7c3 100644 --- a/src/tests/ngraph_helpers/CMakeLists.txt +++ b/src/tests/ov_helpers/CMakeLists.txt @@ -2,9 +2,9 @@ # SPDX-License-Identifier: Apache-2.0 # -add_subdirectory(ngraph_functions) -add_subdirectory(lpt_ngraph_functions) +add_subdirectory(ov_models) +add_subdirectory(ov_lpt_models) if(TARGET openvino::snippets) - add_subdirectory(snippets_ngraph_functions) + add_subdirectory(ov_snippets_models) endif() diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/CMakeLists.txt b/src/tests/ov_helpers/ov_lpt_models/CMakeLists.txt similarity index 87% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/CMakeLists.txt rename to src/tests/ov_helpers/ov_lpt_models/CMakeLists.txt index 72af70826293e3..27dc9effbdf74d 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/CMakeLists.txt +++ b/src/tests/ov_helpers/ov_lpt_models/CMakeLists.txt @@ -2,7 +2,7 @@ # SPDX-License-Identifier: Apache-2.0 # -set(TARGET_NAME lptNgraphFunctions) +set(TARGET_NAME ov_lpt_models) set(PUBLIC_HEADERS_DIR "${CMAKE_CURRENT_SOURCE_DIR}/include") @@ -17,11 +17,11 @@ addIeTarget( 
${CMAKE_CURRENT_SOURCE_DIR}/src LINK_LIBRARIES PRIVATE - ngraphFunctions + ov_models openvino::runtime::dev ADD_CPPLINT DEPENDENCIES - ngraphFunctions + ov_models DEVELOPER_PACKAGE tests ) diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/add_function.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/add.hpp similarity index 92% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/add_function.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/add.hpp index 98b435b75daacc..623169d3c93611 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/add_function.hpp +++ b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/add.hpp @@ -8,11 +8,11 @@ #include #include -#include "elementwise_function.hpp" -#include "lpt_ngraph_functions/common/builders.hpp" -#include "lpt_ngraph_functions/common/convolution.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "elementwise.hpp" +#include "ov_lpt_models/common/builders.hpp" +#include "ov_lpt_models/common/convolution.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/align_concat_quantization_parameters_function.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/align_concat_quantization_parameters.hpp similarity index 100% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/align_concat_quantization_parameters_function.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/align_concat_quantization_parameters.hpp diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/assign_and_read_value_function.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/assign_and_read_value.hpp similarity index 92% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/assign_and_read_value_function.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/assign_and_read_value.hpp index a562944c958ee5..48517b24a0591f 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/assign_and_read_value_function.hpp +++ b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/assign_and_read_value.hpp @@ -7,8 +7,8 @@ #include #include #include -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/avg_pool_function.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/avg_pool.hpp similarity index 100% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/avg_pool_function.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/avg_pool.hpp diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/batch_to_space_function.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/batch_to_space.hpp similarity index 92% rename from 
src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/batch_to_space_function.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/batch_to_space.hpp index 21763d762617fe..b0220107bbdaed 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/batch_to_space_function.hpp +++ b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/batch_to_space.hpp @@ -6,8 +6,8 @@ #include #include -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/clamp_function.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/clamp.hpp similarity index 91% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/clamp_function.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/clamp.hpp index 4d5e33694743c3..882e05ff26d683 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/clamp_function.hpp +++ b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/clamp.hpp @@ -7,8 +7,8 @@ #include #include #include -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/common/add.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/common/add.hpp similarity index 100% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/common/add.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/common/add.hpp diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/common/builders.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/common/builders.hpp similarity index 92% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/common/builders.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/common/builders.hpp index a266e23efeeca0..dadfb833dddef5 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/common/builders.hpp +++ b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/common/builders.hpp @@ -14,12 +14,12 @@ #include "low_precision/rt_info/quantization_alignment_attribute.hpp" #include "low_precision/network_helper.hpp" -#include "lpt_ngraph_functions/common/add.hpp" -#include "lpt_ngraph_functions/common/convolution.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" -#include "lpt_ngraph_functions/common/reshape.hpp" -#include "lpt_ngraph_functions/common/transpose.hpp" +#include "ov_lpt_models/common/add.hpp" +#include "ov_lpt_models/common/convolution.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/reshape.hpp" +#include "ov_lpt_models/common/transpose.hpp" namespace ngraph { namespace builder { diff --git 
a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/common/constant.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/common/constant.hpp similarity index 100% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/common/constant.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/common/constant.hpp diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/common/convolution.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/common/convolution.hpp similarity index 100% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/common/convolution.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/common/convolution.hpp diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/common/dequantization_operations.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/common/dequantization_operations.hpp similarity index 100% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/common/dequantization_operations.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/common/dequantization_operations.hpp diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/common/fake_quantize_on_data.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/common/fake_quantize_on_data.hpp similarity index 100% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/common/fake_quantize_on_data.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/common/fake_quantize_on_data.hpp diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/common/fake_quantize_on_weights.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/common/fake_quantize_on_weights.hpp similarity index 100% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/common/fake_quantize_on_weights.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/common/fake_quantize_on_weights.hpp diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/common/multiply.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/common/multiply.hpp similarity index 100% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/common/multiply.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/common/multiply.hpp diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/common/reshape.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/common/reshape.hpp similarity index 100% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/common/reshape.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/common/reshape.hpp diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/common/transpose.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/common/transpose.hpp similarity index 100% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/common/transpose.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/common/transpose.hpp diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/compose_fake_quantize_function.hpp 
b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/compose_fake_quantize.hpp similarity index 85% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/compose_fake_quantize_function.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/compose_fake_quantize.hpp index 5779a4065c529c..54b5b56811d70a 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/compose_fake_quantize_function.hpp +++ b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/compose_fake_quantize.hpp @@ -7,8 +7,8 @@ #include #include -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" -#include "ngraph_functions/subgraph_builders.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" +#include "ov_models/subgraph_builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/concat_function.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/concat.hpp similarity index 100% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/concat_function.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/concat.hpp diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/convolution_function.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/convolution.hpp similarity index 94% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/convolution_function.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/convolution.hpp index fbf8aed8f67c3d..538c35a7e40205 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/convolution_function.hpp +++ b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/convolution.hpp @@ -10,9 +10,9 @@ #include -#include "lpt_ngraph_functions/common/fake_quantize_on_weights.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/fake_quantize_on_weights.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/convolution_backprop_data_function.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/convolution_backprop_data.hpp similarity index 91% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/convolution_backprop_data_function.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/convolution_backprop_data.hpp index 41f14732358eb0..19f1f45e1270a9 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/convolution_backprop_data_function.hpp +++ b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/convolution_backprop_data.hpp @@ -7,9 +7,9 @@ #include #include -#include "lpt_ngraph_functions/common/fake_quantize_on_weights.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/fake_quantize_on_weights.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" namespace ngraph { namespace builder { diff --git 
a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/depth_to_space_function.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/depth_to_space.hpp similarity index 95% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/depth_to_space_function.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/depth_to_space.hpp index 56e1aa874668a6..72fbad6aeb31cd 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/depth_to_space_function.hpp +++ b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/depth_to_space.hpp @@ -11,7 +11,7 @@ #include #include -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/elementwise_function.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/elementwise.hpp similarity index 82% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/elementwise_function.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/elementwise.hpp index 37c1de73967157..f637cfa3b3b64f 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/elementwise_function.hpp +++ b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/elementwise.hpp @@ -8,10 +8,10 @@ #include #include -#include "lpt_ngraph_functions/common/builders.hpp" -#include "lpt_ngraph_functions/common/convolution.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/builders.hpp" +#include "ov_lpt_models/common/convolution.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/elementwise_with_multi_parent_dequantization_function.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/elementwise_with_multi_parent_dequantization.hpp similarity index 94% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/elementwise_with_multi_parent_dequantization_function.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/elementwise_with_multi_parent_dequantization.hpp index 0c5542dd705095..944d2524624215 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/elementwise_with_multi_parent_dequantization_function.hpp +++ b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/elementwise_with_multi_parent_dequantization.hpp @@ -8,8 +8,8 @@ #include #include -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" -#include "lpt_ngraph_functions/common/builders.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/fake_quantize_function.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/fake_quantize.hpp similarity index 96% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/fake_quantize_function.hpp rename to 
src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/fake_quantize.hpp index 99890f00b5e85e..b20c119010dc21 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/fake_quantize_function.hpp +++ b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/fake_quantize.hpp @@ -9,7 +9,7 @@ #include #include "low_precision/layer_transformation.hpp" #include "common/fake_quantize_on_data.hpp" -#include "lpt_ngraph_functions/common/builders.hpp" +#include "ov_lpt_models/common/builders.hpp" namespace ngraph { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/fake_quantize_and_convolution_function.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/fake_quantize_and_convolution.hpp similarity index 84% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/fake_quantize_and_convolution_function.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/fake_quantize_and_convolution.hpp index d5cd9f8100921e..039f4e9fe22b95 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/fake_quantize_and_convolution_function.hpp +++ b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/fake_quantize_and_convolution.hpp @@ -8,13 +8,13 @@ #include #include -#include "lpt_ngraph_functions/common/constant.hpp" -#include "lpt_ngraph_functions/common/multiply.hpp" -#include "lpt_ngraph_functions/common/reshape.hpp" -#include "lpt_ngraph_functions/common/transpose.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_weights.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/constant.hpp" +#include "ov_lpt_models/common/multiply.hpp" +#include "ov_lpt_models/common/reshape.hpp" +#include "ov_lpt_models/common/transpose.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/fake_quantize_on_weights.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/fake_quantize_and_two_output_branches_with_convolution_function.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/fake_quantize_and_two_output_branches_with_convolution.hpp similarity index 87% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/fake_quantize_and_two_output_branches_with_convolution_function.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/fake_quantize_and_two_output_branches_with_convolution.hpp index 8445e7245894ca..79e6dcdb966874 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/fake_quantize_and_two_output_branches_with_convolution_function.hpp +++ b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/fake_quantize_and_two_output_branches_with_convolution.hpp @@ -8,9 +8,9 @@ #include #include -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_weights.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/fake_quantize_on_weights.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" namespace ngraph { namespace builder { diff --git 
a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/fake_quantize_on_weights_and_unsupported_child_function.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/fake_quantize_on_weights_and_unsupported_child.hpp similarity index 90% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/fake_quantize_on_weights_and_unsupported_child_function.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/fake_quantize_on_weights_and_unsupported_child.hpp index cd6da2784b1b75..3470e2ff7ce6f1 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/fake_quantize_on_weights_and_unsupported_child_function.hpp +++ b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/fake_quantize_on_weights_and_unsupported_child.hpp @@ -8,7 +8,7 @@ #include #include -#include "lpt_ngraph_functions/common/fake_quantize_on_weights.hpp" +#include "ov_lpt_models/common/fake_quantize_on_weights.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/fake_quantize_precision_selection_function.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/fake_quantize_precision_selection.hpp similarity index 93% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/fake_quantize_precision_selection_function.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/fake_quantize_precision_selection.hpp index 83d5e7861c6c23..74354e1cc567b0 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/fake_quantize_precision_selection_function.hpp +++ b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/fake_quantize_precision_selection.hpp @@ -9,8 +9,8 @@ #include #include "low_precision/layer_transformation.hpp" #include "common/fake_quantize_on_data.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_weights.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/fake_quantize_on_weights.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/fold_fake_quantize_function.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/fold_fake_quantize.hpp similarity index 100% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/fold_fake_quantize_function.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/fold_fake_quantize.hpp diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/fuse_convert_function.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/fuse_convert.hpp similarity index 88% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/fuse_convert_function.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/fuse_convert.hpp index b0ad75d889647e..42116bc5bb2f5b 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/fuse_convert_function.hpp +++ b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/fuse_convert.hpp @@ -7,8 +7,8 @@ #include #include #include -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" +#include 
"ov_lpt_models/common/fake_quantize_on_data.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/fuse_fake_quantize_function.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/fuse_fake_quantize.hpp similarity index 100% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/fuse_fake_quantize_function.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/fuse_fake_quantize.hpp diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/fuse_fake_quantize_and_scale_shift_function.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/fuse_fake_quantize_and_scale_shift.hpp similarity index 100% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/fuse_fake_quantize_and_scale_shift_function.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/fuse_fake_quantize_and_scale_shift.hpp diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/fuse_multiply_to_fake_quantize_function.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/fuse_multiply_to_fake_quantize.hpp similarity index 100% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/fuse_multiply_to_fake_quantize_function.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/fuse_multiply_to_fake_quantize.hpp diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/fuse_subtract_to_fake_quantize_function.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/fuse_subtract_to_fake_quantize.hpp similarity index 100% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/fuse_subtract_to_fake_quantize_function.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/fuse_subtract_to_fake_quantize.hpp diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/gather_function.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/gather.hpp similarity index 93% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/gather_function.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/gather.hpp index fe233dced8fdfc..a05092a22771ff 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/gather_function.hpp +++ b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/gather.hpp @@ -7,8 +7,8 @@ #include #include #include -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/get_dequantization_function.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/get_dequantization.hpp similarity index 90% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/get_dequantization_function.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/get_dequantization.hpp index dcd2554b20ae7d..b281208cfa7876 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/get_dequantization_function.hpp +++ 
b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/get_dequantization.hpp @@ -8,8 +8,8 @@ #include #include #include -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/group_convolution_function.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/group_convolution.hpp similarity index 94% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/group_convolution_function.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/group_convolution.hpp index 718eee5d72a40a..a215d62e64ae1d 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/group_convolution_function.hpp +++ b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/group_convolution.hpp @@ -8,8 +8,8 @@ #include #include -#include "lpt_ngraph_functions/common/fake_quantize_on_weights.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/fake_quantize_on_weights.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/interpolate_function.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/interpolate.hpp similarity index 97% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/interpolate_function.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/interpolate.hpp index a3330832022833..5e55aa39e3619c 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/interpolate_function.hpp +++ b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/interpolate.hpp @@ -6,7 +6,7 @@ #include #include -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/markup_avg_pool_precisions_function.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/markup_avg_pool_precisions.hpp similarity index 100% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/markup_avg_pool_precisions_function.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/markup_avg_pool_precisions.hpp diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/markup_bias_function.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/markup_bias.hpp similarity index 100% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/markup_bias_function.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/markup_bias.hpp diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/mat_mul_function.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/mat_mul.hpp similarity index 92% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/mat_mul_function.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/mat_mul.hpp index 
c880bed9bb114e..dd6b845cca6f03 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/mat_mul_function.hpp +++ b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/mat_mul.hpp @@ -6,10 +6,10 @@ #include #include -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_weights.hpp" -#include "lpt_ngraph_functions/common/constant.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/fake_quantize_on_weights.hpp" +#include "ov_lpt_models/common/constant.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/mat_mul_with_optimized_constant_fake_quantize_function.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/mat_mul_with_optimized_constant_fake_quantize.hpp similarity index 90% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/mat_mul_with_optimized_constant_fake_quantize_function.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/mat_mul_with_optimized_constant_fake_quantize.hpp index 39290f3eff4ff8..b542cd5184b20a 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/mat_mul_with_optimized_constant_fake_quantize_function.hpp +++ b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/mat_mul_with_optimized_constant_fake_quantize.hpp @@ -6,7 +6,7 @@ #include #include -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/max_pool_function.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/max_pool.hpp similarity index 93% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/max_pool_function.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/max_pool.hpp index 6f7af8b6fca043..62f83ba232ab64 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/max_pool_function.hpp +++ b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/max_pool.hpp @@ -8,7 +8,7 @@ #include #include "common/fake_quantize_on_data.hpp" #include "low_precision/layer_transformation.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/move_dequantization_after_function.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/move_dequantization_after.hpp similarity index 88% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/move_dequantization_after_function.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/move_dequantization_after.hpp index 116b591fff7e74..1a65dc56caaa10 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/move_dequantization_after_function.hpp +++ b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/move_dequantization_after.hpp @@ -7,8 +7,8 @@ #include #include -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" -#include 
"ngraph_functions/subgraph_builders.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" +#include "ov_models/subgraph_builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/move_fake_quantize_function.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/move_fake_quantize.hpp similarity index 100% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/move_fake_quantize_function.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/move_fake_quantize.hpp diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/mul_add_to_scaleshift_or_power_function.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/mul_add_to_scaleshift_or_power.hpp similarity index 100% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/mul_add_to_scaleshift_or_power_function.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/mul_add_to_scaleshift_or_power.hpp diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/multiply_function.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/multiply.hpp similarity index 92% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/multiply_function.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/multiply.hpp index 553a34b02d1533..ea97bb6d4803b8 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/multiply_function.hpp +++ b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/multiply.hpp @@ -7,9 +7,9 @@ #include #include -#include "elementwise_function.hpp" -#include "lpt_ngraph_functions/common/constant.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "elementwise.hpp" +#include "ov_lpt_models/common/constant.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/multiply_partial_function.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/multiply_partial_function.hpp similarity index 91% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/multiply_partial_function.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/multiply_partial_function.hpp index 878554dd1df4e5..64b7d73572fb0a 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/multiply_partial_function.hpp +++ b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/multiply_partial_function.hpp @@ -7,9 +7,9 @@ #include #include -#include "elementwise_function.hpp" -#include "lpt_ngraph_functions/common/constant.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "elementwise.hpp" +#include "ov_lpt_models/common/constant.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/multiply_to_group_convolution_function.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/multiply_to_group_convolution.hpp similarity index 88% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/multiply_to_group_convolution_function.hpp rename to 
src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/multiply_to_group_convolution.hpp index c2fe6fdcf907cf..5a864269f63d6f 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/multiply_to_group_convolution_function.hpp +++ b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/multiply_to_group_convolution.hpp @@ -9,9 +9,9 @@ #include #include -#include "lpt_ngraph_functions/common/constant.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/constant.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/multiply_with_one_parent_function.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/multiply_with_one_parent.hpp similarity index 89% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/multiply_with_one_parent_function.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/multiply_with_one_parent.hpp index d81318dc9178eb..e67c587fbb60a4 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/multiply_with_one_parent_function.hpp +++ b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/multiply_with_one_parent.hpp @@ -6,7 +6,7 @@ #include #include -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/mvn_function.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/mvn.hpp similarity index 95% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/mvn_function.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/mvn.hpp index 674a74b4e92dcd..ceae7e2032b69a 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/mvn_function.hpp +++ b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/mvn.hpp @@ -6,7 +6,7 @@ #include #include -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/normalize_dequantization_function.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/normalize_dequantization.hpp similarity index 89% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/normalize_dequantization_function.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/normalize_dequantization.hpp index e7106d68e8924b..b3cebb945e8fdd 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/normalize_dequantization_function.hpp +++ b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/normalize_dequantization.hpp @@ -6,7 +6,7 @@ #include #include -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/normalize_l2_function.hpp 
b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/normalize_l2.hpp similarity index 95% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/normalize_l2_function.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/normalize_l2.hpp index bc338033d1e247..195a10cda9fc0b 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/normalize_l2_function.hpp +++ b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/normalize_l2.hpp @@ -11,7 +11,7 @@ #include -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/pad_function.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/pad.hpp similarity index 89% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/pad_function.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/pad.hpp index 5e1231940a52a6..6dda7033d62fea 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/pad_function.hpp +++ b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/pad.hpp @@ -7,8 +7,8 @@ #include #include #include -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/precision_propagation_function.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/precision_propagation.hpp similarity index 100% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/precision_propagation_function.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/precision_propagation.hpp diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/prelu_function.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/prelu.hpp similarity index 94% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/prelu_function.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/prelu.hpp index c3f31b8d83e9b6..877a2173a9defb 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/prelu_function.hpp +++ b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/prelu.hpp @@ -6,7 +6,7 @@ #include #include -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/recurrent_cell_function.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/recurrent_cell.hpp similarity index 100% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/recurrent_cell_function.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/recurrent_cell.hpp diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/reduce_function.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/reduce.hpp similarity index 97% rename from 
src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/reduce_function.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/reduce.hpp index 650f114f9d9dac..40ed991f551b7b 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/reduce_function.hpp +++ b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/reduce.hpp @@ -8,9 +8,9 @@ #include #include -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" -#include "lpt_ngraph_functions/common/builders.hpp" -#include "lpt_ngraph_functions/common/constant.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/builders.hpp" +#include "ov_lpt_models/common/constant.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/relu_function.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/relu.hpp similarity index 94% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/relu_function.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/relu.hpp index 7cf7a8590873c0..c42872a224f4d2 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/relu_function.hpp +++ b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/relu.hpp @@ -6,7 +6,7 @@ #include #include -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/reshape_function.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/reshape.hpp similarity index 91% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/reshape_function.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/reshape.hpp index e3f8ca2023191a..f02388a39b8b05 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/reshape_function.hpp +++ b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/reshape.hpp @@ -7,8 +7,8 @@ #include #include #include -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/round_function.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/round.hpp similarity index 86% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/round_function.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/round.hpp index 9a37fb721b573d..71f13899ee0e57 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/round_function.hpp +++ b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/round.hpp @@ -7,8 +7,8 @@ #include #include -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" -#include "ngraph_functions/subgraph_builders.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" +#include "ov_models/subgraph_builders.hpp" namespace ngraph { namespace builder { diff --git 
a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/shuffle_channels_function.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/shuffle_channels.hpp similarity index 92% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/shuffle_channels_function.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/shuffle_channels.hpp index 9a866b1d912fc8..b9a4f9307d79e3 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/shuffle_channels_function.hpp +++ b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/shuffle_channels.hpp @@ -8,8 +8,8 @@ #include #include -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" -#include "lpt_ngraph_functions/common/builders.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/space_to_batch_function.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/space_to_batch.hpp similarity index 92% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/space_to_batch_function.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/space_to_batch.hpp index 1a85d0edd9e228..047fa52e3c0346 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/space_to_batch_function.hpp +++ b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/space_to_batch.hpp @@ -6,8 +6,8 @@ #include #include -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/split_function.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/split.hpp similarity index 91% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/split_function.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/split.hpp index aa8d1849f2f8ee..acd99c4a7978e1 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/split_function.hpp +++ b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/split.hpp @@ -8,8 +8,8 @@ #include #include -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/squeeze_function.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/squeeze.hpp similarity index 95% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/squeeze_function.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/squeeze.hpp index e46be467b7a378..70d11780f3f4e3 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/squeeze_function.hpp +++ b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/squeeze.hpp @@ -6,7 +6,7 @@ #include #include -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include 
"ov_lpt_models/common/dequantization_operations.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/strided_slice_function.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/strided_slice.hpp similarity index 95% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/strided_slice_function.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/strided_slice.hpp index 876b98cfb66134..ff2bd7e60a580e 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/strided_slice_function.hpp +++ b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/strided_slice.hpp @@ -8,8 +8,8 @@ #include #include -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" -#include "lpt_ngraph_functions/common/builders.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/subtract_function.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/subtract.hpp similarity index 73% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/subtract_function.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/subtract.hpp index d4dc1b0c9889a6..16d60fc786bfc9 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/subtract_function.hpp +++ b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/subtract.hpp @@ -7,9 +7,9 @@ #include #include -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" -#include "ngraph_functions/subgraph_builders.hpp" -#include "lpt_ngraph_functions/common/builders.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" +#include "ov_models/subgraph_builders.hpp" +#include "ov_lpt_models/common/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/subtract_multiply_to_multiply_add_function.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/subtract_multiply_to_multiply_add.hpp similarity index 85% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/subtract_multiply_to_multiply_add_function.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/subtract_multiply_to_multiply_add.hpp index 3dc00f501d1804..68ceb0315e7757 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/subtract_multiply_to_multiply_add_function.hpp +++ b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/subtract_multiply_to_multiply_add.hpp @@ -8,10 +8,10 @@ #include #include -#include "lpt_ngraph_functions/common/add.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" -#include "lpt_ngraph_functions/common/multiply.hpp" +#include "ov_lpt_models/common/add.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/multiply.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/transformations_after_split_function.hpp 
b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/transformations_after_split.hpp similarity index 100% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/transformations_after_split_function.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/transformations_after_split.hpp diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/transpose_function.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/transpose.hpp similarity index 91% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/transpose_function.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/transpose.hpp index d0561cf19bdc30..e31cd49a0f5f63 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/transpose_function.hpp +++ b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/transpose.hpp @@ -7,8 +7,8 @@ #include #include #include -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/transpose_after_mat_mul_function.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/transpose_after_mat_mul.hpp similarity index 74% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/transpose_after_mat_mul_function.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/transpose_after_mat_mul.hpp index b8e7f4dac50ece..4c9a4aa8940f45 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/transpose_after_mat_mul_function.hpp +++ b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/transpose_after_mat_mul.hpp @@ -7,9 +7,9 @@ #include #include -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" -#include "ngraph_functions/subgraph_builders.hpp" -#include "lpt_ngraph_functions/common/builders.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" +#include "ov_models/subgraph_builders.hpp" +#include "ov_lpt_models/common/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/unsqueeze_function.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/unsqueeze.hpp similarity index 95% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/unsqueeze_function.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/unsqueeze.hpp index 5cc9ec14c12159..c59505fa029907 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/unsqueeze_function.hpp +++ b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/unsqueeze.hpp @@ -6,7 +6,7 @@ #include #include -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/variadic_split_function.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/variadic_split.hpp similarity index 91% rename from 
src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/variadic_split_function.hpp rename to src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/variadic_split.hpp index 9afb7e968b0119..0bacd0f968962d 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/variadic_split_function.hpp +++ b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/variadic_split.hpp @@ -8,8 +8,8 @@ #include #include -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/add_function.cpp b/src/tests/ov_helpers/ov_lpt_models/src/add.cpp similarity index 98% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/add_function.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/add.cpp index 852a261c36d1a8..29cc1f2fcd58e6 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/add_function.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/add.cpp @@ -2,15 +2,15 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "lpt_ngraph_functions/add_function.hpp" +#include "ov_lpt_models/add.hpp" #include "low_precision/network_helper.hpp" #include "low_precision/layer_transformation.hpp" #include "ngraph/opsets/opset1.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" -#include "ngraph_functions/subgraph_builders.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" +#include "ov_models/subgraph_builders.hpp" using namespace ov::pass::low_precision; diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/align_concat_quantization_parameters_function.cpp b/src/tests/ov_helpers/ov_lpt_models/src/align_concat_quantization_parameters.cpp similarity index 98% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/align_concat_quantization_parameters_function.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/align_concat_quantization_parameters.cpp index aa2ba30494e439..fcfb95759ea0d7 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/align_concat_quantization_parameters_function.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/align_concat_quantization_parameters.cpp @@ -2,14 +2,14 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "lpt_ngraph_functions/align_concat_quantization_parameters_function.hpp" +#include "ov_lpt_models/align_concat_quantization_parameters.hpp" #include #include #include "low_precision/network_helper.hpp" -#include "lpt_ngraph_functions/common/builders.hpp" -#include "ngraph_functions/subgraph_builders.hpp" +#include "ov_lpt_models/common/builders.hpp" +#include "ov_models/subgraph_builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/assign_and_read_value_function.cpp b/src/tests/ov_helpers/ov_lpt_models/src/assign_and_read_value.cpp similarity index 98% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/assign_and_read_value_function.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/assign_and_read_value.cpp index f115f72cd63ecf..1874fb2d608c24 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/assign_and_read_value_function.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/assign_and_read_value.cpp @@ -10,12 +10,12 @@ #include #include #include -#include "ngraph_functions/subgraph_builders.hpp" +#include 
"ov_models/subgraph_builders.hpp" #include "openvino/op/util/variable.hpp" #include -#include "lpt_ngraph_functions/common/builders.hpp" -#include "lpt_ngraph_functions/assign_and_read_value_function.hpp" +#include "ov_lpt_models/common/builders.hpp" +#include "ov_lpt_models/assign_and_read_value.hpp" #include "low_precision/network_helper.hpp" namespace ngraph { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/avg_pool_function.cpp b/src/tests/ov_helpers/ov_lpt_models/src/avg_pool.cpp similarity index 97% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/avg_pool_function.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/avg_pool.cpp index 04b525f3f1689a..b29245fc1a8834 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/avg_pool_function.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/avg_pool.cpp @@ -6,10 +6,10 @@ #include #include "low_precision/network_helper.hpp" -#include "lpt_ngraph_functions/common/builders.hpp" +#include "ov_lpt_models/common/builders.hpp" -#include "lpt_ngraph_functions/avg_pool_function.hpp" -#include "ngraph_functions/subgraph_builders.hpp" +#include "ov_lpt_models/avg_pool.hpp" +#include "ov_models/subgraph_builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/batch_to_space_function.cpp b/src/tests/ov_helpers/ov_lpt_models/src/batch_to_space.cpp similarity index 96% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/batch_to_space_function.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/batch_to_space.cpp index 0d5021ee808bbc..fe1a72a26a164d 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/batch_to_space_function.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/batch_to_space.cpp @@ -2,10 +2,10 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "lpt_ngraph_functions/batch_to_space_function.hpp" +#include "ov_lpt_models/batch_to_space.hpp" #include -#include "lpt_ngraph_functions/common/builders.hpp" +#include "ov_lpt_models/common/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/clamp_function.cpp b/src/tests/ov_helpers/ov_lpt_models/src/clamp.cpp similarity index 96% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/clamp_function.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/clamp.cpp index 26f04534870e44..0287e1cf628e0d 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/clamp_function.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/clamp.cpp @@ -8,9 +8,9 @@ #include -#include "ngraph_functions/subgraph_builders.hpp" -#include "lpt_ngraph_functions/common/builders.hpp" -#include "lpt_ngraph_functions/clamp_function.hpp" +#include "ov_models/subgraph_builders.hpp" +#include "ov_lpt_models/common/builders.hpp" +#include "ov_lpt_models/clamp.hpp" #include "low_precision/network_helper.hpp" namespace ngraph { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/common/add.cpp b/src/tests/ov_helpers/ov_lpt_models/src/common/add.cpp similarity index 96% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/common/add.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/common/add.cpp index 36657b8b287f8d..444b8eed24d114 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/common/add.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/common/add.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "lpt_ngraph_functions/common/add.hpp" +#include "ov_lpt_models/common/add.hpp" #include 
namespace ngraph { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/common/builders.cpp b/src/tests/ov_helpers/ov_lpt_models/src/common/builders.cpp similarity index 99% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/common/builders.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/common/builders.cpp index ca4adc89b02405..a4f3f8d3fc3997 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/common/builders.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/common/builders.cpp @@ -2,14 +2,14 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "lpt_ngraph_functions/common/builders.hpp" +#include "ov_lpt_models/common/builders.hpp" #include #include #include #include "ov_ops/type_relaxed.hpp" -#include "ngraph_functions/subgraph_builders.hpp" +#include "ov_models/subgraph_builders.hpp" #include "low_precision/network_helper.hpp" namespace ngraph { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/common/constant.cpp b/src/tests/ov_helpers/ov_lpt_models/src/common/constant.cpp similarity index 95% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/common/constant.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/common/constant.cpp index 26f7956f2a5daf..2d6d2c513b1f94 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/common/constant.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/common/constant.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "lpt_ngraph_functions/common/constant.hpp" +#include "ov_lpt_models/common/constant.hpp" #include namespace ngraph { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/common/convolution.cpp b/src/tests/ov_helpers/ov_lpt_models/src/common/convolution.cpp similarity index 93% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/common/convolution.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/common/convolution.cpp index ed9e958891068a..19e2cae02abfde 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/common/convolution.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/common/convolution.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "lpt_ngraph_functions/common/convolution.hpp" +#include "ov_lpt_models/common/convolution.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/common/dequantization_operations.cpp b/src/tests/ov_helpers/ov_lpt_models/src/common/dequantization_operations.cpp similarity index 98% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/common/dequantization_operations.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/common/dequantization_operations.cpp index 599e69eeb3de1c..2b073110cdf731 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/common/dequantization_operations.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/common/dequantization_operations.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" #include namespace ngraph { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/common/fake_quantize_on_data.cpp b/src/tests/ov_helpers/ov_lpt_models/src/common/fake_quantize_on_data.cpp similarity index 97% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/common/fake_quantize_on_data.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/common/fake_quantize_on_data.cpp index 3ec02dfbcc96ba..bc36e939da9c49 100644 --- 
a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/common/fake_quantize_on_data.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/common/fake_quantize_on_data.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" #include namespace ngraph { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/common/fake_quantize_on_weights.cpp b/src/tests/ov_helpers/ov_lpt_models/src/common/fake_quantize_on_weights.cpp similarity index 93% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/common/fake_quantize_on_weights.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/common/fake_quantize_on_weights.cpp index e27336f2d741a9..e560d59e0b47a9 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/common/fake_quantize_on_weights.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/common/fake_quantize_on_weights.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "lpt_ngraph_functions/common/fake_quantize_on_weights.hpp" +#include "ov_lpt_models/common/fake_quantize_on_weights.hpp" #include namespace ngraph { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/common/multiply.cpp b/src/tests/ov_helpers/ov_lpt_models/src/common/multiply.cpp similarity index 96% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/common/multiply.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/common/multiply.cpp index 4842ca965bedb1..f8311a72d4e548 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/common/multiply.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/common/multiply.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "lpt_ngraph_functions/common/multiply.hpp" +#include "ov_lpt_models/common/multiply.hpp" #include namespace ngraph { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/common/reshape.cpp b/src/tests/ov_helpers/ov_lpt_models/src/common/reshape.cpp similarity index 90% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/common/reshape.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/common/reshape.cpp index 7e3d198977e640..bb8832e71e2003 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/common/reshape.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/common/reshape.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "lpt_ngraph_functions/common/reshape.hpp" +#include "ov_lpt_models/common/reshape.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/common/transpose.cpp b/src/tests/ov_helpers/ov_lpt_models/src/common/transpose.cpp similarity index 89% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/common/transpose.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/common/transpose.cpp index b80070033a675c..20c21b2dde4b9c 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/common/transpose.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/common/transpose.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "lpt_ngraph_functions/common/transpose.hpp" +#include "ov_lpt_models/common/transpose.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/compose_fake_quantize_function.cpp b/src/tests/ov_helpers/ov_lpt_models/src/compose_fake_quantize.cpp similarity index 91% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/compose_fake_quantize_function.cpp 
rename to src/tests/ov_helpers/ov_lpt_models/src/compose_fake_quantize.cpp index b2239126dd2a36..ecd1c742403725 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/compose_fake_quantize_function.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/compose_fake_quantize.cpp @@ -2,12 +2,12 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "lpt_ngraph_functions/compose_fake_quantize_function.hpp" +#include "ov_lpt_models/compose_fake_quantize.hpp" #include "low_precision/network_helper.hpp" #include -#include "ngraph_functions/subgraph_builders.hpp" -#include "lpt_ngraph_functions/common/builders.hpp" +#include "ov_models/subgraph_builders.hpp" +#include "ov_lpt_models/common/builders.hpp" using namespace ov::pass::low_precision; diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/concat_function.cpp b/src/tests/ov_helpers/ov_lpt_models/src/concat.cpp similarity index 99% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/concat_function.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/concat.cpp index fa2f1f9d8ead37..ec63f3369e0aed 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/concat_function.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/concat.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "lpt_ngraph_functions/concat_function.hpp" +#include "ov_lpt_models/concat.hpp" #include #include "ov_ops/type_relaxed.hpp" @@ -11,10 +11,10 @@ #include "low_precision/rt_info/intervals_alignment_attribute.hpp" #include "low_precision/rt_info/quantization_alignment_attribute.hpp" -#include "ngraph_functions/builders.hpp" -#include "lpt_ngraph_functions/common/builders.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_models/builders.hpp" +#include "ov_lpt_models/common/builders.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/convolution_function.cpp b/src/tests/ov_helpers/ov_lpt_models/src/convolution.cpp similarity index 98% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/convolution_function.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/convolution.cpp index f7a2a5fd80c16b..85f9df120df532 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/convolution_function.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/convolution.cpp @@ -2,18 +2,18 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "lpt_ngraph_functions/convolution_function.hpp" +#include "ov_lpt_models/convolution.hpp" #include #include -#include "ngraph_functions/subgraph_builders.hpp" +#include "ov_models/subgraph_builders.hpp" #include "low_precision/network_helper.hpp" #include "low_precision/rt_info/quantization_granularity_attribute.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_weights.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" -#include "lpt_ngraph_functions/common/builders.hpp" +#include "ov_lpt_models/common/fake_quantize_on_weights.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/builders.hpp" #include "low_precision/network_helper.hpp" using namespace ov::pass::low_precision; diff --git 
a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/convolution_backprop_data_function.cpp b/src/tests/ov_helpers/ov_lpt_models/src/convolution_backprop_data.cpp similarity index 95% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/convolution_backprop_data_function.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/convolution_backprop_data.cpp index 1a56bac32cf33e..57f0505ce44036 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/convolution_backprop_data_function.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/convolution_backprop_data.cpp @@ -2,17 +2,17 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "lpt_ngraph_functions/convolution_backprop_data_function.hpp" +#include "ov_lpt_models/convolution_backprop_data.hpp" #include #include -#include "ngraph_functions/subgraph_builders.hpp" +#include "ov_models/subgraph_builders.hpp" #include "low_precision/network_helper.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_weights.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" -#include "lpt_ngraph_functions/common/builders.hpp" +#include "ov_lpt_models/common/fake_quantize_on_weights.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/builders.hpp" #include "low_precision/network_helper.hpp" using namespace ov::pass::low_precision; diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/depth_to_space_function.cpp b/src/tests/ov_helpers/ov_lpt_models/src/depth_to_space.cpp similarity index 95% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/depth_to_space_function.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/depth_to_space.cpp index 8972646997788b..867a4dccd5a9fe 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/depth_to_space_function.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/depth_to_space.cpp @@ -2,10 +2,10 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "lpt_ngraph_functions/depth_to_space_function.hpp" +#include "ov_lpt_models/depth_to_space.hpp" -#include "ngraph_functions/subgraph_builders.hpp" -#include "lpt_ngraph_functions/common/builders.hpp" +#include "ov_models/subgraph_builders.hpp" +#include "ov_lpt_models/common/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/elementwise_function.cpp b/src/tests/ov_helpers/ov_lpt_models/src/elementwise.cpp similarity index 97% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/elementwise_function.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/elementwise.cpp index 4bc0ca320631bc..9b73c223b633f6 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/elementwise_function.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/elementwise.cpp @@ -2,11 +2,11 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "lpt_ngraph_functions/elementwise_function.hpp" +#include "ov_lpt_models/elementwise.hpp" #include "low_precision/layer_transformation.hpp" #include "ngraph/opsets/opset1.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" using namespace ov::pass::low_precision; diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/elementwise_with_multi_parent_dequantization_function.cpp b/src/tests/ov_helpers/ov_lpt_models/src/elementwise_with_multi_parent_dequantization.cpp 
similarity index 94% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/elementwise_with_multi_parent_dequantization_function.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/elementwise_with_multi_parent_dequantization.cpp index c6aaec64b83560..661941aa80149e 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/elementwise_with_multi_parent_dequantization_function.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/elementwise_with_multi_parent_dequantization.cpp @@ -2,12 +2,12 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "lpt_ngraph_functions/elementwise_with_multi_parent_dequantization_function.hpp" +#include "ov_lpt_models/elementwise_with_multi_parent_dequantization.hpp" #include "low_precision/network_helper.hpp" #include -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/subgraph_builders.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/subgraph_builders.hpp" using namespace ov::pass::low_precision; diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/fake_quantize_function.cpp b/src/tests/ov_helpers/ov_lpt_models/src/fake_quantize.cpp similarity index 97% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/fake_quantize_function.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/fake_quantize.cpp index cfe4f6553696d8..7f4082aebca273 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/fake_quantize_function.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/fake_quantize.cpp @@ -2,13 +2,13 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "lpt_ngraph_functions/fake_quantize_function.hpp" +#include "ov_lpt_models/fake_quantize.hpp" #include #include "ov_ops/type_relaxed.hpp" -#include "ngraph_functions/subgraph_builders.hpp" +#include "ov_models/subgraph_builders.hpp" #include "low_precision/network_helper.hpp" -#include "lpt_ngraph_functions/common/builders.hpp" +#include "ov_lpt_models/common/builders.hpp" using namespace ov::pass::low_precision; diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/fake_quantize_and_convolution_function.cpp b/src/tests/ov_helpers/ov_lpt_models/src/fake_quantize_and_convolution.cpp similarity index 98% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/fake_quantize_and_convolution_function.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/fake_quantize_and_convolution.cpp index 0cdf3209534c31..7d41fa5ace53af 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/fake_quantize_and_convolution_function.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/fake_quantize_and_convolution.cpp @@ -2,11 +2,11 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "lpt_ngraph_functions/fake_quantize_and_convolution_function.hpp" +#include "ov_lpt_models/fake_quantize_and_convolution.hpp" #include -#include "ngraph_functions/subgraph_builders.hpp" -#include "lpt_ngraph_functions/common/builders.hpp" +#include "ov_models/subgraph_builders.hpp" +#include "ov_lpt_models/common/builders.hpp" #include "inference_engine.hpp" namespace ngraph { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/fake_quantize_and_two_output_branches_with_convolution_function.cpp b/src/tests/ov_helpers/ov_lpt_models/src/fake_quantize_and_two_output_branches_with_convolution.cpp similarity index 96% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/fake_quantize_and_two_output_branches_with_convolution_function.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/fake_quantize_and_two_output_branches_with_convolution.cpp index 
5aa0d57f2a4d57..aca5f62b9895ff 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/fake_quantize_and_two_output_branches_with_convolution_function.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/fake_quantize_and_two_output_branches_with_convolution.cpp @@ -3,11 +3,11 @@ // #include -#include "lpt_ngraph_functions/common/builders.hpp" -#include "lpt_ngraph_functions/fake_quantize_and_two_output_branches_with_convolution_function.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_weights.hpp" +#include "ov_lpt_models/common/builders.hpp" +#include "ov_lpt_models/fake_quantize_and_two_output_branches_with_convolution.hpp" +#include "ov_lpt_models/common/fake_quantize_on_weights.hpp" #include "low_precision/network_helper.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace ngraph { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/fake_quantize_on_weights_and_unsupported_child_function.cpp b/src/tests/ov_helpers/ov_lpt_models/src/fake_quantize_on_weights_and_unsupported_child.cpp similarity index 87% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/fake_quantize_on_weights_and_unsupported_child_function.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/fake_quantize_on_weights_and_unsupported_child.cpp index 0ccd3a30eb2374..7789fb4aff0c94 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/fake_quantize_on_weights_and_unsupported_child_function.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/fake_quantize_on_weights_and_unsupported_child.cpp @@ -3,11 +3,11 @@ // #include -#include "lpt_ngraph_functions/common/builders.hpp" -#include "lpt_ngraph_functions/fake_quantize_on_weights_and_unsupported_child_function.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_weights.hpp" +#include "ov_lpt_models/common/builders.hpp" +#include "ov_lpt_models/fake_quantize_on_weights_and_unsupported_child.hpp" +#include "ov_lpt_models/common/fake_quantize_on_weights.hpp" #include "low_precision/network_helper.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace ngraph { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/fake_quantize_precision_selection_function.cpp b/src/tests/ov_helpers/ov_lpt_models/src/fake_quantize_precision_selection.cpp similarity index 96% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/fake_quantize_precision_selection_function.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/fake_quantize_precision_selection.cpp index 947d43eb23b006..23404735668f36 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/fake_quantize_precision_selection_function.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/fake_quantize_precision_selection.cpp @@ -2,12 +2,12 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "lpt_ngraph_functions/fake_quantize_precision_selection_function.hpp" +#include "ov_lpt_models/fake_quantize_precision_selection.hpp" #include #include -#include "ngraph_functions/subgraph_builders.hpp" -#include "lpt_ngraph_functions/common/builders.hpp" +#include "ov_models/subgraph_builders.hpp" +#include "ov_lpt_models/common/builders.hpp" #include "low_precision/network_helper.hpp" namespace ngraph { @@ -170,7 +170,7 @@ std::shared_ptr FakeQuantizePrecisionSelectionFunction::getRef ov::pass::low_precision::NetworkHelper::setOutDataPrecisionForTypeRelaxed(branch1Pooling, values.fakeQuantizeOnDataOutPrecision); } else { // TODO: potential workaround for the same case: - // 
openvino\inference-engine\tests\ngraph_functions\src\low_precision_transformations\concat_function.cpp, line #496 + // openvino\inference-engine\tests\ov_models\src\low_precision_transformations\concat_function.cpp, line #496 branch1Pooling->set_output_type(0, values.fakeQuantizeOnDataOutPrecision, branch1Pooling->get_output_partial_shape(0)); } } diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/fold_fake_quantize_function.cpp b/src/tests/ov_helpers/ov_lpt_models/src/fold_fake_quantize.cpp similarity index 93% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/fold_fake_quantize_function.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/fold_fake_quantize.cpp index bbc5298e701f0f..7e92f45e7875a0 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/fold_fake_quantize_function.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/fold_fake_quantize.cpp @@ -2,11 +2,11 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "lpt_ngraph_functions/fold_fake_quantize_function.hpp" +#include "ov_lpt_models/fold_fake_quantize.hpp" #include #include "ov_ops/type_relaxed.hpp" -#include "ngraph_functions/subgraph_builders.hpp" +#include "ov_models/subgraph_builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/fuse_convert_function.cpp b/src/tests/ov_helpers/ov_lpt_models/src/fuse_convert.cpp similarity index 95% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/fuse_convert_function.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/fuse_convert.cpp index dd07773dafec10..f2161bb9b5792e 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/fuse_convert_function.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/fuse_convert.cpp @@ -2,11 +2,11 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "lpt_ngraph_functions/fuse_convert_function.hpp" +#include "ov_lpt_models/fuse_convert.hpp" #include -#include "ngraph_functions/subgraph_builders.hpp" -#include "lpt_ngraph_functions/common/builders.hpp" +#include "ov_models/subgraph_builders.hpp" +#include "ov_lpt_models/common/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/fuse_fake_quantize_function.cpp b/src/tests/ov_helpers/ov_lpt_models/src/fuse_fake_quantize.cpp similarity index 96% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/fuse_fake_quantize_function.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/fuse_fake_quantize.cpp index 5f5f4ac2ceb478..f3bbcf7aa3c56b 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/fuse_fake_quantize_function.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/fuse_fake_quantize.cpp @@ -2,16 +2,16 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "lpt_ngraph_functions/fuse_fake_quantize_function.hpp" +#include "ov_lpt_models/fuse_fake_quantize.hpp" #include #include "ov_ops/type_relaxed.hpp" #include "low_precision/network_helper.hpp" -#include "ngraph_functions/subgraph_builders.hpp" +#include "ov_models/subgraph_builders.hpp" -#include "lpt_ngraph_functions/common/builders.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/builders.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" namespace ngraph { namespace builder { diff --git 
a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/fuse_fake_quantize_and_scale_shift_function.cpp b/src/tests/ov_helpers/ov_lpt_models/src/fuse_fake_quantize_and_scale_shift.cpp similarity index 93% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/fuse_fake_quantize_and_scale_shift_function.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/fuse_fake_quantize_and_scale_shift.cpp index 53611efaa56088..69be1fad85a095 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/fuse_fake_quantize_and_scale_shift_function.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/fuse_fake_quantize_and_scale_shift.cpp @@ -2,10 +2,10 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "lpt_ngraph_functions/fuse_fake_quantize_and_scale_shift_function.hpp" +#include "ov_lpt_models/fuse_fake_quantize_and_scale_shift.hpp" #include -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/fuse_multiply_to_fake_quantize_function.cpp b/src/tests/ov_helpers/ov_lpt_models/src/fuse_multiply_to_fake_quantize.cpp similarity index 78% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/fuse_multiply_to_fake_quantize_function.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/fuse_multiply_to_fake_quantize.cpp index 743899b79f4645..fc0f10cf209f12 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/fuse_multiply_to_fake_quantize_function.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/fuse_multiply_to_fake_quantize.cpp @@ -2,16 +2,16 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "lpt_ngraph_functions/fuse_multiply_to_fake_quantize_function.hpp" +#include "ov_lpt_models/fuse_multiply_to_fake_quantize.hpp" #include #include "ov_ops/type_relaxed.hpp" -#include "ngraph_functions/subgraph_builders.hpp" +#include "ov_models/subgraph_builders.hpp" #include "low_precision/network_helper.hpp" -#include "lpt_ngraph_functions/common/builders.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/builders.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/fuse_subtract_to_fake_quantize_function.cpp b/src/tests/ov_helpers/ov_lpt_models/src/fuse_subtract_to_fake_quantize.cpp similarity index 89% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/fuse_subtract_to_fake_quantize_function.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/fuse_subtract_to_fake_quantize.cpp index 67b0804705f546..27885ddf4c6876 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/fuse_subtract_to_fake_quantize_function.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/fuse_subtract_to_fake_quantize.cpp @@ -2,16 +2,16 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "lpt_ngraph_functions/fuse_subtract_to_fake_quantize_function.hpp" +#include "ov_lpt_models/fuse_subtract_to_fake_quantize.hpp" #include #include "ov_ops/type_relaxed.hpp" -#include "ngraph_functions/subgraph_builders.hpp" +#include "ov_models/subgraph_builders.hpp" #include "low_precision/network_helper.hpp" -#include "lpt_ngraph_functions/common/builders.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include 
"ov_lpt_models/common/builders.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/gather_function.cpp b/src/tests/ov_helpers/ov_lpt_models/src/gather.cpp similarity index 98% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/gather_function.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/gather.cpp index 45ee1436994c54..efcae3e74531bd 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/gather_function.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/gather.cpp @@ -2,12 +2,12 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "lpt_ngraph_functions/gather_function.hpp" +#include "ov_lpt_models/gather.hpp" #include #include #include -#include "lpt_ngraph_functions/common/builders.hpp" +#include "ov_lpt_models/common/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/get_dequantization_function.cpp b/src/tests/ov_helpers/ov_lpt_models/src/get_dequantization.cpp similarity index 97% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/get_dequantization_function.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/get_dequantization.cpp index a66f0c7fd18e80..5531019be235bf 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/get_dequantization_function.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/get_dequantization.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "lpt_ngraph_functions/get_dequantization_function.hpp" +#include "ov_lpt_models/get_dequantization.hpp" #include #include @@ -12,8 +12,8 @@ #include #include -#include "ngraph_functions/subgraph_builders.hpp" -#include "lpt_ngraph_functions/common/builders.hpp" +#include "ov_models/subgraph_builders.hpp" +#include "ov_lpt_models/common/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/group_convolution_function.cpp b/src/tests/ov_helpers/ov_lpt_models/src/group_convolution.cpp similarity index 97% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/group_convolution_function.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/group_convolution.cpp index cd1f4ca12a37a7..ea1b7db7e7bc26 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/group_convolution_function.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/group_convolution.cpp @@ -2,17 +2,17 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "lpt_ngraph_functions/group_convolution_function.hpp" +#include "ov_lpt_models/group_convolution.hpp" #include #include -#include "ngraph_functions/subgraph_builders.hpp" +#include "ov_models/subgraph_builders.hpp" #include "low_precision/network_helper.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_weights.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" -#include "lpt_ngraph_functions/common/builders.hpp" +#include "ov_lpt_models/common/fake_quantize_on_weights.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/builders.hpp" using namespace ngraph::opset1; using namespace ov::pass::low_precision; diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/interpolate_function.cpp 
b/src/tests/ov_helpers/ov_lpt_models/src/interpolate.cpp similarity index 97% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/interpolate_function.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/interpolate.cpp index aff85f34d60681..eac959de1f2630 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/interpolate_function.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/interpolate.cpp @@ -2,10 +2,10 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "lpt_ngraph_functions/interpolate_function.hpp" +#include "ov_lpt_models/interpolate.hpp" -#include "ngraph_functions/subgraph_builders.hpp" -#include "lpt_ngraph_functions/common/builders.hpp" +#include "ov_models/subgraph_builders.hpp" +#include "ov_lpt_models/common/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/markup_avg_pool_precisions_function.cpp b/src/tests/ov_helpers/ov_lpt_models/src/markup_avg_pool_precisions.cpp similarity index 98% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/markup_avg_pool_precisions_function.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/markup_avg_pool_precisions.cpp index 6e7c1761625e41..c49e7503040b29 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/markup_avg_pool_precisions_function.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/markup_avg_pool_precisions.cpp @@ -6,10 +6,10 @@ #include #include "low_precision/network_helper.hpp" -#include "lpt_ngraph_functions/common/builders.hpp" +#include "ov_lpt_models/common/builders.hpp" -#include "lpt_ngraph_functions/markup_avg_pool_precisions_function.hpp" -#include "ngraph_functions/subgraph_builders.hpp" +#include "ov_lpt_models/markup_avg_pool_precisions.hpp" +#include "ov_models/subgraph_builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/markup_bias_function.cpp b/src/tests/ov_helpers/ov_lpt_models/src/markup_bias.cpp similarity index 97% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/markup_bias_function.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/markup_bias.cpp index ca9aecc2424d86..2d76df4099979b 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/markup_bias_function.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/markup_bias.cpp @@ -3,9 +3,9 @@ // #include -#include "lpt_ngraph_functions/markup_bias_function.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_lpt_models/markup_bias.hpp" +#include "ov_models/utils/ov_helpers.hpp" +#include "ov_models/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/mat_mul_function.cpp b/src/tests/ov_helpers/ov_lpt_models/src/mat_mul.cpp similarity index 98% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/mat_mul_function.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/mat_mul.cpp index 5a9fee0b14989c..e9ec63589e3807 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/mat_mul_function.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/mat_mul.cpp @@ -2,16 +2,16 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "lpt_ngraph_functions/mat_mul_function.hpp" +#include "ov_lpt_models/mat_mul.hpp" #include #include #include #include "ov_ops/type_relaxed.hpp" -#include "ngraph_functions/subgraph_builders.hpp" +#include "ov_models/subgraph_builders.hpp" #include "low_precision/network_helper.hpp" -#include 
"lpt_ngraph_functions/common/builders.hpp" +#include "ov_lpt_models/common/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/mat_mul_with_optimized_constant_fake_quantize_function.cpp b/src/tests/ov_helpers/ov_lpt_models/src/mat_mul_with_optimized_constant_fake_quantize.cpp similarity index 95% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/mat_mul_with_optimized_constant_fake_quantize_function.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/mat_mul_with_optimized_constant_fake_quantize.cpp index e5aea80a94f687..00e87d0bb39040 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/mat_mul_with_optimized_constant_fake_quantize_function.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/mat_mul_with_optimized_constant_fake_quantize.cpp @@ -2,10 +2,10 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "lpt_ngraph_functions/mat_mul_with_optimized_constant_fake_quantize_function.hpp" +#include "ov_lpt_models/mat_mul_with_optimized_constant_fake_quantize.hpp" #include -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/max_pool_function.cpp b/src/tests/ov_helpers/ov_lpt_models/src/max_pool.cpp similarity index 94% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/max_pool_function.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/max_pool.cpp index ee00fe52e98115..237157952854c4 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/max_pool_function.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/max_pool.cpp @@ -2,13 +2,13 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "lpt_ngraph_functions/max_pool_function.hpp" +#include "ov_lpt_models/max_pool.hpp" #include #include #include "low_precision/network_helper.hpp" -#include "ngraph_functions/subgraph_builders.hpp" -#include "lpt_ngraph_functions/common/builders.hpp" +#include "ov_models/subgraph_builders.hpp" +#include "ov_lpt_models/common/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/move_dequantization_after_function.cpp b/src/tests/ov_helpers/ov_lpt_models/src/move_dequantization_after.cpp similarity index 94% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/move_dequantization_after_function.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/move_dequantization_after.cpp index 4e951af09868fb..82d5bfbe305ea7 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/move_dequantization_after_function.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/move_dequantization_after.cpp @@ -2,12 +2,12 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "lpt_ngraph_functions/move_dequantization_after_function.hpp" +#include "ov_lpt_models/move_dequantization_after.hpp" #include "low_precision/network_helper.hpp" #include -#include "ngraph_functions/subgraph_builders.hpp" -#include "lpt_ngraph_functions/common/builders.hpp" +#include "ov_models/subgraph_builders.hpp" +#include "ov_lpt_models/common/builders.hpp" using namespace ov::pass::low_precision; diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/move_fake_quantize_function.cpp b/src/tests/ov_helpers/ov_lpt_models/src/move_fake_quantize.cpp similarity index 95% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/move_fake_quantize_function.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/move_fake_quantize.cpp index 
ddd13e9cd8753c..b0484c2192f760 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/move_fake_quantize_function.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/move_fake_quantize.cpp @@ -2,16 +2,16 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "lpt_ngraph_functions/move_fake_quantize_function.hpp" +#include "ov_lpt_models/move_fake_quantize.hpp" #include #include #include "ov_ops/type_relaxed.hpp" #include "low_precision/network_helper.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" -#include "lpt_ngraph_functions/common/builders.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/multiply_function.cpp b/src/tests/ov_helpers/ov_lpt_models/src/multiply.cpp similarity index 93% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/multiply_function.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/multiply.cpp index e4ff86359f86db..9937547d2f226b 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/multiply_function.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/multiply.cpp @@ -2,17 +2,17 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "lpt_ngraph_functions/multiply_function.hpp" +#include "ov_lpt_models/multiply.hpp" #include #include #include -#include "ngraph_functions/subgraph_builders.hpp" +#include "ov_models/subgraph_builders.hpp" #include "low_precision/network_helper.hpp" -#include "lpt_ngraph_functions/common/builders.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/builders.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" using namespace ov::pass::low_precision; diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/multiply_partial_function.cpp b/src/tests/ov_helpers/ov_lpt_models/src/multiply_partial_function.cpp similarity index 96% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/multiply_partial_function.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/multiply_partial_function.cpp index e41d340a634d61..25b923c79fbb0c 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/multiply_partial_function.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/multiply_partial_function.cpp @@ -2,17 +2,17 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "lpt_ngraph_functions/multiply_partial_function.hpp" +#include "ov_lpt_models/multiply_partial_function.hpp" #include #include #include -#include "ngraph_functions/subgraph_builders.hpp" +#include "ov_models/subgraph_builders.hpp" #include "low_precision/network_helper.hpp" -#include "lpt_ngraph_functions/common/builders.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/builders.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" using namespace ov::pass::low_precision; diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/multiply_to_group_convolution_function.cpp b/src/tests/ov_helpers/ov_lpt_models/src/multiply_to_group_convolution.cpp similarity index 96% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/multiply_to_group_convolution_function.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/multiply_to_group_convolution.cpp index 1987416e1dfeef..e7cb85ce5c50bb 100644 
--- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/multiply_to_group_convolution_function.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/multiply_to_group_convolution.cpp @@ -2,10 +2,10 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "lpt_ngraph_functions/multiply_to_group_convolution_function.hpp" +#include "ov_lpt_models/multiply_to_group_convolution.hpp" -#include "ngraph_functions/subgraph_builders.hpp" -#include "lpt_ngraph_functions/common/builders.hpp" +#include "ov_models/subgraph_builders.hpp" +#include "ov_lpt_models/common/builders.hpp" #include "ov_ops/type_relaxed.hpp" namespace ngraph { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/multiply_with_one_parent_function.cpp b/src/tests/ov_helpers/ov_lpt_models/src/multiply_with_one_parent.cpp similarity index 91% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/multiply_with_one_parent_function.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/multiply_with_one_parent.cpp index cd2601fe40c869..a3e816b0e4d3f4 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/multiply_with_one_parent_function.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/multiply_with_one_parent.cpp @@ -2,10 +2,10 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "lpt_ngraph_functions/multiply_with_one_parent_function.hpp" +#include "ov_lpt_models/multiply_with_one_parent.hpp" #include -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/mvn_function.cpp b/src/tests/ov_helpers/ov_lpt_models/src/mvn.cpp similarity index 96% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/mvn_function.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/mvn.cpp index 46d2138db16b03..e75bdbc422ecb8 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/mvn_function.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/mvn.cpp @@ -2,10 +2,10 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "lpt_ngraph_functions/mvn_function.hpp" +#include "ov_lpt_models/mvn.hpp" -#include "ngraph_functions/subgraph_builders.hpp" -#include "lpt_ngraph_functions/common/builders.hpp" +#include "ov_models/subgraph_builders.hpp" +#include "ov_lpt_models/common/builders.hpp" #include "ov_ops/type_relaxed.hpp" namespace ngraph { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/normalize_dequantization_function.cpp b/src/tests/ov_helpers/ov_lpt_models/src/normalize_dequantization.cpp similarity index 89% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/normalize_dequantization_function.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/normalize_dequantization.cpp index 09a762f28eaaad..6cc3427ee3a584 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/normalize_dequantization_function.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/normalize_dequantization.cpp @@ -2,10 +2,10 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "lpt_ngraph_functions/normalize_dequantization_function.hpp" +#include "ov_lpt_models/normalize_dequantization.hpp" -#include "ngraph_functions/subgraph_builders.hpp" -#include "lpt_ngraph_functions/common/builders.hpp" +#include "ov_models/subgraph_builders.hpp" +#include "ov_lpt_models/common/builders.hpp" #include "ov_ops/type_relaxed.hpp" namespace ngraph { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/normalize_l2_function.cpp b/src/tests/ov_helpers/ov_lpt_models/src/normalize_l2.cpp similarity index 97% rename 
from src/tests/ngraph_helpers/lpt_ngraph_functions/src/normalize_l2_function.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/normalize_l2.cpp index 74aad58608aedc..23ed010af77b1d 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/normalize_l2_function.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/normalize_l2.cpp @@ -2,12 +2,12 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "lpt_ngraph_functions/normalize_l2_function.hpp" +#include "ov_lpt_models/normalize_l2.hpp" #include #include -#include "ngraph_functions/subgraph_builders.hpp" -#include "lpt_ngraph_functions/common/builders.hpp" +#include "ov_models/subgraph_builders.hpp" +#include "ov_lpt_models/common/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/pad_function.cpp b/src/tests/ov_helpers/ov_lpt_models/src/pad.cpp similarity index 95% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/pad_function.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/pad.cpp index 6aef5ad0c4118b..3a12137b5ac2e6 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/pad_function.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/pad.cpp @@ -6,11 +6,10 @@ #include #include - #include "openvino/opsets/opset12.hpp" -#include "ngraph_functions/subgraph_builders.hpp" -#include "lpt_ngraph_functions/common/builders.hpp" -#include "lpt_ngraph_functions/pad_function.hpp" +#include "ov_models/subgraph_builders.hpp" +#include "ov_lpt_models/common/builders.hpp" +#include "ov_lpt_models/pad.hpp" #include "low_precision/network_helper.hpp" namespace ngraph { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/precision_propagation_function.cpp b/src/tests/ov_helpers/ov_lpt_models/src/precision_propagation.cpp similarity index 97% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/precision_propagation_function.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/precision_propagation.cpp index 465336117fa0b1..6662fba4368eb3 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/precision_propagation_function.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/precision_propagation.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "lpt_ngraph_functions/precision_propagation_function.hpp" +#include "ov_lpt_models/precision_propagation.hpp" #include #include "ov_ops/type_relaxed.hpp" @@ -11,11 +11,11 @@ #include "low_precision/rt_info/intervals_alignment_attribute.hpp" #include "low_precision/rt_info/quantization_alignment_attribute.hpp" -#include "ngraph_functions/builders.hpp" -#include "lpt_ngraph_functions/common/builders.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" -#include "lpt_ngraph_functions/common/builders.hpp" +#include "ov_models/builders.hpp" +#include "ov_lpt_models/common/builders.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/precomp.hpp b/src/tests/ov_helpers/ov_lpt_models/src/precomp.hpp similarity index 100% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/precomp.hpp rename to src/tests/ov_helpers/ov_lpt_models/src/precomp.hpp diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/prelu_function.cpp b/src/tests/ov_helpers/ov_lpt_models/src/prelu.cpp 
similarity index 95% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/prelu_function.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/prelu.cpp index 7676427ff7a89d..0e145e1ab98601 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/prelu_function.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/prelu.cpp @@ -2,14 +2,14 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "lpt_ngraph_functions/prelu_function.hpp" +#include "ov_lpt_models/prelu.hpp" #include #include #include "ov_ops/type_relaxed.hpp" -#include "ngraph_functions/subgraph_builders.hpp" -#include "lpt_ngraph_functions/common/builders.hpp" +#include "ov_models/subgraph_builders.hpp" +#include "ov_lpt_models/common/builders.hpp" #include "low_precision/network_helper.hpp" namespace ngraph { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/recurrent_cell_function.cpp b/src/tests/ov_helpers/ov_lpt_models/src/recurrent_cell.cpp similarity index 95% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/recurrent_cell_function.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/recurrent_cell.cpp index ac4768586725fa..ecdc1ae296776f 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/recurrent_cell_function.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/recurrent_cell.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "lpt_ngraph_functions/recurrent_cell_function.hpp" +#include "ov_lpt_models/recurrent_cell.hpp" #include #include "ov_ops/type_relaxed.hpp" @@ -11,11 +11,11 @@ #include "low_precision/rt_info/intervals_alignment_attribute.hpp" #include "low_precision/rt_info/quantization_alignment_attribute.hpp" -#include "ngraph_functions/builders.hpp" -#include "lpt_ngraph_functions/common/builders.hpp" -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" -#include "lpt_ngraph_functions/common/builders.hpp" +#include "ov_models/builders.hpp" +#include "ov_lpt_models/common/builders.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/relu_function.cpp b/src/tests/ov_helpers/ov_lpt_models/src/relu.cpp similarity index 97% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/relu_function.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/relu.cpp index 7ef5f50e31b3a5..2ee9980e42f400 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/relu_function.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/relu.cpp @@ -2,13 +2,13 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "lpt_ngraph_functions/relu_function.hpp" +#include "ov_lpt_models/relu.hpp" #include #include #include "ov_ops/type_relaxed.hpp" -#include "lpt_ngraph_functions/common/builders.hpp" +#include "ov_lpt_models/common/builders.hpp" #include "low_precision/network_helper.hpp" namespace ngraph { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/reshape_function.cpp b/src/tests/ov_helpers/ov_lpt_models/src/reshape.cpp similarity index 97% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/reshape_function.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/reshape.cpp index fbf5f57286012e..551d06ca1ce803 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/reshape_function.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/reshape.cpp 
@@ -2,10 +2,10 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "lpt_ngraph_functions/reshape_function.hpp" +#include "ov_lpt_models/reshape.hpp" #include -#include "lpt_ngraph_functions/common/builders.hpp" +#include "ov_lpt_models/common/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/round_function.cpp b/src/tests/ov_helpers/ov_lpt_models/src/round.cpp similarity index 92% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/round_function.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/round.cpp index 3a55ae677f83d4..83a1c7785c2d7c 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/round_function.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/round.cpp @@ -4,10 +4,10 @@ #include -#include "lpt_ngraph_functions/round_function.hpp" -#include "lpt_ngraph_functions/common/builders.hpp" +#include "ov_lpt_models/round.hpp" +#include "ov_lpt_models/common/builders.hpp" -#include "ngraph_functions/subgraph_builders.hpp" +#include "ov_models/subgraph_builders.hpp" using namespace ov::pass::low_precision; diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/shuffle_channels_function.cpp b/src/tests/ov_helpers/ov_lpt_models/src/shuffle_channels.cpp similarity index 95% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/shuffle_channels_function.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/shuffle_channels.cpp index 602ca1b3ddd9bd..781081cbb2ddfa 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/shuffle_channels_function.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/shuffle_channels.cpp @@ -5,10 +5,10 @@ #include #include "low_precision/network_helper.hpp" -#include "lpt_ngraph_functions/common/builders.hpp" +#include "ov_lpt_models/common/builders.hpp" -#include "lpt_ngraph_functions/shuffle_channels_function.hpp" -#include "ngraph_functions/subgraph_builders.hpp" +#include "ov_lpt_models/shuffle_channels.hpp" +#include "ov_models/subgraph_builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/space_to_batch_function.cpp b/src/tests/ov_helpers/ov_lpt_models/src/space_to_batch.cpp similarity index 96% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/space_to_batch_function.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/space_to_batch.cpp index ccbba4d1f2697d..72dd3c34b7bb7e 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/space_to_batch_function.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/space_to_batch.cpp @@ -2,10 +2,10 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "lpt_ngraph_functions/space_to_batch_function.hpp" +#include "ov_lpt_models/space_to_batch.hpp" #include -#include "lpt_ngraph_functions/common/builders.hpp" +#include "ov_lpt_models/common/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/split_function.cpp b/src/tests/ov_helpers/ov_lpt_models/src/split.cpp similarity index 95% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/split_function.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/split.cpp index 2e9c8cc91e70ff..677fe3cd038b23 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/split_function.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/split.cpp @@ -6,12 +6,12 @@ #include #include -#include "lpt_ngraph_functions/split_function.hpp" +#include "ov_lpt_models/split.hpp" -#include "ngraph_functions/subgraph_builders.hpp" +#include 
"ov_models/subgraph_builders.hpp" #include "low_precision/network_helper.hpp" -#include "lpt_ngraph_functions/common/builders.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/builders.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" namespace ngraph { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/squeeze_function.cpp b/src/tests/ov_helpers/ov_lpt_models/src/squeeze.cpp similarity index 95% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/squeeze_function.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/squeeze.cpp index 931ccb55073e89..fadb68ab2afd91 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/squeeze_function.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/squeeze.cpp @@ -2,10 +2,10 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "lpt_ngraph_functions/squeeze_function.hpp" +#include "ov_lpt_models/squeeze.hpp" -#include "ngraph_functions/subgraph_builders.hpp" -#include "lpt_ngraph_functions/common/builders.hpp" +#include "ov_models/subgraph_builders.hpp" +#include "ov_lpt_models/common/builders.hpp" #include "ov_ops/type_relaxed.hpp" namespace ngraph { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/strided_slice_function.cpp b/src/tests/ov_helpers/ov_lpt_models/src/strided_slice.cpp similarity index 97% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/strided_slice_function.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/strided_slice.cpp index 4074967702b5f0..faf9c6bf8198d8 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/strided_slice_function.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/strided_slice.cpp @@ -7,9 +7,9 @@ #include "ngraph/opsets/opset1.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" -#include "ngraph_functions/subgraph_builders.hpp" -#include "lpt_ngraph_functions/strided_slice_function.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" +#include "ov_models/subgraph_builders.hpp" +#include "ov_lpt_models/strided_slice.hpp" using namespace ov::pass::low_precision; diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/subtract_function.cpp b/src/tests/ov_helpers/ov_lpt_models/src/subtract.cpp similarity index 92% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/subtract_function.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/subtract.cpp index e026ba62cdb220..99b44d0c300d2a 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/subtract_function.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/subtract.cpp @@ -2,12 +2,12 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "lpt_ngraph_functions/subtract_function.hpp" +#include "ov_lpt_models/subtract.hpp" #include "low_precision/network_helper.hpp" #include -#include "lpt_ngraph_functions/common/builders.hpp" -#include "ngraph_functions/subgraph_builders.hpp" +#include "ov_lpt_models/common/builders.hpp" +#include "ov_models/subgraph_builders.hpp" using namespace ov::pass::low_precision; diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/subtract_multiply_to_multiply_add_function.cpp b/src/tests/ov_helpers/ov_lpt_models/src/subtract_multiply_to_multiply_add.cpp similarity index 96% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/subtract_multiply_to_multiply_add_function.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/subtract_multiply_to_multiply_add.cpp index 869953f906fbff..d343e26fc86da4 100644 --- 
a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/subtract_multiply_to_multiply_add_function.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/subtract_multiply_to_multiply_add.cpp @@ -2,11 +2,11 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "lpt_ngraph_functions/subtract_multiply_to_multiply_add_function.hpp" +#include "ov_lpt_models/subtract_multiply_to_multiply_add.hpp" #include #include -#include "lpt_ngraph_functions/common/builders.hpp" +#include "ov_lpt_models/common/builders.hpp" using namespace ov::pass::low_precision; diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/transformations_after_split_function.cpp b/src/tests/ov_helpers/ov_lpt_models/src/transformations_after_split.cpp similarity index 97% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/transformations_after_split_function.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/transformations_after_split.cpp index 95f8e94eeb6536..024b7298a3db62 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/transformations_after_split_function.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/transformations_after_split.cpp @@ -2,15 +2,15 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "lpt_ngraph_functions/transformations_after_split_function.hpp" +#include "ov_lpt_models/transformations_after_split.hpp" #include #include -#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" -#include "lpt_ngraph_functions/common/builders.hpp" +#include "ov_lpt_models/common/fake_quantize_on_data.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" +#include "ov_lpt_models/common/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/transpose_function.cpp b/src/tests/ov_helpers/ov_lpt_models/src/transpose.cpp similarity index 97% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/transpose_function.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/transpose.cpp index 1110d026e6475c..2beadbfebf9d4e 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/transpose_function.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/transpose.cpp @@ -2,10 +2,10 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "lpt_ngraph_functions/transpose_function.hpp" +#include "ov_lpt_models/transpose.hpp" #include -#include "lpt_ngraph_functions/common/builders.hpp" +#include "ov_lpt_models/common/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/transpose_after_mat_mul_function.cpp b/src/tests/ov_helpers/ov_lpt_models/src/transpose_after_mat_mul.cpp similarity index 92% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/transpose_after_mat_mul_function.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/transpose_after_mat_mul.cpp index e6d987eb243df5..3cb3a97248bc73 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/transpose_after_mat_mul_function.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/transpose_after_mat_mul.cpp @@ -2,12 +2,12 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "lpt_ngraph_functions/transpose_after_mat_mul_function.hpp" +#include "ov_lpt_models/transpose_after_mat_mul.hpp" #include "low_precision/network_helper.hpp" #include -#include "lpt_ngraph_functions/common/builders.hpp" -#include "ngraph_functions/subgraph_builders.hpp" +#include "ov_lpt_models/common/builders.hpp" +#include "ov_models/subgraph_builders.hpp" 
using namespace ov::pass::low_precision; diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/unsqueeze_function.cpp b/src/tests/ov_helpers/ov_lpt_models/src/unsqueeze.cpp similarity index 96% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/unsqueeze_function.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/unsqueeze.cpp index 8aeee5a999509a..d9985fb50269a9 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/unsqueeze_function.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/unsqueeze.cpp @@ -2,10 +2,10 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "lpt_ngraph_functions/unsqueeze_function.hpp" +#include "ov_lpt_models/unsqueeze.hpp" -#include "ngraph_functions/builders.hpp" -#include "lpt_ngraph_functions/common/builders.hpp" +#include "ov_models/builders.hpp" +#include "ov_lpt_models/common/builders.hpp" #include "ov_ops/type_relaxed.hpp" namespace ngraph { diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/variadic_split_function.cpp b/src/tests/ov_helpers/ov_lpt_models/src/variadic_split.cpp similarity index 95% rename from src/tests/ngraph_helpers/lpt_ngraph_functions/src/variadic_split_function.cpp rename to src/tests/ov_helpers/ov_lpt_models/src/variadic_split.cpp index 87be23a02f33ca..396354e70f0bce 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/variadic_split_function.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/variadic_split.cpp @@ -5,11 +5,11 @@ #include #include -#include "lpt_ngraph_functions/variadic_split_function.hpp" +#include "ov_lpt_models/variadic_split.hpp" -#include "ngraph_functions/builders.hpp" -#include "lpt_ngraph_functions/common/builders.hpp" -#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "ov_models/builders.hpp" +#include "ov_lpt_models/common/builders.hpp" +#include "ov_lpt_models/common/dequantization_operations.hpp" namespace ngraph { diff --git a/src/tests/ngraph_helpers/ngraph_functions/CMakeLists.txt b/src/tests/ov_helpers/ov_models/CMakeLists.txt similarity index 95% rename from src/tests/ngraph_helpers/ngraph_functions/CMakeLists.txt rename to src/tests/ov_helpers/ov_models/CMakeLists.txt index 4b569ace8c96d7..0c7c1f48cd275b 100644 --- a/src/tests/ngraph_helpers/ngraph_functions/CMakeLists.txt +++ b/src/tests/ov_helpers/ov_models/CMakeLists.txt @@ -2,7 +2,7 @@ # SPDX-License-Identifier: Apache-2.0 # -set(TARGET_NAME ngraphFunctions) +set(TARGET_NAME ov_models) set(PUBLIC_HEADERS_DIR "${CMAKE_CURRENT_SOURCE_DIR}/include") diff --git a/src/tests/ngraph_helpers/ngraph_functions/include/ngraph_functions/builders.hpp b/src/tests/ov_helpers/ov_models/include/ov_models/builders.hpp similarity index 99% rename from src/tests/ngraph_helpers/ngraph_functions/include/ngraph_functions/builders.hpp rename to src/tests/ov_helpers/ov_models/include/ov_models/builders.hpp index 3e586168976f64..664147ae1b7eb6 100644 --- a/src/tests/ngraph_helpers/ngraph_functions/include/ngraph_functions/builders.hpp +++ b/src/tests/ov_helpers/ov_models/include/ov_models/builders.hpp @@ -17,11 +17,10 @@ #include #include #include -#include +#include // TODO: Temporary solution to fix compilation of plugin tests #include "common_test_utils/test_enums.hpp" -#include "ngraph_functions/utils/data_utils.hpp" #include "openvino/core/node.hpp" #include "openvino/core/partial_shape.hpp" #include "openvino/core/type/element_type.hpp" @@ -30,6 +29,7 @@ #include "openvino/op/depth_to_space.hpp" #include "openvino/op/detection_output.hpp" #include "openvino/op/space_to_depth.hpp" 
+#include "ov_models/utils/data_utils.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/ngraph_functions/include/ngraph_functions/pass/convert_prc.hpp b/src/tests/ov_helpers/ov_models/include/ov_models/pass/convert_prc.hpp similarity index 100% rename from src/tests/ngraph_helpers/ngraph_functions/include/ngraph_functions/pass/convert_prc.hpp rename to src/tests/ov_helpers/ov_models/include/ov_models/pass/convert_prc.hpp diff --git a/src/tests/ngraph_helpers/ngraph_functions/include/ngraph_functions/preprocess/preprocess_builders.hpp b/src/tests/ov_helpers/ov_models/include/ov_models/preprocess/preprocess_builders.hpp similarity index 100% rename from src/tests/ngraph_helpers/ngraph_functions/include/ngraph_functions/preprocess/preprocess_builders.hpp rename to src/tests/ov_helpers/ov_models/include/ov_models/preprocess/preprocess_builders.hpp diff --git a/src/tests/ngraph_helpers/ngraph_functions/include/ngraph_functions/subgraph_builders.hpp b/src/tests/ov_helpers/ov_models/include/ov_models/subgraph_builders.hpp similarity index 99% rename from src/tests/ngraph_helpers/ngraph_functions/include/ngraph_functions/subgraph_builders.hpp rename to src/tests/ov_helpers/ov_models/include/ov_models/subgraph_builders.hpp index 9b8ffbc6be870d..65e424a77fcf46 100644 --- a/src/tests/ngraph_helpers/ngraph_functions/include/ngraph_functions/subgraph_builders.hpp +++ b/src/tests/ov_helpers/ov_models/include/ov_models/subgraph_builders.hpp @@ -7,8 +7,8 @@ #include #include -#include "ngraph_functions/builders.hpp" #include "openvino/core/model.hpp" +#include "ov_models/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/ngraph_functions/include/ngraph_functions/utils/data_utils.hpp b/src/tests/ov_helpers/ov_models/include/ov_models/utils/data_utils.hpp similarity index 100% rename from src/tests/ngraph_helpers/ngraph_functions/include/ngraph_functions/utils/data_utils.hpp rename to src/tests/ov_helpers/ov_models/include/ov_models/utils/data_utils.hpp diff --git a/src/tests/ngraph_helpers/ngraph_functions/include/ngraph_functions/utils/ngraph_helpers.hpp b/src/tests/ov_helpers/ov_models/include/ov_models/utils/ov_helpers.hpp similarity index 100% rename from src/tests/ngraph_helpers/ngraph_functions/include/ngraph_functions/utils/ngraph_helpers.hpp rename to src/tests/ov_helpers/ov_models/include/ov_models/utils/ov_helpers.hpp diff --git a/src/tests/ngraph_helpers/ngraph_functions/src/activation.cpp b/src/tests/ov_helpers/ov_models/src/activation.cpp similarity index 100% rename from src/tests/ngraph_helpers/ngraph_functions/src/activation.cpp rename to src/tests/ov_helpers/ov_models/src/activation.cpp diff --git a/src/tests/ngraph_helpers/ngraph_functions/src/augru_cell.cpp b/src/tests/ov_helpers/ov_models/src/augru_cell.cpp similarity index 99% rename from src/tests/ngraph_helpers/ngraph_functions/src/augru_cell.cpp rename to src/tests/ov_helpers/ov_models/src/augru_cell.cpp index 6a1475cd5a9565..789a123de74745 100644 --- a/src/tests/ngraph_helpers/ngraph_functions/src/augru_cell.cpp +++ b/src/tests/ov_helpers/ov_models/src/augru_cell.cpp @@ -7,7 +7,7 @@ #include #include -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "ov_ops/augru_sequence.hpp" namespace ngraph { diff --git a/src/tests/ngraph_helpers/ngraph_functions/src/batch_norm.cpp b/src/tests/ov_helpers/ov_models/src/batch_norm.cpp similarity index 96% rename from src/tests/ngraph_helpers/ngraph_functions/src/batch_norm.cpp rename to 
src/tests/ov_helpers/ov_models/src/batch_norm.cpp index 945491d0118ec5..82f36800674b85 100644 --- a/src/tests/ngraph_helpers/ngraph_functions/src/batch_norm.cpp +++ b/src/tests/ov_helpers/ov_models/src/batch_norm.cpp @@ -7,7 +7,7 @@ #include #include -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/ngraph_functions/src/batch_to_space.cpp b/src/tests/ov_helpers/ov_models/src/batch_to_space.cpp similarity index 96% rename from src/tests/ngraph_helpers/ngraph_functions/src/batch_to_space.cpp rename to src/tests/ov_helpers/ov_models/src/batch_to_space.cpp index 21d683e4dd58a8..517b238465efad 100644 --- a/src/tests/ngraph_helpers/ngraph_functions/src/batch_to_space.cpp +++ b/src/tests/ov_helpers/ov_models/src/batch_to_space.cpp @@ -4,7 +4,7 @@ #include "openvino/op/batch_to_space.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/ngraph_functions/src/binary_convolution.cpp b/src/tests/ov_helpers/ov_models/src/binary_convolution.cpp similarity index 95% rename from src/tests/ngraph_helpers/ngraph_functions/src/binary_convolution.cpp rename to src/tests/ov_helpers/ov_models/src/binary_convolution.cpp index 35da03b90365db..70d4c5695d85d7 100644 --- a/src/tests/ngraph_helpers/ngraph_functions/src/binary_convolution.cpp +++ b/src/tests/ov_helpers/ov_models/src/binary_convolution.cpp @@ -7,8 +7,8 @@ #include #include -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/data_utils.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/data_utils.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/ngraph_functions/src/broadcast.cpp b/src/tests/ov_helpers/ov_models/src/broadcast.cpp similarity index 95% rename from src/tests/ngraph_helpers/ngraph_functions/src/broadcast.cpp rename to src/tests/ov_helpers/ov_models/src/broadcast.cpp index bca806b530a1ed..440fe901c5f3b4 100644 --- a/src/tests/ngraph_helpers/ngraph_functions/src/broadcast.cpp +++ b/src/tests/ov_helpers/ov_models/src/broadcast.cpp @@ -7,7 +7,7 @@ #include #include -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/ngraph_functions/src/comparison.cpp b/src/tests/ov_helpers/ov_models/src/comparison.cpp similarity index 96% rename from src/tests/ngraph_helpers/ngraph_functions/src/comparison.cpp rename to src/tests/ov_helpers/ov_models/src/comparison.cpp index 7b3464fcaa3a30..3f823a79d798dd 100644 --- a/src/tests/ngraph_helpers/ngraph_functions/src/comparison.cpp +++ b/src/tests/ov_helpers/ov_models/src/comparison.cpp @@ -5,7 +5,7 @@ #include #include "common_test_utils/test_enums.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/utils/ov_helpers.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/ngraph_functions/src/concat.cpp b/src/tests/ov_helpers/ov_models/src/concat.cpp similarity index 90% rename from src/tests/ngraph_helpers/ngraph_functions/src/concat.cpp rename to src/tests/ov_helpers/ov_models/src/concat.cpp index 5871674720cee8..2913ac211341b0 100644 --- a/src/tests/ngraph_helpers/ngraph_functions/src/concat.cpp +++ b/src/tests/ov_helpers/ov_models/src/concat.cpp @@ -7,7 +7,7 @@ #include #include -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace ngraph { 
namespace builder { diff --git a/src/tests/ngraph_helpers/ngraph_functions/src/conversion.cpp b/src/tests/ov_helpers/ov_models/src/conversion.cpp similarity index 100% rename from src/tests/ngraph_helpers/ngraph_functions/src/conversion.cpp rename to src/tests/ov_helpers/ov_models/src/conversion.cpp diff --git a/src/tests/ngraph_helpers/ngraph_functions/src/convolution.cpp b/src/tests/ov_helpers/ov_models/src/convolution.cpp similarity index 98% rename from src/tests/ngraph_helpers/ngraph_functions/src/convolution.cpp rename to src/tests/ov_helpers/ov_models/src/convolution.cpp index b19307edce9eb8..88d917a78ceaa2 100644 --- a/src/tests/ngraph_helpers/ngraph_functions/src/convolution.cpp +++ b/src/tests/ov_helpers/ov_models/src/convolution.cpp @@ -7,8 +7,8 @@ #include #include -#include "ngraph_functions/builders.hpp" #include "openvino/op/add.hpp" +#include "ov_models/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/ngraph_functions/src/convolution_backprop_data.cpp b/src/tests/ov_helpers/ov_models/src/convolution_backprop_data.cpp similarity index 99% rename from src/tests/ngraph_helpers/ngraph_functions/src/convolution_backprop_data.cpp rename to src/tests/ov_helpers/ov_models/src/convolution_backprop_data.cpp index 9211e4d741747c..0fc09a2dfc5186 100644 --- a/src/tests/ngraph_helpers/ngraph_functions/src/convolution_backprop_data.cpp +++ b/src/tests/ov_helpers/ov_models/src/convolution_backprop_data.cpp @@ -5,9 +5,9 @@ #include #include -#include "ngraph_functions/builders.hpp" #include "openvino/op/add.hpp" #include "openvino/op/convolution.hpp" +#include "ov_models/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/ngraph_functions/src/ctc_greedy_decoder.cpp b/src/tests/ov_helpers/ov_models/src/ctc_greedy_decoder.cpp similarity index 96% rename from src/tests/ngraph_helpers/ngraph_functions/src/ctc_greedy_decoder.cpp rename to src/tests/ov_helpers/ov_models/src/ctc_greedy_decoder.cpp index 23ad01603e45fc..683db1837f3232 100644 --- a/src/tests/ngraph_helpers/ngraph_functions/src/ctc_greedy_decoder.cpp +++ b/src/tests/ov_helpers/ov_models/src/ctc_greedy_decoder.cpp @@ -7,7 +7,7 @@ #include #include -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/ngraph_functions/src/ctc_greedy_decoder_seq_len.cpp b/src/tests/ov_helpers/ov_models/src/ctc_greedy_decoder_seq_len.cpp similarity index 98% rename from src/tests/ngraph_helpers/ngraph_functions/src/ctc_greedy_decoder_seq_len.cpp rename to src/tests/ov_helpers/ov_models/src/ctc_greedy_decoder_seq_len.cpp index bd3eaaf4804772..f77ce0b3418ada 100644 --- a/src/tests/ngraph_helpers/ngraph_functions/src/ctc_greedy_decoder_seq_len.cpp +++ b/src/tests/ov_helpers/ov_models/src/ctc_greedy_decoder_seq_len.cpp @@ -7,7 +7,7 @@ #include #include -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/ngraph_functions/src/ctc_loss.cpp b/src/tests/ov_helpers/ov_models/src/ctc_loss.cpp similarity index 98% rename from src/tests/ngraph_helpers/ngraph_functions/src/ctc_loss.cpp rename to src/tests/ov_helpers/ov_models/src/ctc_loss.cpp index 681ccda9a42eda..1ef1d3d7f27d2e 100644 --- a/src/tests/ngraph_helpers/ngraph_functions/src/ctc_loss.cpp +++ b/src/tests/ov_helpers/ov_models/src/ctc_loss.cpp @@ -7,7 +7,7 @@ #include #include -#include "ngraph_functions/builders.hpp" +#include 
"ov_models/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/ngraph_functions/src/cum_sum.cpp b/src/tests/ov_helpers/ov_models/src/cum_sum.cpp similarity index 92% rename from src/tests/ngraph_helpers/ngraph_functions/src/cum_sum.cpp rename to src/tests/ov_helpers/ov_models/src/cum_sum.cpp index 841d6bb566de1b..73975d6d676d05 100644 --- a/src/tests/ngraph_helpers/ngraph_functions/src/cum_sum.cpp +++ b/src/tests/ov_helpers/ov_models/src/cum_sum.cpp @@ -4,7 +4,7 @@ #include "openvino/op/cum_sum.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/ngraph_functions/src/depth_to_space.cpp b/src/tests/ov_helpers/ov_models/src/depth_to_space.cpp similarity index 92% rename from src/tests/ngraph_helpers/ngraph_functions/src/depth_to_space.cpp rename to src/tests/ov_helpers/ov_models/src/depth_to_space.cpp index 0a6ce5150ce32e..39dd90e7b970de 100644 --- a/src/tests/ngraph_helpers/ngraph_functions/src/depth_to_space.cpp +++ b/src/tests/ov_helpers/ov_models/src/depth_to_space.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/ngraph_functions/src/detection_output.cpp b/src/tests/ov_helpers/ov_models/src/detection_output.cpp similarity index 96% rename from src/tests/ngraph_helpers/ngraph_functions/src/detection_output.cpp rename to src/tests/ov_helpers/ov_models/src/detection_output.cpp index bed5bd320926ff..266c8ae2dd6142 100644 --- a/src/tests/ngraph_helpers/ngraph_functions/src/detection_output.cpp +++ b/src/tests/ov_helpers/ov_models/src/detection_output.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/ngraph_functions/src/dft.cpp b/src/tests/ov_helpers/ov_models/src/dft.cpp similarity index 97% rename from src/tests/ngraph_helpers/ngraph_functions/src/dft.cpp rename to src/tests/ov_helpers/ov_models/src/dft.cpp index 371706ee3cf9af..ca2dd0479621d8 100644 --- a/src/tests/ngraph_helpers/ngraph_functions/src/dft.cpp +++ b/src/tests/ov_helpers/ov_models/src/dft.cpp @@ -7,8 +7,8 @@ #include #include -#include "ngraph_functions/builders.hpp" #include "openvino/op/idft.hpp" +#include "ov_models/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/ngraph_functions/src/einsum.cpp b/src/tests/ov_helpers/ov_models/src/einsum.cpp similarity index 91% rename from src/tests/ngraph_helpers/ngraph_functions/src/einsum.cpp rename to src/tests/ov_helpers/ov_models/src/einsum.cpp index 7c871acb81b900..61a75033079a3c 100644 --- a/src/tests/ngraph_helpers/ngraph_functions/src/einsum.cpp +++ b/src/tests/ov_helpers/ov_models/src/einsum.cpp @@ -7,7 +7,7 @@ #include #include -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/ngraph_functions/src/eltwise.cpp b/src/tests/ov_helpers/ov_models/src/eltwise.cpp similarity index 96% rename from src/tests/ngraph_helpers/ngraph_functions/src/eltwise.cpp rename to src/tests/ov_helpers/ov_models/src/eltwise.cpp index 7664f0ccb34876..4932332e0773fb 100644 --- a/src/tests/ngraph_helpers/ngraph_functions/src/eltwise.cpp +++ b/src/tests/ov_helpers/ov_models/src/eltwise.cpp @@ 
-5,7 +5,7 @@ #include #include "common_test_utils/test_enums.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/utils/ov_helpers.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/ngraph_functions/src/embedding_bag_offsets_sum.cpp b/src/tests/ov_helpers/ov_models/src/embedding_bag_offsets_sum.cpp similarity index 98% rename from src/tests/ngraph_helpers/ngraph_functions/src/embedding_bag_offsets_sum.cpp rename to src/tests/ov_helpers/ov_models/src/embedding_bag_offsets_sum.cpp index dadb115121ce4e..e11251a7023c92 100644 --- a/src/tests/ngraph_helpers/ngraph_functions/src/embedding_bag_offsets_sum.cpp +++ b/src/tests/ov_helpers/ov_models/src/embedding_bag_offsets_sum.cpp @@ -5,8 +5,8 @@ #include #include -#include "ngraph_functions/builders.hpp" #include "openvino/op/embeddingbag_offsets_sum.hpp" +#include "ov_models/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/ngraph_functions/src/embedding_bag_packed_sum.cpp b/src/tests/ov_helpers/ov_models/src/embedding_bag_packed_sum.cpp similarity index 97% rename from src/tests/ngraph_helpers/ngraph_functions/src/embedding_bag_packed_sum.cpp rename to src/tests/ov_helpers/ov_models/src/embedding_bag_packed_sum.cpp index 8db6c22827b287..0004680b428d9a 100644 --- a/src/tests/ngraph_helpers/ngraph_functions/src/embedding_bag_packed_sum.cpp +++ b/src/tests/ov_helpers/ov_models/src/embedding_bag_packed_sum.cpp @@ -5,8 +5,8 @@ #include #include -#include "ngraph_functions/builders.hpp" #include "openvino/op/embeddingbag_packedsum.hpp" +#include "ov_models/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/ngraph_functions/src/embedding_segments_sum.cpp b/src/tests/ov_helpers/ov_models/src/embedding_segments_sum.cpp similarity index 98% rename from src/tests/ngraph_helpers/ngraph_functions/src/embedding_segments_sum.cpp rename to src/tests/ov_helpers/ov_models/src/embedding_segments_sum.cpp index 79f56bc086bb35..37a0a98c2453d0 100644 --- a/src/tests/ngraph_helpers/ngraph_functions/src/embedding_segments_sum.cpp +++ b/src/tests/ov_helpers/ov_models/src/embedding_segments_sum.cpp @@ -7,7 +7,7 @@ #include #include -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/ngraph_functions/src/fake_quantize.cpp b/src/tests/ov_helpers/ov_models/src/fake_quantize.cpp similarity index 99% rename from src/tests/ngraph_helpers/ngraph_functions/src/fake_quantize.cpp rename to src/tests/ov_helpers/ov_models/src/fake_quantize.cpp index 5edd7c900301c4..fc462e6ea27865 100644 --- a/src/tests/ngraph_helpers/ngraph_functions/src/fake_quantize.cpp +++ b/src/tests/ov_helpers/ov_models/src/fake_quantize.cpp @@ -7,7 +7,7 @@ #include #include -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/ngraph_functions/src/fully_connected.cpp b/src/tests/ov_helpers/ov_models/src/fully_connected.cpp similarity index 97% rename from src/tests/ngraph_helpers/ngraph_functions/src/fully_connected.cpp rename to src/tests/ov_helpers/ov_models/src/fully_connected.cpp index f3eae695db7669..a8bc4e207bf92f 100644 --- a/src/tests/ngraph_helpers/ngraph_functions/src/fully_connected.cpp +++ b/src/tests/ov_helpers/ov_models/src/fully_connected.cpp @@ -5,9 +5,9 @@ #include #include -#include "ngraph_functions/builders.hpp" #include "openvino/op/add.hpp" #include 
"openvino/op/matmul.hpp" +#include "ov_models/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/ngraph_functions/src/gather_elements.cpp b/src/tests/ov_helpers/ov_models/src/gather_elements.cpp similarity index 96% rename from src/tests/ngraph_helpers/ngraph_functions/src/gather_elements.cpp rename to src/tests/ov_helpers/ov_models/src/gather_elements.cpp index 4eb02d8efd345c..e5407431236236 100644 --- a/src/tests/ngraph_helpers/ngraph_functions/src/gather_elements.cpp +++ b/src/tests/ov_helpers/ov_models/src/gather_elements.cpp @@ -8,7 +8,7 @@ #include #include -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/ngraph_functions/src/gather_nd.cpp b/src/tests/ov_helpers/ov_models/src/gather_nd.cpp similarity index 98% rename from src/tests/ngraph_helpers/ngraph_functions/src/gather_nd.cpp rename to src/tests/ov_helpers/ov_models/src/gather_nd.cpp index 1168e0e2e9d295..c44f8e640c4fc9 100644 --- a/src/tests/ngraph_helpers/ngraph_functions/src/gather_nd.cpp +++ b/src/tests/ov_helpers/ov_models/src/gather_nd.cpp @@ -8,7 +8,7 @@ #include #include -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/ngraph_functions/src/group_convolution.cpp b/src/tests/ov_helpers/ov_models/src/group_convolution.cpp similarity index 98% rename from src/tests/ngraph_helpers/ngraph_functions/src/group_convolution.cpp rename to src/tests/ov_helpers/ov_models/src/group_convolution.cpp index 303aae03df9973..d09820a0b80801 100644 --- a/src/tests/ngraph_helpers/ngraph_functions/src/group_convolution.cpp +++ b/src/tests/ov_helpers/ov_models/src/group_convolution.cpp @@ -5,9 +5,9 @@ #include #include -#include "ngraph_functions/builders.hpp" #include "openvino/op/add.hpp" #include "openvino/op/group_conv.hpp" +#include "ov_models/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/ngraph_functions/src/group_convolution_backprop_data.cpp b/src/tests/ov_helpers/ov_models/src/group_convolution_backprop_data.cpp similarity index 99% rename from src/tests/ngraph_helpers/ngraph_functions/src/group_convolution_backprop_data.cpp rename to src/tests/ov_helpers/ov_models/src/group_convolution_backprop_data.cpp index a686883211302f..992750eb2c41b0 100644 --- a/src/tests/ngraph_helpers/ngraph_functions/src/group_convolution_backprop_data.cpp +++ b/src/tests/ov_helpers/ov_models/src/group_convolution_backprop_data.cpp @@ -5,9 +5,9 @@ #include #include -#include "ngraph_functions/builders.hpp" #include "openvino/op/add.hpp" #include "openvino/op/group_conv.hpp" +#include "ov_models/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/ngraph_functions/src/gru_cell.cpp b/src/tests/ov_helpers/ov_models/src/gru_cell.cpp similarity index 99% rename from src/tests/ngraph_helpers/ngraph_functions/src/gru_cell.cpp rename to src/tests/ov_helpers/ov_models/src/gru_cell.cpp index baacb1f629fae0..a99de5b892871c 100644 --- a/src/tests/ngraph_helpers/ngraph_functions/src/gru_cell.cpp +++ b/src/tests/ov_helpers/ov_models/src/gru_cell.cpp @@ -7,8 +7,8 @@ #include #include -#include "ngraph_functions/builders.hpp" #include "openvino/op/gru_sequence.hpp" +#include "ov_models/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/ngraph_functions/src/input_layer.cpp 
b/src/tests/ov_helpers/ov_models/src/input_layer.cpp similarity index 94% rename from src/tests/ngraph_helpers/ngraph_functions/src/input_layer.cpp rename to src/tests/ov_helpers/ov_models/src/input_layer.cpp index a95dd4b5021cf0..5d8b8d417f3812 100644 --- a/src/tests/ngraph_helpers/ngraph_functions/src/input_layer.cpp +++ b/src/tests/ov_helpers/ov_models/src/input_layer.cpp @@ -5,8 +5,8 @@ #include #include -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/ngraph_functions/src/logical.cpp b/src/tests/ov_helpers/ov_models/src/logical.cpp similarity index 97% rename from src/tests/ngraph_helpers/ngraph_functions/src/logical.cpp rename to src/tests/ov_helpers/ov_models/src/logical.cpp index cf59d6b240d5b0..e8fb6a56c016e3 100644 --- a/src/tests/ngraph_helpers/ngraph_functions/src/logical.cpp +++ b/src/tests/ov_helpers/ov_models/src/logical.cpp @@ -5,7 +5,7 @@ #include #include "common_test_utils/test_enums.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ov_models/utils/ov_helpers.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/ngraph_functions/src/lstm_cell.cpp b/src/tests/ov_helpers/ov_models/src/lstm_cell.cpp similarity index 99% rename from src/tests/ngraph_helpers/ngraph_functions/src/lstm_cell.cpp rename to src/tests/ov_helpers/ov_models/src/lstm_cell.cpp index 7318fa4a957819..4e09ddeceb4a9d 100644 --- a/src/tests/ngraph_helpers/ngraph_functions/src/lstm_cell.cpp +++ b/src/tests/ov_helpers/ov_models/src/lstm_cell.cpp @@ -5,8 +5,8 @@ #include #include -#include "ngraph_functions/builders.hpp" #include "openvino/op/lstm_sequence.hpp" +#include "ov_models/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/ngraph_functions/src/mat_mul.cpp b/src/tests/ov_helpers/ov_models/src/mat_mul.cpp similarity index 90% rename from src/tests/ngraph_helpers/ngraph_functions/src/mat_mul.cpp rename to src/tests/ov_helpers/ov_models/src/mat_mul.cpp index 2576ffe0fd7278..461360c3cfac27 100644 --- a/src/tests/ngraph_helpers/ngraph_functions/src/mat_mul.cpp +++ b/src/tests/ov_helpers/ov_models/src/mat_mul.cpp @@ -2,8 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/builders.hpp" #include "openvino/op/matmul.hpp" +#include "ov_models/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/ngraph_functions/src/minimum_maximum.cpp b/src/tests/ov_helpers/ov_models/src/minimum_maximum.cpp similarity index 95% rename from src/tests/ngraph_helpers/ngraph_functions/src/minimum_maximum.cpp rename to src/tests/ov_helpers/ov_models/src/minimum_maximum.cpp index f4f1ae8604c9c9..e14274953ac76a 100644 --- a/src/tests/ngraph_helpers/ngraph_functions/src/minimum_maximum.cpp +++ b/src/tests/ov_helpers/ov_models/src/minimum_maximum.cpp @@ -2,9 +2,9 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/builders.hpp" #include "openvino/op/maximum.hpp" #include "openvino/op/minimum.hpp" +#include "ov_models/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/ngraph_functions/src/mvn.cpp b/src/tests/ov_helpers/ov_models/src/mvn.cpp similarity index 97% rename from src/tests/ngraph_helpers/ngraph_functions/src/mvn.cpp rename to src/tests/ov_helpers/ov_models/src/mvn.cpp index 1aa05c82449e9b..cbbdb32c4fa95c 100644 --- 
a/src/tests/ngraph_helpers/ngraph_functions/src/mvn.cpp +++ b/src/tests/ov_helpers/ov_models/src/mvn.cpp @@ -4,7 +4,7 @@ #include "openvino/op/mvn.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/ngraph_functions/src/non_max_suppression.cpp b/src/tests/ov_helpers/ov_models/src/non_max_suppression.cpp similarity index 99% rename from src/tests/ngraph_helpers/ngraph_functions/src/non_max_suppression.cpp rename to src/tests/ov_helpers/ov_models/src/non_max_suppression.cpp index d8227f79be0ad4..be129acf32864b 100644 --- a/src/tests/ngraph_helpers/ngraph_functions/src/non_max_suppression.cpp +++ b/src/tests/ov_helpers/ov_models/src/non_max_suppression.cpp @@ -4,7 +4,7 @@ #include "openvino/op/non_max_suppression.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/ngraph_functions/src/normalize_l2.cpp b/src/tests/ov_helpers/ov_models/src/normalize_l2.cpp similarity index 94% rename from src/tests/ngraph_helpers/ngraph_functions/src/normalize_l2.cpp rename to src/tests/ov_helpers/ov_models/src/normalize_l2.cpp index e74d16c29e0018..278ec5b4dd7d50 100644 --- a/src/tests/ngraph_helpers/ngraph_functions/src/normalize_l2.cpp +++ b/src/tests/ov_helpers/ov_models/src/normalize_l2.cpp @@ -7,7 +7,7 @@ #include #include -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/ngraph_functions/src/one_hot.cpp b/src/tests/ov_helpers/ov_models/src/one_hot.cpp similarity index 96% rename from src/tests/ngraph_helpers/ngraph_functions/src/one_hot.cpp rename to src/tests/ov_helpers/ov_models/src/one_hot.cpp index a442587a85612d..49f5a2f911556a 100644 --- a/src/tests/ngraph_helpers/ngraph_functions/src/one_hot.cpp +++ b/src/tests/ov_helpers/ov_models/src/one_hot.cpp @@ -6,7 +6,7 @@ #include -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/ngraph_functions/src/pad.cpp b/src/tests/ov_helpers/ov_models/src/pad.cpp similarity index 98% rename from src/tests/ngraph_helpers/ngraph_functions/src/pad.cpp rename to src/tests/ov_helpers/ov_models/src/pad.cpp index cd7fe81b2f208f..6a4791ca84661b 100644 --- a/src/tests/ngraph_helpers/ngraph_functions/src/pad.cpp +++ b/src/tests/ov_helpers/ov_models/src/pad.cpp @@ -7,7 +7,7 @@ #include #include -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/ngraph_functions/src/pooling.cpp b/src/tests/ov_helpers/ov_models/src/pooling.cpp similarity index 98% rename from src/tests/ngraph_helpers/ngraph_functions/src/pooling.cpp rename to src/tests/ov_helpers/ov_models/src/pooling.cpp index 7449b556eebf1e..7ba26974a3213c 100644 --- a/src/tests/ngraph_helpers/ngraph_functions/src/pooling.cpp +++ b/src/tests/ov_helpers/ov_models/src/pooling.cpp @@ -5,9 +5,9 @@ #include #include -#include "ngraph_functions/builders.hpp" #include "openvino/op/avg_pool.hpp" #include "openvino/op/max_pool.hpp" +#include "ov_models/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/ngraph_functions/src/precomp.hpp b/src/tests/ov_helpers/ov_models/src/precomp.hpp similarity index 100% rename from src/tests/ngraph_helpers/ngraph_functions/src/precomp.hpp rename to 
src/tests/ov_helpers/ov_models/src/precomp.hpp diff --git a/src/tests/ngraph_helpers/ngraph_functions/src/proposal.cpp b/src/tests/ov_helpers/ov_models/src/proposal.cpp similarity index 98% rename from src/tests/ngraph_helpers/ngraph_functions/src/proposal.cpp rename to src/tests/ov_helpers/ov_models/src/proposal.cpp index 809a3d9781c3a8..275e15a813ba31 100644 --- a/src/tests/ngraph_helpers/ngraph_functions/src/proposal.cpp +++ b/src/tests/ov_helpers/ov_models/src/proposal.cpp @@ -7,7 +7,7 @@ #include #include -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/ngraph_functions/src/rdft.cpp b/src/tests/ov_helpers/ov_models/src/rdft.cpp similarity index 100% rename from src/tests/ngraph_helpers/ngraph_functions/src/rdft.cpp rename to src/tests/ov_helpers/ov_models/src/rdft.cpp diff --git a/src/tests/ngraph_helpers/ngraph_functions/src/reduce.cpp b/src/tests/ov_helpers/ov_models/src/reduce.cpp similarity index 100% rename from src/tests/ngraph_helpers/ngraph_functions/src/reduce.cpp rename to src/tests/ov_helpers/ov_models/src/reduce.cpp diff --git a/src/tests/ngraph_helpers/ngraph_functions/src/rnn_cell.cpp b/src/tests/ov_helpers/ov_models/src/rnn_cell.cpp similarity index 99% rename from src/tests/ngraph_helpers/ngraph_functions/src/rnn_cell.cpp rename to src/tests/ov_helpers/ov_models/src/rnn_cell.cpp index 523ba80c30e35f..c0231225dc0ffe 100644 --- a/src/tests/ngraph_helpers/ngraph_functions/src/rnn_cell.cpp +++ b/src/tests/ov_helpers/ov_models/src/rnn_cell.cpp @@ -7,8 +7,8 @@ #include #include -#include "ngraph_functions/builders.hpp" #include "openvino/op/rnn_sequence.hpp" +#include "ov_models/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/ngraph_functions/src/roi_pooling.cpp b/src/tests/ov_helpers/ov_models/src/roi_pooling.cpp similarity index 100% rename from src/tests/ngraph_helpers/ngraph_functions/src/roi_pooling.cpp rename to src/tests/ov_helpers/ov_models/src/roi_pooling.cpp diff --git a/src/tests/ngraph_helpers/ngraph_functions/src/roll.cpp b/src/tests/ov_helpers/ov_models/src/roll.cpp similarity index 100% rename from src/tests/ngraph_helpers/ngraph_functions/src/roll.cpp rename to src/tests/ov_helpers/ov_models/src/roll.cpp diff --git a/src/tests/ngraph_helpers/ngraph_functions/src/scatter_ND_update.cpp b/src/tests/ov_helpers/ov_models/src/scatter_ND_update.cpp similarity index 100% rename from src/tests/ngraph_helpers/ngraph_functions/src/scatter_ND_update.cpp rename to src/tests/ov_helpers/ov_models/src/scatter_ND_update.cpp diff --git a/src/tests/ngraph_helpers/ngraph_functions/src/scatter_elements_update.cpp b/src/tests/ov_helpers/ov_models/src/scatter_elements_update.cpp similarity index 96% rename from src/tests/ngraph_helpers/ngraph_functions/src/scatter_elements_update.cpp rename to src/tests/ov_helpers/ov_models/src/scatter_elements_update.cpp index 16427f0e04644e..72a086d8308963 100644 --- a/src/tests/ngraph_helpers/ngraph_functions/src/scatter_elements_update.cpp +++ b/src/tests/ov_helpers/ov_models/src/scatter_elements_update.cpp @@ -4,8 +4,8 @@ #include "openvino/op/scatter_elements_update.hpp" -#include "ngraph_functions/builders.hpp" #include "openvino/op/constant.hpp" +#include "ov_models/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/ngraph_functions/src/scatter_update.cpp b/src/tests/ov_helpers/ov_models/src/scatter_update.cpp similarity index 96% rename from 
src/tests/ngraph_helpers/ngraph_functions/src/scatter_update.cpp rename to src/tests/ov_helpers/ov_models/src/scatter_update.cpp index 94d8a999913b69..99be052475520f 100644 --- a/src/tests/ngraph_helpers/ngraph_functions/src/scatter_update.cpp +++ b/src/tests/ov_helpers/ov_models/src/scatter_update.cpp @@ -4,8 +4,8 @@ #include "openvino/op/scatter_update.hpp" -#include "ngraph_functions/builders.hpp" #include "openvino/op/constant.hpp" +#include "ov_models/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/ngraph_functions/src/select.cpp b/src/tests/ov_helpers/ov_models/src/select.cpp similarity index 92% rename from src/tests/ngraph_helpers/ngraph_functions/src/select.cpp rename to src/tests/ov_helpers/ov_models/src/select.cpp index d31294ed74f998..06e252dc4443fe 100644 --- a/src/tests/ngraph_helpers/ngraph_functions/src/select.cpp +++ b/src/tests/ov_helpers/ov_models/src/select.cpp @@ -4,7 +4,7 @@ #include "openvino/op/select.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/ngraph_functions/src/shuffle_channels.cpp b/src/tests/ov_helpers/ov_models/src/shuffle_channels.cpp similarity index 91% rename from src/tests/ngraph_helpers/ngraph_functions/src/shuffle_channels.cpp rename to src/tests/ov_helpers/ov_models/src/shuffle_channels.cpp index 4cd4bd744e543e..99a4694a680169 100644 --- a/src/tests/ngraph_helpers/ngraph_functions/src/shuffle_channels.cpp +++ b/src/tests/ov_helpers/ov_models/src/shuffle_channels.cpp @@ -7,7 +7,7 @@ #include #include -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/ngraph_functions/src/space_to_batch.cpp b/src/tests/ov_helpers/ov_models/src/space_to_batch.cpp similarity index 96% rename from src/tests/ngraph_helpers/ngraph_functions/src/space_to_batch.cpp rename to src/tests/ov_helpers/ov_models/src/space_to_batch.cpp index 5a8436576941c3..d004ce01433e2f 100644 --- a/src/tests/ngraph_helpers/ngraph_functions/src/space_to_batch.cpp +++ b/src/tests/ov_helpers/ov_models/src/space_to_batch.cpp @@ -4,7 +4,7 @@ #include "openvino/op/space_to_batch.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/ngraph_functions/src/space_to_depth.cpp b/src/tests/ov_helpers/ov_models/src/space_to_depth.cpp similarity index 92% rename from src/tests/ngraph_helpers/ngraph_functions/src/space_to_depth.cpp rename to src/tests/ov_helpers/ov_models/src/space_to_depth.cpp index f338e7246b2dfd..1b8a729f815fff 100644 --- a/src/tests/ngraph_helpers/ngraph_functions/src/space_to_depth.cpp +++ b/src/tests/ov_helpers/ov_models/src/space_to_depth.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/ngraph_functions/src/split.cpp b/src/tests/ov_helpers/ov_models/src/split.cpp similarity index 94% rename from src/tests/ngraph_helpers/ngraph_functions/src/split.cpp rename to src/tests/ov_helpers/ov_models/src/split.cpp index c19f8819822de6..01d2748e841408 100644 --- a/src/tests/ngraph_helpers/ngraph_functions/src/split.cpp +++ b/src/tests/ov_helpers/ov_models/src/split.cpp @@ -7,7 +7,7 @@ #include #include -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" 
namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/ngraph_functions/src/squeeze_unsqueeze.cpp b/src/tests/ov_helpers/ov_models/src/squeeze_unsqueeze.cpp similarity index 96% rename from src/tests/ngraph_helpers/ngraph_functions/src/squeeze_unsqueeze.cpp rename to src/tests/ov_helpers/ov_models/src/squeeze_unsqueeze.cpp index 86a6dbab0865d8..0bcb81712493f4 100644 --- a/src/tests/ngraph_helpers/ngraph_functions/src/squeeze_unsqueeze.cpp +++ b/src/tests/ov_helpers/ov_models/src/squeeze_unsqueeze.cpp @@ -7,8 +7,8 @@ #include #include -#include "ngraph_functions/builders.hpp" #include "openvino/op/unsqueeze.hpp" +#include "ov_models/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/ngraph_functions/src/strided_slice.cpp b/src/tests/ov_helpers/ov_models/src/strided_slice.cpp similarity index 99% rename from src/tests/ngraph_helpers/ngraph_functions/src/strided_slice.cpp rename to src/tests/ov_helpers/ov_models/src/strided_slice.cpp index 6bd2c61892f6de..2c762aab2e7ed9 100644 --- a/src/tests/ngraph_helpers/ngraph_functions/src/strided_slice.cpp +++ b/src/tests/ov_helpers/ov_models/src/strided_slice.cpp @@ -4,8 +4,8 @@ #include "openvino/op/strided_slice.hpp" -#include "ngraph_functions/builders.hpp" #include "openvino/op/slice.hpp" +#include "ov_models/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/ngraph_functions/src/subgraph_builders.cpp b/src/tests/ov_helpers/ov_models/src/subgraph_builders.cpp similarity index 99% rename from src/tests/ngraph_helpers/ngraph_functions/src/subgraph_builders.cpp rename to src/tests/ov_helpers/ov_models/src/subgraph_builders.cpp index 337c9ee21d6dcc..a231c2e4b59028 100644 --- a/src/tests/ngraph_helpers/ngraph_functions/src/subgraph_builders.cpp +++ b/src/tests/ov_helpers/ov_models/src/subgraph_builders.cpp @@ -2,9 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/subgraph_builders.hpp" +#include "ov_models/subgraph_builders.hpp" -#include "ngraph_functions/builders.hpp" #include "openvino/op/add.hpp" #include "openvino/op/assign.hpp" #include "openvino/op/concat.hpp" @@ -26,6 +25,7 @@ #include "openvino/op/tensor_iterator.hpp" #include "openvino/op/tile.hpp" #include "openvino/op/transpose.hpp" +#include "ov_models/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/ngraph_functions/src/tile.cpp b/src/tests/ov_helpers/ov_models/src/tile.cpp similarity index 92% rename from src/tests/ngraph_helpers/ngraph_functions/src/tile.cpp rename to src/tests/ov_helpers/ov_models/src/tile.cpp index b4fead0ed53c98..1277ea2f048207 100644 --- a/src/tests/ngraph_helpers/ngraph_functions/src/tile.cpp +++ b/src/tests/ov_helpers/ov_models/src/tile.cpp @@ -4,7 +4,7 @@ #include "openvino/op/tile.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/ngraph_functions/src/utils/ngraph_helpers.cpp b/src/tests/ov_helpers/ov_models/src/utils/ov_helpers.cpp similarity index 99% rename from src/tests/ngraph_helpers/ngraph_functions/src/utils/ngraph_helpers.cpp rename to src/tests/ov_helpers/ov_models/src/utils/ov_helpers.cpp index 6237536c9436e8..ab19b01563ab0c 100644 --- a/src/tests/ngraph_helpers/ngraph_functions/src/utils/ngraph_helpers.cpp +++ b/src/tests/ov_helpers/ov_models/src/utils/ov_helpers.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/utils/ngraph_helpers.hpp" 
+#include "ov_models/utils/ov_helpers.hpp" #include #include diff --git a/src/tests/ngraph_helpers/ngraph_functions/src/variadic_split.cpp b/src/tests/ov_helpers/ov_models/src/variadic_split.cpp similarity index 95% rename from src/tests/ngraph_helpers/ngraph_functions/src/variadic_split.cpp rename to src/tests/ov_helpers/ov_models/src/variadic_split.cpp index 03e819379922ff..236f9fc872fc06 100644 --- a/src/tests/ngraph_helpers/ngraph_functions/src/variadic_split.cpp +++ b/src/tests/ov_helpers/ov_models/src/variadic_split.cpp @@ -7,7 +7,7 @@ #include #include -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ngraph_helpers/snippets_ngraph_functions/CMakeLists.txt b/src/tests/ov_helpers/ov_snippets_models/CMakeLists.txt similarity index 92% rename from src/tests/ngraph_helpers/snippets_ngraph_functions/CMakeLists.txt rename to src/tests/ov_helpers/ov_snippets_models/CMakeLists.txt index 0c88fe6b4bcf6a..872a928e2e0509 100644 --- a/src/tests/ngraph_helpers/snippets_ngraph_functions/CMakeLists.txt +++ b/src/tests/ov_helpers/ov_snippets_models/CMakeLists.txt @@ -2,7 +2,7 @@ # SPDX-License-Identifier: Apache-2.0 # -set(TARGET_NAME snippetsNgraphFunctions) +set(TARGET_NAME ov_snippets_models) set(PUBLIC_HEADERS_DIR "${CMAKE_CURRENT_SOURCE_DIR}/include") set(SNIPPETS_INCLUDES "$/include") @@ -25,7 +25,7 @@ addIeTarget( openvino::runtime::dev common_test_utils openvino::snippets - lptNgraphFunctions + ov_lpt_models ADD_CPPLINT DEVELOPER_PACKAGE tests diff --git a/src/tests/ngraph_helpers/snippets_ngraph_functions/include/fake_quantize_function.hpp b/src/tests/ov_helpers/ov_snippets_models/include/fake_quantize_helper.hpp similarity index 100% rename from src/tests/ngraph_helpers/snippets_ngraph_functions/include/fake_quantize_function.hpp rename to src/tests/ov_helpers/ov_snippets_models/include/fake_quantize_helper.hpp diff --git a/src/tests/ngraph_helpers/snippets_ngraph_functions/include/function_helper.hpp b/src/tests/ov_helpers/ov_snippets_models/include/function_helper.hpp similarity index 100% rename from src/tests/ngraph_helpers/snippets_ngraph_functions/include/function_helper.hpp rename to src/tests/ov_helpers/ov_snippets_models/include/function_helper.hpp diff --git a/src/tests/ngraph_helpers/snippets_ngraph_functions/include/precision_propagation_function.hpp b/src/tests/ov_helpers/ov_snippets_models/include/precision_propagation.hpp similarity index 100% rename from src/tests/ngraph_helpers/snippets_ngraph_functions/include/precision_propagation_function.hpp rename to src/tests/ov_helpers/ov_snippets_models/include/precision_propagation.hpp diff --git a/src/tests/ngraph_helpers/snippets_ngraph_functions/include/precision_propagation_convertion_function.hpp b/src/tests/ov_helpers/ov_snippets_models/include/precision_propagation_convertion.hpp similarity index 100% rename from src/tests/ngraph_helpers/snippets_ngraph_functions/include/precision_propagation_convertion_function.hpp rename to src/tests/ov_helpers/ov_snippets_models/include/precision_propagation_convertion.hpp diff --git a/src/tests/ngraph_helpers/snippets_ngraph_functions/include/snippets_helpers.hpp b/src/tests/ov_helpers/ov_snippets_models/include/snippets_helpers.hpp similarity index 100% rename from src/tests/ngraph_helpers/snippets_ngraph_functions/include/snippets_helpers.hpp rename to src/tests/ov_helpers/ov_snippets_models/include/snippets_helpers.hpp diff --git 
a/src/tests/ngraph_helpers/snippets_ngraph_functions/include/subgraph_converts.hpp b/src/tests/ov_helpers/ov_snippets_models/include/subgraph_converts.hpp similarity index 100% rename from src/tests/ngraph_helpers/snippets_ngraph_functions/include/subgraph_converts.hpp rename to src/tests/ov_helpers/ov_snippets_models/include/subgraph_converts.hpp diff --git a/src/tests/ngraph_helpers/snippets_ngraph_functions/include/subgraph_customizable.hpp b/src/tests/ov_helpers/ov_snippets_models/include/subgraph_customizable.hpp similarity index 100% rename from src/tests/ngraph_helpers/snippets_ngraph_functions/include/subgraph_customizable.hpp rename to src/tests/ov_helpers/ov_snippets_models/include/subgraph_customizable.hpp diff --git a/src/tests/ngraph_helpers/snippets_ngraph_functions/include/subgraph_fq.hpp b/src/tests/ov_helpers/ov_snippets_models/include/subgraph_fq.hpp similarity index 100% rename from src/tests/ngraph_helpers/snippets_ngraph_functions/include/subgraph_fq.hpp rename to src/tests/ov_helpers/ov_snippets_models/include/subgraph_fq.hpp diff --git a/src/tests/ngraph_helpers/snippets_ngraph_functions/include/subgraph_lowered.hpp b/src/tests/ov_helpers/ov_snippets_models/include/subgraph_lowered.hpp similarity index 100% rename from src/tests/ngraph_helpers/snippets_ngraph_functions/include/subgraph_lowered.hpp rename to src/tests/ov_helpers/ov_snippets_models/include/subgraph_lowered.hpp diff --git a/src/tests/ngraph_helpers/snippets_ngraph_functions/include/subgraph_matmul.hpp b/src/tests/ov_helpers/ov_snippets_models/include/subgraph_matmul.hpp similarity index 100% rename from src/tests/ngraph_helpers/snippets_ngraph_functions/include/subgraph_matmul.hpp rename to src/tests/ov_helpers/ov_snippets_models/include/subgraph_matmul.hpp diff --git a/src/tests/ngraph_helpers/snippets_ngraph_functions/include/subgraph_mha.hpp b/src/tests/ov_helpers/ov_snippets_models/include/subgraph_mha.hpp similarity index 100% rename from src/tests/ngraph_helpers/snippets_ngraph_functions/include/subgraph_mha.hpp rename to src/tests/ov_helpers/ov_snippets_models/include/subgraph_mha.hpp diff --git a/src/tests/ngraph_helpers/snippets_ngraph_functions/include/subgraph_roll_matmul_roll.hpp b/src/tests/ov_helpers/ov_snippets_models/include/subgraph_roll_matmul_roll.hpp similarity index 100% rename from src/tests/ngraph_helpers/snippets_ngraph_functions/include/subgraph_roll_matmul_roll.hpp rename to src/tests/ov_helpers/ov_snippets_models/include/subgraph_roll_matmul_roll.hpp diff --git a/src/tests/ngraph_helpers/snippets_ngraph_functions/include/subgraph_simple.hpp b/src/tests/ov_helpers/ov_snippets_models/include/subgraph_simple.hpp similarity index 100% rename from src/tests/ngraph_helpers/snippets_ngraph_functions/include/subgraph_simple.hpp rename to src/tests/ov_helpers/ov_snippets_models/include/subgraph_simple.hpp diff --git a/src/tests/ngraph_helpers/snippets_ngraph_functions/include/subgraph_softmax.hpp b/src/tests/ov_helpers/ov_snippets_models/include/subgraph_softmax.hpp similarity index 100% rename from src/tests/ngraph_helpers/snippets_ngraph_functions/include/subgraph_softmax.hpp rename to src/tests/ov_helpers/ov_snippets_models/include/subgraph_softmax.hpp diff --git a/src/tests/ngraph_helpers/snippets_ngraph_functions/include/subgraph_transpose.hpp b/src/tests/ov_helpers/ov_snippets_models/include/subgraph_transpose.hpp similarity index 100% rename from src/tests/ngraph_helpers/snippets_ngraph_functions/include/subgraph_transpose.hpp rename to 
src/tests/ov_helpers/ov_snippets_models/include/subgraph_transpose.hpp diff --git a/src/tests/ngraph_helpers/snippets_ngraph_functions/include/two_binary_ops_function.hpp b/src/tests/ov_helpers/ov_snippets_models/include/two_binary_ops.hpp similarity index 100% rename from src/tests/ngraph_helpers/snippets_ngraph_functions/include/two_binary_ops_function.hpp rename to src/tests/ov_helpers/ov_snippets_models/include/two_binary_ops.hpp diff --git a/src/tests/ngraph_helpers/snippets_ngraph_functions/src/fake_quantize_function.cpp b/src/tests/ov_helpers/ov_snippets_models/src/fake_quantize_helper.cpp similarity index 99% rename from src/tests/ngraph_helpers/snippets_ngraph_functions/src/fake_quantize_function.cpp rename to src/tests/ov_helpers/ov_snippets_models/src/fake_quantize_helper.cpp index 8717476ed86409..0f23849778ba7d 100644 --- a/src/tests/ngraph_helpers/snippets_ngraph_functions/src/fake_quantize_function.cpp +++ b/src/tests/ov_helpers/ov_snippets_models/src/fake_quantize_helper.cpp @@ -2,11 +2,11 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "fake_quantize_function.hpp" +#include "fake_quantize_helper.hpp" #include "common_test_utils/data_utils.hpp" #include #include -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "function_helper.hpp" namespace ov { diff --git a/src/tests/ngraph_helpers/snippets_ngraph_functions/src/function_helper.cpp b/src/tests/ov_helpers/ov_snippets_models/src/function_helper.cpp similarity index 98% rename from src/tests/ngraph_helpers/snippets_ngraph_functions/src/function_helper.cpp rename to src/tests/ov_helpers/ov_snippets_models/src/function_helper.cpp index 5bbc14e959ad19..40cf72bf546450 100644 --- a/src/tests/ngraph_helpers/snippets_ngraph_functions/src/function_helper.cpp +++ b/src/tests/ov_helpers/ov_snippets_models/src/function_helper.cpp @@ -6,7 +6,7 @@ #include "common_test_utils/data_utils.hpp" #include #include -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace ov { namespace test { diff --git a/src/tests/ngraph_helpers/snippets_ngraph_functions/src/precision_propagation_function.cpp b/src/tests/ov_helpers/ov_snippets_models/src/precision_propagation.cpp similarity index 98% rename from src/tests/ngraph_helpers/snippets_ngraph_functions/src/precision_propagation_function.cpp rename to src/tests/ov_helpers/ov_snippets_models/src/precision_propagation.cpp index 6e6b91c1778909..873a8f3de6e87b 100644 --- a/src/tests/ngraph_helpers/snippets_ngraph_functions/src/precision_propagation_function.cpp +++ b/src/tests/ov_helpers/ov_snippets_models/src/precision_propagation.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "precision_propagation_function.hpp" +#include "precision_propagation.hpp" #include #include diff --git a/src/tests/ngraph_helpers/snippets_ngraph_functions/src/precision_propagation_convertion_function.cpp b/src/tests/ov_helpers/ov_snippets_models/src/precision_propagation_convertion.cpp similarity index 98% rename from src/tests/ngraph_helpers/snippets_ngraph_functions/src/precision_propagation_convertion_function.cpp rename to src/tests/ov_helpers/ov_snippets_models/src/precision_propagation_convertion.cpp index 20f517b16dfceb..8992a3a0e9c386 100644 --- a/src/tests/ngraph_helpers/snippets_ngraph_functions/src/precision_propagation_convertion_function.cpp +++ b/src/tests/ov_helpers/ov_snippets_models/src/precision_propagation_convertion.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include 
"precision_propagation_convertion_function.hpp" +#include "precision_propagation_convertion.hpp" #include #include diff --git a/src/tests/ngraph_helpers/snippets_ngraph_functions/src/precomp.hpp b/src/tests/ov_helpers/ov_snippets_models/src/precomp.hpp similarity index 100% rename from src/tests/ngraph_helpers/snippets_ngraph_functions/src/precomp.hpp rename to src/tests/ov_helpers/ov_snippets_models/src/precomp.hpp diff --git a/src/tests/ngraph_helpers/snippets_ngraph_functions/src/snippets_helpers.cpp b/src/tests/ov_helpers/ov_snippets_models/src/snippets_helpers.cpp similarity index 100% rename from src/tests/ngraph_helpers/snippets_ngraph_functions/src/snippets_helpers.cpp rename to src/tests/ov_helpers/ov_snippets_models/src/snippets_helpers.cpp diff --git a/src/tests/ngraph_helpers/snippets_ngraph_functions/src/subgraph_convert.cpp b/src/tests/ov_helpers/ov_snippets_models/src/subgraph_convert.cpp similarity index 100% rename from src/tests/ngraph_helpers/snippets_ngraph_functions/src/subgraph_convert.cpp rename to src/tests/ov_helpers/ov_snippets_models/src/subgraph_convert.cpp diff --git a/src/tests/ngraph_helpers/snippets_ngraph_functions/src/subgraph_customizable.cpp b/src/tests/ov_helpers/ov_snippets_models/src/subgraph_customizable.cpp similarity index 100% rename from src/tests/ngraph_helpers/snippets_ngraph_functions/src/subgraph_customizable.cpp rename to src/tests/ov_helpers/ov_snippets_models/src/subgraph_customizable.cpp diff --git a/src/tests/ngraph_helpers/snippets_ngraph_functions/src/subgraph_fq.cpp b/src/tests/ov_helpers/ov_snippets_models/src/subgraph_fq.cpp similarity index 99% rename from src/tests/ngraph_helpers/snippets_ngraph_functions/src/subgraph_fq.cpp rename to src/tests/ov_helpers/ov_snippets_models/src/subgraph_fq.cpp index 13b3a3f3ef795a..b690617dbe0123 100644 --- a/src/tests/ngraph_helpers/snippets_ngraph_functions/src/subgraph_fq.cpp +++ b/src/tests/ov_helpers/ov_snippets_models/src/subgraph_fq.cpp @@ -4,7 +4,7 @@ #include "subgraph_fq.hpp" #include "common_test_utils/data_utils.hpp" -#include "lpt_ngraph_functions/common/builders.hpp" +#include "ov_lpt_models/common/builders.hpp" #include namespace ov { diff --git a/src/tests/ngraph_helpers/snippets_ngraph_functions/src/subgraph_lowered.cpp b/src/tests/ov_helpers/ov_snippets_models/src/subgraph_lowered.cpp similarity index 99% rename from src/tests/ngraph_helpers/snippets_ngraph_functions/src/subgraph_lowered.cpp rename to src/tests/ov_helpers/ov_snippets_models/src/subgraph_lowered.cpp index 7c4568317c4a3e..6dcf8621df3396 100644 --- a/src/tests/ngraph_helpers/snippets_ngraph_functions/src/subgraph_lowered.cpp +++ b/src/tests/ov_helpers/ov_snippets_models/src/subgraph_lowered.cpp @@ -5,7 +5,7 @@ #include "subgraph_lowered.hpp" #include "common_test_utils/data_utils.hpp" #include -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" namespace ov { namespace test { diff --git a/src/tests/ngraph_helpers/snippets_ngraph_functions/src/subgraph_matmul.cpp b/src/tests/ov_helpers/ov_snippets_models/src/subgraph_matmul.cpp similarity index 99% rename from src/tests/ngraph_helpers/snippets_ngraph_functions/src/subgraph_matmul.cpp rename to src/tests/ov_helpers/ov_snippets_models/src/subgraph_matmul.cpp index 01d0758a160fc4..6bb7b4785368f2 100644 --- a/src/tests/ngraph_helpers/snippets_ngraph_functions/src/subgraph_matmul.cpp +++ b/src/tests/ov_helpers/ov_snippets_models/src/subgraph_matmul.cpp @@ -5,7 +5,7 @@ #include "subgraph_matmul.hpp" #include "common_test_utils/data_utils.hpp" 
#include -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "ov_ops/type_relaxed.hpp" diff --git a/src/tests/ngraph_helpers/snippets_ngraph_functions/src/subgraph_mha.cpp b/src/tests/ov_helpers/ov_snippets_models/src/subgraph_mha.cpp similarity index 99% rename from src/tests/ngraph_helpers/snippets_ngraph_functions/src/subgraph_mha.cpp rename to src/tests/ov_helpers/ov_snippets_models/src/subgraph_mha.cpp index 7933788f185b39..fdefcf03d9dd19 100644 --- a/src/tests/ngraph_helpers/snippets_ngraph_functions/src/subgraph_mha.cpp +++ b/src/tests/ov_helpers/ov_snippets_models/src/subgraph_mha.cpp @@ -6,9 +6,9 @@ #include "common_test_utils/data_utils.hpp" #include -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include "ov_ops/type_relaxed.hpp" -#include "lpt_ngraph_functions/common/builders.hpp" +#include "ov_lpt_models/common/builders.hpp" namespace ov { namespace test { diff --git a/src/tests/ngraph_helpers/snippets_ngraph_functions/src/subgraph_roll_matmul_roll.cpp b/src/tests/ov_helpers/ov_snippets_models/src/subgraph_roll_matmul_roll.cpp similarity index 100% rename from src/tests/ngraph_helpers/snippets_ngraph_functions/src/subgraph_roll_matmul_roll.cpp rename to src/tests/ov_helpers/ov_snippets_models/src/subgraph_roll_matmul_roll.cpp diff --git a/src/tests/ngraph_helpers/snippets_ngraph_functions/src/subgraph_simple.cpp b/src/tests/ov_helpers/ov_snippets_models/src/subgraph_simple.cpp similarity index 100% rename from src/tests/ngraph_helpers/snippets_ngraph_functions/src/subgraph_simple.cpp rename to src/tests/ov_helpers/ov_snippets_models/src/subgraph_simple.cpp diff --git a/src/tests/ngraph_helpers/snippets_ngraph_functions/src/subgraph_softmax.cpp b/src/tests/ov_helpers/ov_snippets_models/src/subgraph_softmax.cpp similarity index 98% rename from src/tests/ngraph_helpers/snippets_ngraph_functions/src/subgraph_softmax.cpp rename to src/tests/ov_helpers/ov_snippets_models/src/subgraph_softmax.cpp index 9f5515dcc73a10..d19aeae4f9a33f 100644 --- a/src/tests/ngraph_helpers/snippets_ngraph_functions/src/subgraph_softmax.cpp +++ b/src/tests/ov_helpers/ov_snippets_models/src/subgraph_softmax.cpp @@ -4,7 +4,7 @@ #include "subgraph_softmax.hpp" #include "common_test_utils/data_utils.hpp" -#include "ngraph_functions/builders.hpp" +#include "ov_models/builders.hpp" #include namespace ov { diff --git a/src/tests/ngraph_helpers/snippets_ngraph_functions/src/subgraph_transpose.cpp b/src/tests/ov_helpers/ov_snippets_models/src/subgraph_transpose.cpp similarity index 100% rename from src/tests/ngraph_helpers/snippets_ngraph_functions/src/subgraph_transpose.cpp rename to src/tests/ov_helpers/ov_snippets_models/src/subgraph_transpose.cpp diff --git a/src/tests/ngraph_helpers/snippets_ngraph_functions/src/two_binary_ops_function.cpp b/src/tests/ov_helpers/ov_snippets_models/src/two_binary_ops.cpp similarity index 99% rename from src/tests/ngraph_helpers/snippets_ngraph_functions/src/two_binary_ops_function.cpp rename to src/tests/ov_helpers/ov_snippets_models/src/two_binary_ops.cpp index 24892edda3f920..e75e169e6dd7f7 100644 --- a/src/tests/ngraph_helpers/snippets_ngraph_functions/src/two_binary_ops_function.cpp +++ b/src/tests/ov_helpers/ov_snippets_models/src/two_binary_ops.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "two_binary_ops_function.hpp" +#include "two_binary_ops.hpp" #include #include "snippets/op/convert_saturation.hpp" diff --git a/src/tests/test_utils/common_test_utils/CMakeLists.txt 
b/src/tests/test_utils/common_test_utils/CMakeLists.txt index 14db94d47363b4..d2364915b7a962 100644 --- a/src/tests/test_utils/common_test_utils/CMakeLists.txt +++ b/src/tests/test_utils/common_test_utils/CMakeLists.txt @@ -30,7 +30,7 @@ function(add_common_utils ADD_TARGET_NAME) gtest gtest_main gmock - ngraphFunctions + ov_models openvino::runtime openvino::runtime::dev PRIVATE diff --git a/src/tests/test_utils/common_test_utils/src/graph_comparator.cpp b/src/tests/test_utils/common_test_utils/src/graph_comparator.cpp index 0100f8f1416ff7..e3c5a8b2ec1a55 100644 --- a/src/tests/test_utils/common_test_utils/src/graph_comparator.cpp +++ b/src/tests/test_utils/common_test_utils/src/graph_comparator.cpp @@ -7,13 +7,13 @@ #include "common_test_utils/ov_tensor_utils.hpp" #include "gtest/gtest.h" #include "ie_common.h" -#include "ngraph_functions/utils/ngraph_helpers.hpp" #include "openvino/op/constant.hpp" #include "openvino/op/loop.hpp" #include "openvino/op/result.hpp" #include "openvino/op/tensor_iterator.hpp" #include "openvino/op/util/op_types.hpp" #include "openvino/op/util/sub_graph_base.hpp" +#include "ov_models/utils/ov_helpers.hpp" #include "precomp.hpp" namespace { diff --git a/src/tests/test_utils/functional_test_utils/CMakeLists.txt b/src/tests/test_utils/functional_test_utils/CMakeLists.txt index 3d3fb084ca5269..5fb2fd6e7d2725 100644 --- a/src/tests/test_utils/functional_test_utils/CMakeLists.txt +++ b/src/tests/test_utils/functional_test_utils/CMakeLists.txt @@ -22,7 +22,7 @@ addIeTarget( openvino::runtime openvino::runtime::dev PRIVATE - ngraphFunctions + ov_models openvino::pugixml INCLUDES PUBLIC diff --git a/src/tests/test_utils/functional_test_utils/src/test_model/test_model.cpp b/src/tests/test_utils/functional_test_utils/src/test_model/test_model.cpp index 7a29f37f6b3120..b5d63dfd6ef8a5 100644 --- a/src/tests/test_utils/functional_test_utils/src/test_model/test_model.cpp +++ b/src/tests/test_utils/functional_test_utils/src/test_model/test_model.cpp @@ -4,10 +4,10 @@ #include "functional_test_utils/test_model/test_model.hpp" -#include "ngraph_functions/subgraph_builders.hpp" #include "openvino/core/partial_shape.hpp" #include "openvino/pass/manager.hpp" #include "openvino/pass/serialize.hpp" +#include "ov_models/subgraph_builders.hpp" namespace ov { namespace test { From d7be40b8087fdd1b27243a8d284bc127fe0d8b31 Mon Sep 17 00:00:00 2001 From: Irina Efode Date: Wed, 4 Oct 2023 20:48:15 +0400 Subject: [PATCH 066/257] [CONFORMANCE][CMAKE] Add SubgraphsDumper lib (#20053) --- .../subgraphs_dumper/CMakeLists.txt | 20 +++++++++++++++++++ .../subgraphs_dumper/tests/CMakeLists.txt | 13 +++++------- 2 files changed, 25 insertions(+), 8 deletions(-) diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/CMakeLists.txt b/src/tests/functional/plugin/conformance/subgraphs_dumper/CMakeLists.txt index fbe206c3633349..3ab17f76aefbcf 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/CMakeLists.txt +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/CMakeLists.txt @@ -11,6 +11,7 @@ list(APPEND LIBRARIES openvino::pugixml ) +# add subgraphs_dumpers tool addIeTargetTest( NAME ${TARGET_NAME} ROOT ${CMAKE_CURRENT_SOURCE_DIR}/src @@ -25,4 +26,23 @@ addIeTargetTest( ADD_CPPLINT ) +# add subgraphs_dumpers lib to get API +addIeTarget( + NAME "${TARGET_NAME}Util" + TYPE STATIC + ROOT "${CMAKE_CURRENT_SOURCE_DIR}/src" + INCLUDES + PUBLIC + ${CMAKE_CURRENT_SOURCE_DIR}/include + LINK_LIBRARIES + PUBLIC + ${LIBRARIES} + EXCLUDED_SOURCE_PATHS + 
${OpenVINO_SOURCE_DIR}/src/tests/functional/plugin/conformance/subgraphs_dumper/src/main.cpp + ${OpenVINO_SOURCE_DIR}/src/tests/functional/plugin/conformance/subgraphs_dumper/include/gflag_config.hpp + DEPENDENCIES + ov_frontends + ADD_CPPLINT +) + ov_build_target_faster(${TARGET_NAME} UNITY) \ No newline at end of file diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/CMakeLists.txt b/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/CMakeLists.txt index 325504ff9a7700..c9eced4c632b72 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/CMakeLists.txt +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/CMakeLists.txt @@ -4,21 +4,18 @@ set(TARGET_NAME subgraphsDumperTests) +list(APPEND DEPENDENCIES subgraphsDumperUtil) + addIeTargetTest( NAME ${TARGET_NAME} ROOT ${CMAKE_CURRENT_SOURCE_DIR} - ADDITIONAL_SOURCE_DIRS - ${OpenVINO_SOURCE_DIR}/src/tests/functional/plugin/conformance/subgraphs_dumper/src - EXCLUDED_SOURCE_PATHS - ${OpenVINO_SOURCE_DIR}/src/tests/functional/plugin/conformance/subgraphs_dumper/src/main.cpp INCLUDES - ${OpenVINO_SOURCE_DIR}/src/tests/functional/plugin/conformance/subgraphs_dumper/include ${CMAKE_CURRENT_SOURCE_DIR}/ LINK_LIBRARIES PRIVATE - func_test_utils - openvino::runtime - openvino::pugixml + ${DEPENDENCIES} + DEPENDENCIES + ${DEPENDENCIES} ADD_CPPLINT ) From cdcbb1dc00b09bcc2fb9fd516458659669a691af Mon Sep 17 00:00:00 2001 From: Irina Efode Date: Wed, 4 Oct 2023 21:50:56 +0400 Subject: [PATCH 067/257] [CONFORMANCE][SUBGRAPHS DUMPER] Rework `subgraphs_dumper` graphs extraction algo feedback by plugins (#19669) * [CONFORMANCE][SUBGRAPHS DUMPER] Change repeat pattern extractor to avoid duplications and reduce graphs size * Small change * temporary * merge * try to handle large models * Fixes + tests * Remove extra * Exclude models after const folding in case dynamic modesl * shapes to meta * Fix tests * Fix test + is_subgraph * Fix issue with default output * change hashing * Check memory * Hash algo * correct modelsize check * Log large models * tmp disable fused_names extractor * add device for fused_names * remove extra * fix vuild * Disable fused_names extractor --- .../subgraphs_dumper/include/cache/cache.hpp | 24 ++++ .../include/cache/graph_cache.hpp | 19 +-- .../include/cache/meta/input_info.hpp | 24 +++- .../include/cache/meta/meta_info.hpp | 17 ++- .../include/cache/meta/model_info.hpp | 5 +- .../subgraphs_dumper/include/gflag_config.hpp | 7 +- .../include/matchers/single_op/manager.hpp | 2 +- .../include/matchers/subgraph/fused_names.hpp | 7 +- .../include/matchers/subgraph/manager.hpp | 15 +- .../matchers/subgraph/repeat_pattern.hpp | 3 +- .../include/matchers/subgraph/subgraph.hpp | 20 ++- .../subgraphs_dumper/include/utils/memory.hpp | 48 +++++++ .../subgraphs_dumper/include/utils/model.hpp | 34 ++++- .../subgraphs_dumper/include/utils/node.hpp | 13 +- .../subgraphs_dumper/src/cache/cache.cpp | 6 +- .../src/cache/graph_cache.cpp | 121 ++++++++++------ .../src/cache/meta/meta_info.cpp | 54 +++++-- .../conformance/subgraphs_dumper/src/main.cpp | 6 +- .../src/matchers/single_op/manager.cpp | 2 +- .../src/matchers/single_op/single_op.cpp | 21 ++- .../src/matchers/subgraph/fused_names.cpp | 40 ++++-- .../src/matchers/subgraph/manager.cpp | 93 +++++++++--- .../src/matchers/subgraph/repeat_pattern.cpp | 21 ++- .../src/matchers/subgraph/subgraph.cpp | 71 +++++++-- .../subgraphs_dumper/src/utils/model.cpp | 18 ++- .../subgraphs_dumper/src/utils/node.cpp | 2 +- 
.../subgraphs_dumper/tests/cache/cache.cpp | 2 +- .../subgraphs_dumper/tests/cache/meta.cpp | 57 ++++++-- .../tests/matchers/subgraph/manager.cpp | 21 ++- .../tests/matchers/subgraph/subgraph.cpp | 136 ++++++++++++++++++ .../subgraphs_dumper/tests/utils/node.cpp | 4 +- .../src/read_ir/read_ir_tests.cpp | 1 - .../rename_conformance_ir.py | 21 ++- 33 files changed, 764 insertions(+), 171 deletions(-) create mode 100644 src/tests/functional/plugin/conformance/subgraphs_dumper/include/utils/memory.hpp diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/include/cache/cache.hpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/include/cache/cache.hpp index b12dcd2bf38e01..8f762e5cbacf8c 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/include/cache/cache.hpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/include/cache/cache.hpp @@ -28,9 +28,33 @@ class ICache { m_serialization_dir = serialization_dir; } + bool is_model_large_to_read(const std::shared_ptr& model, const std::string& model_path) { + // ov::Model + ov::CompiledModel + auto model_bytesize = model->get_graph_size(); + if (2 * model_bytesize >= mem_size) { + auto model_bytesize_gb = model_bytesize; + model_bytesize_gb >>= 30; + auto mem_size_gb = mem_size; + mem_size_gb >>= 30; + // std::cout << "[ WARNING ] Model " << model_path << " bytesize is " << model_bytesize_gb << + // "is larger than RAM size: " << mem_size_gb << ". Model will be skipped!" << std::endl; + return true; + } + return false; + } + + bool is_model_large_to_store_const(const std::shared_ptr& model) { + auto model_bytesize = model->get_graph_size(); + if (mem_size < model_bytesize * 4) { + return true; + } + return false; + } + protected: size_t m_serialization_timeout = 60; std::string m_serialization_dir = "."; + static size_t mem_size; ICache() = default; diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/include/cache/graph_cache.hpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/include/cache/graph_cache.hpp index afd501b669de81..130847a58ea8da 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/include/cache/graph_cache.hpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/include/cache/graph_cache.hpp @@ -19,12 +19,13 @@ class GraphCache : public ICache { public: void update_cache(const std::shared_ptr& model, const std::string& model_meta_data, - bool extract_body, bool from_cache = false) override; + bool extract_body, + bool from_cache = false) override; void serialize_cache() override; - static std::shared_ptr& get() { + static std::shared_ptr& get(const std::string& device = "") { if (m_cache_instance == nullptr) { - m_cache_instance = std::shared_ptr(new GraphCache); + m_cache_instance = std::shared_ptr(new GraphCache(device)); } return m_cache_instance; } @@ -46,19 +47,21 @@ class GraphCache : public ICache { // cache byte size uint64_t m_graph_cache_bytesize = 0; - GraphCache() { + GraphCache(const std::string& device = "") { ExtractorsManager::ExtractorsMap matchers = { // temporary disabling according mem leaks in CI and not using swap mem - { "fused_names", FusedNamesExtractor::Ptr(new FusedNamesExtractor) }, + // { "fused_names", FusedNamesExtractor::Ptr(new FusedNamesExtractor(device)) }, { "repeat_pattern", RepeatPatternExtractor::Ptr(new RepeatPatternExtractor) }, }; m_manager.set_extractors(matchers); m_cache_subdir = "subgraph"; } - void update_cache(const std::shared_ptr& model, const std::string& model_path, - 
std::map& input_info, const std::string& extractor_name, - size_t model_op_cnt, bool from_cache = false); + void update_cache(const std::shared_ptr& model, + const std::string& model_path, + std::map& input_info, + const std::string& extractor_name, + size_t model_op_cnt); }; } // namespace subgraph_dumper diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/include/cache/meta/input_info.hpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/include/cache/meta/input_info.hpp index 2aaa819520ab04..43e2b2a6356ed1 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/include/cache/meta/input_info.hpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/include/cache/meta/input_info.hpp @@ -40,15 +40,33 @@ struct InputInfo { Range ranges; bool is_const; + ov::PartialShape max_shape, min_shape; - InputInfo(double in_min = DEFAULT_MIN_VALUE, + InputInfo(const ov::PartialShape& shape = {}, + double in_min = DEFAULT_MIN_VALUE, double in_max = DEFAULT_MAX_VALUE, bool in_is_const = false) : is_const(in_is_const), - ranges(Range(in_min, in_max)) {} + ranges(Range(in_min, in_max)), + max_shape(shape), + min_shape(shape) {} bool operator==(const InputInfo& input_info_ref) const { - return this->is_const == input_info_ref.is_const && this->ranges == input_info_ref.ranges; + return this->is_const == input_info_ref.is_const && + this->ranges == input_info_ref.ranges && + this->max_shape == input_info_ref.max_shape && + this->min_shape == input_info_ref.min_shape; + } + + InputInfo operator=(const InputInfo& input_info) { + this->ranges = input_info.ranges; + if (ov::shape_size(this->max_shape.get_max_shape()) < ov::shape_size(input_info.max_shape.get_max_shape())) { + this->max_shape = input_info.max_shape; + } + if (ov::shape_size(this->min_shape.get_min_shape()) > ov::shape_size(input_info.min_shape.get_min_shape())) { + this->min_shape = input_info.min_shape; + } + return *this; } }; diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/include/cache/meta/meta_info.hpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/include/cache/meta/meta_info.hpp index 47572db370acd9..54625bfac52b39 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/include/cache/meta/meta_info.hpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/include/cache/meta/meta_info.hpp @@ -13,8 +13,12 @@ namespace subgraph_dumper { class MetaInfo { public: - MetaInfo(const std::string& model_path = "", const std::map& _input_info = {}, - size_t total_op_cnt = 1, size_t this_op_cnt = 1, const std::string& extractor = "", size_t model_priority = 1); + MetaInfo(const std::string& model_path = "", + const std::map& _input_info = {}, + size_t total_op_cnt = 1, + size_t this_op_cnt = 1, + const std::string& extractor = "", + size_t model_priority = 1); MetaInfo(std::map _in_info, std::map _model_info, std::unordered_set _extractors) : @@ -22,9 +26,14 @@ class MetaInfo { input_info(_in_info), extractors(_extractors) {}; void serialize(const std::string& serialization_path); - void update(const std::string& model_path, const std::map& _input_info, size_t _total_op_cnt = 1, - size_t _this_op_cnt = 1, const std::string& extractor = "", const std::vector& ignored_inputs = {}); + void update(const std::string& model_path, + const std::map& _input_info, + size_t _total_op_cnt = 1, + size_t _this_op_cnt = 1, + const std::string& extractor = "", + const std::vector& ignored_inputs = {}); std::map get_input_info() const; + void 
set_input_info(const std::map& new_in_info) { input_info = new_in_info; }; std::map get_model_info() const; std::string get_any_extractor() const { return *extractors.begin(); } diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/include/cache/meta/model_info.hpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/include/cache/meta/model_info.hpp index e3fa7fec575354..d968cf8e9530c8 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/include/cache/meta/model_info.hpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/include/cache/meta/model_info.hpp @@ -15,7 +15,10 @@ struct ModelInfo { std::set model_paths; size_t this_op_cnt, total_op_cnt, model_priority; - ModelInfo(const std::string& model_path = "", size_t total_ops_in_model = 1, size_t this_ops_in_model = 1, size_t _model_priority = 1) : + ModelInfo(const std::string& model_path = "", + size_t total_ops_in_model = 1, + size_t this_ops_in_model = 1, + size_t _model_priority = 1) : total_op_cnt(total_ops_in_model), this_op_cnt(this_ops_in_model), model_priority(_model_priority) { model_paths = model_path.empty() ? std::set() : std::set({ model_path }) ; } diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/include/gflag_config.hpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/include/gflag_config.hpp index 06f6e3671b892e..c1a999f190227c 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/include/gflag_config.hpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/include/gflag_config.hpp @@ -7,6 +7,8 @@ #include #include +#include "common_test_utils/test_constants.hpp" + static const char help_message[] = "Print a usage message."; static const char input_folders_message[] = "Required. Comma separated paths to the input folders with IRs"; static const char local_cache_message[] = "Optional. Comma separated paths to the local cache folders with IRs"; @@ -15,11 +17,13 @@ static const char path_regex_message[] = "Optional. regular expression to be app "folders recursive discovery"; static const char extract_body_message[] = "Optional. Allow to extract operation bodies to operation cache."; static const char cache_type_message[] = "Optional. Specify caching type: OP, GRAPH. The default value is both"; +static const char device_message[] = "Optional. Specify device to compile model for `fused_names` extractor. 
Default is `TEMPLATE` "; DEFINE_bool(h, false, help_message); DEFINE_string(input_folders, "", local_cache_message); DEFINE_string(local_cache, "", input_folders_message); DEFINE_string(output_folder, "output", output_folder_message); +DEFINE_string(device, ov::test::utils::DEVICE_TEMPLATE, device_message); DEFINE_string(path_regex, ".*", output_folder_message); DEFINE_bool(extract_body, true, extract_body_message); DEFINE_string(cache_type, "", cache_type_message); @@ -38,6 +42,7 @@ static void showUsage() { std::cout << " --output_folder \"\" " << output_folder_message << "\n"; std::cout << " --path_regex \"\" " << path_regex_message << "\n"; std::cout << " --extract_body \"\" " << extract_body_message << "\n"; - std::cout << " --cache_type \"\" " << extract_body_message << "\n"; + std::cout << " --cache_type \"\" " << cache_type_message << "\n"; + std::cout << " --device \"\" " << device_message << "\n"; std::cout << std::flush; } diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/include/matchers/single_op/manager.hpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/include/matchers/single_op/manager.hpp index d8895ca4a527f3..8d8143d86250ba 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/include/matchers/single_op/manager.hpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/include/matchers/single_op/manager.hpp @@ -17,7 +17,7 @@ class MatchersManager { explicit MatchersManager(const MatchersMap& matchers = {}) : m_matchers(matchers) {} bool match(const std::shared_ptr &node, - const std::shared_ptr &ref); + const std::shared_ptr &ref) const; void set_matchers(const MatchersMap& matchers = {}) { m_matchers = matchers; } const MatchersMap& get_matchers() { return m_matchers; } diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/include/matchers/subgraph/fused_names.hpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/include/matchers/subgraph/fused_names.hpp index 60cacf3a753e17..5df31c77baaa0c 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/include/matchers/subgraph/fused_names.hpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/include/matchers/subgraph/fused_names.hpp @@ -14,15 +14,16 @@ namespace subgraph_dumper { class FusedNamesExtractor final : public SubgraphExtractor { public: - FusedNamesExtractor(); + FusedNamesExtractor(const std::string& device = ""); ~FusedNamesExtractor(); std::list extract(const std::shared_ptr &model, - bool is_extract_body = true) override; - void set_target_device(const std::string& _device) { device = _device; } + bool is_extract_body = true, + bool is_copy_constants = true) override; protected: std::unordered_set extract_compiled_model_names(const std::shared_ptr& model); + void set_target_device(const std::string& _device); std::string device; std::shared_ptr core; diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/include/matchers/subgraph/manager.hpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/include/matchers/subgraph/manager.hpp index 05395b80c15a26..8634585cf1a2ce 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/include/matchers/subgraph/manager.hpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/include/matchers/subgraph/manager.hpp @@ -12,15 +12,23 @@ namespace subgraph_dumper { class ExtractorsManager { public: + // { model, subgraph, model_in_info, subgraph_in_info } + using ExtractedSubgraphTuple = std::tuple, std::shared_ptr, std::map, 
std::map>; using ExtractorsMap = std::map; + explicit ExtractorsManager(const ExtractorsMap& extractors = {}) : m_extractors(extractors) {} bool match(const std::shared_ptr &model, - const std::shared_ptr &ref, + const std::shared_ptr &ref_model, std::map &in_info, const std::map &in_info_ref); + ExtractedSubgraphTuple is_subgraph(const std::shared_ptr &model, + const std::shared_ptr &ref_model, + const std::map &in_info = {}, + const std::map &in_info_ref = {}); std::list extract(const std::shared_ptr &model, - bool is_extract_body = true); + bool is_extract_body = true, + bool is_copy_constants = true); void set_extractors(const ExtractorsMap& extractors = {}) { m_extractors = extractors; } ExtractorsMap get_extractors() { return m_extractors; } @@ -28,7 +36,8 @@ class ExtractorsManager { std::map align_input_info(const std::shared_ptr& model, const std::shared_ptr& model_ref, const std::map &in_info, - const std::map &in_info_ref); + const std::map &in_info_ref, + const std::map &matched_op = {}); protected: ExtractorsMap m_extractors = {}; diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/include/matchers/subgraph/repeat_pattern.hpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/include/matchers/subgraph/repeat_pattern.hpp index e003a94ba61d57..874ed35be83662 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/include/matchers/subgraph/repeat_pattern.hpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/include/matchers/subgraph/repeat_pattern.hpp @@ -25,7 +25,8 @@ class RepeatPatternExtractor final : public SubgraphExtractor { } std::list extract(const std::shared_ptr &model, - bool is_extract_body = true) override; + bool is_extract_body = true, + bool is_copy_constants = true) override; private: MatchersManager manager; diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/include/matchers/subgraph/subgraph.hpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/include/matchers/subgraph/subgraph.hpp index 44320a8dd34c46..5f7dd9d8204b25 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/include/matchers/subgraph/subgraph.hpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/include/matchers/subgraph/subgraph.hpp @@ -8,7 +8,11 @@ #include "openvino/op/util/op_types.hpp" #include "common_test_utils/graph_comparator.hpp" + #include "cache/meta/input_info.hpp" +#include "matchers/single_op/single_op.hpp" +#include "matchers/single_op/convolutions.hpp" +#include "matchers/single_op/manager.hpp" namespace ov { namespace tools { @@ -16,13 +20,26 @@ namespace subgraph_dumper { class SubgraphExtractor { public: + // { is_subgraph, model, subgraph, matched_ops{ model_op_name, graph_op_name }} + using IsSubgraphTuple = std::tuple, std::shared_ptr, std::map>; using Ptr = std::shared_ptr; + SubgraphExtractor() { + MatchersManager::MatchersMap matchers = { + { "generic_single_op", SingleOpMatcher::Ptr(new SingleOpMatcher) }, + { "convolutions", ConvolutionsMatcher::Ptr(new ConvolutionsMatcher) }, + }; + m_manager.set_matchers(matchers); + } + bool match(const std::shared_ptr &model, const std::shared_ptr &ref_model) const; + IsSubgraphTuple is_subgraph(const std::shared_ptr &model, + const std::shared_ptr &ref_model) const; virtual std::list extract(const std::shared_ptr &model, - bool is_extract_body = true) { + bool is_extract_body = true, + bool is_copy_constants = true) { return std::list{}; }; @@ -34,6 +51,7 @@ class SubgraphExtractor { 
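A note on the tuples introduced above: `IsSubgraphTuple` and `ExtractedSubgraphTuple` pack the match flag, the larger graph, the contained subgraph and (for the manager variant) both input-info maps, so callers unpack them positionally. A minimal sketch of that usage, with illustrative variable names that are not taken from the patch:

    // Assumed for the sketch: `manager` is an ExtractorsManager, `model_a`/`model_b`
    // are std::shared_ptr<ov::Model> and `info_a`/`info_b` their input-info maps.
    auto res = manager.is_subgraph(model_a, model_b, info_a, info_b);
    if (std::get<0>(res)) {
        std::shared_ptr<ov::Model> graph, subgraph;
        std::map<std::string, ov::tools::subgraph_dumper::InputInfo> graph_in_info, subgraph_in_info;
        // element 0 is the bool flag; elements 1..4 follow the declaration order above
        std::tie(std::ignore, graph, subgraph, graph_in_info, subgraph_in_info) = res;
    }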
.enable(FunctionsComparator::ATTRIBUTES) .enable(FunctionsComparator::NODES) .enable(FunctionsComparator::PRECISIONS); + MatchersManager m_manager = MatchersManager(); inline bool is_node_to_skip(const std::shared_ptr& node) const { return ov::op::util::is_parameter(node) || diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/include/utils/memory.hpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/include/utils/memory.hpp new file mode 100644 index 00000000000000..36bf146eff6555 --- /dev/null +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/include/utils/memory.hpp @@ -0,0 +1,48 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#if defined(_WIN32) +#include +#else +#include +#include +#include +#endif + +namespace ov { +namespace tools { +namespace subgraph_dumper { + +static size_t get_ram_size() { + size_t ram_mem_size_bytes = 0; +#ifdef _WIN32 + MEMORYSTATUSEX status; + status.dwLength = sizeof(status); + GlobalMemoryStatusEx( &status ); + ram_mem_size_bytes = status.ullTotalPhys; +#elif defined(CTL_HW) && defined(HW_MEMSIZE) + int mib[2]; + mib[0] = CTL_HW; +#if defined(HW_MEMSIZE) + mib[1] = HW_MEMSIZE; +#endif + int64_t size = 0; + size_t len = sizeof( size ); + if ( sysctl( mib, 2, &size, &len, NULL, 0 ) == 0 ) + ram_mem_size_bytes = size; +#elif defined(_SC_AIX_REALMEM) + ram_mem_size_bytes = sysconf( _SC_AIX_REALMEM ) * (size_t)1024L; + +#elif defined(_SC_PHYS_PAGES) && defined(_SC_PAGE_SIZE) + ram_mem_size_bytes = static_cast(sysconf( _SC_PHYS_PAGES )) * + static_cast(sysconf(_SC_PAGE_SIZE)); +#endif + return ram_mem_size_bytes; +} + +} // namespace subgraph_dumper +} // namespace tools +} // namespace ov \ No newline at end of file diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/include/utils/model.hpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/include/utils/model.hpp index 85eee663e959b0..61ce8ce8d7a637 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/include/utils/model.hpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/include/utils/model.hpp @@ -49,13 +49,17 @@ static std::vector FROTEND_REGEXP = { enum ModelCacheStatus { SUCCEED = 0, NOT_FULLY_CACHED = 1, - NOT_READ = 2 + NOT_READ = 2, + LARGE_MODELS_EXCLUDED = 3, + LARGE_MODELS_INCLUDED = 4, }; static std::map model_cache_status_to_str = { { ModelCacheStatus::SUCCEED, "successful_models" }, { ModelCacheStatus::NOT_FULLY_CACHED, "not_fully_cached_models" }, { ModelCacheStatus::NOT_READ, "not_read_models" }, + { ModelCacheStatus::LARGE_MODELS_EXCLUDED, "large_models_excluded" }, + { ModelCacheStatus::LARGE_MODELS_INCLUDED, "large_models_included" }, }; std::pair, std::pair>> @@ -70,10 +74,32 @@ std::map> cache_models( void save_model_status_to_file(const std::map>& caching_status, const std::string& output_dir); +inline bool is_dynamic_model(const std::shared_ptr& model) { + for (const auto& parameter : model->get_parameters()) { + if (is_dynamic_node(parameter)) { + return true; + } + } + for (const auto& result : model->get_results()) { + if (is_dynamic_node(result)) { + return true; + } + } + return false; +} + +inline std::string get_model_type(const std::shared_ptr& model) { + if (is_dynamic_model(model)) { + return "dynamic"; + } + return "static"; +} + inline ExtractedPattern generate_model(const std::set>& nodes, std::unordered_set& checked_ops, - const std::string& extractor_name) { + const std::string& extractor_name, + bool 
is_copy_constants = true) { // map to recover graph using cloned nodes and original connections // { original_node_name, cloned_node } std::unordered_map> cloned_node_map; @@ -89,7 +115,7 @@ generate_model(const std::set>& nodes, auto orig_node_name = node->get_friendly_name(); checked_ops.insert(orig_node_name); cloned_node_map.insert({ orig_node_name, - clone_node(node, true, false, orig_node_name) }); + clone_node(node, is_copy_constants, false, orig_node_name) }); // create temporary vector to fill node output indexes std::vector out_ports(node->outputs().size()); @@ -127,7 +153,7 @@ generate_model(const std::set>& nodes, if (cloned_node_map.count(orig_in_node_name)) { auto orig_in_node = cloned_node_map[orig_in_node_name]; auto cloned_in_node_name = cloned_in_node->get_friendly_name(); - ov::replace_output_update_name(cloned_in_node->get_default_output(), orig_in_node->output(out_idx)); + ov::replace_output_update_name(cloned_in_node->output(out_idx), orig_in_node->output(out_idx)); if (ov::op::util::is_parameter(orig_in_node)) { auto param = std::dynamic_pointer_cast(orig_in_node); model_parameters.push_back(param); diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/include/utils/node.hpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/include/utils/node.hpp index 6d2412c639651e..c679707bf5b3ae 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/include/utils/node.hpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/include/utils/node.hpp @@ -42,17 +42,24 @@ std::shared_ptr convert_const_to_param(const std::shared_ // all inputs are defined as parameters and contains detailed info in meta std::shared_ptr generate_model_by_node(const std::shared_ptr& node); -inline std::string get_node_type(const std::shared_ptr& node) { +inline bool is_dynamic_node(const std::shared_ptr& node) { for (size_t i = 0; i < node->get_input_size(); ++i) { if (node->get_input_partial_shape(i).is_dynamic()) { - return "dynamic"; + return true; } } for (size_t i = 0; i < node->get_output_size(); ++i) { if (node->get_output_partial_shape(i).is_dynamic()) { - return "dynamic"; + return true; } } + return false; +} + +inline std::string get_node_type(const std::shared_ptr& node) { + if (is_dynamic_node(node)) { + return "dynamic"; + } return "static"; } diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/src/cache/cache.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/src/cache/cache.cpp index 5dc7017ed9059f..38df8575dd730b 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/src/cache/cache.cpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/src/cache/cache.cpp @@ -11,10 +11,12 @@ #include "common_test_utils/file_utils.hpp" #include "cache/cache.hpp" +#include "utils/memory.hpp" namespace ov { namespace tools { namespace subgraph_dumper { +size_t ICache::mem_size = get_ram_size(); bool ICache::serialize_model(const std::pair, MetaInfo>& graph_info, const std::string& rel_serialization_dir) { @@ -40,8 +42,8 @@ bool ICache::serialize_model(const std::pair, MetaInf meta.serialize(meta_path); return true; } catch (std::exception &e) { - std::cout << "[ ERROR ] Failed to serialize model: " << model_name - << ". Exception: " << e.what() << std::endl; + // std::cout << "[ ERROR ] Failed to serialize model: " << model_name + // << ". 
Exception: " << e.what() << std::endl; ov::test::utils::removeFile(xml_path); ov::test::utils::removeFile(bin_path); ov::test::utils::removeFile(meta_path); diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/src/cache/graph_cache.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/src/cache/graph_cache.cpp index 5d004219217da3..1273f5cc342d1b 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/src/cache/graph_cache.cpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/src/cache/graph_cache.cpp @@ -13,6 +13,7 @@ #include "cache/graph_cache.hpp" #include "utils/node.hpp" +#include "utils/model.hpp" namespace ov { namespace tools { @@ -22,32 +23,54 @@ std::shared_ptr GraphCache::m_cache_instance = nullptr; void GraphCache::update_cache(const std::shared_ptr& model, const std::string& model_meta_data, - bool extract_body, bool from_cache) { + bool extract_body, + bool from_cache) { std::cout << "[ INFO ][ GRAPH CACHE ] Processing model: " << model_meta_data << std::endl; auto model_total_op = model->get_ops().size() - model->get_output_size() - model->inputs().size(); - auto extracted_patterns = m_manager.extract(model, extract_body); - if (extracted_patterns.empty()) { - return; - } - while (!extracted_patterns.empty()) { - auto it = *extracted_patterns.begin(); - update_cache(std::get<0>(it), model_meta_data, std::get<1>(it), std::get<2>(it), model_total_op); - extracted_patterns.pop_front(); + if (from_cache) { + auto meta_path = ov::test::utils::replaceExt(model_meta_data, "meta"); + auto meta = MetaInfo::read_meta_from_file(meta_path); + m_graph_cache.insert({ model, meta }); + m_graph_cache_bytesize += model->get_graph_size(); + } else { + // const won't be cloned in case model takes > 50% RAM + auto model_bytesize = model->get_graph_size(); + // check that Free RAM memory is enough. Serialize in other case + // serialize graph cache in case graph cache bytesize > 4GB to avoid long search the same graphs + if (m_graph_cache_bytesize + 2 * model_bytesize > mem_size || m_graph_cache_bytesize >> 20 != 0) { + // std::cout << "[ GRAPH CACHE ][ WARNING ] There are not enought RAM memory! Serialize graph cache" << std::endl; + serialize_cache(); + m_graph_cache_bytesize = 0; + } + auto is_large_model = is_model_large_to_store_const(model); + if (is_large_model) { + auto model_bytesize_gb = model_bytesize; + model_bytesize_gb >>= 30; + auto mem_size_gb = mem_size; + mem_size_gb >>= 30; + // std::cout << "[ GRAPH CACHE ][ WARNING ] Model bytesize is " << model_bytesize_gb << + // "GB. It is larger than 25% RAM size: " << mem_size_gb << ". Constants won't be copied!" << std::endl; + } + auto extracted_patterns = m_manager.extract(model, extract_body, !is_large_model); + if (extracted_patterns.empty()) { + return; + } + while (!extracted_patterns.empty()) { + auto it = *extracted_patterns.begin(); + update_cache(std::get<0>(it), model_meta_data, std::get<1>(it), std::get<2>(it), model_total_op); + extracted_patterns.pop_front(); + } } - return; } -void GraphCache::update_cache(const std::shared_ptr& extracted_model, const std::string& model_path, - std::map& input_info, const std::string& extractor_name, size_t model_op_cnt, bool from_cache) { - // todo: check the number 8GB - if (m_graph_cache_bytesize >> 33 > 0) { - std::cout << "[ GRAPH CACHE ][ WARNING ] Cache size > 8 GB. 
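The size checks above all compare `ov::Model::get_graph_size()` against the total RAM reported by `get_ram_size()`. A minimal sketch of the same thresholds used in this patch, assuming a standalone `model` variable:

    // Thresholds applied by the cache:
    //  - a model is too large to read when it would need more than half of RAM;
    //  - constants are not cloned when the model takes more than a quarter of RAM.
    const size_t mem_size = ov::tools::subgraph_dumper::get_ram_size();
    const size_t model_bytesize = model->get_graph_size();
    const bool too_large_to_read   = 2 * model_bytesize >= mem_size;  // >= 50% of RAM
    const bool skip_constant_clone = 4 * model_bytesize >  mem_size;  // >  25% of RAM
    std::cout << "[ INFO ] RAM size is " << (mem_size >> 30) << "GB" << std::endl;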
Serialize graph cache" << std::endl; - serialize_cache(); - // m_graph_cache.clear(); - m_graph_cache_bytesize = 0; - } - +void GraphCache::update_cache(const std::shared_ptr& extracted_model, + const std::string& model_path, + std::map& input_info, + const std::string& extractor_name, + size_t model_op_cnt) { auto graph_name = extracted_model->get_friendly_name(); + auto this_op_cnt = extracted_model->get_ops().size() - + extracted_model->get_parameters().size() - extracted_model->get_results().size(); std::string serialized_model_path = ""; for (const auto& extractor : m_manager.get_extractors()) { auto tmp_serialized_model_path = ov::util::path_join({ m_serialization_dir, m_cache_subdir, extractor.first, graph_name + ".xml" }); @@ -60,7 +83,7 @@ void GraphCache::update_cache(const std::shared_ptr& extracted_model, std::shared_ptr model_to_update = nullptr; // if cached model was serialized if (!serialized_model_path.empty()) { - std::cout << "[ GRAPH CACHE ][ INFO ] Reading cached model: " << serialized_model_path << std::endl; + // std::cout << "[ GRAPH CACHE ][ INFO ] Reading cached model: " << serialized_model_path << std::endl; auto bin_path = ov::test::utils::replaceExt(serialized_model_path, ".bin"); auto meta_path = ov::test::utils::replaceExt(serialized_model_path, ".meta"); auto cached_model = ov::test::utils::PluginCache::get().core()->read_model(serialized_model_path); @@ -69,31 +92,48 @@ void GraphCache::update_cache(const std::shared_ptr& extracted_model, ov::test::utils::removeFile(serialized_model_path); ov::test::utils::removeFile(bin_path); ov::test::utils::removeFile(meta_path); + m_graph_cache.insert({ cached_model, cached_meta }); m_graph_cache_bytesize += cached_model->get_graph_size(); - model_to_update = cached_model; - input_info = m_manager.align_input_info(extracted_model, model_to_update, - input_info, cached_meta.get_input_info()); + + if (m_manager.match(extracted_model, cached_model, + input_info, cached_meta.get_input_info())) { + model_to_update = cached_model; + } } else { for (const auto& cached_model : m_graph_cache) { if (m_manager.match(extracted_model, cached_model.first, input_info, cached_model.second.get_input_info())) { model_to_update = cached_model.first; break; + } else { + auto is_subgraph = m_manager.is_subgraph(extracted_model, cached_model.first, + input_info, cached_model.second.get_input_info()); + // in case if one model is subgraph of other to update model meta info and remove subgraph from cache + if (std::get<0>(is_subgraph)) { + std::shared_ptr graph, subgraph; + std::map graph_in_info, subgraph_in_info; + std::tie(std::ignore, graph, subgraph, graph_in_info, subgraph_in_info) = is_subgraph; + if (subgraph == cached_model.first) { + auto meta = m_graph_cache[subgraph]; + meta.set_input_info(graph_in_info); + m_graph_cache.erase(subgraph); + m_graph_cache.insert({graph, meta}); + m_graph_cache_bytesize += (graph->get_graph_size() - subgraph->get_graph_size()); + } + m_graph_cache[cached_model.first].update(model_path, + subgraph_in_info, + model_op_cnt, + this_op_cnt, + extractor_name); + return; + } } } } - auto this_op_cnt = extracted_model->get_ops().size() - - extracted_model->get_parameters().size() - extracted_model->get_results().size(); if (model_to_update == nullptr) { - MetaInfo meta; - if (from_cache) { - auto meta_path = ov::test::utils::replaceExt(model_path, "meta"); - meta = MetaInfo::read_meta_from_file(meta_path); - } else { - meta = MetaInfo(model_path, input_info, model_op_cnt, this_op_cnt, extractor_name); - } + 
MetaInfo meta = MetaInfo(model_path, input_info, model_op_cnt, this_op_cnt, extractor_name); m_graph_cache.insert({ extracted_model, meta }); m_graph_cache_bytesize += extracted_model->get_graph_size(); return; @@ -110,16 +150,11 @@ void GraphCache::update_cache(const std::shared_ptr& extracted_model, } void GraphCache::serialize_cache() { - // for (const auto& cache_item : m_graph_cache) { - auto it = m_graph_cache.begin(); - while (it != m_graph_cache.end()) { - auto rel_dir = ov::util::path_join({m_cache_subdir, it->second.get_any_extractor() }); - serialize_model(*it, rel_dir); - m_graph_cache.erase(it->first); - it = m_graph_cache.begin(); - } - auto a = 0; - // } + for (const auto& cache_item : m_graph_cache) { + auto rel_dir = ov::util::path_join({ m_cache_subdir, get_model_type(cache_item.first), cache_item.second.get_any_extractor() }); + serialize_model(cache_item, rel_dir); + } + m_graph_cache.clear(); } } // namespace subgraph_dumper diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/src/cache/meta/meta_info.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/src/cache/meta/meta_info.cpp index 40d7ed8c63ccfe..68213db323c7a2 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/src/cache/meta/meta_info.cpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/src/cache/meta/meta_info.cpp @@ -15,8 +15,12 @@ namespace subgraph_dumper { unsigned long MetaInfo::MIN_MODEL_PRIORITY = std::numeric_limits::max(); unsigned long MetaInfo::MAX_MODEL_PRIORITY = std::numeric_limits::min(); -MetaInfo::MetaInfo(const std::string& _model_path, const std::map& _input_info, - size_t _total_op_cnt, size_t _this_op_cnt, const std::string& extractor, size_t model_priority) { +MetaInfo::MetaInfo(const std::string& _model_path, + const std::map& _input_info, + size_t _total_op_cnt, + size_t _this_op_cnt, + const std::string& extractor, + size_t model_priority) { unsigned long tmp_graph_priority = _total_op_cnt * model_priority; if (tmp_graph_priority < MIN_MODEL_PRIORITY) MIN_MODEL_PRIORITY = tmp_graph_priority; if (tmp_graph_priority > MAX_MODEL_PRIORITY) MAX_MODEL_PRIORITY = tmp_graph_priority; @@ -46,6 +50,21 @@ double MetaInfo::get_graph_priority() { return diff / delta; } +inline ov::PartialShape str_to_ov_shape(std::string str) { + str = str.replace(str.find('['), 1, ""); + str = str.replace(str.find(']'), 1, ""); + + std::vector shape_vec; + size_t pos = 0; + do { + pos = str.find('.'); + std::string dim_str = str.substr(0, pos); + shape_vec.push_back(atoi(dim_str.c_str())); + str = str.replace(0, dim_str.length() + 1, ""); + } while (pos != std::string::npos); + return ov::PartialShape{shape_vec}; +} + MetaInfo MetaInfo::read_meta_from_file(const std::string& meta_path) { pugi::xml_document doc; doc.load_file(meta_path.c_str()); @@ -80,6 +99,12 @@ MetaInfo MetaInfo::read_meta_from_file(const std::string& meta_path) { } else { in_info.ranges.max = DEFAULT_MAX_VALUE; } + { + auto max_shape_str = std::string(input.attribute("max_shape").value()); + in_info.max_shape = str_to_ov_shape(max_shape_str); + auto min_shape_str = std::string(input.attribute("min_shape").value()); + in_info.min_shape = str_to_ov_shape(min_shape_str); + } input_info.insert({in_name, in_info}); } } @@ -132,6 +157,8 @@ void MetaInfo::serialize(const std::string& serialization_path) { input_node.append_attribute("max").set_value(input.second.ranges.max); } input_node.append_attribute("convert_to_const").set_value(input.second.is_const); + 
input_node.append_attribute("max_shape").set_value(ov::test::utils::partialShape2str({ input.second.max_shape }).c_str()); + input_node.append_attribute("min_shape").set_value(ov::test::utils::partialShape2str({ input.second.min_shape }).c_str()); } doc.save_file(serialization_path.c_str()); } @@ -142,6 +169,7 @@ void MetaInfo::update(const std::string& _model_path, size_t _this_op_cnt, const std::string& extractor, const std::vector& ignored_inputs) { + bool is_update_in_info = true; if (input_info.size() != _input_info.size()) { throw std::runtime_error("Incompatible input info!"); } @@ -155,6 +183,19 @@ void MetaInfo::update(const std::string& _model_path, } else { model_info.insert({ model_name, ModelInfo(_model_path, _total_op_cnt) });\ } + + // update max and mib abs priority to normilize priorities when serialize + { + auto abs_graph_priority = get_abs_graph_priority(); + if (abs_graph_priority > MAX_MODEL_PRIORITY) MAX_MODEL_PRIORITY = abs_graph_priority; + if (abs_graph_priority < MIN_MODEL_PRIORITY) MIN_MODEL_PRIORITY = abs_graph_priority; + } + if (!extractor.empty()) { + extractors.insert(extractor); + } + if (!is_update_in_info) { + return; + } for (const auto& in : _input_info) { if (std::find(ignored_inputs.begin(), ignored_inputs.end(), in.first) != ignored_inputs.begin()) { continue; @@ -167,15 +208,6 @@ void MetaInfo::update(const std::string& _model_path, input_info[in.first] = in.second; } } - // update max and mib abs priority to normilize priorities when serialize - { - auto abs_graph_priority = get_abs_graph_priority(); - if (abs_graph_priority > MAX_MODEL_PRIORITY) MAX_MODEL_PRIORITY = abs_graph_priority; - if (abs_graph_priority < MIN_MODEL_PRIORITY) MIN_MODEL_PRIORITY = abs_graph_priority; - } - if (!extractor.empty()) { - extractors.insert(extractor); - } } std::map MetaInfo::get_input_info() const { diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/src/main.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/src/main.cpp index 1bafbed4e3573b..4b678d8725132a 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/src/main.cpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/src/main.cpp @@ -10,6 +10,7 @@ #include "openvino/util/file_util.hpp" #include "common_test_utils/file_utils.hpp" +#include "utils/memory.hpp" using namespace ov::tools::subgraph_dumper; @@ -40,6 +41,9 @@ int main(int argc, char *argv[]) { std::cout << "[ INFO ] Try 'subgraphsDumper -h' for more information. \nException: " << e.what() << std::endl; return 1; } + size_t ram_size_gb = get_ram_size(); + ram_size_gb >>= 30; + std::cout << "[ INFO ] RAM size is " << ram_size_gb << "GB" << std::endl; std::vector> caches; if (FLAGS_cache_type == "OP" || FLAGS_cache_type.empty()) { @@ -48,7 +52,7 @@ int main(int argc, char *argv[]) { } if (FLAGS_cache_type == "GRAPH" || FLAGS_cache_type.empty()) { std::cout << "[ INFO ] GraphCache is enabled!" 
<< std::endl; - caches.push_back(GraphCache::get()); + caches.push_back(GraphCache::get(FLAGS_device)); } for (auto& cache : caches) { diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/src/matchers/single_op/manager.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/src/matchers/single_op/manager.cpp index a274d0d7786701..675d808d92b42c 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/src/matchers/single_op/manager.cpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/src/matchers/single_op/manager.cpp @@ -18,7 +18,7 @@ iMatcherConfig::Ptr MatchersManager::get_config(const std::shared_ptr } bool MatchersManager::match(const std::shared_ptr &node, - const std::shared_ptr &ref) { + const std::shared_ptr &ref) const { for (const auto &it : m_matchers) { if (it.second->match(node, ref)) { return true; diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/src/matchers/single_op/single_op.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/src/matchers/single_op/single_op.cpp index 842802e4003350..3e0abda2a936e9 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/src/matchers/single_op/single_op.cpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/src/matchers/single_op/single_op.cpp @@ -4,6 +4,7 @@ #include "openvino/op/convolution.hpp" #include "openvino/op/group_conv.hpp" +#include "openvino/op/util/op_types.hpp" #include "common_test_utils/graph_comparator.hpp" #include "matchers/single_op/single_op.hpp" @@ -34,10 +35,13 @@ bool SingleOpMatcher::match_inputs(const std::shared_ptr &node, if (std::find(ignored_ports.begin(), ignored_ports.end(), port_id) != ignored_ports.end()) { continue; } - const auto &cur_node_input_type = node->input_value(port_id).get_node_shared_ptr()->get_type_info(); - const auto &ref_node_input_type = ref->input_value(port_id).get_node_shared_ptr()->get_type_info(); - if (cur_node_input_type != ref_node_input_type) { - return false; + if (!ov::op::util::is_parameter(node) && !ov::op::util::is_parameter(ref) && + !ov::op::util::is_constant(node) && !ov::op::util::is_constant(ref)) { + const auto &cur_node_input_type = node->input_value(port_id).get_node_shared_ptr()->get_type_info(); + const auto &ref_node_input_type = ref->input_value(port_id).get_node_shared_ptr()->get_type_info(); + if (cur_node_input_type != ref_node_input_type) { + return false; + } } if (node->get_input_tensor(port_id).get_partial_shape().rank() != ref->get_input_tensor(port_id).get_partial_shape().rank()) { return false; @@ -59,6 +63,13 @@ SingleOpMatcher::match_outputs(const std::shared_ptr &node, return false; } for (size_t port_id = 0; port_id < node->get_output_size(); ++port_id) { + if (!ov::op::util::is_output(node) && !ov::op::util::is_output(ref)) { + const auto &cur_node_out_type = node->output(port_id).get_node_shared_ptr()->get_type_info(); + const auto &ref_node_out_type = ref->output(port_id).get_node_shared_ptr()->get_type_info(); + if (cur_node_out_type != ref_node_out_type) { + return false; + } + } if (node->get_output_tensor(port_id).get_element_type() != ref->get_output_tensor(port_id).get_element_type()) { return false; } @@ -94,7 +105,7 @@ bool SingleOpMatcher::match(const std::shared_ptr &node, if (!match_inputs(node, ref)) { return false; } - if (!match_attrs(node, ref)) { + if (!match_attrs(node, ref) && !ov::op::util::is_parameter(node) && !ov::op::util::is_parameter(ref)) { return false; } if (!match_outputs(node, ref)) { diff --git 
a/src/tests/functional/plugin/conformance/subgraphs_dumper/src/matchers/subgraph/fused_names.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/src/matchers/subgraph/fused_names.cpp index a00361ab322b12..f138dba68a9116 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/src/matchers/subgraph/fused_names.cpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/src/matchers/subgraph/fused_names.cpp @@ -6,6 +6,7 @@ #include "openvino/op/tensor_iterator.hpp" #include "openvino/op/if.hpp" +#include "common_test_utils/common_utils.hpp" #include "functional_test_utils/ov_plugin_cache.hpp" #include "matchers/subgraph/fused_names.hpp" @@ -13,6 +14,26 @@ using namespace ov::tools::subgraph_dumper; +void FusedNamesExtractor::set_target_device(const std::string& _device) { + auto available_devices = core->get_available_devices(); + if (_device.empty()) { + device = available_devices.front(); + std::cout << "[ WARNING ][ GRAPH CACHE ] " << device << + " will be used for `fused_names` extractor" << std::endl; + return; + } else if (std::find(available_devices.begin(), + available_devices.end(), + _device) == available_devices.end()) { + std::string message = "Incorrect device "; + message += _device; + message += " to enable `fused_names` extractor! Available devices: "; + message += ov::test::utils::vec2str(available_devices); + throw std::runtime_error(message); + } + device = _device; + std::cout << "[ INFO ][ GRAPH CACHE ] " << device << " is using for `fused_names` extractor" << std::endl; +} + std::unordered_set FusedNamesExtractor::extract_compiled_model_names(const std::shared_ptr& model) { auto compiled_model = core->compile_model(model, device); @@ -26,9 +47,9 @@ FusedNamesExtractor::extract_compiled_model_names(const std::shared_ptrget_available_devices().begin()); + set_target_device(device); } FusedNamesExtractor::~FusedNamesExtractor() { @@ -37,7 +58,8 @@ FusedNamesExtractor::~FusedNamesExtractor() { std::list FusedNamesExtractor::extract(const std::shared_ptr &model, - bool is_extract_body) { + bool is_extract_body, + bool is_copy_constants) { auto compiled_op_name = extract_compiled_model_names(model); std::list matched_patterns; std::unordered_set checked_ops; @@ -49,10 +71,10 @@ FusedNamesExtractor::extract(const std::shared_ptr &model, } if (compiled_op_name.count(op_name)) { try { - matched_patterns.push_back(generate_model(nodes, checked_ops, extractor_name)); + matched_patterns.push_back(generate_model(nodes, checked_ops, extractor_name, is_copy_constants)); } catch(std::exception& e) { - if (std::string(e.what()) != "Incorrect node number to create model") { - std::cout << "[ WARNING ] Impossible to generate network and add to GraphCache: " < &model, } } try { - matched_patterns.push_back(generate_model(nodes, checked_ops, extractor_name)); + matched_patterns.push_back(generate_model(nodes, checked_ops, extractor_name, is_copy_constants)); } catch(std::exception& e) { - if (std::string(e.what()) != "Incorrect node number to create model") { - std::cout << "[ WARNING ] Impossible to generate network and add to GraphCache: " < +#include "openvino/pass/manager.hpp" +#include "openvino/pass/constant_folding.hpp" #include "matchers/subgraph/manager.hpp" +#include "utils/model.hpp" using namespace ov::tools::subgraph_dumper; @@ -12,18 +15,47 @@ bool ExtractorsManager::match(const std::shared_ptr &model, if (it.second->match(model, ref)) { return true; } + return false; } return false; } +ExtractorsManager::ExtractedSubgraphTuple 
+ExtractorsManager::is_subgraph(const std::shared_ptr &model, + const std::shared_ptr &ref_model, + const std::map &in_info, + const std::map &in_info_ref) { + for (const auto &it : m_extractors) { + auto extractor_res = it.second->is_subgraph(model, ref_model); + if (std::get<0>(extractor_res)) { + std::map graph_in_info, subgraph_in_info; + if (std::get<1>(extractor_res) == model && std::get<2>(extractor_res) == ref_model) { + graph_in_info = in_info; + subgraph_in_info = in_info_ref; + } else if (std::get<1>(extractor_res) == ref_model && std::get<2>(extractor_res) == model) { + graph_in_info = in_info_ref; + subgraph_in_info = in_info; + } else { + throw std::runtime_error("Generated models are incompatible with original ones!"); + } + try { + subgraph_in_info = align_input_info(std::get<2>(extractor_res), std::get<1>(extractor_res), subgraph_in_info, graph_in_info); + } catch(...) { + return { false, nullptr, nullptr, {}, {} }; + } + return { true, std::get<1>(extractor_res), std::get<2>(extractor_res), graph_in_info, subgraph_in_info }; + } + return { false, nullptr, nullptr, {}, {} }; + } +} + bool ExtractorsManager::match(const std::shared_ptr &model, const std::shared_ptr &ref, std::map &in_info, const std::map &in_info_ref) { if (match(model, ref)) { try { - auto new_input_info = align_input_info(model, ref, in_info, in_info_ref); - in_info = new_input_info; + in_info = align_input_info(model, ref, in_info, in_info_ref); return true; } catch (...) { return false; @@ -36,51 +68,78 @@ std::map ExtractorsManager::align_input_info(const std::shared_ptr& model, const std::shared_ptr& model_ref, const std::map& in_info, - const std::map& in_info_ref) { + const std::map& in_info_ref, + const std::map &matched_op) { std::map new_input_info = in_info; bool is_update_required = false; for (const auto& in_info_item : in_info_ref) { if (!in_info.count(in_info_item.first)) { is_update_required = true; break; + } else if (in_info.at(in_info_item.first).is_const != in_info_item.second.is_const) { + throw std::runtime_error("Impossible to update input info!!!"); } } if (is_update_required) { - std::map new_ref_input_info = in_info_ref; // align matched model names auto ref_model_ops = model_ref->get_ordered_ops(); auto model_ops = model->get_ordered_ops(); + size_t ref_ordered_ops_size = ref_model_ops.size(); size_t ordered_ops_size = model_ops.size(); - if (ordered_ops_size != ref_model_ops.size()) { - throw std::runtime_error("Matched models are different!"); + if (ref_ordered_ops_size != ordered_ops_size && matched_op.empty()) { + throw std::runtime_error("Matched models can not be compared according different op numbers!"); } - for (size_t i = 0; i < ordered_ops_size; ++i) { - auto model_op_name = model_ops[i]->get_friendly_name(); + for (size_t i = 0; i < ref_ordered_ops_size; ++i) { + auto model_op_name = i < ordered_ops_size ? model_ops[i]->get_friendly_name() : ""; auto model_ref_op_name = ref_model_ops[i]->get_friendly_name(); - if (in_info.count(model_op_name)) { - auto input_info = new_input_info[model_op_name]; - if (input_info.is_const != new_ref_input_info[model_ref_op_name].is_const) { - throw std::runtime_error("Impossible yo update input info!!!"); + if (!in_info_ref.count(model_ref_op_name) && !in_info.count(model_op_name)) { + continue; + } + auto input_info = matched_op.empty() ? new_input_info[model_op_name] : in_info_ref.at(model_ref_op_name); + std::string input_name = matched_op.count(model_ref_op_name) ? 
                matched_op.at(model_ref_op_name) : model_op_name;
+            if (new_input_info.count(input_name)) {
+                if (input_info.is_const != in_info_ref.at(model_ref_op_name).is_const) {
+                    throw std::runtime_error("Impossible to update input info!!!");
+                }
+                if (!matched_op.empty()) {
+                    input_info = new_input_info.at(input_name);
                 }
-                new_input_info.erase(model_op_name);
-                new_input_info.insert({ model_ref_op_name, input_info });
+                new_input_info.erase(input_name);
             }
+            new_input_info.insert({ model_ref_op_name, input_info });
         }
     }
     return new_input_info;
 }
 
 std::list<ExtractedPattern>
-ExtractorsManager::extract(const std::shared_ptr<ov::Model> &model, bool is_extract_body) {
+ExtractorsManager::extract(const std::shared_ptr<ov::Model> &model,
+                           bool is_extract_body,
+                           bool is_copy_constants) {
     std::list<ExtractedPattern> result;
     for (const auto &it : m_extractors) {
+        // extract patterns from original models
         auto start = std::chrono::high_resolution_clock::now();
         it.second->set_extractor_name(it.first);
-        auto extracted_patterns = it.second->extract(model, is_extract_body);
+        auto extracted_patterns = it.second->extract(model, is_extract_body, is_copy_constants);
         result.insert(result.end(), extracted_patterns.begin(), extracted_patterns.end());
         auto end = std::chrono::high_resolution_clock::now();
         auto delta = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
-        std::cout << "[ INFO ][ EXTRACTOR DURATION ] " << it.first << " " << delta << "ms" << std::endl;
+        std::cout << "[ INFO ][ EXTRACTOR DURATION ][ ORIGINAL MODEL ] " << it.first << " " << delta << "ms" << std::endl;
+
+        // todo: enable it after validation
+        // if (!is_dynamic_model(model)) {
+        //     // extract patterns from models after `constant_folding` pass
+        //     ov::pass::Manager manager;
+        //     manager.register_pass<ov::pass::ConstantFolding>();
+        //     manager.run_passes(model);
+        //     extracted_patterns = it.second->extract(model, is_extract_body, is_copy_constants);
+        //     result.insert(result.end(), extracted_patterns.begin(), extracted_patterns.end());
+
+        //     end = std::chrono::high_resolution_clock::now();
+        //     delta = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
+        //     std::cout << "[ INFO ][ EXTRACTOR DURATION ][ CONSTANT FOLDING ] " << it.first << " " << delta << "ms" << std::endl;
+        // }
     }
     return result;
 }
diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/src/matchers/subgraph/repeat_pattern.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/src/matchers/subgraph/repeat_pattern.cpp
index 7b293eae252ffc..4331b178a7b037 100644
--- a/src/tests/functional/plugin/conformance/subgraphs_dumper/src/matchers/subgraph/repeat_pattern.cpp
+++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/src/matchers/subgraph/repeat_pattern.cpp
@@ -16,7 +16,8 @@ using namespace ov::tools::subgraph_dumper;
 std::list<ExtractedPattern>
 RepeatPatternExtractor::extract(const std::shared_ptr<ov::Model> &model,
-                                bool is_extract_body) {
+                                bool is_extract_body,
+                                bool is_copy_constants) {
     std::unordered_set<std::string> checked_ops;
     std::list<ExtractedPattern> to_cache;
@@ -92,11 +93,23 @@ RepeatPatternExtractor::extract(const std::shared_ptr<ov::Model> &model,
         }
         for (size_t i = 0; i < start_node_idx.size(); ++i) {
             try {
-                to_cache.push_back(generate_model(nodes[i], checked_ops, extractor_name));
+                std::unordered_set<std::string> tmp_checked_ops;
+                auto extracted_pattern = generate_model(nodes[i], tmp_checked_ops, extractor_name, is_copy_constants);
+                auto extracted_model = std::get<0>(extracted_pattern);
+                std::list<ExtractedPattern> secondary_patterns;
+                if (nodes[i].size() > 20) {
+                    secondary_patterns = extract(std::get<0>(extracted_pattern), is_extract_body, is_copy_constants);
+                }
+                if (secondary_patterns.size() > 1) {
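+                    // when recursive extraction of a large pattern (more than 20 nodes) yields several
+                    // secondary patterns, those are cached instead of the single combined pattern
+                    // pushed back in the else-branch below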
to_cache.insert(to_cache.end(), secondary_patterns.begin(), secondary_patterns.end()); + } else { + to_cache.push_back(extracted_pattern); + } nodes[i].clear(); + checked_ops.insert(tmp_checked_ops.begin(), tmp_checked_ops.end()); } catch(std::exception& e) { - if (std::string(e.what()) != "Incorrect node number to create model") { - std::cout << "[ WARNING ] Impossible to generate network and add to GraphCache: " < #include "matchers/subgraph/subgraph.hpp" using namespace ov::tools::subgraph_dumper; @@ -17,19 +16,61 @@ SubgraphExtractor::match(const std::shared_ptr &model, } std::vector> ordered_ops = model->get_ordered_ops(), ref_ordered_ops = ref_model->get_ordered_ops(); - if (ordered_ops.size() != ref_ordered_ops.size()) + if (ordered_ops.size() != ref_ordered_ops.size()) { return false; - MatchersManager::MatchersMap matchers = { - { "generic_single_op", SingleOpMatcher::Ptr(new SingleOpMatcher) }, - { "convolutions", ConvolutionsMatcher::Ptr(new ConvolutionsMatcher) }, - }; - MatchersManager manager(matchers); - for (size_t i = 0; i < ordered_ops.size(); ++i) { - if (is_node_to_skip(ordered_ops[i]) && is_node_to_skip(ref_ordered_ops[i])) - continue; - if (!manager.match(ordered_ops[i], ref_ordered_ops[i])) { - return false; + } + size_t matched_op_cnt = 0, total_op_cnt = ordered_ops.size(); + size_t matched_op_cnt_required = round(0.9 * total_op_cnt); + for (size_t i = 0; i < total_op_cnt; ++i) { + if (is_node_to_skip(ordered_ops[i]) && + is_node_to_skip(ref_ordered_ops[i]) || + m_manager.match(ordered_ops[i], ref_ordered_ops[i])) { + ++matched_op_cnt; + } + if (matched_op_cnt >= matched_op_cnt_required) { + return true; + } + } + return false; +} + +inline SubgraphExtractor::IsSubgraphTuple prepare_is_subgraph_result(bool is_subgraph, + const std::shared_ptr& graph, + const std::shared_ptr& subgraph, + const std::map& matched_ops) { + return is_subgraph ? 
+ std::make_tuple(is_subgraph, graph, subgraph, matched_ops) : + std::make_tuple(is_subgraph, nullptr, nullptr, std::map()); +} + +SubgraphExtractor::IsSubgraphTuple +SubgraphExtractor::is_subgraph(const std::shared_ptr &model, + const std::shared_ptr &ref_model) const { + std::vector> ordered_ops = model->get_ordered_ops(), + ref_ordered_ops = ref_model->get_ordered_ops(); + bool is_model = ordered_ops.size() > ref_ordered_ops.size(); + ov::NodeVector graph_to_check_ops, subgraph_to_check_ops; + std::shared_ptr graph = nullptr, subgraph = nullptr; + if (is_model) { + graph_to_check_ops = ordered_ops; + subgraph_to_check_ops = ref_ordered_ops; + graph = model; + subgraph = ref_model; + } else { + graph_to_check_ops = ref_ordered_ops; + subgraph_to_check_ops = ordered_ops; + graph = ref_model; + subgraph = model; + } + std::map matched_op_names; + + auto graph_it = graph_to_check_ops.begin(), subgraph_it = subgraph_to_check_ops.begin(); + while (graph_it != graph_to_check_ops.end() && subgraph_it != subgraph_to_check_ops.end()) { + if (m_manager.match(*graph_it, *subgraph_it)) { + matched_op_names.insert({ (*graph_it)->get_friendly_name(), (*subgraph_it)->get_friendly_name()}); + ++subgraph_it; } + ++graph_it; } - return true; + return prepare_is_subgraph_result(subgraph_it == subgraph_to_check_ops.end(), graph, subgraph, matched_op_names); } diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/src/utils/model.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/src/utils/model.cpp index af927cce8da079..09dce548b91c02 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/src/utils/model.cpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/src/utils/model.cpp @@ -54,7 +54,7 @@ find_models(const std::vector &dirs, const std::string& regexp) { } } catch (std::exception& e) { not_read_model.emplace_back(model_file); - std::cout << "[ ERROR ] Impossible to read model: " << model_file << std::endl << "Exception: " << e.what(); + // std::cout << "[ ERROR ] Impossible to read model: " << model_file << std::endl << "Exception: " << e.what(); } } } @@ -77,7 +77,9 @@ std::map> cache_models( std::map> cache_status = { { ModelCacheStatus::SUCCEED, {} }, { ModelCacheStatus::NOT_FULLY_CACHED, {} }, - { ModelCacheStatus::NOT_READ, {} } + { ModelCacheStatus::NOT_READ, {} }, + { ModelCacheStatus::LARGE_MODELS_EXCLUDED, {} }, + { ModelCacheStatus::LARGE_MODELS_INCLUDED, {} }, }; auto core = ov::test::utils::PluginCache::get().core(); auto models_size = models.size(); @@ -86,19 +88,25 @@ std::map> cache_models( const auto& model = models[i]; if (ov::util::file_exists(model)) { - std::cout << "[ INFO ] [ " << i << "/" << models_size << " ] model will be processed" << std::endl; + std::cout << "[ INFO ][ " << i + 1 << "/" << models_size << " ] model will be processed" << std::endl; ModelCacheStatus model_status = ModelCacheStatus::SUCCEED; try { std::shared_ptr function = core->read_model(model); try { + if (cache->is_model_large_to_read(function, model)) { + cache_status[ModelCacheStatus::LARGE_MODELS_EXCLUDED].push_back(model); + continue; + } else if (cache->is_model_large_to_store_const(function)) { + cache_status[ModelCacheStatus::LARGE_MODELS_INCLUDED].push_back(model); + } cache->update_cache(function, model, extract_body, from_cache); } catch (std::exception &e) { - std::cout << "[ ERROR ] Model processing failed with exception:" << std::endl << e.what() << std::endl; + // std::cout << "[ ERROR ] Model processing failed with exception:" << 
std::endl << e.what() << std::endl; model_status = ModelCacheStatus::NOT_FULLY_CACHED; } } catch (std::exception &e) { model_status = ModelCacheStatus::NOT_READ; - std::cout << "[ ERROR ] Model reading failed with exception:" << std::endl << e.what() << std::endl; + // std::cout << "[ ERROR ] Model reading failed with exception:" << std::endl << e.what() << std::endl; } cache_status[model_status].push_back(model); } diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/src/utils/node.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/src/utils/node.cpp index 220d1b00de47d0..9df7ea3dc178fb 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/src/utils/node.cpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/src/utils/node.cpp @@ -14,7 +14,7 @@ std::map get_input_info_by_node(const std::shared_ptrget_input_partial_shape(port_id)); std::string input_name = input_node->get_friendly_name(); if (std::dynamic_pointer_cast(input_node)) { if (ov::shape_size(input_node->get_output_shape(0)) == 0) diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/cache/cache.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/cache/cache.cpp index 99bd5ddc4424b2..a0d46c733809d7 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/cache/cache.cpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/cache/cache.cpp @@ -42,7 +42,7 @@ class ICacheUnitTest : public SubgraphsDumperBaseTest, test_model = std::make_shared(convert, params); test_model->set_friendly_name(model_name); } - test_meta = ov::tools::subgraph_dumper::MetaInfo(test_model_path, {{"in_0", ov::tools::subgraph_dumper::InputInfo(0, 1, true)}}); + test_meta = ov::tools::subgraph_dumper::MetaInfo(test_model_path, {{"in_0", ov::tools::subgraph_dumper::InputInfo({1, 2}, 0, 1, true)}}); } void TearDown() override { diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/cache/meta.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/cache/meta.cpp index 42d8379f7223e3..e1665e1c755fce 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/cache/meta.cpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/cache/meta.cpp @@ -25,27 +25,38 @@ class InputInfoUnitTest : public SubgraphsDumperBaseTest {}; TEST_F(InputInfoUnitTest, constructor) { ASSERT_NO_THROW(auto in_info = InputInfo()); - ASSERT_NO_THROW(auto in_info = InputInfo(0)); - ASSERT_NO_THROW(auto in_info = InputInfo(0, 1)); - ASSERT_NO_THROW(auto in_info = InputInfo(0, 1, true)); + ASSERT_NO_THROW(auto in_info = InputInfo({10})); + ASSERT_NO_THROW(auto in_info = InputInfo({}, 0)); + ASSERT_NO_THROW(auto in_info = InputInfo({}, 0, 1)); + ASSERT_NO_THROW(auto in_info = InputInfo({}, 0, 1, true)); } TEST_F(InputInfoUnitTest, update_ranges) { auto in_info_0 = InputInfo(); - auto in_info_1 = InputInfo(0); + auto in_info_1 = InputInfo({}, 0); in_info_0 = in_info_1; ASSERT_EQ(in_info_0.ranges.min, in_info_1.ranges.min); ASSERT_EQ(in_info_0.ranges.max, in_info_1.ranges.max); ASSERT_EQ(in_info_0.is_const, in_info_1.is_const); - auto in_info_2 = InputInfo(1, 2); - auto ref_in_info = InputInfo(0, 2); + auto in_info_2 = InputInfo({}, 1, 2); + auto ref_in_info = InputInfo({}, 0, 2); in_info_0 = in_info_2; ASSERT_EQ(in_info_0.ranges.min, ref_in_info.ranges.min); ASSERT_EQ(in_info_0.ranges.max, ref_in_info.ranges.max); ASSERT_EQ(in_info_0.is_const, ref_in_info.is_const); } +TEST_F(InputInfoUnitTest, 
update_shapes) { + auto in_info_0 = InputInfo({10}); + ASSERT_EQ(in_info_0.min_shape, ov::PartialShape({10})); + ASSERT_EQ(in_info_0.max_shape, ov::PartialShape({10})); + auto in_info_1 = InputInfo({20}); + in_info_0 = in_info_1; + ASSERT_EQ(in_info_0.min_shape, ov::PartialShape({10})); + ASSERT_EQ(in_info_1.max_shape, ov::PartialShape({20})); +} + // ======================== Model Info Func tests ============================================= class ModelInfoFuncTest : public ::testing::Test {}; @@ -55,6 +66,7 @@ TEST_F(ModelInfoFuncTest, constructor) { ASSERT_NO_THROW(auto model_info = ModelInfo("model.xml")); ASSERT_NO_THROW(auto model_info = ModelInfo("model.xml", 1)); ASSERT_NO_THROW(auto model_info = ModelInfo("model.xml", 1, 2)); + ASSERT_NO_THROW(auto model_info = ModelInfo("model.xml", 1, 2, 3)); } // ======================== Meta Info Functional tests ============================================= @@ -71,7 +83,7 @@ class MetaInfoFuncTest : public SubgraphsDumperBaseTest { test_model_path = "test_model_path.xml"; test_extractor_name = "test_extractor"; test_model_name = ov::test::utils::replaceExt(test_model_path, ""); - test_in_info = {{ "test_in_0", InputInfo(DEFAULT_MIN_VALUE, 1, true) }}; + test_in_info = {{ "test_in_0", InputInfo({10}, DEFAULT_MIN_VALUE, 1, true) }}; test_model_info = {{ test_model_name, ModelInfo(test_model_path, 5) }}; test_artifacts_dir = ov::util::path_join({ov::test::utils::getCurrentWorkingDir(), "test_artifacts"}); ov::util::create_directory_recursive(test_artifacts_dir); @@ -110,17 +122,22 @@ TEST_F(MetaInfoFuncTest, get_any_extractor) { } TEST_F(MetaInfoFuncTest, update) { - std::map test_in_info = {{ "test_in_0", InputInfo(DEFAULT_MIN_VALUE, 1, true) }}; + std::map test_in_info = {{ "test_in_0", InputInfo({10}, DEFAULT_MIN_VALUE, 1, true) }}; auto test_meta = MetaInfo(test_model_name, test_in_info, 1, 1, test_extractor_name); - std::map test_input_info_1 = {{ "test_in_0", InputInfo(0, 1, true) }}; + ASSERT_EQ(test_meta.get_input_info().at("test_in_0").min_shape, ov::PartialShape({10})); + ASSERT_EQ(test_meta.get_input_info().at("test_in_0").max_shape, ov::PartialShape({10})); + std::map test_input_info_1 = {{ "test_in_0", InputInfo({50}, 0, 1, true) }}; std::string test_model_1 = "test_model_1"; std::string test_model_path_1 = ov::util::path_join({ "path", "to", test_model_1 + ".xml"}); ASSERT_ANY_THROW(test_meta.update(test_model_path_1, {})); - ASSERT_ANY_THROW(test_meta.update(test_model_path_1, {{ "test_in_1", InputInfo() }})); - ASSERT_ANY_THROW(test_meta.update(test_model_path_1, {{ "test_in_0", InputInfo(0, 1, false) }})); + ASSERT_ANY_THROW(test_meta.update(test_model_path_1, {{ "test_in_1", InputInfo({10}) }})); + ASSERT_ANY_THROW(test_meta.update(test_model_path_1, {{ "test_in_0", InputInfo({10}, 0, 1, false) }})); ASSERT_NO_THROW(test_meta.update(test_model_path_1, test_input_info_1)); + ASSERT_EQ(test_meta.get_input_info().at("test_in_0").min_shape, ov::PartialShape({10})); + ASSERT_EQ(test_meta.get_input_info().at("test_in_0").max_shape, ov::PartialShape({50})); ASSERT_NO_THROW(test_meta.update(test_model_path_1, test_input_info_1, 1, 2, "test_extractor_1")); ASSERT_NO_THROW(test_meta.update(test_model_path_1, test_input_info_1, 2)); + ASSERT_NO_THROW(test_meta.update(test_model_path_1, test_input_info_1, 2, 4, "test")); } TEST_F(MetaInfoFuncTest, serialize) { @@ -178,6 +195,12 @@ TEST_F(MetaInfoUnitTest, serialize) { ASSERT_EQ(input_info[in_xml].ranges.min, min_xml); auto max_xml = std::string(in_info_xml.attribute("max").value()) == 
"undefined" ? DEFAULT_MAX_VALUE : in_info_xml.attribute("max").as_double(); ASSERT_EQ(input_info[in_xml].ranges.max, max_xml); + auto max_shape_str = std::string(in_info_xml.attribute("max_shape").value()); + auto max_shape_ref = ov::test::utils::partialShape2str({this->get_input_info().begin()->second.max_shape}); + ASSERT_EQ(max_shape_str, max_shape_ref); + auto min_shape_str = std::string(in_info_xml.attribute("min_shape").value()); + auto min_shape_ref = ov::test::utils::partialShape2str({this->get_input_info().begin()->second.min_shape}); + ASSERT_EQ(min_shape_str, min_shape_ref); } } { @@ -202,18 +225,22 @@ TEST_F(MetaInfoUnitTest, read_meta_from_file) { TEST_F(MetaInfoUnitTest, update) { auto test_meta = MetaInfo(test_model_name, test_in_info); - std::map test_meta_1 = {{ "test_in_0", InputInfo(0, 1, true) }}; + std::map test_meta_1 = {{ "test_in_0", InputInfo({20}, 0, 1, true) }}; std::string test_model_1 = "test_model_1"; std::string test_model_path_1 = ov::util::path_join({ "path", "to", test_model_1 + ".xml"}); - this->update(test_model_path_1, test_meta_1); + ASSERT_NO_THROW(this->update(test_model_path_1, test_meta_1)); ASSERT_NE(this->model_info.find(test_model_1), this->model_info.end()); ASSERT_EQ(*this->model_info[test_model_1].model_paths.begin(), test_model_path_1); ASSERT_EQ(this->model_info[test_model_1].this_op_cnt, 1); - this->update(test_model_path_1, test_meta_1); + ASSERT_EQ(this->input_info.begin()->second.min_shape, ov::PartialShape({10})); + ASSERT_EQ(this->input_info.begin()->second.max_shape, ov::PartialShape({20})); + ASSERT_NO_THROW(this->update(test_model_path_1, test_meta_1)); ASSERT_EQ(this->model_info[test_model_1].model_paths.size(), 1); ASSERT_EQ(this->model_info[test_model_1].this_op_cnt, 2); + ASSERT_EQ(this->input_info.begin()->second.min_shape, ov::PartialShape({10})); + ASSERT_EQ(this->input_info.begin()->second.max_shape, ov::PartialShape({20})); test_model_path_1 = ov::util::path_join({ "path", "to", "test", test_model_1 + ".xml"}); - this->update(test_model_path_1, test_meta_1, 0, 1, "test_extractor"); + ASSERT_NO_THROW(this->update(test_model_path_1, test_meta_1, 0, 1, "test_extractor")); ASSERT_EQ(this->model_info[test_model_1].model_paths.size(), 2); ASSERT_EQ(this->model_info[test_model_1].this_op_cnt, 3); ASSERT_EQ(this->model_info[test_model_1].this_op_cnt, 3); diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/matchers/subgraph/manager.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/matchers/subgraph/manager.cpp index b43ffe33cfb687..76bbeb4769bc08 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/matchers/subgraph/manager.cpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/matchers/subgraph/manager.cpp @@ -88,9 +88,20 @@ TEST_F(ExtractorsManagerTest, match) { ASSERT_FALSE(this->match(test_model_0_1, test_model_1)); } +TEST_F(ExtractorsManagerTest, is_subgraph) { + this->set_extractors(test_map); + ASSERT_NO_THROW(this->is_subgraph(test_model_0_0, test_model_0_1)); + auto is_subgraph = this->is_subgraph(test_model_0_0, test_model_0_1); + ASSERT_TRUE(std::get<0>(is_subgraph)); + ASSERT_NO_THROW(this->is_subgraph(test_model_0_0, test_model_1)); + ASSERT_FALSE(std::get<0>(this->is_subgraph(test_model_0_0, test_model_1))); + ASSERT_NO_THROW(this->is_subgraph(test_model_0_1, test_model_1)); + ASSERT_FALSE(std::get<0>(this->is_subgraph(test_model_0_1, test_model_1))); +} + TEST_F(ExtractorsManagerTest, match_with_in_info) { 
this->set_extractors(test_map); - std::map test_in_info({{"test_parameter_0", InputInfo()}}), test_in_info_1({{"test_parameter_1", InputInfo(1, 2, true)}}); + std::map test_in_info({{"test_parameter_0", InputInfo()}}), test_in_info_1({{"test_parameter_1", InputInfo({}, 1, 2, true)}}); ASSERT_NO_THROW(this->match(test_model_0_0, test_model_0_1, test_in_info, test_in_info)); ASSERT_TRUE(this->match(test_model_0_0, test_model_0_1, test_in_info, test_in_info)); ASSERT_NO_THROW(this->match(test_model_0_0, test_model_0_1, test_in_info, test_in_info_1)); @@ -112,4 +123,12 @@ TEST_F(ExtractorsManagerTest, align_input_info) { ASSERT_EQ(c, test_in_info_ref); } +TEST_F(ExtractorsManagerTest, align_input_info_for_subgraphs) { + std::map test_in_info({{"test_parameter_0", InputInfo()}}), test_in_info_ref({{"test_parameter_1", InputInfo()}}); + ASSERT_NE(test_in_info, test_in_info_ref); + ASSERT_NO_THROW(this->align_input_info(test_model_0_0, test_model_0_1, test_in_info, test_in_info_ref, {{"test_parameter_0", "test_parameter_1"}})); + auto c = this->align_input_info(test_model_0_0, test_model_0_1, test_in_info, test_in_info_ref); + ASSERT_EQ(c, test_in_info_ref); +} + } // namespace diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/matchers/subgraph/subgraph.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/matchers/subgraph/subgraph.cpp index ee72876e447200..7de5706b9e9a06 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/matchers/subgraph/subgraph.cpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/matchers/subgraph/subgraph.cpp @@ -64,10 +64,146 @@ TEST_F(SubgraphExtractorTest, match) { ASSERT_FALSE(this->match(test_model_0_1, test_model_1)); } +TEST_F(SubgraphExtractorTest, match_90_percent) { + { + std::shared_ptr test_parameter = + std::make_shared(ov::element::f32, ov::Shape{1, 2}); + std::shared_ptr test_abs_0 = + std::make_shared(test_parameter); + std::shared_ptr test_abs_1 = + std::make_shared(test_abs_0); + std::shared_ptr test_abs_2 = + std::make_shared(test_abs_1); + std::shared_ptr test_abs_3 = + std::make_shared(test_abs_2); + std::shared_ptr test_abs_4 = + std::make_shared(test_abs_3); + std::shared_ptr test_abs_5 = + std::make_shared(test_abs_4); + std::shared_ptr test_abs_6 = + std::make_shared(test_abs_5); + std::shared_ptr test_abs_7 = + std::make_shared(test_abs_6); + std::shared_ptr test_abs_8 = + std::make_shared(test_abs_7); + std::shared_ptr test_abs_9 = + std::make_shared(test_abs_8); + std::shared_ptr test_abs_10 = + std::make_shared(test_abs_9); + std::shared_ptr test_res = + std::make_shared(test_abs_10); + test_model_0_0 = std::make_shared(ov::ResultVector{test_res}, + ov::ParameterVector{test_parameter}); + } + { + std::shared_ptr test_parameter = + std::make_shared(ov::element::f32, ov::Shape{1, 2}); + std::shared_ptr test_abs_0 = + std::make_shared(test_parameter); + std::shared_ptr test_abs_1 = + std::make_shared(test_abs_0); + std::shared_ptr test_abs_2 = + std::make_shared(test_abs_1); + std::shared_ptr test_abs_3 = + std::make_shared(test_abs_2); + std::shared_ptr test_abs_4 = + std::make_shared(test_abs_3); + std::shared_ptr test_abs_5 = + std::make_shared(test_abs_4); + std::shared_ptr test_abs_6 = + std::make_shared(test_abs_5); + std::shared_ptr test_abs_7 = + std::make_shared(test_abs_6); + std::shared_ptr test_abs_8 = + std::make_shared(test_abs_7); + std::shared_ptr test_abs_9 = + std::make_shared(test_abs_8); + std::shared_ptr test_abs_10 = + 
std::make_shared(test_abs_9); + std::shared_ptr test_res = + std::make_shared(test_abs_10); + test_model_0_1 = std::make_shared(ov::ResultVector{test_res}, + ov::ParameterVector{test_parameter}); + } + { + std::shared_ptr test_parameter = + std::make_shared(ov::element::f32, ov::Shape{1, 2}); + std::shared_ptr test_abs_0 = + std::make_shared(test_parameter); + std::shared_ptr test_abs_1 = + std::make_shared(test_abs_0); + std::shared_ptr test_abs_2 = + std::make_shared(test_abs_1); + std::shared_ptr test_abs_3 = + std::make_shared(test_abs_2); + std::shared_ptr test_abs_4 = + std::make_shared(test_abs_3); + std::shared_ptr test_abs_5 = + std::make_shared(test_abs_4); + std::shared_ptr test_abs_6 = + std::make_shared(test_abs_5); + std::shared_ptr test_abs_7 = + std::make_shared(test_abs_6); + std::shared_ptr test_abs_8 = + std::make_shared(test_abs_7); + std::shared_ptr test_abs_9 = + std::make_shared(test_abs_8); + std::shared_ptr test_abs_10 = + std::make_shared(test_abs_9); + std::shared_ptr test_res = + std::make_shared(test_abs_10); + test_model_1 = std::make_shared(ov::ResultVector{test_res}, + ov::ParameterVector{test_parameter}); + } + ASSERT_NO_THROW(this->match(test_model_0_0, test_model_0_1)); + ASSERT_TRUE(this->match(test_model_0_0, test_model_0_1)); + ASSERT_NO_THROW(this->match(test_model_0_0, test_model_1)); + ASSERT_FALSE(this->match(test_model_0_0, test_model_1)); + ASSERT_NO_THROW(this->match(test_model_0_1, test_model_1)); + ASSERT_FALSE(this->match(test_model_0_1, test_model_1)); +} + TEST_F(SubgraphExtractorTest, extract) { ASSERT_NO_THROW(this->extract(test_model_0_0)); ASSERT_NO_THROW(this->extract(test_model_0_1)); ASSERT_NO_THROW(this->extract(test_model_1)); } +TEST_F(SubgraphExtractorTest, is_subgraph) { + auto is_subgraph = this->is_subgraph(test_model_0_0, test_model_0_0); + ASSERT_NO_THROW(this->is_subgraph(test_model_0_0, test_model_0_0)); + ASSERT_TRUE(std::get<0>(is_subgraph)); + ASSERT_NO_THROW(this->is_subgraph(test_model_0_0, test_model_1)); + is_subgraph = this->is_subgraph(test_model_0_0, test_model_1); + ASSERT_FALSE(std::get<0>(is_subgraph)); + ASSERT_NO_THROW(this->is_subgraph(test_model_0_1, test_model_1)); + is_subgraph = this->is_subgraph(test_model_0_1, test_model_1); + ASSERT_FALSE(std::get<0>(is_subgraph)); + { + std::shared_ptr test_parameter = + std::make_shared(ov::element::f32, ov::Shape{1, 2}); + std::shared_ptr test_abs_0 = + std::make_shared(test_parameter); + std::shared_ptr test_abs_1 = + std::make_shared(test_abs_0); + std::shared_ptr test_res = + std::make_shared(test_abs_1); + auto big_model_0 = std::make_shared(ov::ResultVector{test_res}, + ov::ParameterVector{test_parameter}); + is_subgraph = this->is_subgraph(test_model_0_0, big_model_0); + ASSERT_NO_THROW(this->is_subgraph(test_model_0_0, big_model_0)); + ASSERT_TRUE(std::get<0>(is_subgraph)); + ASSERT_EQ(std::get<1>(is_subgraph), big_model_0); + ASSERT_EQ(std::get<2>(is_subgraph), test_model_0_0); + + is_subgraph = this->is_subgraph(test_model_0_1, big_model_0); + ASSERT_NO_THROW(this->is_subgraph(test_model_0_1, big_model_0)); + ASSERT_TRUE(std::get<0>(is_subgraph)); + ASSERT_EQ(std::get<1>(is_subgraph), big_model_0); + ASSERT_EQ(std::get<2>(is_subgraph), test_model_0_1); + ASSERT_NO_THROW(this->is_subgraph(test_model_1, big_model_0)); + ASSERT_FALSE(std::get<0>(this->is_subgraph(test_model_1, big_model_0))); + } +} + } // namespace diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/utils/node.cpp 
b/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/utils/node.cpp index 675ac51153966e..705662f718d632 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/utils/node.cpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/utils/node.cpp @@ -28,8 +28,8 @@ TEST_F(NodeUtilsTest, get_input_info_by_node) { auto add_node = std::make_shared(param, const_node); std::map ref_test_info = { - { "const_0", InputInfo(-3.65, 7, true) }, - { "param_0", InputInfo() }, + { "const_0", InputInfo({2, 3}, -3.65, 7, true) }, + { "param_0", InputInfo({2, 3}) }, }; std::map orig_test_info = get_input_info_by_node(add_node); ASSERT_EQ(ref_test_info, orig_test_info); diff --git a/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/src/read_ir/read_ir_tests.cpp b/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/src/read_ir/read_ir_tests.cpp index 35594f890a7a7c..9d8b4bcc57108f 100644 --- a/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/src/read_ir/read_ir_tests.cpp +++ b/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/src/read_ir/read_ir_tests.cpp @@ -21,7 +21,6 @@ TEST_P(ReadIRTest, Inference) { run(); } -// temporarty disable to provide correct numbers for release TEST_P(ReadIRTest, QueryModel) { query_model(); } diff --git a/src/tests/test_utils/functional_test_utils/layer_tests_summary/rename_conformance_ir.py b/src/tests/test_utils/functional_test_utils/layer_tests_summary/rename_conformance_ir.py index 371f79c8e281e5..83dfe0eb1ca15a 100644 --- a/src/tests/test_utils/functional_test_utils/layer_tests_summary/rename_conformance_ir.py +++ b/src/tests/test_utils/functional_test_utils/layer_tests_summary/rename_conformance_ir.py @@ -137,6 +137,7 @@ def create_hash(in_dir_path: Path, operations=dict()): core = Core() models = in_dir_path.rglob("*.xml") models = sorted(models) + model_prefix = os.path.commonprefix(models) for model_path in models: bin_path = model_path.with_suffix(BIN_EXTENSION) meta_path = model_path.with_suffix(META_EXTENSION) @@ -156,6 +157,15 @@ def create_hash(in_dir_path: Path, operations=dict()): if is_report_op(op_name): if not op_name in operations.keys(): operations.update({op_name: TestStructure()}) + # add op/subgraphs, dynamic/static and extractor_name to hash + model_dir, _ = os.path.split(model_path) + model_dir = str(model_dir).replace(model_prefix, "") + if op_name in model_dir: + model_dir = model_dir[:model_dir.find(op_name):] + model_dir = model_dir[:-1:] + model_dir = model_dir.replace(os.path.sep, "_") + str_to_hash += model_dir + # upgrade expected rel passrates files if "static" in str(model_path): operations[op_name].static += rel_weight elif "dynamic" in str(model_path): @@ -170,8 +180,11 @@ def create_hash(in_dir_path: Path, operations=dict()): logger.error(f"Impossible to create hash for {model_path}") try: - input_info = ET.parse(meta_path).getroot().find("input_info") - str_to_hash += ET.tostring(input_info).decode('utf8').replace('\t', '') + # check only parameters/constant structures + for input in ET.parse(meta_path).getroot().find("input_info"): + for attrib in input.attrib: + if attrib == "convert_to_const": + str_to_hash += input.attrib.get(attrib) except: logger.error(f"Impossible to add input_info to hash for {model_path}") @@ -187,7 +200,7 @@ def create_hash(in_dir_path: Path, operations=dict()): meta_path.rename(new_meta_path) bin_path.rename(new_bin_path) # TODO: if some models are still not renaming, create 
new file and remove old file - logger.info(f"{old_name} -> {new_name}") + # logger.info(f"{old_name} -> {new_name}") elif old_name != new_xml_path: # TODO: if some models are still not renaming and there are duplicates, remove files here logger.warning(f"Could not rename model {old_name} ! Model file name already exists {new_xml_path} ") @@ -236,7 +249,7 @@ def save_rel_weights(rel_weights_dir:Path, operations: dict): if not Path(in_dir).is_dir(): logger.error(f"Directory {in_dir} is not exist!") continue - logger.info(f"Starting to rename models in {in_dir}") + # logger.info(f"Starting to rename models in {in_dir}") operations = create_hash(Path(in_dir), operations) if not rel_weights_dir is None: From 8b089b60cdc32e8396bca67c82040bc5e985d9e1 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Wed, 4 Oct 2023 22:51:15 +0400 Subject: [PATCH 068/257] Removed Azure pipelines migrated to GHA (#20245) --- .ci/azure/android_arm64.yml | 198 --------- .ci/azure/linux_arm64.yml | 236 ----------- .ci/azure/linux_conditional_compilation.yml | 172 -------- .ci/azure/linux_cuda.yml | 139 ------- .ci/azure/linux_debian.yml | 428 -------------------- .ci/azure/linux_lohika.yml | 72 ---- .ci/azure/linux_onnxruntime.yml | 207 ---------- 7 files changed, 1452 deletions(-) delete mode 100644 .ci/azure/android_arm64.yml delete mode 100644 .ci/azure/linux_arm64.yml delete mode 100644 .ci/azure/linux_conditional_compilation.yml delete mode 100644 .ci/azure/linux_cuda.yml delete mode 100644 .ci/azure/linux_debian.yml delete mode 100644 .ci/azure/linux_lohika.yml delete mode 100644 .ci/azure/linux_onnxruntime.yml diff --git a/.ci/azure/android_arm64.yml b/.ci/azure/android_arm64.yml deleted file mode 100644 index 4f4b602ddd8f27..00000000000000 --- a/.ci/azure/android_arm64.yml +++ /dev/null @@ -1,198 +0,0 @@ -trigger: - branches: - include: - - 'master' - - 'releases/*' - paths: - exclude: - - '*/docs/*' - - 'docs/*' - - '*/*.md' - - '*.md' - - '*/layer_tests_summary/*' - - '*/conformance/*' - - 'tools/*' - - 'tests/layer_tests/*' - -pr: - drafts: 'false' - branches: - include: - - 'master' - - 'releases/*' - paths: - exclude: - - '*/docs/*' - - 'docs/*' - - '*/*.md' - - '*.md' - - '*/layer_tests_summary/*' - - '*/conformance/*' - - 'tools/*' - - 'tests/layer_tests/*' - -resources: - repositories: - - repository: vcpkg - type: github - endpoint: openvinotoolkit - name: microsoft/vcpkg - -variables: - - group: github - -jobs: -- job: android_arm64 - # About 150% of total time - timeoutInMinutes: '120' - - pool: - name: LIN_VMSS_VENV_F16S_U20_WU2 - - variables: - system.debug: true - VSTS_HTTP_RETRY: 5 - VSTS_HTTP_TIMEOUT: 200 - BUILD_TYPE: Debug - OPENVINO_REPO_DIR: $(Build.Repository.LocalPath) - VCPKG_ROOT: $(OPENVINO_REPO_DIR)/../vcpkg - WORK_DIR: $(Pipeline.Workspace)/_w - BUILD_DIR: $(WORK_DIR)/build - ANDROID_TOOLS: $(WORK_DIR)/android_tools - ANDROID_NDK_HOME: $(WORK_DIR)/android_tools/ndk-bundle - ANDROID_SDK_VERSION: 29 - ANDROID_ABI_CONFIG: arm64-v8a - TMP_DIR: /mnt/tmp - SHARE_DIR: /mount/cinfsshare/onnxtestdata - CCACHE_DIR: $(SHARE_DIR)/ccache/master/android_arm64 - LD_LIBRARY_PATH: $(Agent.ToolsDirectory)/Python/$(OV_PYTHON_VERSION)/x64/lib - OV_PYTHON_VERSION: 3.11.2 # Full version of Python its required for LD_LIBRARY_PATH. 
More details https://github.com/microsoft/azure-pipelines-tool-lib/blob/master/docs/overview.md#tool-cache - - steps: - - task: UsePythonVersion@0 - inputs: - versionSpec: '$(OV_PYTHON_VERSION)' # Setting only major & minor version will download latest release from GH repo example 3.10 will be 3.10.10. - addToPath: true - disableDownloadFromRegistry: false - architecture: 'x64' - githubToken: $(auth_token) - displayName: Setup Python 3.11 - name: setupPython - - bash: | - #!/bin/bash - python -V - - - script: | - curl -H Metadata:true --noproxy "*" "http://169.254.169.254/metadata/instance?api-version=2019-06-01" - whoami - uname -a - echo ls /usr/bin/python3.10 - rm -rf /usr/bin/python3 - sudo ln -s /usr/bin/python3.10 /usr/bin/python3 - echo Python3 info ; which python3 ; python3 --version - echo Python info ; which python ; python --version - echo Java info ; which java ; java -version - echo gcc info ; which gcc ; gcc --version - echo cmake info ; which cmake ; cmake --version - lsb_release - env - cat /proc/cpuinfo - cat /proc/meminfo - cat /etc/fstab - vmstat -s - df - lsblk -o NAME,HCTL,SIZE,MOUNTPOINT | grep -i "sd" - free -h - displayName: 'System information' - - - script: | - set -e - rm -rf $(WORK_DIR) ; mkdir $(WORK_DIR) - rm -rf $(BUILD_DIR) ; mkdir $(BUILD_DIR) - rm -rf $(ANDROID_TOOLS) ; mkdir $(ANDROID_TOOLS) - sudo rm -rf $(TMP_DIR) ; sudo mkdir $(TMP_DIR) ; sudo chmod 777 -R $(TMP_DIR) - sudo mkdir -p $(SHARE_DIR) - sudo apt --assume-yes update && sudo apt --assume-yes install nfs-common - sudo mount -vvv -t nfs cinfsshare.file.core.windows.net:/cinfsshare/onnxtestdata $(SHARE_DIR) -o vers=4,minorversion=1,sec=sys - mkdir -p $(CCACHE_DIR) - displayName: 'Make dir' - - - checkout: self - submodules: 'true' - clean: 'true' - path: openvino - - - checkout: vcpkg - clean: 'true' - path: vcpkg - - - script: | - set -e - # generic dependencies - sudo -E apt --assume-yes install ccache scons default-jdk python3-pip ninja-build - # vcpkg requires cmake 3.19 or later - python3 -m pip install -U pip cmake - # vcpkg's tool dependencies - sudo -E apt --assume-yes install curl zip unzip tar - # vcpkg 'python3' port dependencies - sudo -E apt --assume-yes install autoconf libtool autoconf-archive - # vcpkg tree of dependencies require extra packages - sudo -E apt --assume-yes install pkg-config linux-libc-dev - # Install Android SDK, NDK and Tools - sudo apt -y --no-install-recommends install unzip - wget https://dl.google.com/android/repository/commandlinetools-linux-7583922_latest.zip - unzip commandlinetools-linux-7583922_latest.zip - yes | ./cmdline-tools/bin/sdkmanager --sdk_root=$(ANDROID_TOOLS) --licenses - ./cmdline-tools/bin/sdkmanager --sdk_root=$(ANDROID_TOOLS) --install "ndk-bundle" "platform-tools" "platforms;android-$(ANDROID_SDK_VERSION)" - displayName: 'Install dependencies' - - - script: | - set -e - $(VCPKG_ROOT)/bootstrap-vcpkg.sh --disableMetrics - # patch vcpkg default (community) toolchain to build only Release configuration - echo "set(VCPKG_BUILD_TYPE release)" >> $(VCPKG_ROOT)/triplets/community/arm64-android.cmake - displayName: 'Build vcpkg' - - - task: CMake@1 - inputs: - cmakeArgs: > - -G Ninja - -DCMAKE_VERBOSE_MAKEFILE=ON - -DCMAKE_BUILD_TYPE=$(BUILD_TYPE) - -DVCPKG_TARGET_TRIPLET=arm64-android - -DVCPKG_HOST_TRIPLET=x64-linux-release - -DCMAKE_TOOLCHAIN_FILE=$(VCPKG_ROOT)/scripts/buildsystems/vcpkg.cmake - -DVCPKG_CHAINLOAD_TOOLCHAIN_FILE=$(ANDROID_NDK_HOME)/build/cmake/android.toolchain.cmake - -DCMAKE_COMPILE_WARNING_AS_ERROR=ON - 
-DANDROID_ABI=$(ANDROID_ABI_CONFIG) - -DANDROID_PLATFORM=$(ANDROID_SDK_VERSION) - -DENABLE_PYTHON=OFF - -DENABLE_SYSTEM_OPENCL=ON - -DENABLE_SYSTEM_PROTOBUF=ON - -DENABLE_SYSTEM_PUGIXML=ON - -DENABLE_SYSTEM_SNAPPY=ON - -DENABLE_SYSTEM_TBB=ON - -DENABLE_SYSTEM_FLATBUFFERS=ON - -DENABLE_INTEL_GPU=ON - -DENABLE_TESTS=ON - -DCMAKE_CXX_COMPILER_LAUNCHER=ccache - -DCMAKE_C_COMPILER_LAUNCHER=ccache - -S $(OPENVINO_REPO_DIR) - -B $(BUILD_DIR) - - - script: ccache --zero-stats --max-size=50G --show-config - displayName: 'Clean ccache stats' - - - script: cmake --build $(BUILD_DIR) --parallel --config $(BUILD_TYPE) - env: - CCACHE_DIR: $(CCACHE_DIR) - CCACHE_TEMPDIR: $(TMP_DIR)/ccache - CCACHE_BASEDIR: $(Pipeline.Workspace) - CCACHE_MAXSIZE: 50G - displayName: 'Build Android ARM64' - - - script: ccache --show-stats - displayName: 'Show ccache stats' - - - script: ls -alR $(OPENVINO_REPO_DIR)/bin/ - displayName: 'List binary files' diff --git a/.ci/azure/linux_arm64.yml b/.ci/azure/linux_arm64.yml deleted file mode 100644 index 11f9e545f4bdd8..00000000000000 --- a/.ci/azure/linux_arm64.yml +++ /dev/null @@ -1,236 +0,0 @@ -trigger: - branches: - include: - - 'master' - - 'releases/*' - paths: - exclude: - - '*/docs/*' - - 'docs/*' - - '*/*.md' - - '*.md' - - '*/layer_tests_summary/*' - - '*/conformance/*' - - 'tools/*' - - 'tests/layer_tests/*' - -pr: - drafts: 'false' - branches: - include: - - 'master' - - 'releases/*' - paths: - exclude: - - '*/docs/*' - - 'docs/*' - - '*/*.md' - - '*.md' - - '*/layer_tests_summary/*' - - '*/conformance/*' - - 'tools/*' - - 'tests/layer_tests/*' - -variables: - - group: github - -jobs: -- job: linux_arm64 - # About 150% of total time - timeoutInMinutes: '120' - - pool: - name: LIN_VMSS_VENV_F16S_U20_WU2 - - variables: - system.debug: true - VSTS_HTTP_RETRY: 5 - VSTS_HTTP_TIMEOUT: 200 - NUM_PROC: 2 - BUILD_TYPE: Release - OPENVINO_REPO_DIR: $(Build.Repository.LocalPath) - BUILD_OPENVINO: $(WORK_DIR)/build - INSTALL_OPENVINO: $(WORK_DIR)/install_openvino - WORK_DIR: $(Pipeline.Workspace)/_w - SHARE_DIR: /mount/cinfsshare/onnxtestdata - TMP_DIR: /mnt/tmp - OPENVINO_CCACHE_DIR: $(SHARE_DIR)/ccache/master/linux_arm64 - LD_LIBRARY_PATH: $(Agent.ToolsDirectory)/Python/$(OV_PYTHON_VERSION)/x64/lib - OV_PYTHON_VERSION_MAJOR_MINOR: 3.11 - OV_PYTHON_VERSION: $(OV_PYTHON_VERSION_MAJOR_MINOR).2 # Full version of Python its required for LD_LIBRARY_PATH. More details https://github.com/microsoft/azure-pipelines-tool-lib/blob/master/docs/overview.md#tool-cache - - steps: - - task: UsePythonVersion@0 - inputs: - versionSpec: '$(OV_PYTHON_VERSION)' # Setting only major & minor version will download latest release from GH repo example 3.10 will be 3.10.10. 
- addToPath: true - disableDownloadFromRegistry: false - architecture: 'x64' - githubToken: $(auth_token) - displayName: Setup Python 3.11 - name: setupPython - - bash: | - #!/bin/bash - python -V - - - script: | - curl -H Metadata:true --noproxy "*" "http://169.254.169.254/metadata/instance?api-version=2019-06-01" - whoami - uname -a - echo Python3 info ; which python3 ; python3 --version - echo Python info ; which python ; python --version - echo Java info ; which java ; java -version - echo gcc info ; which gcc ; gcc --version - echo cmake info ; which cmake ; cmake --version - lsb_release - env - cat /proc/cpuinfo - cat /proc/meminfo - cat /etc/fstab - vmstat -s - df - lsblk -o NAME,HCTL,SIZE,MOUNTPOINT | grep -i "sd" - free -h - echo "##vso[task.setvariable variable=NUM_PROC]$(nproc --all)" - echo "NUM_PROC=$(NUM_PROC)" - displayName: 'System information' - - - script: | - set -e - rm -rf $(WORK_DIR) ; mkdir $(WORK_DIR) - mkdir -p $(BUILD_OPENVINO) - mkdir -p $(INSTALL_OPENVINO) - sudo rm -rf $(TMP_DIR) ; sudo mkdir $(TMP_DIR) ; sudo chmod 777 -R $(TMP_DIR) - sudo mkdir -p $(SHARE_DIR) - sudo apt --assume-yes update && sudo apt --assume-yes install nfs-common - sudo mount -vvv -t nfs cinfsshare.file.core.windows.net:/cinfsshare/onnxtestdata $(SHARE_DIR) -o vers=4,minorversion=1,sec=sys - mkdir -p $(OPENVINO_CCACHE_DIR) - displayName: 'Make directories' - - - checkout: self - clean: 'true' - path: openvino - - - script: | - set -e - python3 -m pip install --upgrade pip - python3 -m pip install cmake - python3 -m pip install -r $(OPENVINO_REPO_DIR)/src/bindings/python/requirements.txt - python3 -m pip install -r $(OPENVINO_REPO_DIR)/src/bindings/python/wheel/requirements-dev.txt - python3 -m pip install -r $(OPENVINO_REPO_DIR)/src/bindings/python/src/compatibility/openvino/requirements-dev.txt - # install dependencies needed to build CPU plugin for ARM - sudo -E apt --assume-yes install scons gcc-10-aarch64-linux-gnu g++-10-aarch64-linux-gnu - # generic dependencies - sudo -E apt --assume-yes install cmake ccache ninja-build unzip fdupes - displayName: 'Install build dependencies' - - - script: | - set -e - echo deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ focal main restricted > arm64-sources.list - echo deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ focal-updates main restricted >> arm64-sources.list - echo deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ focal universe >> arm64-sources.list - echo deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ focal-updates universe >> arm64-sources.list - echo deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ focal multiverse >> arm64-sources.list - echo deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ focal-updates multiverse >> arm64-sources.list - echo deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ focal-backports main restricted universe multiverse >> arm64-sources.list - echo deb [arch=amd64] http://security.ubuntu.com/ubuntu/ focal-security main restricted >> arm64-sources.list - echo deb [arch=amd64] http://security.ubuntu.com/ubuntu/ focal-security universe >> arm64-sources.list - echo deb [arch=amd64] http://security.ubuntu.com/ubuntu/ focal-security multiverse >> arm64-sources.list - echo deb [arch=arm64] http://ports.ubuntu.com/ubuntu-ports/ focal main >> arm64-sources.list - echo deb [arch=arm64] http://ports.ubuntu.com/ubuntu-ports/ focal universe >> arm64-sources.list - echo deb [arch=arm64] http://ports.ubuntu.com/ubuntu-ports/ focal-updates main >> arm64-sources.list - echo deb [arch=arm64] 
http://ports.ubuntu.com/ubuntu-ports/ focal-security main >> arm64-sources.list - sudo mv arm64-sources.list /etc/apt/sources.list.d/ - sudo -E dpkg --add-architecture arm64 - sudo -E apt-get update -o Dir::Etc::sourcelist=/etc/apt/sources.list.d/arm64-sources.list - sudo -E apt-get install -y --no-install-recommends libpython3-dev:arm64 - displayName: 'Install arm64 libraries' - - - script: | - set -e - git submodule update --init -- $(OPENVINO_REPO_DIR)/src/plugins - git submodule update --init -- $(OPENVINO_REPO_DIR)/thirdparty/gtest - git submodule update --init -- $(OPENVINO_REPO_DIR)/thirdparty/open_model_zoo - displayName: 'Init submodules for non Conan dependencies' - - - script: | - set -e - python3 -m pip install conan - # install build profile compilers - sudo -E apt --assume-yes install gcc g++ - # generate build profile - conan profile detect - # generate host profile for linux_arm64 - echo "include(default)" > $(BUILD_OPENVINO)/linux_arm64 - echo "[buildenv]" >> $(BUILD_OPENVINO)/linux_arm64 - echo "CC=aarch64-linux-gnu-gcc-10" >> $(BUILD_OPENVINO)/linux_arm64 - echo "CXX=aarch64-linux-gnu-g++-10" >> $(BUILD_OPENVINO)/linux_arm64 - # install OpenVINO dependencies - conan install $(OPENVINO_REPO_DIR)/conanfile.txt \ - -pr:h $(BUILD_OPENVINO)/linux_arm64 \ - -s:h arch=armv8 \ - -of $(BUILD_OPENVINO)/dependencies \ - -b missing - env: - CMAKE_CXX_COMPILER_LAUNCHER: ccache - CMAKE_C_COMPILER_LAUNCHER: ccache - CCACHE_DIR: $(OPENVINO_CCACHE_DIR) - CCACHE_TEMPDIR: $(TMP_DIR)/ccache - CCACHE_BASEDIR: $(Pipeline.Workspace) - CCACHE_MAXSIZE: 50G - displayName: 'Install conan and dependencies' - - - script: | - set -e - source $(BUILD_OPENVINO)/dependencies/conanbuild.sh - cmake \ - -DCMAKE_VERBOSE_MAKEFILE=ON \ - -DBUILD_SHARED_LIBS=OFF \ - -DCMAKE_COMPILE_WARNING_AS_ERROR=ON \ - -DENABLE_CPPLINT=ON \ - -DENABLE_INTEL_GPU=ON \ - -DENABLE_PYTHON=ON \ - -DENABLE_WHEEL=ON \ - -DPYTHON_MODULE_EXTENSION=$(aarch64-linux-gnu-python3-config --extension-suffix) \ - -DPYBIND11_PYTHON_EXECUTABLE_LAST=$(Agent.ToolsDirectory)/Python/$(OV_PYTHON_VERSION)/x64/bin/python$(OV_PYTHON_VERSION_MAJOR_MINOR) \ - -DPython3_EXECUTABLE=$(Agent.ToolsDirectory)/Python/$(OV_PYTHON_VERSION)/x64/bin/python$(OV_PYTHON_VERSION_MAJOR_MINOR) \ - -DPython3_INCLUDE_DIR=$(Agent.ToolsDirectory)/Python/$(OV_PYTHON_VERSION)/x64/include/python$(OV_PYTHON_VERSION_MAJOR_MINOR) \ - -DENABLE_TESTS=ON \ - -DENABLE_SYSTEM_TBB=ON \ - -DENABLE_SYSTEM_PROTOBUF=ON \ - -DENABLE_SYSTEM_SNAPPY=ON \ - -DENABLE_SYSTEM_PUGIXML=ON \ - -DCMAKE_CXX_COMPILER_LAUNCHER=ccache \ - -DCMAKE_C_COMPILER_LAUNCHER=ccache \ - -DARM_COMPUTE_SCONS_JOBS=$(NUM_PROC) \ - -DCMAKE_INSTALL_PREFIX=$(INSTALL_OPENVINO) \ - -DCMAKE_BUILD_TYPE=$(BUILD_TYPE) \ - -DENABLE_PYTHON_PACKAGING=ON \ - -S $(OPENVINO_REPO_DIR) \ - -B $(BUILD_OPENVINO) - source $(BUILD_OPENVINO)/dependencies/deactivate_conanbuild.sh - env: - CMAKE_GENERATOR: Ninja - CMAKE_TOOLCHAIN_FILE: $(BUILD_OPENVINO)/dependencies/conan_toolchain.cmake - displayName: 'CMake configure' - - - script: cmake --build $(BUILD_OPENVINO) --parallel --config $(BUILD_TYPE) - env: - CCACHE_DIR: $(OPENVINO_CCACHE_DIR) - CCACHE_TEMPDIR: $(TMP_DIR)/ccache - CCACHE_BASEDIR: $(Pipeline.Workspace) - CCACHE_MAXSIZE: 50G - displayName: 'Build OpenVINO Runtime' - - - script: cmake --build $(BUILD_OPENVINO) --parallel --config $(BUILD_TYPE) --target install - displayName: 'Install OpenVINO Runtime' - - - script: | - set -e - source $(BUILD_OPENVINO)/dependencies/conanbuild.sh - $(INSTALL_OPENVINO)/samples/cpp/build_samples.sh - 
source $(BUILD_OPENVINO)/dependencies/deactivate_conanbuild.sh - env: - CMAKE_GENERATOR: Ninja - CMAKE_TOOLCHAIN_FILE: $(BUILD_OPENVINO)/dependencies/conan_toolchain.cmake - displayName: 'Build OpenVINO C++ samples' diff --git a/.ci/azure/linux_conditional_compilation.yml b/.ci/azure/linux_conditional_compilation.yml deleted file mode 100644 index 7c463d0828a0ac..00000000000000 --- a/.ci/azure/linux_conditional_compilation.yml +++ /dev/null @@ -1,172 +0,0 @@ -trigger: - branches: - include: - - 'master' - - 'releases/*' - paths: - exclude: - - '*/docs/*' - - 'docs/*' - - '*/*.md' - - '*.md' - - '*/layer_tests_summary/*' - - '*/conformance/*' - - 'tools/*' - -pr: - drafts: 'false' - branches: - include: - - 'master' - - 'releases/*' - paths: - exclude: - - '*/docs/*' - - 'docs/*' - - '*/*.md' - - '*.md' - - '*/layer_tests_summary/*' - - '*/conformance/*' - - 'tools/*' - -resources: - repositories: - - repository: testdata - type: github - endpoint: openvinotoolkit - name: openvinotoolkit/testdata - ref: master - -variables: - - group: github - -jobs: -- job: LinCC - # About 150% of total time - timeoutInMinutes: '90' - - pool: - name: LIN_VMSS_VENV_F16S_U20_WU2 - - variables: - system.debug: true - VSTS_HTTP_RETRY: 5 - VSTS_HTTP_TIMEOUT: 200 - BUILD_TYPE: Release - REPO_DIR: $(Build.Repository.LocalPath) - MODELS_PATH: $(REPO_DIR)/../testdata - WORK_DIR: $(Pipeline.Workspace)/_w - BUILD_DIR: $(WORK_DIR)/build - INSTALL_DIR: $(WORK_DIR)/install_pkg - SETUPVARS: $(INSTALL_DIR)/setupvars.sh - LD_LIBRARY_PATH: $(Agent.ToolsDirectory)/Python/$(OV_PYTHON_VERSION)/x64/lib - OV_PYTHON_VERSION: 3.11.2 # Full version of Python its required for LD_LIBRARY_PATH. More details https://github.com/microsoft/azure-pipelines-tool-lib/blob/master/docs/overview.md#tool-cache - - steps: - - task: UsePythonVersion@0 - inputs: - versionSpec: '$(OV_PYTHON_VERSION)' # Setting only major & minor version will download latest release from GH repo example 3.10 will be 3.10.10. 
- addToPath: true - disableDownloadFromRegistry: false - architecture: 'x64' - githubToken: $(auth_token) - displayName: Setup Python 3.11 - name: setupPython - - bash: | - #!/bin/bash - python -V - - - script: | - curl -H Metadata:true --noproxy "*" "http://169.254.169.254/metadata/instance?api-version=2019-06-01" - whoami - uname -a - echo Python3 info ; which python3 ; python3 --version - echo Python info ; which python ; python --version - echo Java info ; which java ; java -version - echo gcc info ; which gcc ; gcc --version - echo cmake info ; which cmake ; cmake --version - lsb_release - env - cat /proc/cpuinfo - cat /proc/meminfo - cat /etc/fstab - vmstat -s - df - lsblk -o NAME,HCTL,SIZE,MOUNTPOINT | grep -i "sd" - free -h - displayName: 'System info' - - - script: | - set -e - rm -rf $(WORK_DIR) ; mkdir $(WORK_DIR) - rm -rf $(BUILD_DIR) ; mkdir $(BUILD_DIR) - displayName: 'Make dir' - - - checkout: self - clean: 'true' - submodules: 'true' - path: openvino - - - script: | - set -e - sudo -E $(REPO_DIR)/install_build_dependencies.sh - # Speed up build - sudo apt -y --no-install-recommends install unzip - wget https://github.com/ninja-build/ninja/releases/download/v1.10.2/ninja-linux.zip - unzip ninja-linux.zip - sudo cp -v ninja /usr/local/bin/ - displayName: 'Install dependencies' - - - checkout: testdata - clean: 'true' - lfs: 'true' - path: testdata - - - task: CMake@1 - inputs: - cmakeArgs: > - -G "Ninja Multi-Config" - -DENABLE_CPPLINT=OFF - -DENABLE_GAPI_PREPROCESSING=OFF - -DCMAKE_VERBOSE_MAKEFILE=ON - -DCMAKE_COMPILE_WARNING_AS_ERROR=ON - -DENABLE_FASTER_BUILD=ON - -DENABLE_PROFILING_ITT=ON - -DSELECTIVE_BUILD=COLLECT - -S $(REPO_DIR) - -B $(BUILD_DIR) - displayName: 'Cmake CC COLLECT' - - - script: cmake --build $(BUILD_DIR) --parallel --config $(BUILD_TYPE) --target openvino_intel_cpu_plugin openvino_ir_frontend benchmark_app sea_itt_lib - displayName: 'Build CC COLLECT' - - - script: ls -alR $(REPO_DIR)/bin/ - displayName: 'List bin files' - - - script: | - set -e - python3 $(REPO_DIR)/thirdparty/itt_collector/runtool/sea_runtool.py \ - --bindir $(REPO_DIR)/bin/intel64/Release -o $(BUILD_DIR)/itt_stat ! 
\ - $(REPO_DIR)/bin/intel64/Release/benchmark_app -niter 1 -nireq 1 \ - -m $(MODELS_PATH)/models/test_model/test_model_fp32.xml -d CPU - displayName: 'Code usage analysis' - - - task: CMake@1 - inputs: - cmakeArgs: > - -DSELECTIVE_BUILD=ON - -DSELECTIVE_BUILD_STAT=$(BUILD_DIR)/*.csv - -B $(BUILD_DIR) - -S $(REPO_DIR) - displayName: 'CMake CC ON' - - - script: cmake --build $(BUILD_DIR) --parallel --config $(BUILD_TYPE) --target openvino_intel_cpu_plugin openvino_ir_frontend - displayName: 'Build CC ON' - - - script: ls -alR $(REPO_DIR)/bin/ - displayName: 'List bin files ON' - - - script: | - $(REPO_DIR)/bin/intel64/Release/benchmark_app -niter 1 -nireq 1 \ - -m $(MODELS_PATH)/models/test_model/test_model_fp32.xml -d CPU - displayName: 'Use OpenVINO after CC' diff --git a/.ci/azure/linux_cuda.yml b/.ci/azure/linux_cuda.yml deleted file mode 100644 index 09e75560937229..00000000000000 --- a/.ci/azure/linux_cuda.yml +++ /dev/null @@ -1,139 +0,0 @@ -trigger: - branches: - include: - - 'master' - - 'releases/*' - paths: - exclude: - - '*/docs/*' - - 'docs/*' - - '*/*.md' - - '*.md' - - '*/layer_tests_summary/*' - - '*/conformance/*' - - 'tools/*' - - 'tests/layer_tests/*' - -pr: - drafts: 'false' - branches: - include: - - 'master' - - 'releases/*' - paths: - exclude: - - '*/docs/*' - - 'docs/*' - - '*/*.md' - - '*.md' - - '*/layer_tests_summary/*' - - '*/conformance/*' - - 'tools/*' - - 'tests/layer_tests/*' - -resources: - repositories: - - repository: openvino_contrib - type: github - endpoint: openvinotoolkit - name: openvinotoolkit/openvino_contrib - ref: master - -jobs: -- job: CUDAPlugin_Lin - timeoutInMinutes: '60' - - pool: - name: LIN_VMSS_VENV_F16S_U20_WU2 - - variables: - system.debug: true - VSTS_HTTP_RETRY: 5 - VSTS_HTTP_TIMEOUT: 200 - BUILD_TYPE: Release - HOME_DIR: $(Agent.HomeDirectory) - REPO_DIR: $(Build.Repository.LocalPath) - OPENVINO_REPO_DIR: $(REPO_DIR)/../openvino - WORK_DIR: $(Pipeline.Workspace)/_w - BUILD_DIR: $(WORK_DIR)/build - BIN_DIR: $(OPENVINO_REPO_DIR)/bin/intel64/$(BUILD_TYPE) - INSTALL_DIR: $(WORK_DIR)/install_pkg - SETUPVARS: $(INSTALL_DIR)/setupvars.sh - GRADLE_VER: 7.1.1 - - steps: - - script: | - curl -H Metadata:true --noproxy "*" "http://169.254.169.254/metadata/instance?api-version=2019-06-01" - echo # prev line output doesn't end with eol - whoami - uname -a - echo Python3 info ; which python3 ; python3 --version - echo Python info ; which python ; python --version - echo Java info ; which java ; java -version - echo gcc info ; which gcc ; gcc --version - echo cmake info ; which cmake ; cmake --version - lsb_release - env - cat /proc/cpuinfo - cat /proc/meminfo - cat /etc/fstab - vmstat -s - df - lsblk -o NAME,HCTL,SIZE,MOUNTPOINT | grep -i "sd" - free -h - displayName: 'System info' - - - script: | - rm -rf $(WORK_DIR) ; mkdir $(WORK_DIR) - rm -rf $(BUILD_DIR) ; mkdir $(BUILD_DIR) - displayName: 'Make dir' - - - checkout: self - clean: 'true' - submodules: 'true' - path: openvino - - - checkout: openvino_contrib - clean: 'true' - submodules: 'true' - path: openvino_contrib - - - script: | - set -e - curl -fsSL https://get.docker.com -o get-docker.sh - sudo sh get-docker.sh - # Speed up build - sudo apt --assume-yes install unzip - wget https://github.com/ninja-build/ninja/releases/download/v1.10.2/ninja-linux.zip - unzip ninja-linux.zip - displayName: 'Install dependencies' - - - script: | - set -e - sudo docker pull openvino.azurecr.io/openvino_ci/cuda-ubuntu2004:2022.1 - sudo docker run --volume $(REPO_DIR)/../:/root/repos --volume 
$(WORK_DIR):/root/w \ - openvino.azurecr.io/openvino_ci/cuda-ubuntu2004:2022.1 \ - bash -c " - sudo -E /root/repos/openvino/install_build_dependencies.sh && - python3 -m pip install -r /root/repos/openvino/src/bindings/python/requirements.txt && - cmake -GNinja \ - -DCMAKE_VERBOSE_MAKEFILE=ON \ - -DENABLE_CPPLINT=OFF \ - -DCMAKE_BUILD_TYPE=$(BUILD_TYPE) \ - -DOPENVINO_EXTRA_MODULES=/root/repos/openvino_contrib/modules/nvidia_plugin \ - -DENABLE_INTEL_CPU=OFF \ - -DENABLE_INTEL_GPU=OFF \ - -DENABLE_INTEL_GNA=OFF \ - -DENABLE_OV_TF_FRONTEND=OFF \ - -DENABLE_OV_PADDLE_FRONTEND=OFF \ - -DENABLE_OV_PYTORCH_FRONTEND=OFF \ - -DENABLE_OV_ONNX_FRONTEND=OFF \ - -DENABLE_PYTHON=OFF \ - -DENABLE_TESTS=ON \ - -S /root/repos/openvino \ - -B /root/w/build && - cmake --build /root/w/build --parallel --config Release --verbose -- ov_nvidia_func_tests ov_nvidia_unit_tests" - displayName: 'Docker build Lin' - - - script: ls -alR $(OPENVINO_REPO_DIR)/bin/ - displayName: 'List bin files' diff --git a/.ci/azure/linux_debian.yml b/.ci/azure/linux_debian.yml deleted file mode 100644 index c0c6225c411e7b..00000000000000 --- a/.ci/azure/linux_debian.yml +++ /dev/null @@ -1,428 +0,0 @@ -trigger: - branches: - include: - - 'master' - - 'releases/*' - paths: - exclude: - - '*/docs/*' - - 'docs/*' - - '*/*.md' - - '*.md' - - '*/layer_tests_summary/*' - - '*/conformance/*' - - 'tests/layer_tests/*' - -pr: - branches: - include: - - 'master' - - 'releases/*' - paths: - exclude: - - '*/docs/*' - - 'docs/*' - - '*/*.md' - - '*.md' - - '*/layer_tests_summary/*' - - '*/conformance/*' - - 'tests/layer_tests/*' - -jobs: -- job: Lin_Debian - # About 150% of total time - timeoutInMinutes: '120' - - pool: - name: LIN_VMSS_VENV_F16S_U20_WU2 - - variables: - system.debug: true - VSTS_HTTP_RETRY: 5 - VSTS_HTTP_TIMEOUT: 200 - BUILD_TYPE: Release - REPO_DIR: $(Build.Repository.LocalPath) - WORK_DIR: $(Pipeline.Workspace)/_w - BUILD_DIR: $(WORK_DIR)/build - BUILD_SAMPLES_DIR: $(WORK_DIR)/build_samples - BUILD_LAYER_TESTS_DIR: $(WORK_DIR)/build_layer_tests - BUILD_SAMPLES_TESTS_DIR: $(WORK_DIR)/build_samples_tests - INSTALL_DIR: $(WORK_DIR)/install_pkg - INSTALL_TEST_DIR: $(INSTALL_DIR)/tests - LAYER_TESTS_DIR: $(INSTALL_TEST_DIR)/layer_tests - SAMPLES_INSTALL_DIR: /usr/share/openvino/samples - PYTHON_SAMPLES_INSTALL_DIR: $(INSTALL_DIR)/share/openvino/samples/python - PYTHON_WHEEL_INSTALL_DIR: $HOME/.local/lib/python3.8/site-packages - BUILD_VENV: $(WORK_DIR)/build_venv - TEST_VENV: $(WORK_DIR)/test_venv - TMP_DIR: /mnt/tmp - SHARE_DIR: /mount/cinfsshare/onnxtestdata - CCACHE_DIR: $(SHARE_DIR)/ccache/master/linux - - steps: - - script: | - curl -H Metadata:true --noproxy "*" "http://169.254.169.254/metadata/instance?api-version=2019-06-01" - whoami - uname -a - echo Python3 info ; which python3 ; python3 --version - echo Python info ; which python ; python --version - echo gcc info ; which gcc ; gcc --version - echo cmake info ; which cmake ; cmake --version - lsb_release - env - cat /proc/cpuinfo - cat /proc/meminfo - cat /etc/fstab - vmstat -s - df - lsblk -o NAME,HCTL,SIZE,MOUNTPOINT | grep -i "sd" - free -h - echo TargetBranch: $(System.PullRequest.TargetBranch) - echo SourceBranch: $(Build.SourceBranch) - displayName: 'System info' - - - script: | - set -e - rm -rf $(WORK_DIR) ; mkdir $(WORK_DIR) - rm -rf $(BUILD_DIR) ; mkdir $(BUILD_DIR) - rm -rf $(BUILD_SAMPLES_DIR) ; mkdir $(BUILD_SAMPLES_DIR) - sudo rm -rf $(TMP_DIR) ; sudo mkdir $(TMP_DIR) ; sudo chmod 777 -R $(TMP_DIR) - sudo mkdir -p $(SHARE_DIR) - sudo apt --assume-yes 
update && sudo apt --assume-yes install nfs-common - sudo mount -vvv -t nfs cinfsshare.file.core.windows.net:/cinfsshare/onnxtestdata $(SHARE_DIR) -o vers=4,minorversion=1,sec=sys - mkdir -p $(CCACHE_DIR) - displayName: 'Make dir' - - - checkout: self - clean: 'true' - submodules: 'true' - path: openvino - - - script: | - set -e - sudo -E $(REPO_DIR)/install_build_dependencies.sh - # 'clang' is used as a default compiler - sudo apt --assume-yes install clang - sudo apt --assume-yes install --no-install-recommends libopencv-imgproc-dev libopencv-imgcodecs-dev - # install build dependencies - (cd $(WORK_DIR) && python3 -m venv build_venv) - $(BUILD_VENV)/bin/python3 -m pip install -U pip - $(BUILD_VENV)/bin/python3 -m pip install -r $(REPO_DIR)/src/bindings/python/wheel/requirements-dev.txt - $(BUILD_VENV)/bin/python3 -m pip install -r $(REPO_DIR)/src/bindings/python/requirements.txt - # For running Python API tests - $(BUILD_VENV)/bin/python3 -m pip install -r $(REPO_DIR)/src/bindings/python/src/compatibility/openvino/requirements-dev.txt - # For running Paddle frontend unit tests - $(BUILD_VENV)/bin/python3 -m pip install -r $(REPO_DIR)/src/frontends/paddle/tests/requirements.txt - # For running ONNX frontend unit tests - $(BUILD_VENV)/bin/python3 -m pip install -r $(REPO_DIR)/src/frontends/onnx/tests/requirements.txt - # For running TensorFlow frontend unit tests - $(BUILD_VENV)/bin/python3 -m pip install -r $(REPO_DIR)/src/frontends/tensorflow/tests/requirements.txt - # For MO unit tests - (cd $(WORK_DIR) && python3 -m venv test_venv) - $(TEST_VENV)/bin/python3 -m pip install -U pip - $(TEST_VENV)/bin/python3 -m pip install -r $(REPO_DIR)/tools/mo/requirements_mxnet.txt - $(TEST_VENV)/bin/python3 -m pip install -r $(REPO_DIR)/tools/mo/requirements_caffe.txt - $(TEST_VENV)/bin/python3 -m pip install -r $(REPO_DIR)/tools/mo/requirements_kaldi.txt - $(TEST_VENV)/bin/python3 -m pip install -r $(REPO_DIR)/tools/mo/requirements_onnx.txt - $(TEST_VENV)/bin/python3 -m pip install -r $(REPO_DIR)/tools/mo/requirements_tf2.txt - $(TEST_VENV)/bin/python3 -m pip install -r $(REPO_DIR)/tools/mo/requirements_dev.txt - $(TEST_VENV)/bin/python3 -m pip install -r $(REPO_DIR)/src/frontends/paddle/tests/requirements.txt - # for Python API tests - /usr/bin/python3 -m pip install -r $(REPO_DIR)/src/bindings/python/requirements_test.txt - /usr/bin/python3 -m pip install -r $(REPO_DIR)/tools/mo/requirements.txt - /usr/bin/python3 -m pip uninstall -y numpy # apt-get install python3-numpy will be used - # Speed up build - sudo apt -y --no-install-recommends install unzip - wget https://github.com/ninja-build/ninja/releases/download/v1.10.2/ninja-linux.zip - unzip ninja-linux.zip - sudo cp -v ninja /usr/local/bin/ - # Speed up tests - git clone https://github.com/google/gtest-parallel.git - displayName: 'Install build dependencies' - - - task: CMake@1 - inputs: - # CMake must get Python 3.x version by default - cmakeArgs: > - -GNinja - -DENABLE_CPPLINT=OFF - -DCMAKE_BUILD_TYPE=$(BUILD_TYPE) - -DCMAKE_COMPILE_WARNING_AS_ERROR=ON - -DENABLE_PYTHON=ON - -DENABLE_INTEL_GNA=OFF - -DPython3_EXECUTABLE=$(BUILD_VENV)/bin/python3 - -DENABLE_TESTS=ON - -DENABLE_FASTER_BUILD=ON - -DENABLE_STRICT_DEPENDENCIES=OFF - -DENABLE_SYSTEM_SNAPPY=ON - -DCMAKE_CXX_COMPILER_LAUNCHER=ccache - -DCMAKE_C_COMPILER_LAUNCHER=ccache - -DCMAKE_CXX_LINKER_LAUNCHER=ccache - -DCMAKE_C_LINKER_LAUNCHER=ccache - -DENABLE_PYTHON_PACKAGING=ON - -DCPACK_GENERATOR=DEB - -S $(REPO_DIR) - -B $(BUILD_DIR) - displayName: 'CMake OpenVINO' - - - script: ls 
-alR $(REPO_DIR)/temp/ - displayName: 'List temp SDKs' - - - script: ccache --zero-stats --max-size=50G --show-config - displayName: 'Clean ccache stats' - - - script: cmake --build $(BUILD_DIR) --parallel --config $(BUILD_TYPE) - env: - CCACHE_DIR: $(CCACHE_DIR) - CCACHE_TEMPDIR: $(TMP_DIR)/ccache - CCACHE_BASEDIR: $(Pipeline.Workspace) - CCACHE_MAXSIZE: 50G - displayName: 'Build Lin' - - - script: ccache --show-stats - displayName: 'Show ccache stats' - - - script: ls -alR $(REPO_DIR)/bin/ - displayName: 'List bin files' - - - task: CMake@1 - inputs: - cmakeArgs: > - -GNinja - -S $(REPO_DIR)/tests/layer_tests - -B $(BUILD_LAYER_TESTS_DIR) - displayName: 'CMake Layer Tests' - - - script: cmake --build $(BUILD_LAYER_TESTS_DIR) --parallel --config $(BUILD_TYPE) - displayName: 'Build Layer Tests' - - # to check that wheel packages tested later, contain all all the dependencies like TBB or pugixml - - script: sudo apt-get remove libtbb2 libpugixml1v5 -y - displayName: 'Remove debian dependencies' - - - script: cmake -DCOMPONENT=python_wheels -DCMAKE_INSTALL_PREFIX=$(INSTALL_DIR) -P $(BUILD_DIR)/cmake_install.cmake - displayName: 'Install wheel packages' - - - script: cmake -DCOMPONENT=python_samples -DCMAKE_INSTALL_PREFIX=$(INSTALL_DIR) -P $(BUILD_DIR)/cmake_install.cmake - displayName: 'Install Python Samples' - - - script: cmake -DCOMPONENT=tests -DCMAKE_INSTALL_PREFIX=$(INSTALL_DIR) -P $(BUILD_LAYER_TESTS_DIR)/cmake_install.cmake - displayName: 'Install Layer Tests' - - - script: cmake -DCMAKE_INSTALL_PREFIX=$(INSTALL_DIR) -DCOMPONENT=tests -P $(BUILD_DIR)/cmake_install.cmake - displayName: 'Install tests' - - - script: ls -alR $(INSTALL_DIR) - displayName: 'List install test files' - - - script: | - sudo apt-get install libtbb-dev libpugixml-dev -y - cmake --build $(BUILD_DIR) --config $(BUILD_TYPE) --target package --parallel - displayName: 'Build Debian packages' - - - script: | - set -e - # install debian packages from previous release - sudo apt-get -y update - sudo apt-get install --no-install-recommends gnupg wget -y - wget https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB - sudo apt-key add GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB - echo "deb https://apt.repos.intel.com/openvino/2023 ubuntu20 main" | sudo tee /etc/apt/sources.list.d/intel-openvino-2023.list - sudo apt-get update - sudo apt-get install openvino -y - # install our local one and make sure the conflicts are resolved - sudo apt-get install --no-install-recommends dpkg-dev -y - rm -r _CPack_Packages - dpkg-scanpackages . 
/dev/null | gzip -9c > Packages.gz - echo "deb [trusted=yes] file:$(BUILD_DIR) ./" | sudo tee /etc/apt/sources.list.d/openvino-local.list - sudo apt-get update - sudo apt-get install openvino -y - workingDirectory: $(BUILD_DIR) - displayName: 'Install Debian packages' - - - script: ls -alR $(INSTALL_DIR) - displayName: 'List install files' - - - script: rm -fr $(BUILD_DIR) - displayName: 'Clean build dir' - - - script: $(SAMPLES_INSTALL_DIR)/cpp/build_samples.sh -i $(INSTALL_DIR) - displayName: 'Build cpp samples - gcc' - - - script: $(SAMPLES_INSTALL_DIR)/cpp/build_samples.sh -i $(INSTALL_DIR) - displayName: 'Build cpp samples - clang' - env: - CC: clang - CXX: clang++ - - - script: $(SAMPLES_INSTALL_DIR)/c/build_samples.sh -i $(INSTALL_DIR) - displayName: 'Build c samples' - - - script: $(INSTALL_TEST_DIR)/ov_core_unit_tests --gtest_print_time=1 --gtest_filter=-*IE_GPU* --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-NGraphUT.xml - env: - LD_LIBRARY_PATH: $(INSTALL_TEST_DIR) - displayName: 'OV Core UT' - - - script: | - $(INSTALL_TEST_DIR)/ov_proxy_plugin_tests --gtest_print_time=1 --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-OVProxyTests.xml - env: - LD_LIBRARY_PATH: $(INSTALL_TEST_DIR) - displayName: 'OV Proxy Tests' - - - script: | - $(INSTALL_TEST_DIR)/ov_hetero_unit_tests --gtest_print_time=1 --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-OVHeteroUnitTests.xml - env: - LD_LIBRARY_PATH: $(INSTALL_TEST_DIR) - displayName: 'OV Hetero Unit Tests' - - - script: | - $(INSTALL_TEST_DIR)/ov_hetero_func_tests --gtest_print_time=1 --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-OVHeteroFuncTests.xml - env: - LD_LIBRARY_PATH: $(INSTALL_TEST_DIR) - displayName: 'OV Hetero Func Tests' - - - script: $(INSTALL_TEST_DIR)/ov_onnx_frontend_tests --gtest_print_time=1 --gtest_filter=-*IE_GPU* --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-ONNXFrontend.xml - env: - LD_LIBRARY_PATH: $(INSTALL_TEST_DIR) - displayName: 'ONNX Frontend Tests' - - # TODO Reenable PDPD after paddlepaddle==2.5.1 with compliant protobuf is released (ticket 95904) - - script: $(INSTALL_TEST_DIR)/paddle_tests --gtest_print_time=1 --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-Paddle.xml - env: - LD_LIBRARY_PATH: $(INSTALL_TEST_DIR) - displayName: 'Paddle Frontend UT' - enabled: 'false' - - - script: $(INSTALL_TEST_DIR)/ov_tensorflow_frontend_tests --gtest_print_time=1 --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-Tensorflow.xml - env: - LD_LIBRARY_PATH: $(INSTALL_TEST_DIR) - displayName: 'TensorFlow Frontend Unit Tests' - - - script: $(INSTALL_TEST_DIR)/ov_tensorflow_common_tests --gtest_print_time=1 --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-TensorflowCommon.xml - env: - LD_LIBRARY_PATH: $(INSTALL_TEST_DIR) - displayName: 'TensorFlow Common Unit Tests' - - - script: $(INSTALL_TEST_DIR)/ov_tensorflow_lite_frontend_tests --gtest_print_time=1 --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-TensorflowLite.xml - env: - LD_LIBRARY_PATH: $(INSTALL_TEST_DIR) - displayName: 'TensorFlow Lite Frontend Unit Tests' - - - script: $(INSTALL_TEST_DIR)/ov_snippets_func_tests --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-ov_snippets_func_tests.xml - displayName: 'Snippets Func Tests' - - - script: $(INSTALL_TEST_DIR)/ov_cpu_unit_tests --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-ov_cpu_unit_tests.xml - displayName: 'Intel CPU Unit Tests' - - - script: $(INSTALL_TEST_DIR)/ov_auto_unit_tests --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-ov_auto_unit_tests.xml - displayName: 'AUTO UT' - - - script: $(INSTALL_TEST_DIR)/ov_template_func_tests --gtest_filter=*smoke* 
--gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-templateFuncTests.xml - env: - LD_LIBRARY_PATH: $(INSTALL_TEST_DIR) - displayName: 'TEMPLATE FuncTests' - - - script: $(INSTALL_TEST_DIR)/InferenceEngineCAPITests --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-InferenceEngineCAPITests.xml - displayName: 'IE CAPITests' - - - script: $(INSTALL_TEST_DIR)/ov_capi_test --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-ov_capi_test.xml - displayName: 'OV CAPITests' - - # Skip test_onnx/test_zoo_models and test_onnx/test_backend due to long execution time - - script: | - /usr/bin/python3 -m pytest -s $(INSTALL_TEST_DIR)/pyngraph \ - --junitxml=$(INSTALL_TEST_DIR)/TEST-Pyngraph.xml \ - --ignore=$(INSTALL_TEST_DIR)/pyngraph/tests/test_onnx/test_zoo_models.py \ - --ignore=$(INSTALL_TEST_DIR)/pyngraph/tests/test_onnx/test_backend.py - env: - LD_LIBRARY_PATH: $(INSTALL_TEST_DIR) - displayName: 'nGraph and IE Python Bindings Tests' - - - script: | - /usr/bin/python3 -m pytest -s $(INSTALL_TEST_DIR)/pyopenvino \ - --junitxml=$(INSTALL_TEST_DIR)/TEST-Pyngraph.xml \ - --ignore=$(INSTALL_TEST_DIR)/pyopenvino/tests/test_utils/test_utils.py -v - env: - # Required by python imports to load requires libraries - # - tests install dir for mock_py - LD_LIBRARY_PATH: $(INSTALL_TEST_DIR) - # For python imports to import pybind_mock_frontend - PYTHONPATH: $(INSTALL_TEST_DIR):$(REPO_DIR)/tools/mo/ - displayName: 'Python API 2.0 Tests' - - # Skip test_onnx/test_zoo_models and test_onnx/test_backend due to long execution time - - script: | - /usr/bin/python3 -m pytest -s $(REPO_DIR)/src/frontends/onnx/tests \ - --ignore=$(REPO_DIR)/src/frontends/onnx/tests/test_python/test_zoo_models.py \ - --ignore=$(REPO_DIR)/src/frontends/onnx/tests/test_python/test_backend.py -v - env: - LD_LIBRARY_PATH: $(INSTALL_TEST_DIR) - PYTHONPATH: $(INSTALL_TEST_DIR) - displayName: 'ONNX Frontend Python Tests' - - - script: | - set -e - # TODO: fix 'No mock frontend API available' - $(TEST_VENV)/bin/python3 -m pip install openvino-dev --find-links=$(INSTALL_DIR)/tools - $(TEST_VENV)/bin/python3 -m pytest -s $(INSTALL_TEST_DIR)/mo/unit_tests --junitxml=$(INSTALL_TEST_DIR)/TEST-ModelOptimizer.xml - env: - PYTHONPATH: $(REPO_DIR)/tools/ovc/ - displayName: 'Model Optimizer UT' - - # run not all smoke filter to save time in post-commit - - script: $(INSTALL_TEST_DIR)/ov_cpu_func_tests --gtest_filter=*OVCLass*:*CoreThreadingTests* --gtest_print_time=1 --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-ov_cpu_func_tests.xml - displayName: 'CPU FuncTests' - - - task: CMake@1 - inputs: - cmakeArgs: > - -GNinja - -S $(REPO_DIR)/tests/samples_tests - -B $(BUILD_SAMPLES_TESTS_DIR) - displayName: 'CMake Samples Tests' - - - script: cmake -DCOMPONENT=tests -DCMAKE_INSTALL_PREFIX=$(INSTALL_DIR) -P $(BUILD_SAMPLES_TESTS_DIR)/cmake_install.cmake - displayName: 'Install Samples Tests' - - - script: | - set -e - /usr/bin/python3 -m pip install -r $(INSTALL_TEST_DIR)/smoke_tests/requirements.txt - # GNA isn't a part of Debian package, so filter out that tests - /usr/bin/python3 -m pytest $(INSTALL_TEST_DIR)/smoke_tests/ -k "not GNA" --env_conf $(INSTALL_TEST_DIR)/smoke_tests/env_config.yml -s --junitxml=$(INSTALL_TEST_DIR)/TEST-SamplesSmokeTests.xml - env: - IE_APP_PATH: $(INSTALL_DIR)/samples_bin - LD_LIBRARY_PATH: $(INSTALL_DIR)/samples_bin - IE_APP_PYTHON_PATH: $(PYTHON_SAMPLES_INSTALL_DIR)/ - SHARE: $(INSTALL_TEST_DIR)/smoke_tests/samples_smoke_tests_data/ - WORKSPACE: $(INSTALL_DIR) - displayName: 'Samples Smoke Tests' - - - script: | - set -e - $(TEST_VENV)/bin/python3 -m pip 
install -r $(LAYER_TESTS_DIR)/requirements.txt - $(TEST_VENV)/bin/python3 -m pytest $(LAYER_TESTS_DIR)/tensorflow_tests/test_tf_Roll.py --ir_version=10 --junitxml=$(INSTALL_TEST_DIR)/TEST-tf_Roll.xmlTEST - env: - PYTHONPATH: $(REPO_DIR)/tools/ovc/:$(LAYER_TESTS_DIR) - displayName: 'TensorFlow 1 Layer Tests - Legacy FE' - - - script: | - set -e - $(TEST_VENV)/bin/python3 -m pip install -r $(LAYER_TESTS_DIR)/requirements.txt - $(RUN_PREFIX) $(TEST_VENV)/bin/python3 -m pytest $(LAYER_TESTS_DIR)/tensorflow_lite_tests/ --junitxml=$(INSTALL_TEST_DIR)/TEST-tfl_fe.xmlTEST - env: - PYTHONPATH: $(REPO_DIR)/tools/ovc/:$(REPO_DIR)/tools/mo/:$(LAYER_TESTS_DIR) - TEST_DEVICE: CPU - displayName: 'TensorFlow Lite Layer Tests - TFL FE' - - - task: PublishTestResults@2 - condition: always() - inputs: - testResultsFormat: 'JUnit' # Options: JUnit, NUnit, VSTest, xUnit, cTest - testResultsFiles: '**/TEST-*.xml' - #searchFolder: '$(BUILD_DIR)' - mergeTestResults: false # Optional - #failTaskOnFailedTests: false # Optional - #testRunTitle: 'Pre/Post-Commit' # Optional - buildPlatform: 'x64' # Optional - buildConfiguration: 'Linux' # Optional - #publishRunAttachments: true # Optional diff --git a/.ci/azure/linux_lohika.yml b/.ci/azure/linux_lohika.yml deleted file mode 100644 index 08f3690c76c282..00000000000000 --- a/.ci/azure/linux_lohika.yml +++ /dev/null @@ -1,72 +0,0 @@ -#resources: -# repositories: -# - repository: testdata -# type: github -# endpoint: openvinotoolkit -# name: openvinotoolkit/testdata -# ref: master - -jobs: -- job: Lin_lohika - # About 150% of total time - timeoutInMinutes: '90' - - pool: - name: LIN_LOHIKA - - variables: - system.debug: true -# VSTS_HTTP_RETRY: 5 -# VSTS_HTTP_TIMEOUT: 200 -# BUILD_TYPE: Release -# REPO_DIR: $(Build.Repository.LocalPath) -# WORK_DIR: $(Pipeline.Workspace)/_w -# BUILD_DIR: $(WORK_DIR)/build - - steps: - - script: git -C ~/work/openvino fetch origin $(Build.SourceBranch) - displayName: fetch - -# - checkout: self -# clean: 'true' -# submodules: 'true' -# path: openvino - - - checkout: none - - - script: git -C ~/work/openvino checkout -m $(Build.SourceVersion) && git -C ~/work/openvino submodule update --init --recursive - displayName: checkout - - - script: env -C ~/work ./configreleasenolto.sh - displayName: CMake - -# - task: CMake@1 -# inputs: -# # CMake must get Python 3.x version by default -# cmakeArgs: > -# -GNinja -# -DENABLE_CPPLINT=OFF -# -DCMAKE_VERBOSE_MAKEFILE=ON -# -DCMAKE_BUILD_TYPE=$(BUILD_TYPE) -# -DENABLE_PYTHON=ON -# -DPython3_EXECUTABLE=/usr/bin/python3.8 -# -DENABLE_TESTS=ON -# -DENABLE_OV_ONNX_FRONTEND=ON -# -DENABLE_FASTER_BUILD=ON -# -DENABLE_STRICT_DEPENDENCIES=OFF -# -DOPENVINO_EXTRA_MODULES=$(OPENVINO_CONTRIB_REPO_DIR)/modules -# -S $(REPO_DIR) -# -B $(BUILD_DIR) - - - script: | - env -C ~/work - ./buildreleasenolto.sh - libopenvino_gapi_preproc.so - openvino_intel_cpu_plugin - openvino_intel_gpu_plugin - ov_gpu_unit_tests - gpuFuncTests - displayName: Build Lin - - - script: ~/work/testreleasenolto.sh - displayName: cldnn tests diff --git a/.ci/azure/linux_onnxruntime.yml b/.ci/azure/linux_onnxruntime.yml deleted file mode 100644 index 22fb78fffb5d0d..00000000000000 --- a/.ci/azure/linux_onnxruntime.yml +++ /dev/null @@ -1,207 +0,0 @@ -trigger: - branches: - include: - - 'master' - - 'releases/*' - paths: - exclude: - - '*/docs/*' - - 'docs/*' - - '*/*.md' - - '*.md' - - '*/layer_tests_summary/*' - - '*/conformance/*' - - 'tools/*' - - 'tests/layer_tests/*' - -pr: - drafts: 'false' - branches: - include: - - 'master' - - 
'releases/*' - paths: - exclude: - - '*/docs/*' - - 'docs/*' - - '*/*.md' - - '*.md' - - '*/layer_tests_summary/*' - - '*/conformance/*' - - 'tools/*' - - 'tests/layer_tests/*' - -variables: - - group: github - -jobs: -- job: onnxruntime - timeoutInMinutes: '90' - - pool: - name: LIN_VMSS_VENV_ONNX_U20_WU2 - - variables: - system.debug: true - VSTS_HTTP_RETRY: 5 - VSTS_HTTP_TIMEOUT: 200 - BUILD_TYPE: Release - REPO_DIR: $(Build.Repository.LocalPath) - ONNXRUNTIME_REPO_DIR: $(REPO_DIR)/../onnxruntime - WORK_DIR: $(Pipeline.Workspace)/_w - MODELS_DIR: /mount/cinfsshare/onnxtestdata - TMP_DIR: /mnt/tmp - INSTALL_DIR: $(WORK_DIR)/install_pkg/openvino - BUILD_DIR: $(WORK_DIR)/build - ONNXRUNTIME_UTILS: $(REPO_DIR)/.ci/azure/ci_utils/onnxruntime - ONNXRUNTIME_BUILD_DIR: $(ONNXRUNTIME_REPO_DIR)/build - LD_LIBRARY_PATH: $(Agent.ToolsDirectory)/Python/$(OV_PYTHON_VERSION)/x64/lib - OV_PYTHON_VERSION: 3.11.2 # Full version of Python its required for LD_LIBRARY_PATH. More details https://github.com/microsoft/azure-pipelines-tool-lib/blob/master/docs/overview.md#tool-cache - - steps: - - task: UsePythonVersion@0 - inputs: - versionSpec: '$(OV_PYTHON_VERSION)' # Setting only major & minor version will download latest release from GH repo example 3.10 will be 3.10.10. - addToPath: true - disableDownloadFromRegistry: false - architecture: 'x64' - githubToken: $(auth_token) - displayName: Setup Python 3.11 - name: setupPython - - bash: | - #!/bin/bash - python -V - - - script: | - curl -H Metadata:true --noproxy "*" "http://169.254.169.254/metadata/instance?api-version=2019-06-01" - whoami - uname -a - echo Python3 info ; which python3 ; python3 --version - echo Python info ; which python ; python --version - echo gcc info ; which gcc ; gcc --version - echo cmake info ; which cmake ; cmake --version - lsb_release - env - cat /proc/cpuinfo - cat /proc/meminfo - cat /etc/fstab - vmstat -s - df - lsblk -o NAME,HCTL,SIZE,MOUNTPOINT | grep -i "sd" - free -h - displayName: 'System info' - - - script: | - set -e - rm -rf $(WORK_DIR) ; mkdir $(WORK_DIR) - sudo rm -rf $(TMP_DIR) ; sudo mkdir $(TMP_DIR) ; sudo chmod 777 -R $(TMP_DIR) - sudo mkdir -p $(MODELS_DIR) - sudo apt --assume-yes update && sudo apt --assume-yes install nfs-common - sudo mount -vvv -t nfs cinfsshare.file.core.windows.net:/cinfsshare/onnxtestdata $(MODELS_DIR) -o vers=4,minorversion=1,sec=sys - displayName: 'Make dirs' - - - checkout: self - clean: 'true' - submodules: 'true' - path: openvino - - - script: | - set -e - branch=`tr -s '\n ' < $(ONNXRUNTIME_UTILS)/version` - git clone --branch $branch --single-branch --recursive https://github.com/microsoft/onnxruntime.git $(ONNXRUNTIME_REPO_DIR) - displayName: 'Clone onnxruntime' - - - script: | - set -e - sudo -E $(REPO_DIR)/install_build_dependencies.sh - # Speed up build - sudo apt -y --no-install-recommends install unzip - wget https://github.com/ninja-build/ninja/releases/download/v1.10.2/ninja-linux.zip - unzip ninja-linux.zip - sudo cp -v ninja /usr/local/bin/ - displayName: 'Install dependencies' - - - task: CMake@1 - inputs: - # CMake must get Python 3.x version by default - cmakeArgs: > - -GNinja - -DCMAKE_BUILD_TYPE=$(BUILD_TYPE) - -DCMAKE_COMPILE_WARNING_AS_ERROR=ON - -DENABLE_INTEL_GNA=OFF - -DENABLE_INTEL_GPU=OFF - -DENABLE_CPPLINT=OFF - -DENABLE_PROFILING_ITT=OFF - -DENABLE_SAMPLES=OFF - -DENABLE_OV_TF_FRONTEND=OFF - -DENABLE_OV_PADDLE_FRONTEND=OFF - -DENABLE_OV_PYTORCH_FRONTEND=OFF - -DENABLE_OPENVINO_DEBUG=OFF - -S $(REPO_DIR) - -B $(BUILD_DIR) - - - script: cmake --build 
$(BUILD_DIR) --parallel --config $(BUILD_TYPE) - displayName: 'Build Lin ONNX' - - - script: ls -alR $(REPO_DIR)/bin/ - displayName: 'List bin files' - - - script: cmake -DCMAKE_INSTALL_PREFIX=$(INSTALL_DIR) -P $(BUILD_DIR)/cmake_install.cmake - displayName: 'Install OpenVINO' - - - script: | - set -e - source $(INSTALL_DIR)/setupvars.sh - $(ONNXRUNTIME_REPO_DIR)/build.sh \ - --config RelWithDebInfo \ - --use_openvino CPU_FP32 \ - --build_shared_lib \ - --parallel \ - --skip_tests \ - --build_dir $(ONNXRUNTIME_BUILD_DIR) - env: - CXXFLAGS: "-Wno-error=deprecated-declarations" - displayName: 'Build Lin ONNX Runtime' - - - script: | - set -e - source $(INSTALL_DIR)/setupvars.sh - skip_tests=$(tr -s '\n ' ':' < $(ONNXRUNTIME_UTILS)/skip_tests) - ./onnxruntime_test_all --gtest_filter=-$skip_tests - workingDirectory: $(ONNXRUNTIME_BUILD_DIR)/RelWithDebInfo - displayName: 'Run onnxruntime_test_all' - - - script: | - set -e - source $(INSTALL_DIR)/setupvars.sh - ./onnxruntime_shared_lib_test --gtest_filter=-CApiTest.test_custom_op_openvino_wrapper_library - workingDirectory: $(ONNXRUNTIME_BUILD_DIR)/RelWithDebInfo - displayName: 'Run onnxruntime_shared_lib_test' - - - script: | - set -e - source $(INSTALL_DIR)/setupvars.sh - ./onnxruntime_global_thread_pools_test - workingDirectory: $(ONNXRUNTIME_BUILD_DIR)/RelWithDebInfo - displayName: 'Run onnxruntime_global_thread_pools_test' - - - script: | - set -e - source $(INSTALL_DIR)/setupvars.sh - ./onnxruntime_api_tests_without_env - workingDirectory: $(ONNXRUNTIME_BUILD_DIR)/RelWithDebInfo - displayName: 'Run onnxruntime_api_tests_without_env' - - - script: | - set -e - source $(INSTALL_DIR)/setupvars.sh - ./onnx_test_runner "$(ONNXRUNTIME_REPO_DIR)/cmake/external/onnx/onnx/backend/test/data/pytorch-converted" - workingDirectory: $(ONNXRUNTIME_BUILD_DIR)/RelWithDebInfo - displayName: 'Run pytorch-converted tests' - - - script: | - set -e - source $(INSTALL_DIR)/setupvars.sh - ./onnx_test_runner "$(ONNXRUNTIME_REPO_DIR)/cmake/external/onnx/onnx/backend/test/data/pytorch-operator" - workingDirectory: $(ONNXRUNTIME_BUILD_DIR)/RelWithDebInfo - displayName: 'Run pytorch-operator tests' From 921e621404727cacc4810bc6d7ed70e45d18de56 Mon Sep 17 00:00:00 2001 From: Tomasz Jankowski Date: Thu, 5 Oct 2023 05:26:49 +0200 Subject: [PATCH 069/257] [Ref] Drop legacy API (#20006) * Drop legacy API - CoordinateTransform * Refactor AvgPool ref * Fix zero padding indices ceil rounding * Transpose 3D reshaped kernels * Reuse max_pool v8 ref in v1 * Change ref::slice params validation * Fix wrong alloc in max_pool ref * Drop legacy from TopK * Fix mvn ref * Deprecate unused * Drop ngraph from TopK op * Remove deprecated * Use OPENVINO_ASSERT * Replace headers paths * Replace headers paths * Add missing include * Address review comments * Clean up * Remove unused and duplicated code --- .../openvino/reference/adaptive_avg_pool.hpp | 9 +- .../openvino/reference/adaptive_max_pool.hpp | 16 +- .../include/openvino/reference/and.hpp | 4 +- .../reference/autobroadcast_binop.hpp | 80 ++--- .../include/openvino/reference/avg_pool.hpp | 335 ++++++++---------- .../include/openvino/reference/batch_norm.hpp | 6 +- .../openvino/reference/binary_convolution.hpp | 2 +- .../include/openvino/reference/broadcast.hpp | 4 +- .../include/openvino/reference/bucketize.hpp | 2 +- .../include/openvino/reference/concat.hpp | 2 +- .../include/openvino/reference/convert.hpp | 6 +- .../openvino/reference/convert_color_nv12.hpp | 12 +- .../openvino/reference/convolution.hpp | 52 +-- 
.../reference/convolution_backprop_data.hpp | 54 ++- .../openvino/reference/ctc_greedy_decoder.hpp | 14 +- .../include/openvino/reference/ctc_loss.hpp | 6 +- .../reference/deformable_convolution.hpp | 41 +-- .../reference/deformable_psroi_pooling.hpp | 2 +- .../openvino/reference/depth_to_space.hpp | 4 +- .../openvino/reference/detection_output.hpp | 27 +- .../include/openvino/reference/divide.hpp | 8 +- .../include/openvino/reference/einsum.hpp | 3 +- .../reference/embedding_bag_offsets_sum.hpp | 2 +- .../reference/embedding_bag_packed_sum.hpp | 2 +- .../reference/embedding_segments_sum.hpp | 2 +- .../include/openvino/reference/equal.hpp | 4 +- .../include/openvino/reference/erf.hpp | 3 - ...xperimental_detectron_detection_output.hpp | 9 +- ...imental_detectron_prior_grid_generator.hpp | 7 +- ...mental_detectron_proposal_single_image.hpp | 9 +- ...mental_detectron_roi_feature_extractor.hpp | 8 +- .../experimental_detectron_topk_rois.hpp | 7 +- .../reference/extract_image_patches.hpp | 4 +- .../include/openvino/reference/eye.hpp | 2 +- .../openvino/reference/fake_quantize.hpp | 30 +- .../include/openvino/reference/floor_mod.hpp | 1 - .../include/openvino/reference/gather.hpp | 2 +- .../openvino/reference/gather_tree.hpp | 4 +- .../include/openvino/reference/gelu.hpp | 4 +- .../openvino/reference/generate_proposal.hpp | 11 +- .../include/openvino/reference/greater.hpp | 4 +- .../include/openvino/reference/greater_eq.hpp | 4 +- .../openvino/reference/grid_sample.hpp | 2 +- .../group_convolution_backprop_data.hpp | 9 +- .../openvino/reference/interpolate.hpp | 59 ++- .../openvino/reference/interpolate_pil.hpp | 4 +- .../include/openvino/reference/irdft.hpp | 2 +- .../include/openvino/reference/less.hpp | 4 +- .../include/openvino/reference/less_eq.hpp | 4 +- .../openvino/reference/log_softmax.hpp | 27 +- .../openvino/reference/logical_reduction.hpp | 1 - .../include/openvino/reference/lrn.hpp | 8 +- .../include/openvino/reference/matmul.hpp | 1 - .../include/openvino/reference/matrix_nms.hpp | 13 +- .../include/openvino/reference/max_pool.hpp | 157 +++----- .../include/openvino/reference/maximum.hpp | 4 +- .../include/openvino/reference/minimum.hpp | 4 +- .../openvino/reference/multiclass_nms.hpp | 13 +- .../include/openvino/reference/multiply.hpp | 4 +- .../include/openvino/reference/mvn.hpp | 10 +- .../reference/non_max_suppression.hpp | 21 +- .../include/openvino/reference/non_zero.hpp | 2 +- .../openvino/reference/normalize_l2.hpp | 2 - .../include/openvino/reference/not_equal.hpp | 4 +- .../include/openvino/reference/one_hot.hpp | 2 +- .../include/openvino/reference/or.hpp | 4 +- .../include/openvino/reference/pad.hpp | 6 +- .../include/openvino/reference/power.hpp | 4 +- .../include/openvino/reference/prelu.hpp | 16 +- .../include/openvino/reference/prior_box.hpp | 7 +- .../reference/prior_box_clustered.hpp | 9 +- .../include/openvino/reference/proposal.hpp | 4 +- .../openvino/reference/psroi_pooling.hpp | 4 +- .../include/openvino/reference/quantize.hpp | 76 ---- .../openvino/reference/random_uniform.hpp | 6 +- .../include/openvino/reference/range.hpp | 6 +- .../include/openvino/reference/rdft.hpp | 6 +- .../openvino/reference/region_yolo.hpp | 4 +- .../include/openvino/reference/reorg_yolo.hpp | 2 +- .../include/openvino/reference/reshape.hpp | 4 +- .../include/openvino/reference/result.hpp | 2 - .../openvino/reference/reverse_sequence.hpp | 5 +- .../include/openvino/reference/roi_align.hpp | 69 ++-- .../openvino/reference/roi_pooling.hpp | 4 +- .../include/openvino/reference/roll.hpp 
| 2 +- .../include/openvino/reference/round.hpp | 4 +- .../{round_guard.hpp => rounding_guard.hpp} | 6 +- .../reference/scatter_elements_update.hpp | 4 +- .../openvino/reference/scatter_nd_update.hpp | 3 +- .../openvino/reference/scatter_update.hpp | 5 +- .../include/openvino/reference/sequences.hpp | 48 +-- .../include/openvino/reference/shape_of.hpp | 2 +- .../openvino/reference/shuffle_channels.hpp | 2 +- .../include/openvino/reference/slice.hpp | 2 +- .../include/openvino/reference/softmax.hpp | 34 +- .../openvino/reference/space_to_depth.hpp | 4 +- .../openvino/reference/squared_difference.hpp | 1 - .../include/openvino/reference/subtract.hpp | 2 + .../include/openvino/reference/tile.hpp | 5 +- .../include/openvino/reference/topk.hpp | 33 +- .../include/openvino/reference/transpose.hpp | 2 +- .../include/openvino/reference/unique.hpp | 4 +- .../openvino/reference/utils/fft_common.hpp | 5 +- .../openvino/reference/utils/nms_common.hpp | 6 +- src/core/reference/src/op/depth_to_space.cpp | 22 +- src/core/reference/src/op/einsum.cpp | 28 +- ...xperimental_detectron_detection_output.cpp | 6 +- ...mental_detectron_proposal_single_image.cpp | 6 +- ...mental_detectron_roi_feature_extractor.cpp | 6 +- src/core/reference/src/op/function.cpp | 4 - src/core/reference/src/op/gather_tree.cpp | 5 +- .../reference/src/op/generate_proposal.cpp | 8 +- .../reference/src/op/group_convolution.cpp | 18 +- .../op/group_convolution_backprop_data.cpp | 60 ++-- src/core/reference/src/op/if.cpp | 16 +- src/core/reference/src/op/interpolate.cpp | 20 +- src/core/reference/src/op/irdft.cpp | 2 +- src/core/reference/src/op/matmul.cpp | 4 +- src/core/reference/src/op/matrix_nms.cpp | 4 +- src/core/reference/src/op/multiclass_nms.cpp | 5 +- src/core/reference/src/op/pad.cpp | 9 +- src/core/reference/src/op/random_uniform.cpp | 18 +- src/core/reference/src/op/rdft.cpp | 2 +- src/core/reference/src/op/reorg_yolo.cpp | 2 +- src/core/reference/src/op/reshape.cpp | 4 +- src/core/reference/src/op/reverse.cpp | 6 +- src/core/reference/src/op/slice.cpp | 22 +- src/core/reference/src/op/space_to_depth.cpp | 16 +- src/core/reference/src/op/split.cpp | 2 - src/core/reference/src/op/strided_slice.cpp | 1 - src/core/reference/src/op/transpose.cpp | 2 +- .../reference/src/op/utils/fft_common.cpp | 4 +- .../reference/src/op/utils/nms_common.cpp | 12 +- .../reference/src/op/utils/round_guard.cpp | 11 - .../reference/src/op/utils/rounding_guard.cpp | 11 + .../src/runtime/opt_kernel/reshape.cpp | 1 - .../src/utils/coordinate_transform.cpp | 16 +- src/core/src/op/interpolate.cpp | 31 +- src/core/src/op/topk.cpp | 114 +++--- .../template/backend/ops/interpolate.cpp | 36 +- .../functional/op_reference/avg_pool.cpp | 32 +- 141 files changed, 875 insertions(+), 1268 deletions(-) delete mode 100644 src/core/reference/include/openvino/reference/quantize.hpp rename src/core/reference/include/openvino/reference/{round_guard.hpp => rounding_guard.hpp} (84%) delete mode 100644 src/core/reference/src/op/utils/round_guard.cpp create mode 100644 src/core/reference/src/op/utils/rounding_guard.cpp diff --git a/src/core/reference/include/openvino/reference/adaptive_avg_pool.hpp b/src/core/reference/include/openvino/reference/adaptive_avg_pool.hpp index d8c6bf9443085c..d59eb1bf6e1834 100644 --- a/src/core/reference/include/openvino/reference/adaptive_avg_pool.hpp +++ b/src/core/reference/include/openvino/reference/adaptive_avg_pool.hpp @@ -8,8 +8,7 @@ #include #include -#include "ngraph/axis_vector.hpp" -#include "ngraph/shape.hpp" +#include 
"openvino/core/shape.hpp" namespace ov { namespace reference { @@ -23,7 +22,7 @@ inline size_t window_end(size_t idx, size_t arg_shape, size_t out_shape) { } template T avg_div(const T sum, size_t n) { - NGRAPH_CHECK(n != 0, "AdaptiveAvgPool elements == 0, must be non-zero"); + OPENVINO_ASSERT(n != 0, "AdaptiveAvgPool elements == 0, must be non-zero"); if (std::is_same::value || std::is_same::value) { return static_cast(std::nearbyint(static_cast(sum) / n)); @@ -90,8 +89,8 @@ void adaptive_avg_pool_3d(const T* arg, } // namespace adaptive_pool template void adaptive_avg_pool(const T* arg, T* out, const Shape& arg_shape, const Shape& out_shape) { - NGRAPH_CHECK(arg_shape.size() == out_shape.size() && 2 < arg_shape.size() && arg_shape.size() < 6, - "AdaptiveAvgPool supports only 3D, 4D and 5D input shape"); + OPENVINO_ASSERT(arg_shape.size() == out_shape.size() && 2 < arg_shape.size() && arg_shape.size() < 6, + "AdaptiveAvgPool supports only 3D, 4D and 5D input shape"); size_t channel_size = 1; for (size_t i = 2; i < arg_shape.size(); i++) { channel_size *= arg_shape[i]; diff --git a/src/core/reference/include/openvino/reference/adaptive_max_pool.hpp b/src/core/reference/include/openvino/reference/adaptive_max_pool.hpp index 69c8ef2c940834..a84ed81b47d7d7 100644 --- a/src/core/reference/include/openvino/reference/adaptive_max_pool.hpp +++ b/src/core/reference/include/openvino/reference/adaptive_max_pool.hpp @@ -8,8 +8,7 @@ #include #include -#include "ngraph/axis_vector.hpp" -#include "ngraph/shape.hpp" +#include "openvino/core/shape.hpp" #include "openvino/reference/adaptive_avg_pool.hpp" namespace ov { @@ -19,7 +18,7 @@ void adaptive_max_pool_1d(const T* arg, T* out, IT* indices, size_t h_in, size_t for (size_t i = 0; i < h_out; i++) { auto from = arg + adaptive_pool::window_start(i, h_in, h_out); auto to = arg + adaptive_pool::window_end(i, h_in, h_out); - NGRAPH_CHECK(to - from != 0, "AdaptiveMaxPool elements == 0, must be non-zero"); + OPENVINO_ASSERT(to - from != 0, "AdaptiveMaxPool elements == 0, must be non-zero"); auto it = std::max_element(from, to); out[i] = static_cast(*it); indices[i] = static_cast(it - arg); @@ -33,7 +32,8 @@ void adaptive_max_pool_2d(const T* arg, T* out, IT* indices, size_t h_in, size_t for (size_t j = 0; j < w_out; j++) { size_t w_start = adaptive_pool::window_start(j, w_in, w_out); size_t w_end = adaptive_pool::window_end(j, w_in, w_out); - NGRAPH_CHECK((w_end - w_start) * (h_end - h_start) != 0, "AdaptiveMaxPool elements == 0, must be non-zero"); + OPENVINO_ASSERT((w_end - w_start) * (h_end - h_start) != 0, + "AdaptiveMaxPool elements == 0, must be non-zero"); auto result = arg + h_start * w_in + w_start; for (size_t n = h_start; n < h_end; n++) { auto from = arg + n * w_in + w_start; @@ -65,8 +65,8 @@ void adaptive_max_pool_3d(const T* arg, for (size_t k = 0; k < w_out; k++) { size_t w_start = adaptive_pool::window_start(k, w_in, w_out); size_t w_end = adaptive_pool::window_end(k, w_in, w_out); - NGRAPH_CHECK((w_end - w_start) * (h_end - h_start) != 0, - "AdaptiveMaxPool elements == 0, must be non-zero"); + OPENVINO_ASSERT((w_end - w_start) * (h_end - h_start) != 0, + "AdaptiveMaxPool elements == 0, must be non-zero"); auto result = arg + d_start * h_in * w_in + h_start * w_in + w_start; for (size_t n = d_start; n < d_end; n++) { for (size_t m = h_start; m < h_end; m++) { @@ -84,8 +84,8 @@ void adaptive_max_pool_3d(const T* arg, } template void adaptive_max_pool(const T* arg, T* out, IT* selected_indices, const Shape& arg_shape, const Shape& out_shape) { 
- NGRAPH_CHECK(arg_shape.size() == out_shape.size() && 2 < arg_shape.size() && arg_shape.size() < 6, - "AdaptiveAvgPool supports only 3D, 4D and 5D input shape"); + OPENVINO_ASSERT(arg_shape.size() == out_shape.size() && 2 < arg_shape.size() && arg_shape.size() < 6, + "AdaptiveAvgPool supports only 3D, 4D and 5D input shape"); size_t channel_size = 1; for (size_t i = 2; i < arg_shape.size(); i++) { channel_size *= arg_shape[i]; diff --git a/src/core/reference/include/openvino/reference/and.hpp b/src/core/reference/include/openvino/reference/and.hpp index f1b6783b285f6f..326e4b59d773af 100644 --- a/src/core/reference/include/openvino/reference/and.hpp +++ b/src/core/reference/include/openvino/reference/and.hpp @@ -6,8 +6,8 @@ #include -#include "ngraph/op/util/attr_types.hpp" -#include "ngraph/shape.hpp" +#include "openvino/core/shape.hpp" +#include "openvino/op/util/attr_types.hpp" #include "openvino/reference/autobroadcast_binop.hpp" namespace ov { diff --git a/src/core/reference/include/openvino/reference/autobroadcast_binop.hpp b/src/core/reference/include/openvino/reference/autobroadcast_binop.hpp index 0de0e66de9fa13..510d691f2286b8 100644 --- a/src/core/reference/include/openvino/reference/autobroadcast_binop.hpp +++ b/src/core/reference/include/openvino/reference/autobroadcast_binop.hpp @@ -8,8 +8,9 @@ #include #include -#include "ngraph/op/util/attr_types.hpp" -#include "ngraph/shape_util.hpp" +#include "openvino/core/shape_util.hpp" +#include "openvino/op/util/attr_types.hpp" +#include "openvino/reference/utils/coordinate_index.hpp" #include "openvino/reference/utils/coordinate_transform.hpp" namespace ov { @@ -103,13 +104,13 @@ void autobroadcast_binop(const T* arg0, } break; case op::AutoBroadcastType::NUMPY: - // We'll be using CoordinateTransform to handle the broadcasting. The general + // We'll be using CoordinateTransformBasic to handle the broadcasting. The general // procedure is as follows: // // (1) Left pad the shorter of the two shapes with ones. // (2) Squeeze (remove ones from) both shapes, and record the squeezed axis // indices. - // (3) Using CoordinateTransform, broadcast both args to the final output + // (3) Using CoordinateTransformBasic, broadcast both args to the final output // shape. The "broadcasted axes" will be those that were squeezed in step // 2. // @@ -207,7 +208,7 @@ void autobroadcast_binop(const T* arg0, } break; case op::AutoBroadcastType::PDPD: - // We'll be using CoordinateTransform to handle the broadcasting. No need to + // We'll be using CoordinateTransformBasic to handle the broadcasting. No need to // process arg0 and output shape will be the same as arg0. We need to process // arg1 and the general procedure is as follows: // @@ -216,7 +217,7 @@ void autobroadcast_binop(const T* arg0, // to align between arg0 and arg1. // (3) Squeeze (remove ones from) arg1 shape, and record the squeezed axis // indices. - // (3) Using CoordinateTransform, broadcast arg1 to the final output + // (3) Using CoordinateTransformBasic, broadcast arg1 to the final output // shape. The "broadcasted axes" will be those that were squeezed in step // 23. 
// @@ -262,18 +263,15 @@ void autobroadcast_binop(const T* arg0, } } - NGRAPH_SUPPRESS_DEPRECATED_START - CoordinateTransform arg0_transform(arg0_shape); - CoordinateTransform arg1_transform(arg1_squeezed_shape); - CoordinateTransform output_transform(arg0_shape); + const CoordinateTransformBasic output_transform{arg0_shape}; for (const Coordinate& output_coord : output_transform) { - Coordinate arg1_coord = ngraph::reduce(output_coord, arg1_squeezed_axes, false); - out[output_transform.index(output_coord)] = - elementwise_functor(arg0[arg0_transform.index(output_coord)], - arg1[arg1_transform.index(arg1_coord)]); + const auto arg1_coord = util::reduce(output_coord, arg1_squeezed_axes); + const auto out_index = coordinate_index(output_coord, arg0_shape); + const auto arg0_index = coordinate_index(output_coord, arg0_shape); + const auto arg1_index = coordinate_index(arg1_coord, arg1_squeezed_shape); + out[out_index] = elementwise_functor(arg0[arg0_index], arg1[arg1_index]); } - NGRAPH_SUPPRESS_DEPRECATED_END } } } @@ -366,10 +364,7 @@ void autobroadcast_select(const U* arg0, output_shape.push_back(std::max({arg0_padded_shape[i], arg2_padded_shape[i], arg1_padded_shape[i]})); } - CoordinateTransformBasic arg0_transform(arg0_squeezed_shape); - CoordinateTransformBasic arg1_transform(arg1_squeezed_shape); - CoordinateTransformBasic arg2_transform(arg2_squeezed_shape); - CoordinateTransformBasic output_transform(output_shape); + const CoordinateTransformBasic output_transform{output_shape}; const auto arg0_strides = row_major_strides(arg0_squeezed_shape); const auto arg1_strides = row_major_strides(arg1_squeezed_shape); @@ -377,20 +372,14 @@ void autobroadcast_select(const U* arg0, const auto output_strides = row_major_strides(output_shape); for (const Coordinate& output_coord : output_transform) { - NGRAPH_SUPPRESS_DEPRECATED_START - const Coordinate arg0_coord = ngraph::reduce(output_coord, arg0_squeezed_axes, false); - const Coordinate arg1_coord = ngraph::reduce(output_coord, arg1_squeezed_axes, false); - const Coordinate arg2_coord = ngraph::reduce(output_coord, arg2_squeezed_axes, false); - NGRAPH_SUPPRESS_DEPRECATED_END - - const size_t arg0_idx = - std::inner_product(arg0_coord.begin(), arg0_coord.end(), arg0_strides.begin(), uint64_t(0)); - const size_t arg1_idx = - std::inner_product(arg1_coord.begin(), arg1_coord.end(), arg1_strides.begin(), uint64_t(0)); - const size_t arg2_idx = - std::inner_product(arg2_coord.begin(), arg2_coord.end(), arg2_strides.begin(), uint64_t(0)); - const size_t output_idx = - std::inner_product(output_coord.begin(), output_coord.end(), output_strides.begin(), uint64_t(0)); + const auto arg0_coord = util::reduce(output_coord, arg0_squeezed_axes); + const auto arg1_coord = util::reduce(output_coord, arg1_squeezed_axes); + const auto arg2_coord = util::reduce(output_coord, arg2_squeezed_axes); + + const size_t arg0_idx = coordinate_offset(arg0_coord, arg0_strides); + const size_t arg1_idx = coordinate_offset(arg1_coord, arg1_strides); + const size_t arg2_idx = coordinate_offset(arg2_coord, arg2_strides); + const size_t output_idx = coordinate_offset(output_coord, output_strides); out[output_idx] = elementwise_functor(arg0[arg0_idx], arg1[arg1_idx], arg2[arg2_idx]); } } @@ -446,29 +435,20 @@ void autobroadcast_select(const U* arg0, } } - CoordinateTransformBasic arg0_transform(arg0_squeezed_shape); - CoordinateTransformBasic arg1_transform(arg1_shape); - CoordinateTransformBasic arg2_transform(arg2_squeezed_shape); - CoordinateTransformBasic 
output_transform(arg1_shape); + const CoordinateTransformBasic output_transform{arg1_shape}; const auto arg0_strides = row_major_strides(arg0_squeezed_shape); const auto arg2_strides = row_major_strides(arg2_squeezed_shape); const auto output_strides = row_major_strides(arg1_shape); for (const Coordinate& output_coord : output_transform) { - NGRAPH_SUPPRESS_DEPRECATED_START - const Coordinate arg0_coord = ngraph::reduce(output_coord, arg0_squeezed_axes, false); - const Coordinate arg2_coord = ngraph::reduce(output_coord, arg2_squeezed_axes, false); - NGRAPH_SUPPRESS_DEPRECATED_END - - const size_t arg0_idx = - std::inner_product(arg0_coord.begin(), arg0_coord.end(), arg0_strides.begin(), uint64_t(0)); - const size_t arg1_idx = - std::inner_product(output_coord.begin(), output_coord.end(), output_strides.begin(), uint64_t(0)); - const size_t arg2_idx = - std::inner_product(arg2_coord.begin(), arg2_coord.end(), arg2_strides.begin(), uint64_t(0)); - const size_t output_idx = - std::inner_product(output_coord.begin(), output_coord.end(), output_strides.begin(), uint64_t(0)); + const auto arg0_coord = util::reduce(output_coord, arg0_squeezed_axes); + const auto arg2_coord = util::reduce(output_coord, arg2_squeezed_axes); + + const size_t arg0_idx = coordinate_offset(arg0_coord, arg0_strides); + const size_t arg1_idx = coordinate_offset(output_coord, output_strides); + const size_t arg2_idx = coordinate_offset(arg2_coord, arg2_strides); + const size_t output_idx = coordinate_offset(output_coord, output_strides); out[output_idx] = elementwise_functor(arg0[arg0_idx], arg1[arg1_idx], arg2[arg2_idx]); } diff --git a/src/core/reference/include/openvino/reference/avg_pool.hpp b/src/core/reference/include/openvino/reference/avg_pool.hpp index bb7bd25d933fbd..c395ff5c52fd9b 100644 --- a/src/core/reference/include/openvino/reference/avg_pool.hpp +++ b/src/core/reference/include/openvino/reference/avg_pool.hpp @@ -7,227 +7,168 @@ #include #include #include -#include #include -#include "ngraph/axis_vector.hpp" -#include "ngraph/shape.hpp" +#include "openvino/core/axis_vector.hpp" +#include "openvino/core/coordinate.hpp" +#include "openvino/core/shape.hpp" +#include "openvino/reference/rounding_guard.hpp" #include "openvino/reference/utils/coordinate_transform.hpp" namespace ov { namespace reference { -template -void avg_pool_backprop(const T* delta, - T* out, - const Shape& delta_shape, - const Shape& out_shape, - const Shape& window_shape, - const Strides& window_movement_strides, - const Shape& padding_below, - const Shape& padding_above, - bool include_padding_in_avg_computation) { - NGRAPH_SUPPRESS_DEPRECATED_START - CoordinateTransform out_transform(out_shape); - - for (const Coordinate& out_coord : out_transform) { - out[out_transform.index(out_coord)] = 0; +namespace { +inline bool elem_in_padding_area(const Coordinate& kernel_position, + const Coordinate& kernel_offset, + const Shape& data_shape) { + for (size_t dim = 0; dim + 2 < data_shape.size(); ++dim) { + if (static_cast(kernel_position[dim]) + static_cast(kernel_offset[dim]) < 0LL || + kernel_position[dim] + kernel_offset[dim] >= data_shape[dim + 2]) { + return true; + } } - CoordinateTransform delta_transform(delta_shape); - - for (const Coordinate& delta_coord : delta_transform) { - size_t img_index = delta_coord[0]; - size_t channel = delta_coord[1]; - - size_t n_image_dimensions = out_shape.size() - 2; - Coordinate source_window_transform_start(2 + n_image_dimensions); - Coordinate source_window_transform_end(2 + n_image_dimensions); - 
Strides source_window_transform_source_strides(2 + n_image_dimensions, 1); - AxisVector source_window_transform_source_axis_order(2 + n_image_dimensions); - CoordinateDiff source_window_transform_padding_below(2 + n_image_dimensions); - CoordinateDiff source_window_transform_padding_above(2 + n_image_dimensions); - - source_window_transform_start[0] = img_index; - source_window_transform_end[0] = img_index + 1; - source_window_transform_start[1] = channel; - source_window_transform_end[1] = channel + 1; - source_window_transform_padding_below[0] = 0; - source_window_transform_padding_below[1] = 0; - source_window_transform_padding_above[0] = 0; - source_window_transform_padding_above[1] = 0; - - for (size_t i = 2; i < n_image_dimensions + 2; i++) { - size_t window_shape_this_dim = window_shape[i - 2]; - size_t movement_stride = window_movement_strides[i - 2]; - - source_window_transform_start[i] = movement_stride * delta_coord[i]; - source_window_transform_end[i] = source_window_transform_start[i] + window_shape_this_dim; - source_window_transform_padding_below[i] = padding_below[i - 2]; - source_window_transform_padding_above[i] = padding_above[i - 2]; - } - std::iota(begin(source_window_transform_source_axis_order), end(source_window_transform_source_axis_order), 0); - - CoordinateTransform source_window_transform(out_shape, - source_window_transform_start, - source_window_transform_end, - source_window_transform_source_strides, - source_window_transform_source_axis_order, - source_window_transform_padding_below, - source_window_transform_padding_above); - - size_t num_elements_in_window = 0; - - for (const Coordinate& source_window_coord : source_window_transform) { - if (source_window_transform.has_source_coordinate(source_window_coord) || - include_padding_in_avg_computation) { - num_elements_in_window++; - } - } + return false; +} - for (const Coordinate& source_window_coord : source_window_transform) { - if (source_window_transform.has_source_coordinate(source_window_coord)) { - size_t out_index = source_window_transform.index(source_window_coord); - out[out_index] += delta[delta_transform.index(delta_coord)] / num_elements_in_window; +inline Coordinate calculate_kernel_position(const Coordinate& out_elem_coord, + const Strides& kernel_strides, + const Shape& pads_begin) { + Coordinate top_left_corner; + top_left_corner.reserve(out_elem_coord.size()); + for (size_t i = 0u; i < out_elem_coord.size(); ++i) { + top_left_corner.emplace_back(out_elem_coord[i] * kernel_strides[i] - pads_begin[i]); + } + return top_left_corner; +} + +namespace kernel { +template +void avg_pool_3d(const Values_t* data, + Values_t* out, + const Shape& data_shape, + const Shape& out_shape, + const Shape& kernel, + const Strides& kernel_strides, + const Shape& pads_begin, + const Shape& pads_end, + const bool pads_in_avg) { + // helper constants(axes) denoting dimensions in the input data shape and kernel shape + constexpr size_t data_D = 2, data_H = 3, data_W = 4; + constexpr size_t kernel_D = 0, kernel_H = 1, kernel_W = 2; + + // select max elem and its index for each "placeholder" in the out buffer (pointed to by out_idx) + size_t out_idx = 0u; + for (size_t out_channel = 0u; out_channel < out_shape[data_D]; ++out_channel) { + for (size_t out_row = 0u; out_row < out_shape[data_H]; ++out_row) { + for (size_t out_col = 0u; out_col < out_shape[data_W]; ++out_col) { + auto sum = Values_t{0}; + auto count = size_t{0}; + + const auto kernel_position = + calculate_kernel_position({out_channel, out_row, out_col}, 
kernel_strides, pads_begin); + + for (size_t kernel_channel = 0; kernel_channel < kernel[kernel_D]; ++kernel_channel) { + for (size_t kernel_row = 0; kernel_row < kernel[kernel_H]; ++kernel_row) { + for (size_t kernel_col = 0; kernel_col < kernel[kernel_W]; ++kernel_col) { + // offset from the top-left corner of the kernel for a given row and col + const Coordinate kernel_offset{kernel_channel, kernel_row, kernel_col}; + + const auto in_padding = elem_in_padding_area(kernel_position, kernel_offset, data_shape); + // ignore the elements in the padding area + if (!in_padding) { + // index of the flattened tensor element under the current row & column of the kernel + const size_t data_elem_index = + data_shape[data_H] * data_shape[data_W] * + (kernel_offset[kernel_D] + kernel_position[kernel_D]) + + data_shape[data_W] * (kernel_offset[kernel_H] + kernel_position[kernel_H]) + + kernel_offset[kernel_W] + kernel_position[kernel_W]; + + sum += data[data_elem_index]; + } + if (pads_in_avg || !in_padding) { + ++count; + } + } + } + } + + if (count != 0) { + if (std::is_same::value || std::is_same::value) { + out[out_idx] = static_cast(std::nearbyint(sum / count)); + } else { + out[out_idx] = sum / static_cast(count); + } + } else { + out[out_idx] = Values_t{0}; + } + ++out_idx; } } } - NGRAPH_SUPPRESS_DEPRECATED_END } +} // namespace kernel +} // namespace template -void avg_pool(const T* arg, - T* out, +void avg_pool(const T* const arg, + T* const out, const Shape& arg_shape, const Shape& out_shape, const Shape& window_shape, const Strides& window_movement_strides, const Shape& padding_below, const Shape& padding_above, - bool include_padding_in_avg_computation) { - NGRAPH_SUPPRESS_DEPRECATED_START - auto old_mode = std::fegetround(); - std::fesetround(FE_TONEAREST); - // At the outermost level we will walk over every output coordinate O. - CoordinateTransform output_transform(out_shape); - - for (const Coordinate& out_coord : output_transform) { - // Our output coordinate O will have the form: - // - // (N,chan,i_1,...,i_n) - - size_t batch_index = out_coord[0]; - size_t channel = out_coord[1]; - - // For the input data we need to iterate the coordinate: - // - // I: - // - // over the range (noninclusive on the right): - // - // (N,chan,s_1*i_1,s_2*i_2,...,s_n*i_n) -> - // - // (N+1,chan+1,s_1*i_1 + window_shape_1,...,s_n*i_n + window_shape_n) - // - // with unit stride. - // - // We iterate this over the *padded* data, so below we will need to check for - // coordinates that fall in the padding area. 
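// ---------------------------------------------------------------------------
// Illustrative sketch only (not part of this hunk): the window placement and
// padding test used by the new avg_pool kernel above, restated as standalone
// helpers. The "sketch_" names are hypothetical; the real logic lives in
// calculate_kernel_position() and elem_in_padding_area() earlier in this file.
#include <cstdint>
#include <cstddef>

// Top-left corner of the pooling window along one spatial dimension:
// out_coord * stride - pad_begin (may be negative when the window starts in padding).
static inline int64_t sketch_window_origin(size_t out_coord, size_t stride, size_t pad_begin) {
    return static_cast<int64_t>(out_coord * stride) - static_cast<int64_t>(pad_begin);
}

// True when origin + offset lands outside the input extent, i.e. in the padding
// area that the averaging loop skips when accumulating the sum and, unless
// pads_in_avg is set, also excludes from the divisor.
static inline bool sketch_in_padding(int64_t origin, size_t offset, size_t input_extent) {
    const int64_t pos = origin + static_cast<int64_t>(offset);
    return pos < 0 || pos >= static_cast<int64_t>(input_extent);
}
// ---------------------------------------------------------------------------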
- - size_t n_spatial_dimensions = arg_shape.size() - 2; - - Coordinate input_batch_transform_start(2 + n_spatial_dimensions); - Coordinate input_batch_transform_end(2 + n_spatial_dimensions); - Strides input_batch_transform_source_strides(2 + n_spatial_dimensions, 1); - AxisVector input_batch_transform_source_axis_order(2 + n_spatial_dimensions); - CoordinateDiff input_batch_transform_padding_below(2 + n_spatial_dimensions); - CoordinateDiff input_batch_transform_padding_above(2 + n_spatial_dimensions); - - input_batch_transform_start[0] = batch_index; - input_batch_transform_end[0] = batch_index + 1; - input_batch_transform_start[1] = channel; - input_batch_transform_end[1] = channel + 1; - input_batch_transform_padding_below[0] = 0; - input_batch_transform_padding_below[1] = 0; - input_batch_transform_padding_above[0] = 0; - input_batch_transform_padding_above[1] = 0; - - for (size_t i = 2; i < n_spatial_dimensions + 2; i++) { - size_t window_shape_this_dim = window_shape[i - 2]; - size_t movement_stride = window_movement_strides[i - 2]; - - input_batch_transform_start[i] = movement_stride * out_coord[i]; - input_batch_transform_end[i] = input_batch_transform_start[i] + window_shape_this_dim; - input_batch_transform_padding_below[i] = padding_below[i - 2]; - input_batch_transform_padding_above[i] = padding_above[i - 2]; - // If a window (kernel) is out of arg shape bounds, trim it to fit - auto padded_upper_bound = arg_shape[i] + padding_below[i - 2] + padding_above[i - 2]; - if (input_batch_transform_end[i] > padded_upper_bound) { - input_batch_transform_end[i] = padded_upper_bound; - } - } - - for (size_t i = 0; i < arg_shape.size(); i++) { - input_batch_transform_source_axis_order[i] = i; - } - - CoordinateTransform input_batch_transform(arg_shape, - input_batch_transform_start, - input_batch_transform_end, - input_batch_transform_source_strides, - input_batch_transform_source_axis_order, - input_batch_transform_padding_below, - input_batch_transform_padding_above); - - // As we go, we compute the sum value: - // - // output[O] := output[O] + arg[I] - // - // and the number of elements: - // - // n_elements := n_elements + 1 - - T result = 0; - size_t n_elements = 0; - - // The below conditions are to provide conformance between the ref and plugins: - // If exclude_padding is disabled (include_padding... enabled), then: - // The size of window doesn't change even if the window was clipped to fit the - // input, number of elements will be equal to window_size.width * - // window_size.height. The exception from this rule is if padding is not - // present, then window size is calculated each time. - - auto padding_present = - padding_below[0] != 0 || padding_below[1] != 0 || padding_above[0] != 0 || padding_above[1] != 0; - - if (include_padding_in_avg_computation && padding_present) { - n_elements = shape_size(window_shape); - } - for (const Coordinate& input_batch_coord : input_batch_transform) { - bool in_bounds = input_batch_transform.has_source_coordinate(input_batch_coord); - - if (in_bounds || include_padding_in_avg_computation) { - T v = in_bounds ? 
arg[input_batch_transform.index(input_batch_coord)] : static_cast(0); - result += v; - if (!padding_present || (in_bounds && !include_padding_in_avg_computation)) { - n_elements++; - } - } - } + const bool include_padding_in_avg_computation) { + if (window_shape.size() > 3) + return; + const RoundingGuard rounding_g{FE_TONEAREST}; + + const auto not_zero = [](size_t p) { + return p != 0; + }; + const auto pads_in_avg = + include_padding_in_avg_computation && (std::any_of(padding_below.begin(), padding_below.end(), not_zero) || + std::any_of(padding_above.begin(), padding_above.end(), not_zero)); + + Shape arg_shape_3D{arg_shape}; + Shape out_shape_3D{out_shape}; + Shape window_shape_3D{window_shape}; + Strides window_movement_strides_3D{window_movement_strides}; + Shape padding_below_3D{padding_below}; + Shape padding_above_3D{padding_above}; + + if (window_shape.size() < 3) { + const size_t dim_diff = 3 - window_shape.size(); + arg_shape_3D.insert(std::next(arg_shape_3D.begin(), 2), dim_diff, 1); + out_shape_3D.insert(std::next(out_shape_3D.begin(), 2), dim_diff, 1); + window_shape_3D.insert(window_shape_3D.begin(), dim_diff, 1); + window_movement_strides_3D.insert(window_movement_strides_3D.begin(), dim_diff, 1); + padding_below_3D.insert(padding_below_3D.begin(), dim_diff, 0); + padding_above_3D.insert(padding_above_3D.begin(), dim_diff, 0); + } - if (n_elements != 0) { - if (std::is_same::value || std::is_same::value) { - out[output_transform.index(out_coord)] = - static_cast(std::nearbyint(static_cast(result) / n_elements)); - } else { - out[output_transform.index(out_coord)] = result / static_cast(n_elements); - } - } else { - out[output_transform.index(out_coord)] = T{0}; + const auto data_batch_elems = shape_size(std::begin(arg_shape) + 1, std::end(arg_shape)); + const auto data_channel_elems = shape_size(std::begin(arg_shape) + 2, std::end(arg_shape)); + + const auto out_batch_elems = shape_size(std::begin(out_shape) + 1, std::end(out_shape)); + const auto out_channel_elems = shape_size(std::begin(out_shape) + 2, std::end(out_shape)); + + for (size_t b = 0; b < arg_shape[0]; ++b) { + for (size_t c = 0; c < arg_shape[1]; ++c) { + const T* data_channel_first_elem = arg + b * data_batch_elems + c * data_channel_elems; + T* out_channel_first_elem = out + b * out_batch_elems + c * out_channel_elems; + kernel::avg_pool_3d(data_channel_first_elem, + out_channel_first_elem, + arg_shape_3D, + out_shape_3D, + window_shape_3D, + window_movement_strides_3D, + padding_below_3D, + padding_above_3D, + pads_in_avg); } - - std::fesetround(old_mode); } - NGRAPH_SUPPRESS_DEPRECATED_END } } // namespace reference } // namespace ov diff --git a/src/core/reference/include/openvino/reference/batch_norm.hpp b/src/core/reference/include/openvino/reference/batch_norm.hpp index 15e10cd5c9175c..1050d78ecaab6a 100644 --- a/src/core/reference/include/openvino/reference/batch_norm.hpp +++ b/src/core/reference/include/openvino/reference/batch_norm.hpp @@ -7,7 +7,7 @@ #include #include -#include "ngraph/shape.hpp" +#include "openvino/core/shape.hpp" #include "openvino/reference/utils/coordinate_transform.hpp" namespace ov { @@ -26,11 +26,10 @@ void batch_norm_inference(float eps, const T* variance, T* out, const Shape& in_shape) { - NGRAPH_SUPPRESS_DEPRECATED_START auto eps_casted = static_cast(eps); size_t in_idx = 0; - CoordinateTransform in_transform(in_shape); + const CoordinateTransformBasic in_transform{in_shape}; for (Coordinate in_coord : in_transform) { auto ch_num = in_coord[1]; auto ch_gamma = 
gamma[ch_num]; @@ -42,7 +41,6 @@ void batch_norm_inference(float eps, out[in_idx] = normalized * ch_gamma + ch_beta; in_idx++; } - NGRAPH_SUPPRESS_DEPRECATED_END } } // namespace reference } // namespace ov diff --git a/src/core/reference/include/openvino/reference/binary_convolution.hpp b/src/core/reference/include/openvino/reference/binary_convolution.hpp index bf3e16beae2a08..ba13dad1a299e2 100644 --- a/src/core/reference/include/openvino/reference/binary_convolution.hpp +++ b/src/core/reference/include/openvino/reference/binary_convolution.hpp @@ -4,7 +4,7 @@ #pragma once -#include "ngraph/shape.hpp" +#include "openvino/core/shape.hpp" #include "openvino/reference/convolution.hpp" namespace ov { diff --git a/src/core/reference/include/openvino/reference/broadcast.hpp b/src/core/reference/include/openvino/reference/broadcast.hpp index abad1b8ce5e765..36767f94cbd618 100644 --- a/src/core/reference/include/openvino/reference/broadcast.hpp +++ b/src/core/reference/include/openvino/reference/broadcast.hpp @@ -4,8 +4,8 @@ #pragma once -#include "ngraph/axis_set.hpp" -#include "ngraph/shape.hpp" +#include "openvino/core/axis_set.hpp" +#include "openvino/core/shape.hpp" namespace ov { namespace reference { diff --git a/src/core/reference/include/openvino/reference/bucketize.hpp b/src/core/reference/include/openvino/reference/bucketize.hpp index 2288d15388dd23..49fd22eaf66b98 100644 --- a/src/core/reference/include/openvino/reference/bucketize.hpp +++ b/src/core/reference/include/openvino/reference/bucketize.hpp @@ -6,7 +6,7 @@ #include -#include "ngraph/shape.hpp" +#include "openvino/core/shape.hpp" namespace ov { namespace reference { diff --git a/src/core/reference/include/openvino/reference/concat.hpp b/src/core/reference/include/openvino/reference/concat.hpp index 6d210a2244b7e0..13d4499dc85a23 100644 --- a/src/core/reference/include/openvino/reference/concat.hpp +++ b/src/core/reference/include/openvino/reference/concat.hpp @@ -6,7 +6,7 @@ #include -#include "ngraph/shape.hpp" +#include "openvino/core/shape.hpp" namespace ov { namespace reference { diff --git a/src/core/reference/include/openvino/reference/convert.hpp b/src/core/reference/include/openvino/reference/convert.hpp index a6b843bd5f0c9d..e943e548a8fa4e 100644 --- a/src/core/reference/include/openvino/reference/convert.hpp +++ b/src/core/reference/include/openvino/reference/convert.hpp @@ -6,8 +6,8 @@ #include -#include "ngraph/type/element_type.hpp" -#include "ngraph/type/float16.hpp" +#include "openvino/core/type/element_type.hpp" +#include "openvino/core/type/float16.hpp" namespace ov { namespace reference { @@ -123,7 +123,7 @@ size_t count_out_of_f16_range(const float* arg, size_t count); // Convert values from f32 to f16 with claming to f16 min/max when value is out of normal finite numbers range void convert_from_f32_to_f16_with_clamp(const float* arg, float16* out, size_t count); -// overload to handle ngraph::boolean (it is stored as char) +// overload to handle ov::boolean (it is stored as char) template typename std::enable_if::value>::type convert(const TI* arg, TO* out, size_t count) { for (size_t i = 0; i < count; ++i) { diff --git a/src/core/reference/include/openvino/reference/convert_color_nv12.hpp b/src/core/reference/include/openvino/reference/convert_color_nv12.hpp index a42aff6184c17f..110e1caf411093 100644 --- a/src/core/reference/include/openvino/reference/convert_color_nv12.hpp +++ b/src/core/reference/include/openvino/reference/convert_color_nv12.hpp @@ -116,9 +116,9 @@ inline bool 
color_convert_nv12(const std::shared_ptr& op, static const size_t N_DIM = 0; static const size_t H_DIM = 1; static const size_t W_DIM = 2; - NGRAPH_CHECK(op->get_input_size() == 1 || op->get_input_size() == 2, - "NV12 conversion shall have one or 2 inputs, but it is ", - op->get_input_size()); + OPENVINO_ASSERT(op->get_input_size() == 1 || op->get_input_size() == 2, + "NV12 conversion shall have one or 2 inputs, but it is ", + op->get_input_size()); auto single_plane = op->get_input_size() == 1; const auto& y_tensor = inputs[0]; @@ -163,9 +163,9 @@ inline bool color_convert_i420(const std::shared_ptr& op, static const size_t N_DIM = 0; static const size_t H_DIM = 1; static const size_t W_DIM = 2; - NGRAPH_CHECK(op->get_input_size() == 1 || op->get_input_size() == 3, - "I420 conversion shall have one or 3 inputs, but it is ", - op->get_input_size()); + OPENVINO_ASSERT(op->get_input_size() == 1 || op->get_input_size() == 3, + "I420 conversion shall have one or 3 inputs, but it is ", + op->get_input_size()); auto single_plane = op->get_input_size() == 1; const auto& y_tensor = inputs[0]; diff --git a/src/core/reference/include/openvino/reference/convolution.hpp b/src/core/reference/include/openvino/reference/convolution.hpp index 6a26372befe073..fb9e68fa3c3acd 100644 --- a/src/core/reference/include/openvino/reference/convolution.hpp +++ b/src/core/reference/include/openvino/reference/convolution.hpp @@ -6,7 +6,9 @@ #include -#include "ngraph/util.hpp" +#include "openvino/core/coordinate_diff.hpp" +#include "openvino/core/shape.hpp" +#include "openvino/core/strides.hpp" namespace ov { namespace reference { @@ -260,33 +262,33 @@ inline void validate_convolution_parameters(const Shape& in_shape, const CoordinateDiff& pads_begin, const CoordinateDiff& pads_end) { // this implementation supports 1D, 2D and 3D convolutions - NGRAPH_CHECK(in_shape.size() >= 3 && in_shape.size() <= 5, "Unsupported input rank: ", in_shape); - - NGRAPH_CHECK(in_shape.size() == f_shape.size(), - "Incompatible input ranks: ", - in_shape.size(), - " and ", - f_shape.size()); - - NGRAPH_CHECK(in_shape[in_channel_axis] == f_shape[filter_in_ch_axis], - "Incompatible input channels in data batch and filters shapes: ", - in_shape[in_channel_axis], - " and ", - f_shape[filter_in_ch_axis]); - - NGRAPH_CHECK(in_shape.size() == out_shape.size(), - "Incompatible input and output ranks: ", - in_shape.size(), - " and ", - out_shape.size()); + OPENVINO_ASSERT(in_shape.size() >= 3 && in_shape.size() <= 5, "Unsupported input rank: ", in_shape); + + OPENVINO_ASSERT(in_shape.size() == f_shape.size(), + "Incompatible input ranks: ", + in_shape.size(), + " and ", + f_shape.size()); + + OPENVINO_ASSERT(in_shape[in_channel_axis] == f_shape[filter_in_ch_axis], + "Incompatible input channels in data batch and filters shapes: ", + in_shape[in_channel_axis], + " and ", + f_shape[filter_in_ch_axis]); + + OPENVINO_ASSERT(in_shape.size() == out_shape.size(), + "Incompatible input and output ranks: ", + in_shape.size(), + " and ", + out_shape.size()); const auto spatial_dims = in_shape.size() - 2; - NGRAPH_CHECK(strides.size() == spatial_dims, "Strides not definied for all and only spatial dimensions"); + OPENVINO_ASSERT(strides.size() == spatial_dims, "Strides not definied for all and only spatial dimensions"); - NGRAPH_CHECK(dilations.size() == spatial_dims, "Dilations not defined for all and only spatial dimensions"); + OPENVINO_ASSERT(dilations.size() == spatial_dims, "Dilations not defined for all and only spatial dimensions"); - 
NGRAPH_CHECK((pads_begin.size() == pads_end.size()) && (pads_begin.size() == spatial_dims), - "Pads not defined for all and only spatial dimensions"); + OPENVINO_ASSERT((pads_begin.size() == pads_end.size()) && (pads_begin.size() == spatial_dims), + "Pads not defined for all and only spatial dimensions"); Shape out_spatial_shape{std::next(out_shape.begin(), 2), std::end(out_shape)}; Shape infered_out_spatial_shape{}; @@ -297,7 +299,7 @@ inline void validate_convolution_parameters(const Shape& in_shape, dilations, pads_begin, pads_end); - NGRAPH_CHECK(out_spatial_shape == infered_out_spatial_shape, "Incorrect output shape provided"); + OPENVINO_ASSERT(out_spatial_shape == infered_out_spatial_shape, "Incorrect output shape provided"); } } // namespace diff --git a/src/core/reference/include/openvino/reference/convolution_backprop_data.hpp b/src/core/reference/include/openvino/reference/convolution_backprop_data.hpp index 05d6e8d559a7ef..d1491d68b5f156 100644 --- a/src/core/reference/include/openvino/reference/convolution_backprop_data.hpp +++ b/src/core/reference/include/openvino/reference/convolution_backprop_data.hpp @@ -9,8 +9,6 @@ #include #include -#include "ngraph/axis_vector.hpp" -#include "ngraph/util.hpp" #include "openvino/reference/convolution.hpp" #include "openvino/reference/reverse.hpp" @@ -105,36 +103,36 @@ inline void validate_convolution_backprop_parameters(const Shape& in_shape, const CoordinateDiff& pads_end, const CoordinateDiff& output_padding) { // this implementation supports 1D, 2D and 3D convolutions - NGRAPH_CHECK(in_shape.size() >= 3 && in_shape.size() <= 5, "Unsupported input rank: ", in_shape); - - NGRAPH_CHECK(in_shape.size() == f_shape.size(), - "Incompatible input ranks: ", - in_shape.size(), - " and ", - f_shape.size()); - - NGRAPH_CHECK(in_shape[in_channel_axis] == f_shape[filter_input_ch_axis], - "Incompatible input channels in data batch and filters shapes: ", - in_shape[in_channel_axis], - " and ", - f_shape[filter_input_ch_axis]); - - NGRAPH_CHECK(in_shape.size() == out_shape.size(), - "Incompatible input and output ranks: ", - in_shape.size(), - " and ", - out_shape.size()); + OPENVINO_ASSERT(in_shape.size() >= 3 && in_shape.size() <= 5, "Unsupported input rank: ", in_shape); + + OPENVINO_ASSERT(in_shape.size() == f_shape.size(), + "Incompatible input ranks: ", + in_shape.size(), + " and ", + f_shape.size()); + + OPENVINO_ASSERT(in_shape[in_channel_axis] == f_shape[filter_input_ch_axis], + "Incompatible input channels in data batch and filters shapes: ", + in_shape[in_channel_axis], + " and ", + f_shape[filter_input_ch_axis]); + + OPENVINO_ASSERT(in_shape.size() == out_shape.size(), + "Incompatible input and output ranks: ", + in_shape.size(), + " and ", + out_shape.size()); const auto spatial_dims = in_shape.size() - 2; - NGRAPH_CHECK(strides.size() == spatial_dims, "Strides not definied for all and only spatial dimensions."); + OPENVINO_ASSERT(strides.size() == spatial_dims, "Strides not definied for all and only spatial dimensions."); - NGRAPH_CHECK(dilations.size() == spatial_dims, "Dilations not defined for all and only spatial dimensions."); + OPENVINO_ASSERT(dilations.size() == spatial_dims, "Dilations not defined for all and only spatial dimensions."); - NGRAPH_CHECK((pads_begin.size() == pads_end.size()) && (pads_begin.size() == spatial_dims), - "Pads not defined for all and only spatial dimensions."); + OPENVINO_ASSERT((pads_begin.size() == pads_end.size()) && (pads_begin.size() == spatial_dims), + "Pads not defined for all and only spatial 
dimensions."); - NGRAPH_CHECK(!output_padding.empty() && output_padding.size() == spatial_dims, - "Output padding not defined for all and only spatial dimensions."); + OPENVINO_ASSERT(!output_padding.empty() && output_padding.size() == spatial_dims, + "Output padding not defined for all and only spatial dimensions."); Shape out_spatial_shape{std::next(out_shape.begin(), 2), std::end(out_shape)}; Shape infered_out_spatial_shape{}; @@ -145,7 +143,7 @@ inline void validate_convolution_backprop_parameters(const Shape& in_shape, strides, dilations, output_padding); - NGRAPH_CHECK(out_spatial_shape == infered_out_spatial_shape, "Incorrect output shape provided"); + OPENVINO_ASSERT(out_spatial_shape == infered_out_spatial_shape, "Incorrect output shape provided"); } } // namespace diff --git a/src/core/reference/include/openvino/reference/ctc_greedy_decoder.hpp b/src/core/reference/include/openvino/reference/ctc_greedy_decoder.hpp index 1cab6887744d61..bb19527c659465 100644 --- a/src/core/reference/include/openvino/reference/ctc_greedy_decoder.hpp +++ b/src/core/reference/include/openvino/reference/ctc_greedy_decoder.hpp @@ -8,7 +8,7 @@ #include #include -#include "openvino/reference/utils/coordinate_transform.hpp" +#include "openvino/reference/utils/coordinate_index.hpp" namespace ov { namespace reference { @@ -20,16 +20,11 @@ void ctc_greedy_decoder(const T* data, const Shape& sequence_masks_shape, const Shape& out_shape, const bool ctc_merge_repeated) { - OPENVINO_SUPPRESS_DEPRECATED_START const auto max_seq_len = data_shape[0]; const auto batch_size = data_shape[1]; const auto class_count = data_shape[2]; const uint64_t blank_index = class_count - 1; - CoordinateTransform out_transform = CoordinateTransform(out_shape); - CoordinateTransform data_transform = CoordinateTransform(data_shape); - CoordinateTransform seq_masks_transform = CoordinateTransform(sequence_masks_shape); - // final sequences don't have to fill the whole output, elements that don't store // information are set to -1 @@ -38,10 +33,10 @@ void ctc_greedy_decoder(const T* data, for (unsigned int batch_ind = 0; batch_ind < batch_size; batch_ind++) { T previous_class_index = static_cast(-1); - auto out_index = out_transform.index({batch_ind, 0, 0, 0}); + auto out_index = coordinate_index({batch_ind, 0, 0, 0}, out_shape); for (unsigned int seq_ind = 0; seq_ind < max_seq_len; seq_ind++) { - auto data_index = data_transform.index({seq_ind, batch_ind, 0}); - auto mask_index = seq_masks_transform.index({seq_ind, batch_ind}); + auto data_index = coordinate_index({seq_ind, batch_ind, 0}, data_shape); + auto mask_index = coordinate_index({seq_ind, batch_ind}, sequence_masks_shape); if (sequence_masks[mask_index] == T{0}) { break; @@ -59,7 +54,6 @@ void ctc_greedy_decoder(const T* data, } } std::copy(tmp_out.begin(), tmp_out.end(), out); - OPENVINO_SUPPRESS_DEPRECATED_END } } // namespace reference } // namespace ov diff --git a/src/core/reference/include/openvino/reference/ctc_loss.hpp b/src/core/reference/include/openvino/reference/ctc_loss.hpp index 1b16b352eb4870..7e12b9abf84911 100644 --- a/src/core/reference/include/openvino/reference/ctc_loss.hpp +++ b/src/core/reference/include/openvino/reference/ctc_loss.hpp @@ -4,11 +4,11 @@ #pragma once -#include - +#include +#include #include -#include "ngraph/shape_util.hpp" +#include "openvino/core/shape.hpp" namespace ov { namespace reference { diff --git a/src/core/reference/include/openvino/reference/deformable_convolution.hpp 
b/src/core/reference/include/openvino/reference/deformable_convolution.hpp index e3b553352c24f9..a625578226359c 100644 --- a/src/core/reference/include/openvino/reference/deformable_convolution.hpp +++ b/src/core/reference/include/openvino/reference/deformable_convolution.hpp @@ -21,17 +21,17 @@ inline void validate_deformable_convolution_params(const Shape& in_shape, const int64_t groups, const int64_t deformable_groups) { // this implementation supports 2D deformable convolutions - NGRAPH_CHECK(in_shape.size() == 4, "Unsupported input rank: ", in_shape); - NGRAPH_CHECK(o_shape.size() == 4, "Unsupported offset rank: ", o_shape); - NGRAPH_CHECK(f_shape.size() == 4, "Unsupported kernel rank: ", f_shape); - NGRAPH_CHECK(m_shape.size() == 4, "Unsupported mask rank: ", m_shape); - - NGRAPH_CHECK(in_shape[1] % groups == 0, - "Input channels of data batch input must be evenly divisible by " - "'groups' attribute"); - NGRAPH_CHECK(f_shape[0] % groups == 0, - "Output channels of filters must be evenly divisible by 'groups' " - "attribute"); + OPENVINO_ASSERT(in_shape.size() == 4, "Unsupported input rank: ", in_shape); + OPENVINO_ASSERT(o_shape.size() == 4, "Unsupported offset rank: ", o_shape); + OPENVINO_ASSERT(f_shape.size() == 4, "Unsupported kernel rank: ", f_shape); + OPENVINO_ASSERT(m_shape.size() == 4, "Unsupported mask rank: ", m_shape); + + OPENVINO_ASSERT(in_shape[1] % groups == 0, + "Input channels of data batch input must be evenly divisible by " + "'groups' attribute"); + OPENVINO_ASSERT(f_shape[0] % groups == 0, + "Output channels of filters must be evenly divisible by 'groups' " + "attribute"); const Shape scaled_f_shape = [f_shape](int64_t g) { Shape shape{f_shape}; @@ -46,14 +46,15 @@ inline void validate_deformable_convolution_params(const Shape& in_shape, const Shape m_spatial_shape{std::next(m_shape.begin(), 2), std::end(m_shape)}; const Shape out_spatial_shape{std::next(out_shape.begin(), 2), std::end(out_shape)}; - NGRAPH_CHECK(o_shape[1] == deformable_groups * shape_size(f_spatial_shape) * 2, - "The channels dimension of offsets input is not " - "compatible with filters and 'deformable group' attribute"); - NGRAPH_CHECK(m_shape[1] == deformable_groups * shape_size(f_spatial_shape), - "The channels dimension of mask input is not " - "compatible with filters and 'deformable group' attribute"); - NGRAPH_CHECK(out_spatial_shape == o_spatial_shape, "Spatial dimensions of output and offsets values must be equal"); - NGRAPH_CHECK(out_spatial_shape == m_spatial_shape, "Spatial dimensions of output and mask values must be equal"); + OPENVINO_ASSERT(o_shape[1] == deformable_groups * shape_size(f_spatial_shape) * 2, + "The channels dimension of offsets input is not " + "compatible with filters and 'deformable group' attribute"); + OPENVINO_ASSERT(m_shape[1] == deformable_groups * shape_size(f_spatial_shape), + "The channels dimension of mask input is not " + "compatible with filters and 'deformable group' attribute"); + OPENVINO_ASSERT(out_spatial_shape == o_spatial_shape, + "Spatial dimensions of output and offsets values must be equal"); + OPENVINO_ASSERT(out_spatial_shape == m_spatial_shape, "Spatial dimensions of output and mask values must be equal"); } inline Shape shape_reduce(const Shape& s) { @@ -295,7 +296,7 @@ void deformable_convolution(const T* in, const int64_t deformable_groups, const bool bilinear_interpolation_pad = false) { Shape m_shape = {o_shape[0], o_shape[1] / 2, o_shape[2], o_shape[3]}; - std::vector mask(ngraph::shape_size(m_shape), 1); + std::vector 
mask(shape_size(m_shape), 1); deformable_convolution(in, offsets, filters, diff --git a/src/core/reference/include/openvino/reference/deformable_psroi_pooling.hpp b/src/core/reference/include/openvino/reference/deformable_psroi_pooling.hpp index 5c1d5fed7df4ce..1d62f34f4f92cf 100644 --- a/src/core/reference/include/openvino/reference/deformable_psroi_pooling.hpp +++ b/src/core/reference/include/openvino/reference/deformable_psroi_pooling.hpp @@ -14,7 +14,7 @@ #include #include "clamp.hpp" -#include "ngraph/shape.hpp" +#include "openvino/core/shape.hpp" namespace ov { namespace reference { diff --git a/src/core/reference/include/openvino/reference/depth_to_space.hpp b/src/core/reference/include/openvino/reference/depth_to_space.hpp index d895354aad0d27..93b957f93eaf6a 100644 --- a/src/core/reference/include/openvino/reference/depth_to_space.hpp +++ b/src/core/reference/include/openvino/reference/depth_to_space.hpp @@ -4,8 +4,8 @@ #pragma once -#include "ngraph/op/depth_to_space.hpp" -#include "ngraph/shape.hpp" +#include "openvino/core/shape.hpp" +#include "openvino/op/depth_to_space.hpp" namespace ov { namespace reference { diff --git a/src/core/reference/include/openvino/reference/detection_output.hpp b/src/core/reference/include/openvino/reference/detection_output.hpp index b55ad70916de58..f5e79988ba7e71 100644 --- a/src/core/reference/include/openvino/reference/detection_output.hpp +++ b/src/core/reference/include/openvino/reference/detection_output.hpp @@ -6,12 +6,11 @@ #include #include -#include #include -#include "ngraph/op/detection_output.hpp" -#include "ngraph/op/util/detection_output_base.hpp" -#include "ngraph/shape.hpp" +#include "openvino/core/shape.hpp" +#include "openvino/op/detection_output.hpp" +#include "openvino/op/util/detection_output_base.hpp" namespace ov { namespace reference { @@ -28,7 +27,7 @@ class referenceDetectionOutput { }; using LabelBBox = std::map>; - ngraph::op::util::DetectionOutputBase::AttributesBase attrs; + op::util::DetectionOutputBase::AttributesBase attrs; size_t numImages; size_t priorSize; size_t numPriors; @@ -417,10 +416,10 @@ class referenceDetectionOutput { } public: - referenceDetectionOutput(const ngraph::op::DetectionOutputAttrs& _attrs, - const ngraph::Shape& locShape, - const ngraph::Shape& priorsShape, - const ngraph::Shape& outShape) + referenceDetectionOutput(const op::v0::DetectionOutput::Attributes& _attrs, + const Shape& locShape, + const Shape& priorsShape, + const Shape& outShape) : attrs(_attrs) { numImages = locShape[0]; priorSize = _attrs.normalized ? 4 : 5; @@ -433,11 +432,11 @@ class referenceDetectionOutput { outTotalSize = shape_size(outShape); } - referenceDetectionOutput(const ngraph::op::util::DetectionOutputBase::AttributesBase& _attrs, - const ngraph::Shape& locShape, - const ngraph::Shape& classPredShape, - const ngraph::Shape& priorsShape, - const ngraph::Shape& outShape) + referenceDetectionOutput(const op::util::DetectionOutputBase::AttributesBase& _attrs, + const Shape& locShape, + const Shape& classPredShape, + const Shape& priorsShape, + const Shape& outShape) : attrs(_attrs) { numImages = locShape[0]; priorSize = _attrs.normalized ? 
4 : 5; diff --git a/src/core/reference/include/openvino/reference/divide.hpp b/src/core/reference/include/openvino/reference/divide.hpp index 858d8f4f696813..08b75017c293d2 100644 --- a/src/core/reference/include/openvino/reference/divide.hpp +++ b/src/core/reference/include/openvino/reference/divide.hpp @@ -8,10 +8,10 @@ #include #include -#include "ngraph/op/util/attr_types.hpp" -#include "ngraph/shape.hpp" -#include "ngraph/type/bfloat16.hpp" -#include "ngraph/type/float16.hpp" +#include "openvino/core/shape.hpp" +#include "openvino/core/type/bfloat16.hpp" +#include "openvino/core/type/float16.hpp" +#include "openvino/op/util/attr_types.hpp" #include "openvino/reference/autobroadcast_binop.hpp" namespace ov { diff --git a/src/core/reference/include/openvino/reference/einsum.hpp b/src/core/reference/include/openvino/reference/einsum.hpp index 8e477959bcba37..c1a42524d50a2c 100644 --- a/src/core/reference/include/openvino/reference/einsum.hpp +++ b/src/core/reference/include/openvino/reference/einsum.hpp @@ -5,9 +5,8 @@ #pragma once #include -#include -#include "ngraph/shape.hpp" +#include "openvino/runtime/tensor.hpp" namespace ov { namespace reference { diff --git a/src/core/reference/include/openvino/reference/embedding_bag_offsets_sum.hpp b/src/core/reference/include/openvino/reference/embedding_bag_offsets_sum.hpp index d6b03e1fbc5e5a..0d87538de890b0 100644 --- a/src/core/reference/include/openvino/reference/embedding_bag_offsets_sum.hpp +++ b/src/core/reference/include/openvino/reference/embedding_bag_offsets_sum.hpp @@ -4,7 +4,7 @@ #pragma once -#include "ngraph/shape_util.hpp" +#include "openvino/core/shape.hpp" namespace ov { namespace reference { diff --git a/src/core/reference/include/openvino/reference/embedding_bag_packed_sum.hpp b/src/core/reference/include/openvino/reference/embedding_bag_packed_sum.hpp index f16b2355b9465a..678b7495c733cf 100644 --- a/src/core/reference/include/openvino/reference/embedding_bag_packed_sum.hpp +++ b/src/core/reference/include/openvino/reference/embedding_bag_packed_sum.hpp @@ -4,7 +4,7 @@ #pragma once -#include "ngraph/shape_util.hpp" +#include "openvino/core/shape.hpp" namespace ov { namespace reference { diff --git a/src/core/reference/include/openvino/reference/embedding_segments_sum.hpp b/src/core/reference/include/openvino/reference/embedding_segments_sum.hpp index f11947fac9b5de..557fd248d0d12d 100644 --- a/src/core/reference/include/openvino/reference/embedding_segments_sum.hpp +++ b/src/core/reference/include/openvino/reference/embedding_segments_sum.hpp @@ -4,7 +4,7 @@ #pragma once -#include "ngraph/shape_util.hpp" +#include "openvino/core/shape.hpp" namespace ov { namespace reference { diff --git a/src/core/reference/include/openvino/reference/equal.hpp b/src/core/reference/include/openvino/reference/equal.hpp index 62554f9b4a234f..c81d47c23d18ff 100644 --- a/src/core/reference/include/openvino/reference/equal.hpp +++ b/src/core/reference/include/openvino/reference/equal.hpp @@ -11,8 +11,8 @@ #include -#include "ngraph/op/util/attr_types.hpp" -#include "ngraph/shape.hpp" +#include "openvino/core/shape.hpp" +#include "openvino/op/util/attr_types.hpp" #include "openvino/reference/autobroadcast_binop.hpp" namespace ov { diff --git a/src/core/reference/include/openvino/reference/erf.hpp b/src/core/reference/include/openvino/reference/erf.hpp index 09ff245dd4976e..ea69fe98bd6ca3 100644 --- a/src/core/reference/include/openvino/reference/erf.hpp +++ b/src/core/reference/include/openvino/reference/erf.hpp @@ -8,9 +8,6 @@ #include #include 
-#include "ngraph/type/bfloat16.hpp" -#include "ngraph/type/float16.hpp" - namespace ov { namespace reference { template ::value, bool>::type = true> diff --git a/src/core/reference/include/openvino/reference/experimental_detectron_detection_output.hpp b/src/core/reference/include/openvino/reference/experimental_detectron_detection_output.hpp index c2ba17605e6b5d..52e3602897c049 100644 --- a/src/core/reference/include/openvino/reference/experimental_detectron_detection_output.hpp +++ b/src/core/reference/include/openvino/reference/experimental_detectron_detection_output.hpp @@ -16,16 +16,11 @@ #pragma once -#include #include #include -#include #include -#include "ngraph/node.hpp" -#include "ngraph/op/util/op_types.hpp" -#include "ngraph/ops.hpp" -#include "ngraph/shape_util.hpp" +#include "openvino/op/experimental_detectron_detection_output.hpp" namespace ov { namespace reference { @@ -41,7 +36,7 @@ void experimental_detectron_detection_output(const float* input_rois, void experimental_detectron_detection_output_postprocessing(void* pboxes, void* pclasses, void* pscores, - const ngraph::element::Type output_type, + const element::Type output_type, const std::vector& output_boxes, const std::vector& output_classes, const std::vector& output_scores, diff --git a/src/core/reference/include/openvino/reference/experimental_detectron_prior_grid_generator.hpp b/src/core/reference/include/openvino/reference/experimental_detectron_prior_grid_generator.hpp index 565baefb5e18af..c5437649d54b63 100644 --- a/src/core/reference/include/openvino/reference/experimental_detectron_prior_grid_generator.hpp +++ b/src/core/reference/include/openvino/reference/experimental_detectron_prior_grid_generator.hpp @@ -18,13 +18,8 @@ #include #include -#include -#include -#include "ngraph/node.hpp" -#include "ngraph/op/util/op_types.hpp" -#include "ngraph/ops.hpp" -#include "ngraph/shape_util.hpp" +#include "openvino/core/shape.hpp" namespace ov { namespace reference { diff --git a/src/core/reference/include/openvino/reference/experimental_detectron_proposal_single_image.hpp b/src/core/reference/include/openvino/reference/experimental_detectron_proposal_single_image.hpp index 4e890c051e352d..68d7f7e6889124 100644 --- a/src/core/reference/include/openvino/reference/experimental_detectron_proposal_single_image.hpp +++ b/src/core/reference/include/openvino/reference/experimental_detectron_proposal_single_image.hpp @@ -4,16 +4,11 @@ #pragma once -#include #include #include -#include #include -#include "ngraph/node.hpp" -#include "ngraph/op/util/op_types.hpp" -#include "ngraph/ops.hpp" -#include "ngraph/shape_util.hpp" +#include "openvino/op/experimental_detectron_generate_proposals.hpp" namespace ov { namespace reference { @@ -32,7 +27,7 @@ void experimental_detectron_proposals_single_image( void experimental_detectron_proposals_single_image_postprocessing(void* prois, void* pscores, - const ngraph::element::Type output_type, + const element::Type output_type, const std::vector& output_rois, const std::vector& output_scores, const Shape& output_rois_shape, diff --git a/src/core/reference/include/openvino/reference/experimental_detectron_roi_feature_extractor.hpp b/src/core/reference/include/openvino/reference/experimental_detectron_roi_feature_extractor.hpp index df8edf00986547..80d283d2dad175 100644 --- a/src/core/reference/include/openvino/reference/experimental_detectron_roi_feature_extractor.hpp +++ b/src/core/reference/include/openvino/reference/experimental_detectron_roi_feature_extractor.hpp @@ -6,13 +6,9 @@ 
#include #include -#include #include -#include "ngraph/node.hpp" -#include "ngraph/op/util/op_types.hpp" -#include "ngraph/ops.hpp" -#include "ngraph/shape_util.hpp" +#include "openvino/op/experimental_detectron_roi_feature.hpp" namespace ov { namespace reference { @@ -25,7 +21,7 @@ void experimental_detectron_roi_feature_extractor( void experimental_detectron_roi_feature_extractor_postprocessing(void* prois_features, void* prois, - const ngraph::element::Type output_type, + const element::Type output_type, const std::vector& output_roi_features, const std::vector& output_rois, const Shape& output_roi_features_shape, diff --git a/src/core/reference/include/openvino/reference/experimental_detectron_topk_rois.hpp b/src/core/reference/include/openvino/reference/experimental_detectron_topk_rois.hpp index 8f29fc33161cb7..876874c2a3a258 100644 --- a/src/core/reference/include/openvino/reference/experimental_detectron_topk_rois.hpp +++ b/src/core/reference/include/openvino/reference/experimental_detectron_topk_rois.hpp @@ -4,16 +4,11 @@ #pragma once -#include #include #include -#include #include -#include "ngraph/node.hpp" -#include "ngraph/op/util/op_types.hpp" -#include "ngraph/ops.hpp" -#include "ngraph/shape_util.hpp" +#include "openvino/core/shape.hpp" namespace ov { namespace reference { diff --git a/src/core/reference/include/openvino/reference/extract_image_patches.hpp b/src/core/reference/include/openvino/reference/extract_image_patches.hpp index c6130c38906bad..248409178566cf 100644 --- a/src/core/reference/include/openvino/reference/extract_image_patches.hpp +++ b/src/core/reference/include/openvino/reference/extract_image_patches.hpp @@ -2,9 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include - -#include "ngraph/shape_util.hpp" +#include "openvino/op/extractimagepatches.hpp" namespace ov { namespace reference { diff --git a/src/core/reference/include/openvino/reference/eye.hpp b/src/core/reference/include/openvino/reference/eye.hpp index 328c7f942a2e38..0991637031538f 100644 --- a/src/core/reference/include/openvino/reference/eye.hpp +++ b/src/core/reference/include/openvino/reference/eye.hpp @@ -6,7 +6,7 @@ #include -#include "ngraph/shape.hpp" +#include "openvino/core/shape.hpp" #include "utils/span.hpp" namespace ov { diff --git a/src/core/reference/include/openvino/reference/fake_quantize.hpp b/src/core/reference/include/openvino/reference/fake_quantize.hpp index a9122ede16e170..d0828cd23087d3 100644 --- a/src/core/reference/include/openvino/reference/fake_quantize.hpp +++ b/src/core/reference/include/openvino/reference/fake_quantize.hpp @@ -11,10 +11,10 @@ #include #include -#include "ngraph/check.hpp" -#include "ngraph/op/util/attr_types.hpp" -#include "ngraph/shape.hpp" -#include "ngraph/shape_util.hpp" +#include "openvino/core/except.hpp" +#include "openvino/core/shape.hpp" +#include "openvino/core/shape_util.hpp" +#include "openvino/op/util/attr_types.hpp" #include "openvino/reference/utils/coordinate_transform.hpp" namespace ov { @@ -65,11 +65,11 @@ void fake_quantize(const T* const arg, out[i] = q(arg[i]); } } else { - NGRAPH_CHECK(in_low_shape.size() <= arg_shape.size() && in_high_shape.size() <= arg_shape.size() && - out_low_shape.size() <= arg_shape.size() && out_high_shape.size() <= arg_shape.size(), - "Tensors with input\\output ranges should have rank less or " - "equal to data tensor rank equal to ", - arg_shape.size()); + OPENVINO_ASSERT(in_low_shape.size() <= arg_shape.size() && in_high_shape.size() <= arg_shape.size() && + out_low_shape.size() <= 
arg_shape.size() && out_high_shape.size() <= arg_shape.size(), + "Tensors with input\\output ranges should have rank less or " + "equal to data tensor rank equal to ", + arg_shape.size()); Shape arg0_padded_shape = arg_shape; Shape arg1_padded_shape = in_low_shape; @@ -156,13 +156,11 @@ void fake_quantize(const T* const arg, const auto output_strides = row_major_strides(output_shape); for (const Coordinate& output_coord : output_transform) { - OPENVINO_SUPPRESS_DEPRECATED_START - const Coordinate arg0_coord = ngraph::reduce(output_coord, arg0_squeezed_axes, false); - const Coordinate arg1_coord = ngraph::reduce(output_coord, arg1_squeezed_axes, false); - const Coordinate arg2_coord = ngraph::reduce(output_coord, arg2_squeezed_axes, false); - const Coordinate arg3_coord = ngraph::reduce(output_coord, arg3_squeezed_axes, false); - const Coordinate arg4_coord = ngraph::reduce(output_coord, arg4_squeezed_axes, false); - OPENVINO_SUPPRESS_DEPRECATED_END + const auto arg0_coord = util::reduce(output_coord, arg0_squeezed_axes); + const auto arg1_coord = util::reduce(output_coord, arg1_squeezed_axes); + const auto arg2_coord = util::reduce(output_coord, arg2_squeezed_axes); + const auto arg3_coord = util::reduce(output_coord, arg3_squeezed_axes); + const auto arg4_coord = util::reduce(output_coord, arg4_squeezed_axes); const size_t arg0_idx = std::inner_product(arg0_coord.begin(), arg0_coord.end(), arg0_strides.begin(), uint64_t(0)); diff --git a/src/core/reference/include/openvino/reference/floor_mod.hpp b/src/core/reference/include/openvino/reference/floor_mod.hpp index 09add88410d3bc..2c63b92310cbaa 100644 --- a/src/core/reference/include/openvino/reference/floor_mod.hpp +++ b/src/core/reference/include/openvino/reference/floor_mod.hpp @@ -7,7 +7,6 @@ #include #include -#include "ngraph/shape_util.hpp" #include "openvino/reference/autobroadcast_binop.hpp" namespace ov { diff --git a/src/core/reference/include/openvino/reference/gather.hpp b/src/core/reference/include/openvino/reference/gather.hpp index 30a52889a7ba8a..4324e0ffc5de4a 100644 --- a/src/core/reference/include/openvino/reference/gather.hpp +++ b/src/core/reference/include/openvino/reference/gather.hpp @@ -6,7 +6,7 @@ #include -#include "ngraph/shape.hpp" +#include "openvino/core/shape.hpp" #include "utils/span.hpp" namespace ov { diff --git a/src/core/reference/include/openvino/reference/gather_tree.hpp b/src/core/reference/include/openvino/reference/gather_tree.hpp index 5ac49c4c337e3f..df0e581f0dfc2c 100644 --- a/src/core/reference/include/openvino/reference/gather_tree.hpp +++ b/src/core/reference/include/openvino/reference/gather_tree.hpp @@ -4,8 +4,8 @@ #pragma once -#include "ngraph/shape.hpp" -#include "ngraph/type/element_type.hpp" +#include "openvino/core/shape.hpp" +#include "openvino/core/type/element_type.hpp" namespace ov { namespace reference { diff --git a/src/core/reference/include/openvino/reference/gelu.hpp b/src/core/reference/include/openvino/reference/gelu.hpp index d67bcd8827d2d8..091887c4f9d471 100644 --- a/src/core/reference/include/openvino/reference/gelu.hpp +++ b/src/core/reference/include/openvino/reference/gelu.hpp @@ -4,9 +4,9 @@ #pragma once -#include #include -#include + +#include "openvino/op/gelu.hpp" namespace ov { namespace reference { diff --git a/src/core/reference/include/openvino/reference/generate_proposal.hpp b/src/core/reference/include/openvino/reference/generate_proposal.hpp index ae29efa0cc358d..64bbfc6e55dbaf 100644 --- 
a/src/core/reference/include/openvino/reference/generate_proposal.hpp +++ b/src/core/reference/include/openvino/reference/generate_proposal.hpp @@ -4,16 +4,11 @@ #pragma once -#include #include #include -#include #include -#include "ngraph/node.hpp" -#include "ngraph/op/util/op_types.hpp" -#include "ngraph/ops.hpp" -#include "ngraph/shape_util.hpp" +#include "openvino/op/generate_proposals.hpp" namespace ov { namespace reference { @@ -33,8 +28,8 @@ void generate_proposals(const std::vector& im_info, void generate_proposals_postprocessing(void* prois, void* pscores, void* proi_num, - const ngraph::element::Type& output_type, - const ngraph::element::Type& roi_num_type, + const element::Type& output_type, + const element::Type& roi_num_type, const std::vector& output_rois, const std::vector& output_scores, const std::vector& num_rois, diff --git a/src/core/reference/include/openvino/reference/greater.hpp b/src/core/reference/include/openvino/reference/greater.hpp index 41128fc1c5de2e..2dff5e6c4899b8 100644 --- a/src/core/reference/include/openvino/reference/greater.hpp +++ b/src/core/reference/include/openvino/reference/greater.hpp @@ -6,8 +6,8 @@ #include -#include "ngraph/op/util/attr_types.hpp" -#include "ngraph/shape.hpp" +#include "openvino/core/shape.hpp" +#include "openvino/op/util/attr_types.hpp" #include "openvino/reference/autobroadcast_binop.hpp" namespace ov { diff --git a/src/core/reference/include/openvino/reference/greater_eq.hpp b/src/core/reference/include/openvino/reference/greater_eq.hpp index 5072604a413ad2..79f66e3280fdd5 100644 --- a/src/core/reference/include/openvino/reference/greater_eq.hpp +++ b/src/core/reference/include/openvino/reference/greater_eq.hpp @@ -6,8 +6,8 @@ #include -#include "ngraph/op/util/attr_types.hpp" -#include "ngraph/shape.hpp" +#include "openvino/core/shape.hpp" +#include "openvino/op/util/attr_types.hpp" #include "openvino/reference/autobroadcast_binop.hpp" namespace ov { diff --git a/src/core/reference/include/openvino/reference/grid_sample.hpp b/src/core/reference/include/openvino/reference/grid_sample.hpp index 9ad50767dc25a5..88c071538cc1cc 100644 --- a/src/core/reference/include/openvino/reference/grid_sample.hpp +++ b/src/core/reference/include/openvino/reference/grid_sample.hpp @@ -10,7 +10,7 @@ #include #include -#include "ngraph/shape.hpp" +#include "openvino/core/shape.hpp" #include "openvino/op/grid_sample.hpp" namespace ov { diff --git a/src/core/reference/include/openvino/reference/group_convolution_backprop_data.hpp b/src/core/reference/include/openvino/reference/group_convolution_backprop_data.hpp index d64c63734a14e1..66d2a6f431df92 100644 --- a/src/core/reference/include/openvino/reference/group_convolution_backprop_data.hpp +++ b/src/core/reference/include/openvino/reference/group_convolution_backprop_data.hpp @@ -4,7 +4,8 @@ #pragma once -#include "ngraph/util.hpp" +#include "openvino/core/coordinate_diff.hpp" +#include "openvino/core/strides.hpp" #include "openvino/reference/group_convolution.hpp" namespace ov { @@ -100,8 +101,8 @@ void group_convolution_backprop_data(const T* in, // DEPRECATED, can't be removed currently due to arm-plugin dependency template ::type> -NGRAPH_DEPRECATED("group_convolution_backprop_data function without output_paddings is deprecated, " - "use the one with output_padding.") +OPENVINO_DEPRECATED("group_convolution_backprop_data function without output_paddings is deprecated, " + "use the one with output_padding.") void group_convolution_backprop_data(const INPUT* in, const FILTER* f, OUTPUT* 
out, @@ -112,7 +113,7 @@ void group_convolution_backprop_data(const INPUT* in, const Strides& dilation, const CoordinateDiff& pads_begin, const CoordinateDiff& pads_end) { - const ngraph::CoordinateDiff output_padding(in_shape.size() - 2, 0); + const CoordinateDiff output_padding(in_shape.size() - 2, 0); group_convolution_backprop_data(in, f, diff --git a/src/core/reference/include/openvino/reference/interpolate.hpp b/src/core/reference/include/openvino/reference/interpolate.hpp index 10b6a466f224b3..13fb11c16206fc 100644 --- a/src/core/reference/include/openvino/reference/interpolate.hpp +++ b/src/core/reference/include/openvino/reference/interpolate.hpp @@ -13,16 +13,16 @@ #include #include "interpolate_pil.hpp" -#include "ngraph/op/interpolate.hpp" -#include "ngraph/shape_util.hpp" +#include "openvino/op/interpolate.hpp" +#include "openvino/reference/utils/coordinate_index.hpp" #include "openvino/reference/utils/coordinate_transform.hpp" #include "transpose.hpp" namespace ov { namespace reference { -using Nearest_mode = ngraph::op::v4::Interpolate::NearestMode; -using Transform_mode = ngraph::op::v4::Interpolate::CoordinateTransformMode; -using InterpolateMode = ngraph::op::v4::Interpolate::InterpolateMode; +using Nearest_mode = op::v4::Interpolate::NearestMode; +using Transform_mode = op::v4::Interpolate::CoordinateTransformMode; +using InterpolateMode = op::v4::Interpolate::InterpolateMode; /// \brief Calculation of nearest pixel. class GetNearestPixel final { @@ -211,7 +211,7 @@ class InterpolateEvalHelper final { float prod_a; std::vector a; std::vector r; - Shape shape_for_indeces; + Shape shape_for_indices; }; InfoForLinearMode get_info_for_linear_mode(); @@ -362,9 +362,7 @@ template void InterpolateEval::linear_func(const T* input_data, T* out) { auto info = helper.get_info_for_linear_mode(); - NGRAPH_SUPPRESS_DEPRECATED_START - CoordinateTransform output_transform(m_out_shape); - CoordinateTransform input_transform(m_input_data_shape); + const CoordinateTransformBasic output_transform{m_out_shape}; for (const Coordinate& output_coord : output_transform) { auto icoords_data = helper.get_icoords(output_coord); @@ -372,29 +370,30 @@ void InterpolateEval::linear_func(const T* input_data, T* out) { float summa = 0.0f; float wsum = 0.0f; - CoordinateTransform indices{info.shape_for_indeces}; + const CoordinateTransformBasic indices{info.shape_for_indices}; for (const auto& index : indices) { auto inner_result = helper.inner_calculation(output_coord, icoords_data, info, index); if (!inner_result.condition) { continue; } + const auto input_index = coordinate_index(inner_result.inner_coord, m_input_data_shape); wsum += inner_result.w; - summa += inner_result.w * static_cast(input_data[input_transform.index(inner_result.inner_coord)]); + summa += inner_result.w * static_cast(input_data[input_index]); } + const auto out_index = coordinate_index(output_coord, m_out_shape); if (wsum == 0.0f) { - out[output_transform.index(output_coord)] = T{}; + out[out_index] = T{}; } else { if (std::is_integral()) { // Round value for integral return types - out[output_transform.index(output_coord)] = static_cast(std::round(summa / wsum)); + out[out_index] = static_cast(std::round(summa / wsum)); } else { - out[output_transform.index(output_coord)] = static_cast(summa / wsum); + out[out_index] = static_cast(summa / wsum); } } } - NGRAPH_SUPPRESS_DEPRECATED_END } template @@ -532,9 +531,7 @@ void InterpolateEval::cubic_func(const T* input_data, T* out) { size_t input_rank = m_input_data_shape.size(); size_t 
num_of_axes = m_axes.size(); - NGRAPH_SUPPRESS_DEPRECATED_START - CoordinateTransform output_transform(m_out_shape); - CoordinateTransform input_transform(m_input_data_shape); + const CoordinateTransformBasic output_transform{m_out_shape}; Shape indices_shape{std::vector(num_of_axes, 4)}; for (const Coordinate& output_coord : output_transform) { @@ -551,7 +548,7 @@ void InterpolateEval::cubic_func(const T* input_data, T* out) { } float summa = 0.0f; - CoordinateTransform indices{indices_shape}; + const CoordinateTransformBasic indices{indices_shape}; for (const Coordinate& idx : indices) { auto coords_for_sum = output_coord; @@ -567,12 +564,12 @@ void InterpolateEval::cubic_func(const T* input_data, T* out) { coeffs_prod *= cubic_coeffs[axis][idx[i]]; } - summa += coeffs_prod * static_cast(input_data[input_transform.index(coords_for_sum)]); + const auto input_index = coordinate_index(coords_for_sum, m_input_data_shape); + summa += coeffs_prod * static_cast(input_data[input_index]); } - out[output_transform.index(output_coord)] = static_cast(summa); + out[coordinate_index(output_coord, m_out_shape)] = static_cast(summa); } - NGRAPH_SUPPRESS_DEPRECATED_END } template @@ -677,15 +674,14 @@ void InterpolateEval::multidim_pil_func(const T* input_data, T* out, const in template void InterpolateEval::nearest_func(const T* input_data, T* out) { - NGRAPH_SUPPRESS_DEPRECATED_START - CoordinateTransform output_transform(m_out_shape); - CoordinateTransform input_transform(m_input_data_shape); + const CoordinateTransformBasic output_transform{m_out_shape}; for (const Coordinate& output_coord : output_transform) { auto input_coord = helper.get_input_coords_for_nearest_mode(output_coord); - out[output_transform.index(output_coord)] = input_data[input_transform.index(input_coord)]; + const auto input_index = coordinate_index(input_coord, m_input_data_shape); + const auto out_index = coordinate_index(output_coord, m_out_shape); + out[out_index] = input_data[input_index]; } - NGRAPH_SUPPRESS_DEPRECATED_END } inline void pad_input_data(const uint8_t* data_ptr, @@ -694,9 +690,7 @@ inline void pad_input_data(const uint8_t* data_ptr, const ov::Shape& input_shape, const ov::Shape& padded_input_shape, const std::vector& pads_begin) { - NGRAPH_SUPPRESS_DEPRECATED_START - CoordinateTransform input_transform(input_shape); - CoordinateTransform padded_transform(padded_input_shape); + const CoordinateTransformBasic input_transform{input_shape}; for (const Coordinate& input_coord : input_transform) { auto padded_coord = input_coord; @@ -705,11 +699,10 @@ inline void pad_input_data(const uint8_t* data_ptr, padded_coord[i] += pad; ++i; } - uint8_t* dst_ptr = padded_data_ptr + type_size * padded_transform.index(padded_coord); - const uint8_t* src_ptr = data_ptr + type_size * input_transform.index(input_coord); + uint8_t* dst_ptr = padded_data_ptr + type_size * coordinate_index(padded_coord, padded_input_shape); + const uint8_t* src_ptr = data_ptr + type_size * coordinate_index(input_coord, input_shape); memcpy(dst_ptr, src_ptr, type_size); } - NGRAPH_SUPPRESS_DEPRECATED_END } inline PartialShape get_padded_input_shape(const PartialShape& input_shape, diff --git a/src/core/reference/include/openvino/reference/interpolate_pil.hpp b/src/core/reference/include/openvino/reference/interpolate_pil.hpp index 66a40c8f88c48a..d57875cc538407 100644 --- a/src/core/reference/include/openvino/reference/interpolate_pil.hpp +++ b/src/core/reference/include/openvino/reference/interpolate_pil.hpp @@ -40,9 +40,9 @@ #include #include 
+#include -#include "ngraph/op/interpolate.hpp" -#include "ngraph/shape_util.hpp" +#include "openvino/core/shape.hpp" namespace ov { namespace reference { diff --git a/src/core/reference/include/openvino/reference/irdft.hpp b/src/core/reference/include/openvino/reference/irdft.hpp index a32c20e9765014..0ee03fda8583e2 100644 --- a/src/core/reference/include/openvino/reference/irdft.hpp +++ b/src/core/reference/include/openvino/reference/irdft.hpp @@ -6,7 +6,7 @@ #include -#include "ngraph/shape.hpp" +#include "openvino/core/shape.hpp" namespace ov { namespace reference { diff --git a/src/core/reference/include/openvino/reference/less.hpp b/src/core/reference/include/openvino/reference/less.hpp index c45197265891d5..21d2321f5664f6 100644 --- a/src/core/reference/include/openvino/reference/less.hpp +++ b/src/core/reference/include/openvino/reference/less.hpp @@ -6,8 +6,8 @@ #include -#include "ngraph/op/util/attr_types.hpp" -#include "ngraph/shape.hpp" +#include "openvino/core/shape.hpp" +#include "openvino/op/util/attr_types.hpp" #include "openvino/reference/autobroadcast_binop.hpp" namespace ov { diff --git a/src/core/reference/include/openvino/reference/less_eq.hpp b/src/core/reference/include/openvino/reference/less_eq.hpp index 80aff2fad7384c..d4ab3c2775bea6 100644 --- a/src/core/reference/include/openvino/reference/less_eq.hpp +++ b/src/core/reference/include/openvino/reference/less_eq.hpp @@ -6,8 +6,8 @@ #include -#include "ngraph/op/util/attr_types.hpp" -#include "ngraph/shape.hpp" +#include "openvino/core/shape.hpp" +#include "openvino/op/util/attr_types.hpp" #include "openvino/reference/autobroadcast_binop.hpp" namespace ov { diff --git a/src/core/reference/include/openvino/reference/log_softmax.hpp b/src/core/reference/include/openvino/reference/log_softmax.hpp index 7335bd36989b18..710b605e8508c7 100644 --- a/src/core/reference/include/openvino/reference/log_softmax.hpp +++ b/src/core/reference/include/openvino/reference/log_softmax.hpp @@ -6,40 +6,39 @@ #include -#include "ngraph/shape_util.hpp" +#include "openvino/core/shape_util.hpp" #include "openvino/reference/reduce_max.hpp" #include "openvino/reference/reduce_sum.hpp" +#include "openvino/reference/utils/coordinate_index.hpp" #include "openvino/reference/utils/coordinate_transform.hpp" namespace ov { namespace reference { template void log_softmax(const T* arg, T* out, const Shape& shape, const AxisSet& axes) { - NGRAPH_SUPPRESS_DEPRECATED_START - auto temp_shape = ngraph::reduce(shape, axes, true); - auto temp_elements = shape_size(temp_shape); + const auto temp_shape = util::reduce_keep_dims(shape, axes); + const auto temp_elements = shape_size(temp_shape); auto temp_max = std::vector(temp_elements, 0); auto temp_sum = std::vector(temp_elements, 0); reduce_max(arg, temp_max.data(), shape, axes); - CoordinateTransform transform(shape); - CoordinateTransform temp_transform(temp_shape); + const CoordinateTransformBasic transform{shape}; for (const Coordinate& coord : transform) { - Coordinate temp_coord = ngraph::reduce(coord, axes, true); - out[transform.index(coord)] = - static_cast(std::exp(arg[transform.index(coord)] - temp_max[temp_transform.index(temp_coord)])); + const Coordinate temp_coord = util::reduce_keep_dims(coord, axes); + const auto out_index = coordinate_index(coord, shape); + const auto temp_index = coordinate_index(temp_coord, temp_shape); + out[out_index] = static_cast(std::exp(arg[out_index] - temp_max[temp_index])); } reduce_sum(out, temp_sum.data(), shape, axes); for (const Coordinate& coord : 
transform) { - Coordinate temp_coord = ngraph::reduce(coord, axes, true); - out[transform.index(coord)] = - static_cast((arg[transform.index(coord)] - temp_max[temp_transform.index(temp_coord)]) - - std::log(temp_sum[temp_transform.index(temp_coord)])); + const Coordinate temp_coord = util::reduce_keep_dims(coord, axes); + const auto out_index = coordinate_index(coord, shape); + const auto temp_index = coordinate_index(temp_coord, temp_shape); + out[out_index] = static_cast((arg[out_index] - temp_max[temp_index]) - std::log(temp_sum[temp_index])); } - NGRAPH_SUPPRESS_DEPRECATED_END } } // namespace reference } // namespace ov diff --git a/src/core/reference/include/openvino/reference/logical_reduction.hpp b/src/core/reference/include/openvino/reference/logical_reduction.hpp index 97be74d112285a..9005c574c4c139 100644 --- a/src/core/reference/include/openvino/reference/logical_reduction.hpp +++ b/src/core/reference/include/openvino/reference/logical_reduction.hpp @@ -7,7 +7,6 @@ #include #include -#include "ngraph/shape_util.hpp" #include "openvino/core/shape_util.hpp" #include "openvino/reference/utils/coordinate_index.hpp" #include "openvino/reference/utils/coordinate_transform.hpp" diff --git a/src/core/reference/include/openvino/reference/lrn.hpp b/src/core/reference/include/openvino/reference/lrn.hpp index e3e2177ef2146e..b4df9e363f7c2b 100644 --- a/src/core/reference/include/openvino/reference/lrn.hpp +++ b/src/core/reference/include/openvino/reference/lrn.hpp @@ -8,7 +8,7 @@ #include #include -#include "ngraph/util.hpp" +#include "openvino/reference/utils/coordinate_index.hpp" #include "openvino/reference/utils/coordinate_transform.hpp" namespace ov { @@ -61,7 +61,6 @@ void lrn(const T* arg, double dbeta, double dbias, size_t size) { - NGRAPH_SUPPRESS_DEPRECATED_START T alpha = static_cast(dalpha); T beta = static_cast(dbeta); T bias = static_cast(dbias); @@ -74,7 +73,7 @@ void lrn(const T* arg, axes_map[axis_coord] = true; } - CoordinateTransform input_transform(arg_shape); + const CoordinateTransformBasic input_transform{arg_shape}; for (const Coordinate& in_coord : input_transform) { // area determined by in_coord local neighborhood for (size_t i = 0; i < axes_map.size(); i++) { @@ -89,11 +88,10 @@ void lrn(const T* arg, } T square_sum = sum_region_across_axes(arg, slice_indices(arg_shape, begin_area, area_shape)); - auto index = input_transform.index(in_coord); + const auto index = coordinate_index(in_coord, arg_shape); T x = arg[index]; out[index] = x / static_cast(std::pow(bias + scale * square_sum, beta)); } - NGRAPH_SUPPRESS_DEPRECATED_END } } // namespace reference } // namespace ov diff --git a/src/core/reference/include/openvino/reference/matmul.hpp b/src/core/reference/include/openvino/reference/matmul.hpp index f05dd224f88884..b4a09e0f276d94 100644 --- a/src/core/reference/include/openvino/reference/matmul.hpp +++ b/src/core/reference/include/openvino/reference/matmul.hpp @@ -10,7 +10,6 @@ #include #include "ngraph/runtime/opt_kernel/reshape.hpp" -#include "ngraph/shape_util.hpp" #include "openvino/reference/broadcast.hpp" namespace ov { diff --git a/src/core/reference/include/openvino/reference/matrix_nms.hpp b/src/core/reference/include/openvino/reference/matrix_nms.hpp index 4a3b5b015e7f92..eb2040dabc56a5 100644 --- a/src/core/reference/include/openvino/reference/matrix_nms.hpp +++ b/src/core/reference/include/openvino/reference/matrix_nms.hpp @@ -4,20 +4,9 @@ #pragma once -#include -#include -#include -#include #include -#include -#include -#include -#include 
-#include "ngraph/node.hpp" -#include "ngraph/op/matrix_nms.hpp" -#include "ngraph/op/util/op_types.hpp" -#include "ngraph/shape_util.hpp" +#include "openvino/op/matrix_nms.hpp" namespace ov { namespace reference { diff --git a/src/core/reference/include/openvino/reference/max_pool.hpp b/src/core/reference/include/openvino/reference/max_pool.hpp index eaf2dee89328c4..df56c4f2b26494 100644 --- a/src/core/reference/include/openvino/reference/max_pool.hpp +++ b/src/core/reference/include/openvino/reference/max_pool.hpp @@ -11,105 +11,6 @@ namespace ov { namespace reference { -template -void max_pool(const T* arg, - T* out, - const Shape& arg_shape, - const Shape& out_shape, - const Shape& window_shape, - const Strides& window_movement_strides, - const Shape& padding_below, - const Shape& padding_above) { - NGRAPH_SUPPRESS_DEPRECATED_START - // At the outermost level we will walk over every output coordinate O. - CoordinateTransform output_transform(out_shape); - - for (const Coordinate& out_coord : output_transform) { - // Our output coordinate O will have the form: - // - // (N,chan,i_1,...,i_n) - - size_t batch_index = out_coord[0]; - size_t channel = out_coord[1]; - - // For the input data we need to iterate the coordinate: - // - // I: - // - // over the range (noninclusive on the right): - // - // (N,chan,s_1*i_1,s_2*i_2,...,s_n*i_n) -> - // - // (N+1,chan+1,s_1*i_1 + window_shape_1,...,s_n*i_n + window_shape_n) - // - // with unit stride. - // - // We iterate this over the *padded* data, so below we will need to check for - // coordinates that fall in the padding area. - - size_t n_spatial_dimensions = arg_shape.size() - 2; - - Coordinate input_batch_transform_start(2 + n_spatial_dimensions); - Coordinate input_batch_transform_end(2 + n_spatial_dimensions); - Strides input_batch_transform_source_strides(2 + n_spatial_dimensions, 1); - AxisVector input_batch_transform_source_axis_order(2 + n_spatial_dimensions); - CoordinateDiff input_batch_transform_padding_below(2 + n_spatial_dimensions); - CoordinateDiff input_batch_transform_padding_above(2 + n_spatial_dimensions); - - input_batch_transform_start[0] = batch_index; - input_batch_transform_end[0] = batch_index + 1; - input_batch_transform_start[1] = channel; - input_batch_transform_end[1] = channel + 1; - input_batch_transform_padding_below[0] = 0; - input_batch_transform_padding_below[1] = 0; - input_batch_transform_padding_above[0] = 0; - input_batch_transform_padding_above[1] = 0; - - for (size_t i = 2; i < n_spatial_dimensions + 2; i++) { - size_t window_shape_this_dim = window_shape[i - 2]; - size_t movement_stride = window_movement_strides[i - 2]; - - input_batch_transform_start[i] = movement_stride * out_coord[i]; - input_batch_transform_end[i] = input_batch_transform_start[i] + window_shape_this_dim; - // If a window (kernel) is out of arg shape bounds, trim it to fit - auto padded_upper_bound = arg_shape[i] + padding_below[i - 2] + padding_above[i - 2]; - if (input_batch_transform_end[i] > padded_upper_bound) { - input_batch_transform_end[i] = padded_upper_bound; - } - input_batch_transform_padding_below[i] = padding_below[i - 2]; - input_batch_transform_padding_above[i] = padding_above[i - 2]; - } - - for (size_t i = 0; i < arg_shape.size(); i++) { - input_batch_transform_source_axis_order[i] = i; - } - - CoordinateTransform input_batch_transform(arg_shape, - input_batch_transform_start, - input_batch_transform_end, - input_batch_transform_source_strides, - input_batch_transform_source_axis_order, - 
input_batch_transform_padding_below, - input_batch_transform_padding_above); - - // As we go, we compute the maximum value: - // - // output[O] = max(output[O],arg[I]) - - T result = std::numeric_limits::lowest(); - - for (const Coordinate& input_batch_coord : input_batch_transform) { - if (input_batch_transform.has_source_coordinate(input_batch_coord)) { - T x = arg[input_batch_transform.index(input_batch_coord)]; - result = x > result ? x : result; - } - } - - out[output_transform.index(out_coord)] = result; - } - NGRAPH_SUPPRESS_DEPRECATED_END -} - namespace { void validate_max_pool_kernel_params(const size_t dims, const Shape& kernel, @@ -117,20 +18,20 @@ void validate_max_pool_kernel_params(const size_t dims, const Strides& kernel_dilations, const Shape& pads_begin, const Shape& pads_end) { - NGRAPH_CHECK(kernel.size() == dims && kernel_strides.size() == dims && kernel_dilations.size() == dims && - pads_begin.size() == dims && pads_end.size() == dims, - "One of the MaxPool params does not match the ", - dims, - "D implementation.\nkernel=", - kernel, - "\nkernel_strides=", - kernel_strides, - "\nkernel_dilations=", - kernel_dilations, - "\npads_begin=", - pads_begin, - "\npads_end=", - pads_end); + OPENVINO_ASSERT(kernel.size() == dims && kernel_strides.size() == dims && kernel_dilations.size() == dims && + pads_begin.size() == dims && pads_end.size() == dims, + "One of the MaxPool params does not match the ", + dims, + "D implementation.\nkernel=", + kernel, + "\nkernel_strides=", + kernel_strides, + "\nkernel_dilations=", + kernel_dilations, + "\npads_begin=", + pads_begin, + "\npads_end=", + pads_end); } /// \brief A helper struct representing spatial coordinates of a tensor element. It can use signed numbers as the @@ -390,10 +291,9 @@ void max_pool(const Values_t* data, pads_end, indices_offset); } else { - NGRAPH_CHECK(false, - "Unsupported input shape ", - data_shape, - " passed to the MaxPool reference implementation. Supported shapes: 3D, 4D and 5D."); + OPENVINO_THROW("Unsupported input shape ", + data_shape, + " passed to the MaxPool reference implementation. 
Supported shapes: 3D, 4D and 5D."); } } } @@ -409,5 +309,28 @@ void max_pool(const Values_t* data, } } } + +template +void max_pool(const Value_t* data, + Value_t* values, + const Shape& data_shape, + const Shape& out_shape, + const Shape& kernel, + const Strides& strides, + const Shape& pads_begin, + const Shape& pads_end) { + std::vector indices(shape_size(out_shape)); + const Strides dilations(kernel.size(), 1); + max_pool(data, + values, + indices.data(), + data_shape, + out_shape, + kernel, + strides, + dilations, + pads_begin, + pads_end); +} } // namespace reference } // namespace ov diff --git a/src/core/reference/include/openvino/reference/maximum.hpp b/src/core/reference/include/openvino/reference/maximum.hpp index e918d4281b37f9..12388a1026c685 100644 --- a/src/core/reference/include/openvino/reference/maximum.hpp +++ b/src/core/reference/include/openvino/reference/maximum.hpp @@ -6,8 +6,8 @@ #include -#include "ngraph/op/util/attr_types.hpp" -#include "ngraph/shape.hpp" +#include "openvino/core/shape.hpp" +#include "openvino/op/util/attr_types.hpp" #include "openvino/reference/autobroadcast_binop.hpp" namespace ov { diff --git a/src/core/reference/include/openvino/reference/minimum.hpp b/src/core/reference/include/openvino/reference/minimum.hpp index 78b8788ef805b4..4bfe8ff0c89c83 100644 --- a/src/core/reference/include/openvino/reference/minimum.hpp +++ b/src/core/reference/include/openvino/reference/minimum.hpp @@ -6,8 +6,8 @@ #include -#include "ngraph/op/util/attr_types.hpp" -#include "ngraph/shape.hpp" +#include "openvino/core/shape.hpp" +#include "openvino/op/util/attr_types.hpp" #include "openvino/reference/autobroadcast_binop.hpp" namespace ov { diff --git a/src/core/reference/include/openvino/reference/multiclass_nms.hpp b/src/core/reference/include/openvino/reference/multiclass_nms.hpp index 1a33c91eb552d3..58b67f3257d250 100644 --- a/src/core/reference/include/openvino/reference/multiclass_nms.hpp +++ b/src/core/reference/include/openvino/reference/multiclass_nms.hpp @@ -4,20 +4,9 @@ #pragma once -#include -#include -#include -#include #include -#include -#include -#include -#include -#include "ngraph/node.hpp" -#include "ngraph/op/util/multiclass_nms_base.hpp" -#include "ngraph/op/util/op_types.hpp" -#include "ngraph/shape_util.hpp" +#include "openvino/op/util/multiclass_nms_base.hpp" namespace ov { namespace reference { diff --git a/src/core/reference/include/openvino/reference/multiply.hpp b/src/core/reference/include/openvino/reference/multiply.hpp index bfc1dd01e67c78..91d279cc6935da 100644 --- a/src/core/reference/include/openvino/reference/multiply.hpp +++ b/src/core/reference/include/openvino/reference/multiply.hpp @@ -6,8 +6,8 @@ #include -#include "ngraph/op/util/attr_types.hpp" -#include "ngraph/shape.hpp" +#include "openvino/core/shape.hpp" +#include "openvino/op/util/attr_types.hpp" #include "openvino/reference/autobroadcast_binop.hpp" namespace ov { diff --git a/src/core/reference/include/openvino/reference/mvn.hpp b/src/core/reference/include/openvino/reference/mvn.hpp index 8bcce83295e5eb..3083b6e0d5876e 100644 --- a/src/core/reference/include/openvino/reference/mvn.hpp +++ b/src/core/reference/include/openvino/reference/mvn.hpp @@ -5,9 +5,9 @@ #pragma once #include -#include -#include +#include "openvino/core/shape.hpp" +#include "openvino/op/mvn.hpp" #include "openvino/reference/add.hpp" #include "openvino/reference/divide.hpp" #include "openvino/reference/multiply.hpp" @@ -18,7 +18,6 @@ namespace ov { namespace reference { 
-OPENVINO_SUPPRESS_DEPRECATED_START template void mvn(const T* arg, T* out, @@ -26,7 +25,7 @@ void mvn(const T* arg, const bool normalize_variance, const AxisSet& reduction_axes, const double eps) { - auto reduced_shape = ngraph::reduce(in_shape, reduction_axes, true); + auto reduced_shape = util::reduce_keep_dims(in_shape, reduction_axes); std::vector tmp_buffer(shape_size(in_shape)); reduce_mean(arg, tmp_buffer.data(), in_shape, reduction_axes); subtract(arg, tmp_buffer.data(), out, in_shape, reduced_shape, op::AutoBroadcastType::NUMPY); @@ -56,7 +55,7 @@ void mvn_6(const T* arg, bool normalize_variance, double eps, op::MVNEpsMode eps_mode) { - auto reduced_shape = ngraph::reduce(in_shape, reduction_axes, true); + auto reduced_shape = util::reduce_keep_dims(in_shape, reduction_axes); std::vector tmp_buffer(shape_size(in_shape)); reduce_mean(arg, tmp_buffer.data(), in_shape, reduction_axes); subtract(arg, tmp_buffer.data(), out, in_shape, reduced_shape, op::AutoBroadcastType::NUMPY); @@ -87,7 +86,6 @@ void mvn_6(const T* arg, divide(out, tmp_buffer.data(), out, in_shape, reduced_shape, op::AutoBroadcastType::NUMPY, true); } } -OPENVINO_SUPPRESS_DEPRECATED_END template AxisSet mvn_6_reduction_axes(const ov::Tensor& axes_input, size_t rank) { diff --git a/src/core/reference/include/openvino/reference/non_max_suppression.hpp b/src/core/reference/include/openvino/reference/non_max_suppression.hpp index b9e37e28c6a365..9787e38ed47c40 100644 --- a/src/core/reference/include/openvino/reference/non_max_suppression.hpp +++ b/src/core/reference/include/openvino/reference/non_max_suppression.hpp @@ -4,20 +4,11 @@ #pragma once -#include -#include -#include -#include #include -#include -#include -#include #include -#include "ngraph/node.hpp" -#include "ngraph/op/util/op_types.hpp" -#include "ngraph/ops.hpp" -#include "ngraph/shape_util.hpp" +#include "openvino/core/shape.hpp" +#include "openvino/runtime/tensor.hpp" namespace ov { namespace reference { @@ -37,11 +28,11 @@ void non_max_suppression5(const float* boxes_data, const bool sort_result_descending); void nms5_postprocessing(ov::TensorVector& outputs, - const ngraph::element::Type output_type, + const element::Type output_type, const std::vector& selected_indices, const std::vector& selected_scores, int64_t valid_outputs, - const ngraph::element::Type selected_scores_type); + const element::Type selected_scores_type); void non_max_suppression(const float* boxes_data, const Shape& boxes_data_shape, @@ -59,10 +50,10 @@ void non_max_suppression(const float* boxes_data, const bool sort_result_descending); void nms_postprocessing(ov::TensorVector& outputs, - const ngraph::element::Type output_type, + const element::Type output_type, const std::vector& selected_indices, const std::vector& selected_scores, int64_t valid_outputs, - const ngraph::element::Type selected_scores_type); + const element::Type selected_scores_type); } // namespace reference } // namespace ov diff --git a/src/core/reference/include/openvino/reference/non_zero.hpp b/src/core/reference/include/openvino/reference/non_zero.hpp index 3ac5ee3c8ba681..69276e37594d9c 100644 --- a/src/core/reference/include/openvino/reference/non_zero.hpp +++ b/src/core/reference/include/openvino/reference/non_zero.hpp @@ -6,7 +6,7 @@ #include -#include "ngraph/shape.hpp" +#include "openvino/core/shape.hpp" namespace ov { namespace reference { diff --git a/src/core/reference/include/openvino/reference/normalize_l2.hpp b/src/core/reference/include/openvino/reference/normalize_l2.hpp index 
69c0cff34fdc34..b1af06ea7634f6 100644 --- a/src/core/reference/include/openvino/reference/normalize_l2.hpp +++ b/src/core/reference/include/openvino/reference/normalize_l2.hpp @@ -4,8 +4,6 @@ #pragma once -#include - #include "openvino/reference/autobroadcast_binop.hpp" #include "openvino/reference/reduce_sum.hpp" diff --git a/src/core/reference/include/openvino/reference/not_equal.hpp b/src/core/reference/include/openvino/reference/not_equal.hpp index f033bcdde70f72..b6b5c1a348476d 100644 --- a/src/core/reference/include/openvino/reference/not_equal.hpp +++ b/src/core/reference/include/openvino/reference/not_equal.hpp @@ -11,8 +11,8 @@ #include -#include "ngraph/op/util/attr_types.hpp" -#include "ngraph/shape.hpp" +#include "openvino/core/shape.hpp" +#include "openvino/op/util/attr_types.hpp" #include "openvino/reference/autobroadcast_binop.hpp" namespace ov { diff --git a/src/core/reference/include/openvino/reference/one_hot.hpp b/src/core/reference/include/openvino/reference/one_hot.hpp index 8527c1281bcce7..abf2bd7142d849 100644 --- a/src/core/reference/include/openvino/reference/one_hot.hpp +++ b/src/core/reference/include/openvino/reference/one_hot.hpp @@ -4,7 +4,7 @@ #pragma once -#include "ngraph/shape.hpp" +#include "openvino/core/shape.hpp" namespace ov { namespace reference { diff --git a/src/core/reference/include/openvino/reference/or.hpp b/src/core/reference/include/openvino/reference/or.hpp index 9d34d7b09782e9..7e821de63e3c03 100644 --- a/src/core/reference/include/openvino/reference/or.hpp +++ b/src/core/reference/include/openvino/reference/or.hpp @@ -6,8 +6,8 @@ #include -#include "ngraph/op/util/attr_types.hpp" -#include "ngraph/shape.hpp" +#include "openvino/core/shape.hpp" +#include "openvino/op/util/attr_types.hpp" #include "openvino/reference/autobroadcast_binop.hpp" namespace ov { diff --git a/src/core/reference/include/openvino/reference/pad.hpp b/src/core/reference/include/openvino/reference/pad.hpp index ce66f64b05aae8..27ef1a471fb4e3 100644 --- a/src/core/reference/include/openvino/reference/pad.hpp +++ b/src/core/reference/include/openvino/reference/pad.hpp @@ -4,9 +4,9 @@ #pragma once -#include "ngraph/coordinate_diff.hpp" -#include "ngraph/op/util/attr_types.hpp" // for op::PadMode -#include "ngraph/shape.hpp" +#include "openvino/core/coordinate_diff.hpp" +#include "openvino/core/shape.hpp" +#include "openvino/op/util/attr_types.hpp" // for op::PadMode namespace ov { namespace reference { diff --git a/src/core/reference/include/openvino/reference/power.hpp b/src/core/reference/include/openvino/reference/power.hpp index 2aeb3042fcffd0..23b404fdb847b3 100644 --- a/src/core/reference/include/openvino/reference/power.hpp +++ b/src/core/reference/include/openvino/reference/power.hpp @@ -7,8 +7,8 @@ #include #include -#include "ngraph/op/util/attr_types.hpp" -#include "ngraph/shape.hpp" +#include "openvino/core/shape.hpp" +#include "openvino/op/util/attr_types.hpp" #include "openvino/reference/autobroadcast_binop.hpp" namespace ov { diff --git a/src/core/reference/include/openvino/reference/prelu.hpp b/src/core/reference/include/openvino/reference/prelu.hpp index 3de4744bf7e1c3..7c3005e7e5701d 100644 --- a/src/core/reference/include/openvino/reference/prelu.hpp +++ b/src/core/reference/include/openvino/reference/prelu.hpp @@ -6,9 +6,9 @@ #include #include -#include -#include +#include "openvino/core/shape.hpp" +#include "openvino/op/util/attr_types.hpp" #include "openvino/reference/autobroadcast_binop.hpp" namespace ov { @@ -22,15 +22,9 @@ void prelu(const T* arg, 
const T* slope, T* out, const Shape& arg_shape, const S channel_slope_shape[channel_dim_idx] = slope_shape[0]; std::swap(slope_shape_tmp, channel_slope_shape); } - autobroadcast_binop(arg, - slope, - out, - arg_shape, - slope_shape_tmp, - ngraph::op::AutoBroadcastType::NUMPY, - [](T x, T y) -> T { - return x < T(0) ? T(x * y) : x; - }); + autobroadcast_binop(arg, slope, out, arg_shape, slope_shape_tmp, op::AutoBroadcastType::NUMPY, [](T x, T y) -> T { + return x < T(0) ? T(x * y) : x; + }); } } // namespace reference } // namespace ov diff --git a/src/core/reference/include/openvino/reference/prior_box.hpp b/src/core/reference/include/openvino/reference/prior_box.hpp index e4ca13ae310695..57b5373e49803c 100644 --- a/src/core/reference/include/openvino/reference/prior_box.hpp +++ b/src/core/reference/include/openvino/reference/prior_box.hpp @@ -6,9 +6,8 @@ #include -#include "ngraph/axis_vector.hpp" -#include "ngraph/check.hpp" -#include "ngraph/op/prior_box.hpp" +#include "openvino/core/except.hpp" +#include "openvino/op/prior_box.hpp" #include "openvino/reference/utils/coordinate_transform.hpp" namespace ov { @@ -50,7 +49,7 @@ void prior_box(const T* data, } std::vector variance = attrs.variance; - NGRAPH_CHECK(variance.size() == 1 || variance.size() == 4 || variance.empty()); + OPENVINO_ASSERT(variance.size() == 1 || variance.size() == 4 || variance.empty()); if (variance.empty()) variance.push_back(0.1f); diff --git a/src/core/reference/include/openvino/reference/prior_box_clustered.hpp b/src/core/reference/include/openvino/reference/prior_box_clustered.hpp index d15d69a675714e..d4b2b2f64bdc3a 100644 --- a/src/core/reference/include/openvino/reference/prior_box_clustered.hpp +++ b/src/core/reference/include/openvino/reference/prior_box_clustered.hpp @@ -6,9 +6,8 @@ #include -#include "ngraph/axis_vector.hpp" -#include "ngraph/check.hpp" -#include "ngraph/op/prior_box_clustered.hpp" +#include "openvino/core/except.hpp" +#include "openvino/op/prior_box_clustered.hpp" #include "openvino/reference/utils/coordinate_transform.hpp" namespace ov { @@ -18,11 +17,11 @@ void prior_box_clustered(const T* data, const T* img, float* dst_data, const Shape& out_shape, - const ngraph::op::PriorBoxClusteredAttrs& attrs) { + const op::v0::PriorBoxClustered::Attributes& attrs) { size_t num_priors_ = attrs.widths.size(); auto variances = attrs.variances; - NGRAPH_CHECK(variances.size() == 1 || variances.size() == 4 || variances.empty()); + OPENVINO_ASSERT(variances.size() == 1 || variances.size() == 4 || variances.empty()); if (variances.empty()) variances.push_back(0.1f); diff --git a/src/core/reference/include/openvino/reference/proposal.hpp b/src/core/reference/include/openvino/reference/proposal.hpp index febec805131739..2f25027ba36d82 100644 --- a/src/core/reference/include/openvino/reference/proposal.hpp +++ b/src/core/reference/include/openvino/reference/proposal.hpp @@ -2,8 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/proposal.hpp" -#include "ngraph/shape.hpp" +#include "openvino/core/shape.hpp" +#include "openvino/op/proposal.hpp" namespace ov { namespace reference { namespace details { diff --git a/src/core/reference/include/openvino/reference/psroi_pooling.hpp b/src/core/reference/include/openvino/reference/psroi_pooling.hpp index 482f48ea28f823..e718232564877e 100644 --- a/src/core/reference/include/openvino/reference/psroi_pooling.hpp +++ b/src/core/reference/include/openvino/reference/psroi_pooling.hpp @@ -7,7 +7,7 @@ #include #include -#include "ngraph/shape.hpp" 
+#include "openvino/core/shape.hpp" namespace ov { namespace reference { @@ -29,7 +29,7 @@ void psroi_pooling(const T* input, } else if (mode_str == "bilinear") { mode = BILINEAR; } else { - NGRAPH_CHECK(false, "Invalid PS ROI pooling mode: " + mode_str); + OPENVINO_ASSERT(false, "Invalid PS ROI pooling mode: " + mode_str); } size_t channels_in = input_shape[1]; size_t height = input_shape[2]; diff --git a/src/core/reference/include/openvino/reference/quantize.hpp b/src/core/reference/include/openvino/reference/quantize.hpp deleted file mode 100644 index e5333aadca5201..00000000000000 --- a/src/core/reference/include/openvino/reference/quantize.hpp +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "ngraph/op/quantize.hpp" -#include "ngraph/shape_util.hpp" -#include "openvino/reference/utils/coordinate_transform.hpp" - -namespace ov { -namespace reference { -template -void quantize(const REAL* input, - const REAL* scale, - const QUANT* zero_point, - QUANT* output, - const Shape& input_shape, - const Shape& scale_zero_point_shape, - const AxisSet& axes, - op::Quantize::RoundMode round_mode) { - CoordinateTransform input_transform(input_shape); - CoordinateTransform scale_zero_point_transform(scale_zero_point_shape); - - for (const Coordinate& input_coord : input_transform) { - Coordinate scale_zero_point_coord = project(input_coord, axes); - - // apply scale - REAL qvalue = - input[input_transform.index(input_coord)] / scale[scale_zero_point_transform.index(scale_zero_point_coord)]; - - // round - if (round_mode == op::Quantize::RoundMode::ROUND_NEAREST_TOWARD_INFINITY) { - REAL abs_qvalue = std::fabs(qvalue); - REAL abs_qvalue_toward_inf = std::floor(abs_qvalue + static_cast(0.5)); - qvalue = (qvalue < static_cast(0.0)) ? -abs_qvalue_toward_inf : abs_qvalue_toward_inf; - } else if (round_mode == op::Quantize::RoundMode::ROUND_NEAREST_TOWARD_ZERO) { - auto abs_qvalue = std::fabs(qvalue); - auto abs_qvalue_toward_zero = std::ceil(abs_qvalue - static_cast(0.5)); - qvalue = (qvalue < static_cast(0.0)) ? -abs_qvalue_toward_zero : abs_qvalue_toward_zero; - } else if (round_mode == op::Quantize::RoundMode::ROUND_NEAREST_UPWARD) { - qvalue = std::floor(qvalue + static_cast(0.5)); - } else if (round_mode == op::Quantize::RoundMode::ROUND_NEAREST_DOWNWARD) { - qvalue = std::ceil(qvalue - static_cast(0.5)); - } else if (round_mode == op::Quantize::RoundMode::ROUND_NEAREST_TOWARD_EVEN) { - auto up_qvalue = std::floor(qvalue + static_cast(0.5)); - auto dn_qvalue = std::ceil(qvalue - static_cast(0.5)); - auto rem = std::fmod(up_qvalue, 2.0); - qvalue = (rem == 0.0) ? up_qvalue : dn_qvalue; - } else if (round_mode == op::Quantize::RoundMode::ROUND_TOWARD_INFINITY) { - auto abs_qvalue = std::fabs(qvalue); - auto abs_qvalue_toward_inf = std::ceil(abs_qvalue); - qvalue = (qvalue < static_cast(0.0)) ? -abs_qvalue_toward_inf : abs_qvalue_toward_inf; - } else if (round_mode == op::Quantize::RoundMode::ROUND_TOWARD_ZERO) { - auto abs_qvalue = std::fabs(qvalue); - auto abs_qvalue_toward_zero = std::floor(abs_qvalue); - qvalue = (qvalue < static_cast(0.0)) ? 
-abs_qvalue_toward_zero : abs_qvalue_toward_zero; - } else if (round_mode == op::Quantize::RoundMode::ROUND_UP) { - qvalue = std::ceil(qvalue); - } else if (round_mode == op::Quantize::RoundMode::ROUND_DOWN) { - qvalue = std::floor(qvalue); - } - - // apply zero_point - qvalue += zero_point[scale_zero_point_transform.index(scale_zero_point_coord)]; - - // clamp - qvalue = std::max(qvalue, static_cast(std::numeric_limits::min())); - qvalue = std::min(qvalue, static_cast(std::numeric_limits::max())); - - // cast - output[input_transform.index(input_coord)] = static_cast(qvalue); - } -} -} // namespace reference -} // namespace ov diff --git a/src/core/reference/include/openvino/reference/random_uniform.hpp b/src/core/reference/include/openvino/reference/random_uniform.hpp index 6f942b97dc6659..35257bba4a0b9b 100644 --- a/src/core/reference/include/openvino/reference/random_uniform.hpp +++ b/src/core/reference/include/openvino/reference/random_uniform.hpp @@ -5,9 +5,9 @@ #pragma once #include -#include -#include "ngraph/shape.hpp" +#include "openvino/core/shape.hpp" +#include "openvino/core/type/element_type.hpp" namespace ov { namespace reference { @@ -28,7 +28,7 @@ std::pair random_uniform(const uint64_t* out_shape, const char* max_val, char* out, const Shape& out_shape_shape, - const ngraph::element::Type& elem_type, + const element::Type& elem_type, uint64_t seed, uint64_t seed2, std::pair prev_state); diff --git a/src/core/reference/include/openvino/reference/range.hpp b/src/core/reference/include/openvino/reference/range.hpp index 99a6cb39d876db..cc9cb2f643ae06 100644 --- a/src/core/reference/include/openvino/reference/range.hpp +++ b/src/core/reference/include/openvino/reference/range.hpp @@ -7,10 +7,8 @@ #include #include -#include "ngraph/axis_vector.hpp" -#include "ngraph/check.hpp" -#include "ngraph/type/bfloat16.hpp" -#include "ngraph/type/float16.hpp" +#include "openvino/core/type/bfloat16.hpp" +#include "openvino/core/type/float16.hpp" #include "openvino/reference/utils/coordinate_transform.hpp" namespace ov { diff --git a/src/core/reference/include/openvino/reference/rdft.hpp b/src/core/reference/include/openvino/reference/rdft.hpp index ecfdae9585f3ca..5abc2c7bcfbd03 100644 --- a/src/core/reference/include/openvino/reference/rdft.hpp +++ b/src/core/reference/include/openvino/reference/rdft.hpp @@ -17,13 +17,9 @@ #pragma once #include -#include #include -#include "ngraph/node.hpp" -#include "ngraph/op/util/op_types.hpp" -#include "ngraph/ops.hpp" -#include "ngraph/shape_util.hpp" +#include "openvino/core/shape.hpp" namespace ov { namespace reference { diff --git a/src/core/reference/include/openvino/reference/region_yolo.hpp b/src/core/reference/include/openvino/reference/region_yolo.hpp index f510c683db0250..58a110c4429ee1 100644 --- a/src/core/reference/include/openvino/reference/region_yolo.hpp +++ b/src/core/reference/include/openvino/reference/region_yolo.hpp @@ -7,7 +7,7 @@ #include #include -#include "ngraph/shape.hpp" +#include "openvino/core/shape.hpp" namespace ov { namespace reference { @@ -57,7 +57,7 @@ void region_yolo(const T* input, const int regions, const bool do_softmax, const std::vector& mask) { - NGRAPH_CHECK(input_shape.size() == 4); + OPENVINO_ASSERT(input_shape.size() == 4); const int batches = static_cast(input_shape[0]); const int height = static_cast(input_shape[2]); diff --git a/src/core/reference/include/openvino/reference/reorg_yolo.hpp b/src/core/reference/include/openvino/reference/reorg_yolo.hpp index 2678a4e82e332f..64e5f2180a89da 100644 --- 
a/src/core/reference/include/openvino/reference/reorg_yolo.hpp +++ b/src/core/reference/include/openvino/reference/reorg_yolo.hpp @@ -7,7 +7,7 @@ #include #include -#include "ngraph/shape.hpp" +#include "openvino/core/shape.hpp" namespace ov { namespace reference { diff --git a/src/core/reference/include/openvino/reference/reshape.hpp b/src/core/reference/include/openvino/reference/reshape.hpp index 3d7694753521cc..b3cdd12df47e06 100644 --- a/src/core/reference/include/openvino/reference/reshape.hpp +++ b/src/core/reference/include/openvino/reference/reshape.hpp @@ -4,8 +4,8 @@ #pragma once -#include "ngraph/axis_vector.hpp" -#include "ngraph/shape.hpp" +#include "openvino/core/axis_vector.hpp" +#include "openvino/core/shape.hpp" namespace ov { namespace reference { diff --git a/src/core/reference/include/openvino/reference/result.hpp b/src/core/reference/include/openvino/reference/result.hpp index ca71bab4dcb3c3..9b59302fb0c5f7 100644 --- a/src/core/reference/include/openvino/reference/result.hpp +++ b/src/core/reference/include/openvino/reference/result.hpp @@ -9,8 +9,6 @@ #include #include -#include "ngraph/shape.hpp" - namespace ov { namespace reference { template diff --git a/src/core/reference/include/openvino/reference/reverse_sequence.hpp b/src/core/reference/include/openvino/reference/reverse_sequence.hpp index 6a01bc9303e584..07f7f6f68afe24 100644 --- a/src/core/reference/include/openvino/reference/reverse_sequence.hpp +++ b/src/core/reference/include/openvino/reference/reverse_sequence.hpp @@ -7,7 +7,6 @@ #include #include -#include "ngraph/util.hpp" #include "openvino/reference/utils/coordinate_transform.hpp" namespace ov { @@ -25,8 +24,8 @@ void reverse_sequence(const T* arg, size_t batch_index = in_coord[batch_axis]; auto orig_seq_index = static_cast(sequence_lengths[batch_index]); - NGRAPH_CHECK(orig_seq_index <= arg_shape.at(sequence_axis), - "One of the elements of sequence lengths is greater than sequence axis dimension"); + OPENVINO_ASSERT(orig_seq_index <= arg_shape.at(sequence_axis), + "One of the elements of sequence lengths is greater than sequence axis dimension"); if (orig_seq_index == 0) { orig_seq_index = 1; diff --git a/src/core/reference/include/openvino/reference/roi_align.hpp b/src/core/reference/include/openvino/reference/roi_align.hpp index f7f9d1bc791a41..31eca09ebe4dcf 100644 --- a/src/core/reference/include/openvino/reference/roi_align.hpp +++ b/src/core/reference/include/openvino/reference/roi_align.hpp @@ -6,9 +6,11 @@ #include -#include "ngraph/op/roi_align.hpp" // for ROIAlign:PoolingMode -#include "ngraph/shape.hpp" +#include "openvino/core/shape.hpp" +#include "openvino/op/roi_align.hpp" +#include "openvino/reference/utils/coordinate_index.hpp" #include "openvino/reference/utils/coordinate_transform.hpp" + namespace ov { namespace reference { using ROIPoolingMode = op::v3::ROIAlign::PoolingMode; @@ -33,11 +35,6 @@ void roi_align(const T* feature_maps, auto feature_map_width = feature_maps_shape[3]; auto num_rois = rois_shape[0]; - NGRAPH_SUPPRESS_DEPRECATED_START - CoordinateTransform feature_maps_transform(feature_maps_shape); - CoordinateTransform rois_transform(rois_shape); - CoordinateTransform out_transform(out_shape); - bool aligned = false; T offset_src = static_cast(0); T offset_dst = static_cast(0); @@ -64,10 +61,10 @@ void roi_align(const T* feature_maps, for (unsigned int roi_index = 0; roi_index < num_rois; roi_index++) { // Get ROI`s corners - T x1 = (rois[rois_transform.index({roi_index, 0})] + offset_src) * spatial_scale + 
offset_dst; - T y1 = (rois[rois_transform.index({roi_index, 1})] + offset_src) * spatial_scale + offset_dst; - T x2 = (rois[rois_transform.index({roi_index, 2})] + offset_src) * spatial_scale + offset_dst; - T y2 = (rois[rois_transform.index({roi_index, 3})] + offset_src) * spatial_scale + offset_dst; + T x1 = (rois[coordinate_index({roi_index, 0}, rois_shape)] + offset_src) * spatial_scale + offset_dst; + T y1 = (rois[coordinate_index({roi_index, 1}, rois_shape)] + offset_src) * spatial_scale + offset_dst; + T x2 = (rois[coordinate_index({roi_index, 2}, rois_shape)] + offset_src) * spatial_scale + offset_dst; + T y2 = (rois[coordinate_index({roi_index, 3}, rois_shape)] + offset_src) * spatial_scale + offset_dst; T roi_width = x2 - x1; T roi_height = y2 - y1; @@ -83,7 +80,7 @@ void roi_align(const T* feature_maps, auto sampling_ratio_x = sampling_ratio == 0 ? static_cast(ceil(bin_width)) : sampling_ratio; auto sampling_ratio_y = sampling_ratio == 0 ? static_cast(ceil(bin_height)) : sampling_ratio; - NGRAPH_CHECK(sampling_ratio_x >= 0 && sampling_ratio_y >= 0); + OPENVINO_ASSERT(sampling_ratio_x >= 0 && sampling_ratio_y >= 0); uint64_t num_samples_in_bin = static_cast(sampling_ratio_x) * static_cast(sampling_ratio_y); @@ -169,26 +166,27 @@ void roi_align(const T* feature_maps, // the four parts are values of the four closest surrounding // neighbours of considered sample, then basing on all sampled // values in bin we calculate pooled value - auto sample_part_1 = feature_maps[feature_maps_transform.index( - {static_cast(batch_indices[roi_index]), - channel_index, - pooling_points[sample_index].first, - pooling_points[sample_index].second})]; - auto sample_part_2 = feature_maps[feature_maps_transform.index( - {static_cast(batch_indices[roi_index]), - channel_index, - pooling_points[sample_index + 1].first, - pooling_points[sample_index + 1].second})]; - auto sample_part_3 = feature_maps[feature_maps_transform.index( - {static_cast(batch_indices[roi_index]), - channel_index, - pooling_points[sample_index + 2].first, - pooling_points[sample_index + 2].second})]; - auto sample_part_4 = feature_maps[feature_maps_transform.index( - {static_cast(batch_indices[roi_index]), - channel_index, - pooling_points[sample_index + 3].first, - pooling_points[sample_index + 3].second})]; + const auto batch_index = static_cast(batch_indices[roi_index]); + auto sample_part_1 = feature_maps[coordinate_index({batch_index, + channel_index, + pooling_points[sample_index].first, + pooling_points[sample_index].second}, + feature_maps_shape)]; + auto sample_part_2 = feature_maps[coordinate_index({batch_index, + channel_index, + pooling_points[sample_index + 1].first, + pooling_points[sample_index + 1].second}, + feature_maps_shape)]; + auto sample_part_3 = feature_maps[coordinate_index({batch_index, + channel_index, + pooling_points[sample_index + 2].first, + pooling_points[sample_index + 2].second}, + feature_maps_shape)]; + auto sample_part_4 = feature_maps[coordinate_index({batch_index, + channel_index, + pooling_points[sample_index + 3].first, + pooling_points[sample_index + 3].second}, + feature_maps_shape)]; T sample_value = pooling_weights[sample_index] * sample_part_1 + pooling_weights[sample_index + 1] * sample_part_2 + @@ -210,17 +208,12 @@ void roi_align(const T* feature_maps, } } // save the calculations for all bins across this channel - auto output_channel_offset = out_transform.index({static_cast(roi_index), - static_cast(channel_index), - static_cast(0), - static_cast(0)}); + auto output_channel_offset 
= coordinate_index({roi_index, channel_index, 0ul, 0ul}, out_shape); std::copy(tmp_out.begin(), tmp_out.end(), out + output_channel_offset); tmp_out.clear(); } } - NGRAPH_SUPPRESS_DEPRECATED_END - return; } } // namespace reference } // namespace ov diff --git a/src/core/reference/include/openvino/reference/roi_pooling.hpp b/src/core/reference/include/openvino/reference/roi_pooling.hpp index 5dbe13d1de51b6..02247ee2fa543d 100644 --- a/src/core/reference/include/openvino/reference/roi_pooling.hpp +++ b/src/core/reference/include/openvino/reference/roi_pooling.hpp @@ -7,7 +7,7 @@ #include #include -#include "ngraph/shape.hpp" +#include "openvino/core/shape.hpp" namespace ov { namespace reference { @@ -42,7 +42,7 @@ void roi_pooling(const T* feature_maps, int roi_batch_id = static_cast(rois[roi_idx + 0]); // ROI batch id must be in the range of [0, N-1] - NGRAPH_CHECK(0 <= roi_batch_id && roi_batch_id < batches, "ROI batch id must be in the range of [0, N-1]"); + OPENVINO_ASSERT(0 <= roi_batch_id && roi_batch_id < batches, "ROI batch id must be in the range of [0, N-1]"); if (pooling_method == "max") { // ROI coordinates scaled to input feature maps diff --git a/src/core/reference/include/openvino/reference/roll.hpp b/src/core/reference/include/openvino/reference/roll.hpp index 16b50bc32f634c..dfe33c00ffa19b 100644 --- a/src/core/reference/include/openvino/reference/roll.hpp +++ b/src/core/reference/include/openvino/reference/roll.hpp @@ -16,7 +16,7 @@ #pragma once -#include "ngraph/shape.hpp" +#include "openvino/core/shape.hpp" #include "openvino/reference/utils/coordinate_transform.hpp" namespace ov { diff --git a/src/core/reference/include/openvino/reference/round.hpp b/src/core/reference/include/openvino/reference/round.hpp index b5c1ee87f555eb..f3080c4edcd157 100644 --- a/src/core/reference/include/openvino/reference/round.hpp +++ b/src/core/reference/include/openvino/reference/round.hpp @@ -9,7 +9,7 @@ #include #include "openvino/op/round.hpp" -#include "openvino/reference/round_guard.hpp" +#include "openvino/reference/rounding_guard.hpp" #include "openvino/reference/utils/type_util.hpp" namespace ov { @@ -50,7 +50,7 @@ T round_half_away_zero(T value) { */ template ()>::type* = nullptr> void round(const T* arg, T* out, const size_t count, const op::v5::Round::RoundMode mode) { - const ov::RoundGuard round_g{FE_TONEAREST}; + const ov::RoundingGuard round_g{FE_TONEAREST}; const auto round_algo = (mode == op::v5::Round::RoundMode::HALF_TO_EVEN) ? round_to_nearest_even : round_half_away_zero; diff --git a/src/core/reference/include/openvino/reference/round_guard.hpp b/src/core/reference/include/openvino/reference/rounding_guard.hpp similarity index 84% rename from src/core/reference/include/openvino/reference/round_guard.hpp rename to src/core/reference/include/openvino/reference/rounding_guard.hpp index cfccdc01b7a569..4c11b2637ae5f2 100644 --- a/src/core/reference/include/openvino/reference/round_guard.hpp +++ b/src/core/reference/include/openvino/reference/rounding_guard.hpp @@ -18,10 +18,10 @@ namespace ov { * - FE_UPWARD * see std header for details. 
*/ -class RoundGuard { +class RoundingGuard { public: - RoundGuard(int mode); - ~RoundGuard(); + RoundingGuard(int mode); + ~RoundingGuard(); private: int m_prev_round_mode; diff --git a/src/core/reference/include/openvino/reference/scatter_elements_update.hpp b/src/core/reference/include/openvino/reference/scatter_elements_update.hpp index 0262db0a1ce492..3fd38f06600737 100644 --- a/src/core/reference/include/openvino/reference/scatter_elements_update.hpp +++ b/src/core/reference/include/openvino/reference/scatter_elements_update.hpp @@ -8,8 +8,8 @@ #include #include -#include "ngraph/check.hpp" -#include "ngraph/shape.hpp" +#include "openvino/core/except.hpp" +#include "openvino/core/shape.hpp" #include "openvino/op/scatter_elements_update.hpp" #include "openvino/reference/utils/coordinate_transform.hpp" diff --git a/src/core/reference/include/openvino/reference/scatter_nd_update.hpp b/src/core/reference/include/openvino/reference/scatter_nd_update.hpp index f4c5821dac00c8..ff63313823b788 100644 --- a/src/core/reference/include/openvino/reference/scatter_nd_update.hpp +++ b/src/core/reference/include/openvino/reference/scatter_nd_update.hpp @@ -7,8 +7,7 @@ #include #include -#include "ngraph/coordinate.hpp" -#include "ngraph/shape.hpp" +#include "openvino/core/shape.hpp" #include "utils/span.hpp" namespace ov { diff --git a/src/core/reference/include/openvino/reference/scatter_update.hpp b/src/core/reference/include/openvino/reference/scatter_update.hpp index 90a1b50b1b73c0..07dab6be32b6f1 100644 --- a/src/core/reference/include/openvino/reference/scatter_update.hpp +++ b/src/core/reference/include/openvino/reference/scatter_update.hpp @@ -6,8 +6,7 @@ #include -#include "ngraph/check.hpp" -#include "ngraph/shape.hpp" +#include "openvino/core/shape.hpp" #include "openvino/reference/utils/coordinate_transform.hpp" #include "openvino/util/common_util.hpp" @@ -22,12 +21,10 @@ static const CoordinateTransformBasic get_target_shape(const Shape& data_shape, AxisVector axis_order(m_n_axes); std::iota(axis_order.begin(), axis_order.end(), 0); const Strides strides(m_n_axes, 1); - OPENVINO_SUPPRESS_DEPRECATED_START for (size_t axis = 0; axis < m_n_axes; axis++) { target_shape.push_back( util::ceil_div(end_corner[axis_order[axis]] - start_corner[axis_order[axis]], strides[axis_order[axis]])); } - OPENVINO_SUPPRESS_DEPRECATED_END return target_shape; } diff --git a/src/core/reference/include/openvino/reference/sequences.hpp b/src/core/reference/include/openvino/reference/sequences.hpp index c466ec5d559f83..cbbd91655b1c94 100644 --- a/src/core/reference/include/openvino/reference/sequences.hpp +++ b/src/core/reference/include/openvino/reference/sequences.hpp @@ -51,7 +51,7 @@ void cell_pass(CellType type, return new_shape; }; - size_t x_shape_size = ngraph::shape_size(shapes[0]); + size_t x_shape_size = shape_size(shapes[0]); // split X size_t num_splits = shapes[0].at(1); @@ -90,7 +90,7 @@ void cell_pass(CellType type, // split A std::vector a_seqs; if (type == CellType::AUGRU) { - const auto a_shape_size = ngraph::shape_size(shapes[6]); + const auto a_shape_size = shape_size(shapes[6]); a_seqs.resize(a_shape_size * sizeof(T)); std::vector a_pointers(num_splits); for (size_t i = 0; i < num_splits; ++i) { @@ -100,18 +100,18 @@ void cell_pass(CellType type, } Shape part_shape{batch, 1, hidden_size}; - size_t part_shape_size = ngraph::shape_size(part_shape); + size_t part_shape_size = shape_size(part_shape); std::vector> h_list(num_splits, std::vector(part_shape_size * sizeof(T), 0)); std::vector> 
c_list(num_splits, std::vector(part_shape_size * sizeof(T), 0)); // use outputs as a buffer for temporarily values char* H_i = outputs[1]; - std::memcpy(H_i, inputs[2], ngraph::shape_size(shapes[2]) * sizeof(T)); + std::memcpy(H_i, inputs[2], shape_size(shapes[2]) * sizeof(T)); char* C_i = nullptr; // LSTMCell only if ((type == CellType::LSTM) || (type == CellType::LSTM_v1)) { C_i = outputs[2]; - std::memcpy(C_i, inputs[3], ngraph::shape_size(shapes[3]) * sizeof(T)); + std::memcpy(C_i, inputs[3], shape_size(shapes[3]) * sizeof(T)); } for (size_t time_step = 0; time_step < num_splits; ++time_step) { @@ -310,11 +310,11 @@ void lstm_sequence(const char* X, } else if (direction == op::RecurrentSequenceDirection::BIDIRECTIONAL) { // Split bidirectional case to forward + reverse passes. // split inputs - std::vector> H_split(2, std::vector(sizeof(T) * ngraph::shape_size(H_shape) / 2)); - std::vector> C_split(2, std::vector(sizeof(T) * ngraph::shape_size(C_shape) / 2)); - std::vector> W_split(2, std::vector(sizeof(T) * ngraph::shape_size(W_shape) / 2)); - std::vector> R_split(2, std::vector(sizeof(T) * ngraph::shape_size(R_shape) / 2)); - std::vector> B_split(2, std::vector(sizeof(T) * ngraph::shape_size(B_shape) / 2)); + std::vector> H_split(2, std::vector(sizeof(T) * shape_size(H_shape) / 2)); + std::vector> C_split(2, std::vector(sizeof(T) * shape_size(C_shape) / 2)); + std::vector> W_split(2, std::vector(sizeof(T) * shape_size(W_shape) / 2)); + std::vector> R_split(2, std::vector(sizeof(T) * shape_size(R_shape) / 2)); + std::vector> B_split(2, std::vector(sizeof(T) * shape_size(B_shape) / 2)); char* h_pointers[2] = {H_split[0].data(), H_split[1].data()}; char* c_pointers[2] = {C_split[0].data(), C_split[1].data()}; char* w_pointers[2] = {W_split[0].data(), W_split[1].data()}; @@ -428,12 +428,12 @@ void lstm_sequence_v1(const char* X, } else if (direction == op::RecurrentSequenceDirection::BIDIRECTIONAL) { // Split bidirectional case to forward + reverse passes. // split inputs - std::vector> H_split(2, std::vector(sizeof(T) * ngraph::shape_size(H_shape) / 2)); - std::vector> C_split(2, std::vector(sizeof(T) * ngraph::shape_size(C_shape) / 2)); - std::vector> W_split(2, std::vector(sizeof(T) * ngraph::shape_size(W_shape) / 2)); - std::vector> R_split(2, std::vector(sizeof(T) * ngraph::shape_size(R_shape) / 2)); - std::vector> B_split(2, std::vector(sizeof(T) * ngraph::shape_size(B_shape) / 2)); - std::vector> P_split(2, std::vector(sizeof(T) * ngraph::shape_size(P_shape) / 2)); + std::vector> H_split(2, std::vector(sizeof(T) * shape_size(H_shape) / 2)); + std::vector> C_split(2, std::vector(sizeof(T) * shape_size(C_shape) / 2)); + std::vector> W_split(2, std::vector(sizeof(T) * shape_size(W_shape) / 2)); + std::vector> R_split(2, std::vector(sizeof(T) * shape_size(R_shape) / 2)); + std::vector> B_split(2, std::vector(sizeof(T) * shape_size(B_shape) / 2)); + std::vector> P_split(2, std::vector(sizeof(T) * shape_size(P_shape) / 2)); char* h_pointers[2] = {H_split[0].data(), H_split[1].data()}; char* c_pointers[2] = {C_split[0].data(), C_split[1].data()}; char* w_pointers[2] = {W_split[0].data(), W_split[1].data()}; @@ -554,10 +554,10 @@ void gru_sequence(const char* X, } else if (direction == op::RecurrentSequenceDirection::BIDIRECTIONAL) { // Split bidirectional case to forward + reverse passes. 
// split inputs - std::vector> H_split(2, std::vector(sizeof(T) * ngraph::shape_size(H_shape) / 2)); - std::vector> W_split(2, std::vector(sizeof(T) * ngraph::shape_size(W_shape) / 2)); - std::vector> R_split(2, std::vector(sizeof(T) * ngraph::shape_size(R_shape) / 2)); - std::vector> B_split(2, std::vector(sizeof(T) * ngraph::shape_size(B_shape) / 2)); + std::vector> H_split(2, std::vector(sizeof(T) * shape_size(H_shape) / 2)); + std::vector> W_split(2, std::vector(sizeof(T) * shape_size(W_shape) / 2)); + std::vector> R_split(2, std::vector(sizeof(T) * shape_size(R_shape) / 2)); + std::vector> B_split(2, std::vector(sizeof(T) * shape_size(B_shape) / 2)); char* h_pointers[2] = {H_split[0].data(), H_split[1].data()}; char* w_pointers[2] = {W_split[0].data(), W_split[1].data()}; char* r_pointers[2] = {R_split[0].data(), R_split[1].data()}; @@ -645,10 +645,10 @@ void rnn_sequence(const char* X, } else if (direction == op::RecurrentSequenceDirection::BIDIRECTIONAL) { // Split bidirectional case to forward + reverse passes. // split inputs - std::vector> H_split(2, std::vector(sizeof(T) * ngraph::shape_size(H_shape) / 2)); - std::vector> W_split(2, std::vector(sizeof(T) * ngraph::shape_size(W_shape) / 2)); - std::vector> R_split(2, std::vector(sizeof(T) * ngraph::shape_size(R_shape) / 2)); - std::vector> B_split(2, std::vector(sizeof(T) * ngraph::shape_size(B_shape) / 2)); + std::vector> H_split(2, std::vector(sizeof(T) * shape_size(H_shape) / 2)); + std::vector> W_split(2, std::vector(sizeof(T) * shape_size(W_shape) / 2)); + std::vector> R_split(2, std::vector(sizeof(T) * shape_size(R_shape) / 2)); + std::vector> B_split(2, std::vector(sizeof(T) * shape_size(B_shape) / 2)); char* h_pointers[2] = {H_split[0].data(), H_split[1].data()}; char* w_pointers[2] = {W_split[0].data(), W_split[1].data()}; char* r_pointers[2] = {R_split[0].data(), R_split[1].data()}; diff --git a/src/core/reference/include/openvino/reference/shape_of.hpp b/src/core/reference/include/openvino/reference/shape_of.hpp index 940c236ec1fbc1..b2b6ab478b175a 100644 --- a/src/core/reference/include/openvino/reference/shape_of.hpp +++ b/src/core/reference/include/openvino/reference/shape_of.hpp @@ -4,7 +4,7 @@ #pragma once -#include "ngraph/shape.hpp" +#include "openvino/core/shape.hpp" namespace ov { namespace reference { diff --git a/src/core/reference/include/openvino/reference/shuffle_channels.hpp b/src/core/reference/include/openvino/reference/shuffle_channels.hpp index aff376b15b99ba..36d25657b19d97 100644 --- a/src/core/reference/include/openvino/reference/shuffle_channels.hpp +++ b/src/core/reference/include/openvino/reference/shuffle_channels.hpp @@ -8,7 +8,7 @@ #include #include -#include "ngraph/shape.hpp" +#include "openvino/core/shape.hpp" namespace ov { namespace reference { diff --git a/src/core/reference/include/openvino/reference/slice.hpp b/src/core/reference/include/openvino/reference/slice.hpp index a86b47f76fe5fc..bb45b596c02ab1 100644 --- a/src/core/reference/include/openvino/reference/slice.hpp +++ b/src/core/reference/include/openvino/reference/slice.hpp @@ -4,7 +4,7 @@ #pragma once -#include "ngraph/type/element_type.hpp" +#include "openvino/core/type/element_type.hpp" #include "openvino/reference/utils/coordinate_transform.hpp" namespace ov { diff --git a/src/core/reference/include/openvino/reference/softmax.hpp b/src/core/reference/include/openvino/reference/softmax.hpp index 69ea583fbc6a2a..1e03f940376253 100644 --- a/src/core/reference/include/openvino/reference/softmax.hpp +++ 
b/src/core/reference/include/openvino/reference/softmax.hpp @@ -6,39 +6,39 @@ #include -#include "ngraph/shape_util.hpp" +#include "openvino/core/shape_util.hpp" #include "openvino/reference/reduce_max.hpp" #include "openvino/reference/reduce_sum.hpp" +#include "openvino/reference/utils/coordinate_index.hpp" #include "openvino/reference/utils/coordinate_transform.hpp" namespace ov { namespace reference { template void softmax(const T* arg, T* out, const Shape& shape, const AxisSet& axes) { - NGRAPH_SUPPRESS_DEPRECATED_START - auto temp_shape = ngraph::reduce(shape, axes, true); - auto temp_elements = shape_size(temp_shape); - auto temp_ptr = new T[temp_elements]; + const auto temp_shape = util::reduce_keep_dims(shape, axes); + const auto temp_elements = shape_size(temp_shape); + auto temp_storage = std::vector(temp_elements); + const auto temp_ptr = temp_storage.data(); reduce_max(arg, temp_ptr, shape, axes); - CoordinateTransform transform(shape); - CoordinateTransform temp_transform(temp_shape); - for (const Coordinate& coord : transform) { - Coordinate temp_coord = ngraph::reduce(coord, axes, true); - out[transform.index(coord)] = - std::exp(arg[transform.index(coord)] - temp_ptr[temp_transform.index(temp_coord)]); + const CoordinateTransformBasic transform{shape}; + for (const auto& coord : transform) { + const Coordinate temp_coord = util::reduce_keep_dims(coord, axes); + const auto out_index = coordinate_index(coord, shape); + const auto temp_index = coordinate_index(temp_coord, temp_shape); + out[out_index] = std::exp(arg[out_index] - temp_ptr[temp_index]); } reduce_sum(out, temp_ptr, shape, axes); - for (const Coordinate& coord : transform) { - Coordinate temp_coord = ngraph::reduce(coord, axes, true); - out[transform.index(coord)] /= temp_ptr[temp_transform.index(temp_coord)]; + for (const auto& coord : transform) { + const Coordinate temp_coord = util::reduce_keep_dims(coord, axes); + const auto out_index = coordinate_index(coord, shape); + const auto temp_index = coordinate_index(temp_coord, temp_shape); + out[out_index] /= temp_ptr[temp_index]; } - - delete[] temp_ptr; - NGRAPH_SUPPRESS_DEPRECATED_END } } // namespace reference } // namespace ov diff --git a/src/core/reference/include/openvino/reference/space_to_depth.hpp b/src/core/reference/include/openvino/reference/space_to_depth.hpp index 3eeb26534637c1..3df0bdd41232b7 100644 --- a/src/core/reference/include/openvino/reference/space_to_depth.hpp +++ b/src/core/reference/include/openvino/reference/space_to_depth.hpp @@ -4,8 +4,8 @@ #pragma once -#include "ngraph/op/space_to_depth.hpp" -#include "ngraph/shape.hpp" +#include "openvino/core/shape.hpp" +#include "openvino/op/space_to_depth.hpp" namespace ov { namespace reference { diff --git a/src/core/reference/include/openvino/reference/squared_difference.hpp b/src/core/reference/include/openvino/reference/squared_difference.hpp index b28586d3623dab..a7e4149de212b8 100644 --- a/src/core/reference/include/openvino/reference/squared_difference.hpp +++ b/src/core/reference/include/openvino/reference/squared_difference.hpp @@ -7,7 +7,6 @@ #include #include -#include "ngraph/shape_util.hpp" #include "openvino/reference/autobroadcast_binop.hpp" namespace ov { diff --git a/src/core/reference/include/openvino/reference/subtract.hpp b/src/core/reference/include/openvino/reference/subtract.hpp index 2051dd1874d510..689ceb6915b1bc 100644 --- a/src/core/reference/include/openvino/reference/subtract.hpp +++ b/src/core/reference/include/openvino/reference/subtract.hpp @@ -7,6 +7,8 @@ 
#include #include +#include "openvino/core/shape.hpp" +#include "openvino/op/util/attr_types.hpp" #include "openvino/reference/autobroadcast_binop.hpp" namespace ov { diff --git a/src/core/reference/include/openvino/reference/tile.hpp b/src/core/reference/include/openvino/reference/tile.hpp index 2ee3da6b0c0218..81fcf4b0182156 100644 --- a/src/core/reference/include/openvino/reference/tile.hpp +++ b/src/core/reference/include/openvino/reference/tile.hpp @@ -4,10 +4,7 @@ #pragma once -#include - -#include "ngraph/type/element_type.hpp" -#include "openvino/reference/utils/coordinate_transform.hpp" +#include "openvino/core/shape.hpp" namespace ov { namespace reference { diff --git a/src/core/reference/include/openvino/reference/topk.hpp b/src/core/reference/include/openvino/reference/topk.hpp index 0b7b4d48a53b1b..c84fb54e9962bb 100644 --- a/src/core/reference/include/openvino/reference/topk.hpp +++ b/src/core/reference/include/openvino/reference/topk.hpp @@ -8,7 +8,8 @@ #include #include -#include "ngraph/op/topk.hpp" +#include "openvino/op/topk.hpp" +#include "openvino/reference/utils/coordinate_index.hpp" #include "openvino/reference/utils/coordinate_transform.hpp" namespace ov { @@ -51,30 +52,21 @@ void topk(const T* arg, size_t k, bool compute_max, op::TopKSortType sort = op::TopKSortType::NONE) { - NGRAPH_SUPPRESS_DEPRECATED_START using namespace std; - // reorder source axis visit order and make "axis" inner most - size_t ndim = static_cast(in_shape.size()); - Coordinate start_corner(ndim, 0); - Coordinate end_corner(in_shape); - end_corner[axis] = 1; - Strides strides(ndim, 1); - AxisVector axis_order(ndim); - iota(axis_order.begin(), axis_order.end(), 0); - axis_order.erase(axis_order.begin() + axis); - axis_order.push_back(axis); - // Create CoordinateTransforms that visits only the first element along "axis" - CoordinateTransform input_transform(in_shape, start_corner, end_corner, strides, axis_order); - CoordinateTransform output_transform(out_shape, start_corner, end_corner, strides, axis_order); // Create temp vector for sorting. 
vector> workspace(in_shape[axis]); - vector in_strides = ngraph::row_major_strides(in_shape); - vector out_strides = ngraph::row_major_strides(out_shape); + vector in_strides = row_major_strides(in_shape); + vector out_strides = row_major_strides(out_shape); auto in_axis_stride = in_strides[axis]; auto out_axis_stride = out_strides[axis]; - for (const Coordinate& coord : input_transform) { - auto arg_index = input_transform.index(coord); - auto out_index = output_transform.index(coord); + + // Iterate over elements with 0 index at "axis" dimension + auto traverse_shape = in_shape; + traverse_shape[axis] = 1; + CoordinateTransformBasic traverse_transform(traverse_shape); + for (const Coordinate& coord : traverse_transform) { + auto arg_index = coordinate_index(coord, in_shape); + auto out_index = coordinate_index(coord, out_shape); // Fill the temp vector U i = 0; for (tuple& entry : workspace) { @@ -109,7 +101,6 @@ void topk(const T* arg, out_index += out_axis_stride; } } - NGRAPH_SUPPRESS_DEPRECATED_END } } // namespace reference } // namespace ov diff --git a/src/core/reference/include/openvino/reference/transpose.hpp b/src/core/reference/include/openvino/reference/transpose.hpp index 03af9040382b26..6d91676dab9aa6 100644 --- a/src/core/reference/include/openvino/reference/transpose.hpp +++ b/src/core/reference/include/openvino/reference/transpose.hpp @@ -9,7 +9,7 @@ #include #include -#include "ngraph/shape.hpp" +#include "openvino/core/shape.hpp" namespace ov { namespace reference { diff --git a/src/core/reference/include/openvino/reference/unique.hpp b/src/core/reference/include/openvino/reference/unique.hpp index f037f1e7f00682..fc8234320059fd 100644 --- a/src/core/reference/include/openvino/reference/unique.hpp +++ b/src/core/reference/include/openvino/reference/unique.hpp @@ -5,7 +5,7 @@ #pragma once #include "gather.hpp" -#include "ngraph/shape.hpp" +#include "openvino/core/shape.hpp" #include "openvino/reference/utils/coordinate_index.hpp" #include "openvino/reference/utils/coordinate_transform.hpp" @@ -105,7 +105,7 @@ UniqueElements find_unique_elements(const Data_t* data, using std::begin; using std::end; - const auto data_shape_strides = ngraph::row_major_strides(data_shape); + const auto data_shape_strides = row_major_strides(data_shape); if (axis && *axis < 0) { const auto normalized_axis = *axis + data_shape.size(); diff --git a/src/core/reference/include/openvino/reference/utils/fft_common.hpp b/src/core/reference/include/openvino/reference/utils/fft_common.hpp index d445efbba71d33..02ede7769ea8c8 100644 --- a/src/core/reference/include/openvino/reference/utils/fft_common.hpp +++ b/src/core/reference/include/openvino/reference/utils/fft_common.hpp @@ -11,8 +11,7 @@ #include #include -#include "ngraph/shape.hpp" -#include "ngraph/type/element_type.hpp" +#include "openvino/core/shape.hpp" namespace ov { namespace reference { @@ -25,7 +24,7 @@ namespace fft_common { // into [N_{r - 1}, ..., N_0]. // At this time, complex tensors are supported only for FFT-like operations, as // DFT, IDFT, RDFT -std::vector reverse_shape_of_emulated_complex_tensor(const ngraph::Shape& shape); +std::vector reverse_shape_of_emulated_complex_tensor(const Shape& shape); // Calculates strides for all axes. 
std::vector compute_strides(const std::vector& v); diff --git a/src/core/reference/include/openvino/reference/utils/nms_common.hpp b/src/core/reference/include/openvino/reference/utils/nms_common.hpp index 4138db64085022..b8364bb0661afb 100644 --- a/src/core/reference/include/openvino/reference/utils/nms_common.hpp +++ b/src/core/reference/include/openvino/reference/utils/nms_common.hpp @@ -10,7 +10,7 @@ #include #include -#include "ngraph/type/element_type.hpp" +#include "openvino/core/type/element_type.hpp" namespace ov { namespace reference { @@ -60,11 +60,11 @@ struct BoxInfo { void nms_common_postprocessing(void* prois, void* pscores, void* pselected_num, - const ngraph::element::Type& output_type, + const element::Type& output_type, const std::vector& selected_outputs, const std::vector& selected_indices, const std::vector& valid_outputs, - const ngraph::element::Type& selected_outputs_type); + const element::Type& selected_outputs_type); } // namespace nms_common } // namespace reference diff --git a/src/core/reference/src/op/depth_to_space.cpp b/src/core/reference/src/op/depth_to_space.cpp index 490b566f4564e3..0a0d33596d4961 100644 --- a/src/core/reference/src/op/depth_to_space.cpp +++ b/src/core/reference/src/op/depth_to_space.cpp @@ -7,8 +7,8 @@ #include #include -#include "ngraph/check.hpp" #include "ngraph/runtime/opt_kernel/reshape.hpp" +#include "openvino/core/except.hpp" namespace ov { namespace reference { @@ -35,16 +35,16 @@ void depth_to_space(const char* const in, const size_t spatial_dims = in_shape.size() - spatial_dim_index; const size_t c_dim_divider = static_cast(std::pow(block_size, spatial_dims)); - NGRAPH_CHECK(block_size > 0 && c_dim % c_dim_divider == 0, - "DepthToSpace: The input data's 'channels' axis size: ", - c_dim, - " must be evenly divided by 'block_size'^'spatial_dims': (", - c_dim_divider, - ", ", - block_size, - "^", - spatial_dims, - ")"); + OPENVINO_ASSERT(block_size > 0 && c_dim % c_dim_divider == 0, + "DepthToSpace: The input data's 'channels' axis size: ", + c_dim, + " must be evenly divided by 'block_size'^'spatial_dims': (", + c_dim_divider, + ", ", + block_size, + "^", + spatial_dims, + ")"); const size_t c_flat = c_dim / c_dim_divider; diff --git a/src/core/reference/src/op/einsum.cpp b/src/core/reference/src/op/einsum.cpp index a48dc998495286..abe8f8c14ba547 100644 --- a/src/core/reference/src/op/einsum.cpp +++ b/src/core/reference/src/op/einsum.cpp @@ -249,6 +249,14 @@ Shape compute_matmul_output_shape(const Shape& common_sub_shape, return matmul_output_shape; } +/// @brief Prepares default order axis vector +/// +AxisVector get_default_order(size_t rank) { + AxisVector default_order(rank); + std::iota(begin(default_order), end(default_order), 0); + return default_order; +} + /// \brief Update a vector of inputs and subscripts by removing items for /// inputs with indices input_ind1 and input_ind2 and inserted new input and /// the corresponsing subscript in the tail @@ -278,8 +286,8 @@ ov::Tensor unsqueeze_input(const ov::Tensor& input, std::vector& unsque return input; } - Shape input_shape = input.get_shape(); - Shape output_shape = input_shape; + const auto& input_shape = input.get_shape(); + auto output_shape = input_shape; std::sort(unsqueeze_axes.begin(), unsqueeze_axes.end()); for (auto unsqueeze_axis : unsqueeze_axes) { OPENVINO_ASSERT(unsqueeze_axis >= 0); @@ -288,9 +296,7 @@ ov::Tensor unsqueeze_input(const ov::Tensor& input, std::vector& unsque } auto output = ov::Tensor(input.get_element_type(), output_shape); - 
OPENVINO_SUPPRESS_DEPRECATED_START - const AxisVector order = ngraph::get_default_order(input.get_shape()); - OPENVINO_SUPPRESS_DEPRECATED_END + const auto order = get_default_order(input_shape.size()); const auto element_type = input.get_element_type(); reference::reshape(reinterpret_cast(input.data()), @@ -645,11 +651,9 @@ ov::Tensor reshape_input_for_matmul(const ov::Tensor& input, } const auto element_type = input.get_element_type(); - const auto input_shape = input.get_shape(); + const auto& input_shape = input.get_shape(); auto output = ov::Tensor(element_type, new_shape); - OPENVINO_SUPPRESS_DEPRECATED_START - const AxisVector order = ngraph::get_default_order(input_shape); - OPENVINO_SUPPRESS_DEPRECATED_END + const auto order = get_default_order(input_shape.size()); reference::reshape(reinterpret_cast(input.data()), reinterpret_cast(output.data()), @@ -871,7 +875,7 @@ void contract_two_inputs(ov::TensorVector& inputs, // broadcast both inputs to have common sub-shape broadcasted that is needed // in case of ellipsis among the common labels // reference::broadcast() - PartialShape::broadcast_merge_into(common_sub_shape1, common_sub_shape2, ngraph::op::AutoBroadcastType::NUMPY); + PartialShape::broadcast_merge_into(common_sub_shape1, common_sub_shape2, op::AutoBroadcastType::NUMPY); Shape common_sub_shape = common_sub_shape1.get_shape(); broadcast_input(inputs, input_ind1, @@ -926,9 +930,7 @@ void contract_two_inputs(ov::TensorVector& inputs, back_shape.insert(back_shape.end(), separate2_sub_shape.begin(), separate2_sub_shape.end()); auto contract_output = ov::Tensor(matmul_output.get_element_type(), back_shape); - OPENVINO_SUPPRESS_DEPRECATED_START - const AxisVector order = ngraph::get_default_order(matmul_output.get_shape()); - OPENVINO_SUPPRESS_DEPRECATED_END + const auto order = get_default_order(matmul_output.get_shape().size()); reference::reshape(reinterpret_cast(matmul_output.data()), reinterpret_cast(contract_output.data()), matmul_output.get_shape(), diff --git a/src/core/reference/src/op/experimental_detectron_detection_output.cpp b/src/core/reference/src/op/experimental_detectron_detection_output.cpp index da60c8a7ce8101..bf297fef97d4bd 100644 --- a/src/core/reference/src/op/experimental_detectron_detection_output.cpp +++ b/src/core/reference/src/op/experimental_detectron_detection_output.cpp @@ -14,13 +14,13 @@ // limitations under the License. 
//***************************************************************************** -#include "ngraph/op/experimental_detectron_detection_output.hpp" +#include "openvino/op/experimental_detectron_detection_output.hpp" #include #include #include -#include "ngraph/shape.hpp" +#include "openvino/core/shape.hpp" #include "openvino/reference/experimental_detectron_detection_output.hpp" namespace { @@ -318,7 +318,7 @@ void experimental_detectron_detection_output(const float* boxes, void experimental_detectron_detection_output_postprocessing(void* pboxes, void* pclasses, void* pscores, - const ngraph::element::Type output_type, + const element::Type output_type, const std::vector& output_boxes, const std::vector& output_classes, const std::vector& output_scores, diff --git a/src/core/reference/src/op/experimental_detectron_proposal_single_image.cpp b/src/core/reference/src/op/experimental_detectron_proposal_single_image.cpp index 319c4c75f8d1af..ef9ad0002d4b86 100644 --- a/src/core/reference/src/op/experimental_detectron_proposal_single_image.cpp +++ b/src/core/reference/src/op/experimental_detectron_proposal_single_image.cpp @@ -10,8 +10,8 @@ #include #include -#include "ngraph/op/experimental_detectron_generate_proposals.hpp" -#include "ngraph/shape.hpp" +#include "openvino/core/shape.hpp" +#include "openvino/op/experimental_detectron_generate_proposals.hpp" #include "openvino/reference/proposal.hpp" namespace { @@ -295,7 +295,7 @@ void experimental_detectron_proposals_single_image( void experimental_detectron_proposals_single_image_postprocessing(void* prois, void* pscores, - const ngraph::element::Type output_type, + const element::Type output_type, const std::vector& output_rois, const std::vector& output_scores, const Shape& output_rois_shape, diff --git a/src/core/reference/src/op/experimental_detectron_roi_feature_extractor.cpp b/src/core/reference/src/op/experimental_detectron_roi_feature_extractor.cpp index ad4c35f34824fb..423fb3d4a7dd89 100644 --- a/src/core/reference/src/op/experimental_detectron_roi_feature_extractor.cpp +++ b/src/core/reference/src/op/experimental_detectron_roi_feature_extractor.cpp @@ -10,8 +10,8 @@ #include #include -#include "ngraph/op/experimental_detectron_roi_feature.hpp" -#include "ngraph/shape.hpp" +#include "openvino/core/shape.hpp" +#include "openvino/op/experimental_detectron_roi_feature.hpp" #if defined(__GNUC__) && !defined(__clang__) # if defined(__linux__) && defined(OPENVINO_ARCH_X86) && \ @@ -344,7 +344,7 @@ void experimental_detectron_roi_feature_extractor( void experimental_detectron_roi_feature_extractor_postprocessing(void* prois_features, void* prois, - const ngraph::element::Type output_type, + const element::Type output_type, const std::vector& output_rois_features, const std::vector& output_rois, const Shape& output_rois_features_shape, diff --git a/src/core/reference/src/op/function.cpp b/src/core/reference/src/op/function.cpp index ebf706e3f03c0d..c70bf4020b17d0 100644 --- a/src/core/reference/src/op/function.cpp +++ b/src/core/reference/src/op/function.cpp @@ -6,12 +6,8 @@ #include -#include "ngraph/opsets/opset5.hpp" -#include "ngraph/runtime/host_tensor.hpp" -#include "ngraph/runtime/tensor.hpp" #include "openvino/core/deprecated.hpp" #include "openvino/core/shape_util.hpp" -#include "openvino/reference/concat.hpp" namespace ov { namespace reference { diff --git a/src/core/reference/src/op/gather_tree.cpp b/src/core/reference/src/op/gather_tree.cpp index 6e9ef4bb04de38..4a5dd31092e1a0 100644 --- a/src/core/reference/src/op/gather_tree.cpp 
+++ b/src/core/reference/src/op/gather_tree.cpp @@ -9,7 +9,6 @@ #include #include -#include "ngraph/check.hpp" #include "openvino/core/except.hpp" #include "openvino/reference/utils/coordinate_transform.hpp" @@ -19,8 +18,8 @@ static size_t _asIndex(const char* source, const element::Type& element_type) { // According to the GatherTree op specification only I32 and FP32 precisions are supported. switch (element_type) { case element::Type_t::f16: { - ngraph::float16 tmpBuff = 0.f; - memcpy(&tmpBuff, source, sizeof(ngraph::float16)); + ov::float16 tmpBuff = 0.f; + memcpy(&tmpBuff, source, sizeof(ov::float16)); return static_cast(tmpBuff); } case element::Type_t::f32: { diff --git a/src/core/reference/src/op/generate_proposal.cpp b/src/core/reference/src/op/generate_proposal.cpp index 3e4f9b8707b99b..10fa35d0dcb3c6 100644 --- a/src/core/reference/src/op/generate_proposal.cpp +++ b/src/core/reference/src/op/generate_proposal.cpp @@ -10,8 +10,8 @@ #include #include -#include "ngraph/op/generate_proposals.hpp" -#include "ngraph/shape.hpp" +#include "openvino/core/shape.hpp" +#include "openvino/op/generate_proposals.hpp" struct sProposalBox { float x0; @@ -358,8 +358,8 @@ void generate_proposals(const std::vector& im_info, void generate_proposals_postprocessing(void* prois, void* pscores, void* proi_num, - const ngraph::element::Type& output_type, - const ngraph::element::Type& roi_num_type, + const element::Type& output_type, + const element::Type& roi_num_type, const std::vector& output_rois, const std::vector& output_scores, const std::vector& num_rois, diff --git a/src/core/reference/src/op/group_convolution.cpp b/src/core/reference/src/op/group_convolution.cpp index f3a85a8b37a06f..cb613b74ed7c79 100644 --- a/src/core/reference/src/op/group_convolution.cpp +++ b/src/core/reference/src/op/group_convolution.cpp @@ -15,19 +15,19 @@ void validate_group_convolution_parameters(const Shape& in_shape, const CoordinateDiff& pads_begin, const CoordinateDiff& pads_end) { // this implementation supports 1D, 2D and 3D convolutions - NGRAPH_CHECK(in_shape.size() >= 3 && in_shape.size() <= 5, "Unsupported input rank: ", in_shape); + OPENVINO_ASSERT(in_shape.size() >= 3 && in_shape.size() <= 5, "Unsupported input rank: ", in_shape); - NGRAPH_CHECK(in_shape.size() + 1 == f_shape.size(), "Unsupported filter rank: ", f_shape.size()); + OPENVINO_ASSERT(in_shape.size() + 1 == f_shape.size(), "Unsupported filter rank: ", f_shape.size()); - NGRAPH_CHECK(in_shape.size() == out_shape.size(), - "Incompatible input and output ranks: ", - in_shape.size(), - " and ", - out_shape.size()); + OPENVINO_ASSERT(in_shape.size() == out_shape.size(), + "Incompatible input and output ranks: ", + in_shape.size(), + " and ", + out_shape.size()); const size_t groups = f_shape[filter_group_axis]; const size_t in_channels = in_shape[in_channel_axis]; - NGRAPH_CHECK(in_channels % groups == 0, "Input channels of data batch input must be multiple of groups"); + OPENVINO_ASSERT(in_channels % groups == 0, "Input channels of data batch input must be multiple of groups"); const Shape in_group_shape = [&]() { Shape new_shape{in_shape}; new_shape[in_channel_axis] /= groups; @@ -35,7 +35,7 @@ void validate_group_convolution_parameters(const Shape& in_shape, }(); const size_t out_channels = out_shape[out_channel_axis]; - NGRAPH_CHECK(out_channels % groups == 0, "Output channels of output must be multiple of groups"); + OPENVINO_ASSERT(out_channels % groups == 0, "Output channels of output must be multiple of groups"); const Shape out_group_shape 
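The _asIndex change above only swaps ngraph::float16 for ov::float16; the underlying pattern is ordinary memcpy-based type punning of a raw byte buffer. A simplified float-only sketch (float16 omitted so the snippet stays self-contained):

#include <cstddef>
#include <cstring>

// Copy the raw bytes into a properly typed object (avoiding strict-aliasing
// problems), then convert the value to an index.
std::size_t as_index_f32_sketch(const char* source) {
    float tmp = 0.0f;
    std::memcpy(&tmp, source, sizeof(float));
    return static_cast<std::size_t>(tmp);
}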
= [&]() { Shape new_shape{out_shape}; new_shape[out_channel_axis] /= groups; diff --git a/src/core/reference/src/op/group_convolution_backprop_data.cpp b/src/core/reference/src/op/group_convolution_backprop_data.cpp index a45356d109d5e0..dd8cacb46d799e 100644 --- a/src/core/reference/src/op/group_convolution_backprop_data.cpp +++ b/src/core/reference/src/op/group_convolution_backprop_data.cpp @@ -32,27 +32,27 @@ void validate_convolution_backprop_data_parameters(const Shape& in_shape, const CoordinateDiff& pads_begin, const CoordinateDiff& pads_end) { // this implementation supports 1D, 2D and 3D convolutions - NGRAPH_CHECK(in_shape.size() >= 3 && in_shape.size() <= 5, "Unsupported input rank: ", in_shape); - NGRAPH_CHECK(in_shape.size() == f_shape.size(), - "Incompatible input ranks: ", - in_shape.size(), - " and ", - f_shape.size()); - NGRAPH_CHECK(in_shape[in_channel_axis] == f_shape[filter_in_ch_axis], - "Incompatible input channels in data batch and filters shapes: ", - in_shape[in_channel_axis], - " and ", - f_shape[filter_in_ch_axis]); - NGRAPH_CHECK(in_shape.size() == out_shape.size(), - "Incompatible input and output ranks: ", - in_shape.size(), - " and ", - out_shape.size()); + OPENVINO_ASSERT(in_shape.size() >= 3 && in_shape.size() <= 5, "Unsupported input rank: ", in_shape); + OPENVINO_ASSERT(in_shape.size() == f_shape.size(), + "Incompatible input ranks: ", + in_shape.size(), + " and ", + f_shape.size()); + OPENVINO_ASSERT(in_shape[in_channel_axis] == f_shape[filter_in_ch_axis], + "Incompatible input channels in data batch and filters shapes: ", + in_shape[in_channel_axis], + " and ", + f_shape[filter_in_ch_axis]); + OPENVINO_ASSERT(in_shape.size() == out_shape.size(), + "Incompatible input and output ranks: ", + in_shape.size(), + " and ", + out_shape.size()); const auto spatial_dims = in_shape.size() - 2; - NGRAPH_CHECK(strides.size() == spatial_dims, "Strides not definied for all and only spatial dimensions"); - NGRAPH_CHECK(dilations.size() == spatial_dims, "Dilations not defined for all and only spatial dimensions"); - NGRAPH_CHECK((pads_begin.size() == pads_end.size()) && (pads_begin.size() == spatial_dims), - "Pads not defined for all and only spatial dimensions"); + OPENVINO_ASSERT(strides.size() == spatial_dims, "Strides not definied for all and only spatial dimensions"); + OPENVINO_ASSERT(dilations.size() == spatial_dims, "Dilations not defined for all and only spatial dimensions"); + OPENVINO_ASSERT((pads_begin.size() == pads_end.size()) && (pads_begin.size() == spatial_dims), + "Pads not defined for all and only spatial dimensions"); Shape out_spatial_shape{std::next(out_shape.begin(), 2), std::end(out_shape)}; Shape infered_out_spatial_shape{}; @@ -63,7 +63,7 @@ void validate_convolution_backprop_data_parameters(const Shape& in_shape, dilations, pads_begin, pads_end); - NGRAPH_CHECK(out_spatial_shape == infered_out_spatial_shape, "Incorrect output shape provided"); + OPENVINO_ASSERT(out_spatial_shape == infered_out_spatial_shape, "Incorrect output shape provided"); } void validate_group_convolution_backprop_data_parameters(const Shape& in_shape, @@ -74,19 +74,19 @@ void validate_group_convolution_backprop_data_parameters(const Shape& in_shape, const CoordinateDiff& pads_begin, const CoordinateDiff& pads_end) { // this implementation supports 1D, 2D and 3D convolutions - NGRAPH_CHECK(in_shape.size() >= 3 && in_shape.size() <= 5, "Unsupported input rank: ", in_shape); + OPENVINO_ASSERT(in_shape.size() >= 3 && in_shape.size() <= 5, "Unsupported input rank: ", 
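The NGRAPH_CHECK to OPENVINO_ASSERT migration in these validation helpers is mechanical: the condition and the variadic message parts keep the same meaning. A usage sketch, assuming OPENVINO_ASSERT comes from openvino/core/except.hpp and throws an ov::Exception-derived error when the condition fails:

#include "openvino/core/except.hpp"
#include "openvino/core/shape.hpp"

// Illustrative check mirroring the rank validation above (assumed macro behaviour).
void check_input_rank(const ov::Shape& in_shape) {
    OPENVINO_ASSERT(in_shape.size() >= 3 && in_shape.size() <= 5,
                    "Unsupported input rank: ",
                    in_shape.size());
}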
in_shape); - NGRAPH_CHECK(in_shape.size() + 1 == f_shape.size(), "Unsupported filter rank: ", f_shape.size()); + OPENVINO_ASSERT(in_shape.size() + 1 == f_shape.size(), "Unsupported filter rank: ", f_shape.size()); - NGRAPH_CHECK(in_shape.size() == out_shape.size(), - "Incompatible input and output ranks: ", - in_shape.size(), - " and ", - out_shape.size()); + OPENVINO_ASSERT(in_shape.size() == out_shape.size(), + "Incompatible input and output ranks: ", + in_shape.size(), + " and ", + out_shape.size()); const size_t groups = f_shape[filter_group_axis]; const size_t in_channels = in_shape[in_channel_axis]; - NGRAPH_CHECK(in_channels % groups == 0, "Input channels of data batch input must be multiple of groups"); + OPENVINO_ASSERT(in_channels % groups == 0, "Input channels of data batch input must be multiple of groups"); const Shape in_group_shape = [&]() { Shape new_shape{in_shape}; new_shape[in_channel_axis] /= groups; @@ -94,7 +94,7 @@ void validate_group_convolution_backprop_data_parameters(const Shape& in_shape, }(); const size_t out_channels = out_shape[out_channel_axis]; - NGRAPH_CHECK(out_channels % groups == 0, "Output channels of output must be multiple of groups"); + OPENVINO_ASSERT(out_channels % groups == 0, "Output channels of output must be multiple of groups"); const Shape out_group_shape = [&]() { Shape new_shape{out_shape}; new_shape[out_channel_axis] /= groups; diff --git a/src/core/reference/src/op/if.cpp b/src/core/reference/src/op/if.cpp index 4bffb99470e32d..3b74fa78108baa 100644 --- a/src/core/reference/src/op/if.cpp +++ b/src/core/reference/src/op/if.cpp @@ -4,7 +4,7 @@ #include "openvino/reference/if.hpp" -#include "ngraph/op/if.hpp" +#include "openvino/op/if.hpp" #include "openvino/reference/function.hpp" namespace ov { @@ -14,7 +14,7 @@ void if_reference(const std::vector>& bodies, const std::vector& input_descs, ov::TensorVector& out, const ov::TensorVector& args) { - NGRAPH_CHECK(args.size() > 0, "If operation must have input condition value"); + OPENVINO_ASSERT(args.size() > 0, "If operation must have input condition value"); auto condition_value = args[0].data()[0]; auto branch_index = (condition_value) ? op::v8::If::THEN_BODY_INDEX : op::v8::If::ELSE_BODY_INDEX; @@ -24,16 +24,16 @@ void if_reference(const std::vector>& bodies, auto inputs_size = args.size(); auto output_size = out.size(); for (const auto& input_desc : input_descs[branch_index]) { - NGRAPH_CHECK(inputs_size > input_desc->m_input_index, - "Incorrect associating! If has not input with id ", - input_desc->m_input_index); + OPENVINO_ASSERT(inputs_size > input_desc->m_input_index, + "Incorrect associating! If has not input with id ", + input_desc->m_input_index); inputs_to_body[input_desc->m_body_parameter_index] = args[input_desc->m_input_index]; } reference::function(bodies[branch_index], inputs_to_body, outs_from_body); for (const auto& out_descr : out_descs[branch_index]) { - NGRAPH_CHECK(output_size > out_descr->m_output_index, - "Incorrect associating! If has not output with id ", - out_descr->m_output_index); + OPENVINO_ASSERT(output_size > out_descr->m_output_index, + "Incorrect associating! 
If has not output with id ", + out_descr->m_output_index); auto res = outs_from_body[out_descr->m_body_value_index]; res.copy_to(out[out_descr->m_output_index]); } diff --git a/src/core/reference/src/op/interpolate.cpp b/src/core/reference/src/op/interpolate.cpp index 64bf9f8b4693c2..e7b4deb9e84de9 100644 --- a/src/core/reference/src/op/interpolate.cpp +++ b/src/core/reference/src/op/interpolate.cpp @@ -6,9 +6,8 @@ #include -using namespace ov::reference; - -using Coordinate = ngraph::Coordinate; +namespace ov { +namespace reference { float InterpolateEvalHelper::triangle_coeff(float dz) { return std::max(0.0f, 1.0f - std::fabs(dz)); @@ -122,19 +121,15 @@ InterpolateEvalHelper::InfoForLinearMode InterpolateEvalHelper::get_info_for_lin std::vector a(num_of_axes); std::vector r(num_of_axes); - NGRAPH_SUPPRESS_DEPRECATED_START - CoordinateTransform output_transform(m_out_shape); - CoordinateTransform input_transform(m_input_data_shape); - - std::vector vector_for_indeces(num_of_axes); + std::vector vector_for_indices(num_of_axes); float prod_a = 1; for (std::size_t i = 0; i < num_of_axes; ++i) { a[i] = antialias ? m_scales[i] : 1.0f; prod_a *= a[i]; r[i] = (m_scales[i] > 1.0) ? static_cast(2) : static_cast(std::ceil(2.0f / a[i])); - vector_for_indeces[i] = 2 * r[i] + 1; + vector_for_indices[i] = 2 * r[i] + 1; } - Shape shape_for_indeces{vector_for_indeces}; + Shape shape_for_indices{vector_for_indices}; InfoForLinearMode result; @@ -142,8 +137,7 @@ InterpolateEvalHelper::InfoForLinearMode InterpolateEvalHelper::get_info_for_lin result.a = a; result.r = r; result.prod_a = prod_a; - result.shape_for_indeces = shape_for_indeces; - NGRAPH_SUPPRESS_DEPRECATED_END + result.shape_for_indices = shape_for_indices; return result; } @@ -228,3 +222,5 @@ InterpolateEvalHelper::LinearModeInnerIterationResult InterpolateEvalHelper::inn return result; } +} // namespace reference +} // namespace ov diff --git a/src/core/reference/src/op/irdft.cpp b/src/core/reference/src/op/irdft.cpp index d66c7a8e556ae8..4046f8460e6708 100644 --- a/src/core/reference/src/op/irdft.cpp +++ b/src/core/reference/src/op/irdft.cpp @@ -10,7 +10,7 @@ #include #include -#include "ngraph/shape.hpp" +#include "openvino/core/shape.hpp" #include "openvino/reference/fft.hpp" #include "openvino/reference/utils/fft_common.hpp" diff --git a/src/core/reference/src/op/matmul.cpp b/src/core/reference/src/op/matmul.cpp index 446e941e984123..f84287c0256acf 100644 --- a/src/core/reference/src/op/matmul.cpp +++ b/src/core/reference/src/op/matmul.cpp @@ -9,14 +9,12 @@ #include #include -#include "ngraph/shape_util.hpp" - namespace ov { namespace reference { namespace details { std::vector get_transpose_order(const Shape& input_shape) { size_t rank = input_shape.size(); - NGRAPH_CHECK(rank > 1, "Invalid input for transpose"); + OPENVINO_ASSERT(rank > 1, "Invalid input for transpose"); std::vector axes_order(rank); std::iota(axes_order.begin(), axes_order.end(), 0); std::swap(axes_order[rank - 1], axes_order[rank - 2]); diff --git a/src/core/reference/src/op/matrix_nms.cpp b/src/core/reference/src/op/matrix_nms.cpp index bc6b38478549b6..2dfe451afe62a7 100644 --- a/src/core/reference/src/op/matrix_nms.cpp +++ b/src/core/reference/src/op/matrix_nms.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/matrix_nms.hpp" +#include "openvino/op/matrix_nms.hpp" #include #include @@ -10,7 +10,7 @@ #include #include -#include "ngraph/shape.hpp" +#include "openvino/core/shape.hpp" #include "openvino/reference/matrix_nms.hpp" #include 
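The MatMul helper above builds the permutation used to swap the two innermost axes before the transposed multiplication. A small sketch of the order it produces (a rank of at least 2 is assumed, as the assertion enforces):

#include <cstddef>
#include <numeric>
#include <utility>
#include <vector>

// Identity permutation with the last two axes swapped, e.g. rank 4 -> {0, 1, 3, 2}.
std::vector<std::size_t> transpose_order_sketch(std::size_t rank) {
    std::vector<std::size_t> order(rank);
    std::iota(order.begin(), order.end(), std::size_t{0});
    std::swap(order[rank - 1], order[rank - 2]);
    return order;
}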
"openvino/reference/utils/nms_common.hpp" diff --git a/src/core/reference/src/op/multiclass_nms.cpp b/src/core/reference/src/op/multiclass_nms.cpp index cee59347ac1e2d..b38091c7dd7ce1 100644 --- a/src/core/reference/src/op/multiclass_nms.cpp +++ b/src/core/reference/src/op/multiclass_nms.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/multiclass_nms.hpp" +#include "openvino/op/multiclass_nms.hpp" #include #include @@ -10,14 +10,13 @@ #include #include -#include "ngraph/shape.hpp" +#include "openvino/core/shape.hpp" #include "openvino/reference/multiclass_nms.hpp" #include "openvino/reference/utils/nms_common.hpp" namespace ov { namespace reference { namespace multiclass_nms_impl { -OPENVINO_SUPPRESS_DEPRECATED_START using Rectangle = reference::nms_common::Rectangle; using BoxInfo = reference::nms_common::BoxInfo; diff --git a/src/core/reference/src/op/pad.cpp b/src/core/reference/src/op/pad.cpp index ad8ab0f9f0bf87..e589472135765d 100644 --- a/src/core/reference/src/op/pad.cpp +++ b/src/core/reference/src/op/pad.cpp @@ -6,8 +6,7 @@ #include -#include "ngraph/axis_vector.hpp" -#include "ngraph/check.hpp" +#include "openvino/core/except.hpp" #include "openvino/reference/utils/coordinate_index.hpp" #include "openvino/reference/utils/coordinate_transform.hpp" @@ -153,9 +152,9 @@ struct SymmetricAndReflectPad : PadBase { void check_inputs() const override { for (size_t i = 0; i != padding_begin.size(); ++i) { const auto axis_size = static_cast(data_shape[i]); - NGRAPH_CHECK(padding_begin.at(i) - axis_correction < axis_size, - "padding below should be less than data shape"); - NGRAPH_CHECK(padding_end.at(i) - axis_correction < axis_size, "padding should be less than data shape"); + OPENVINO_ASSERT(padding_begin.at(i) - axis_correction < axis_size, + "padding below should be less than data shape"); + OPENVINO_ASSERT(padding_end.at(i) - axis_correction < axis_size, "padding should be less than data shape"); } } diff --git a/src/core/reference/src/op/random_uniform.cpp b/src/core/reference/src/op/random_uniform.cpp index 99bd70aca404d0..01215b095d2100 100644 --- a/src/core/reference/src/op/random_uniform.cpp +++ b/src/core/reference/src/op/random_uniform.cpp @@ -6,8 +6,8 @@ #include -#include "ngraph/shape.hpp" #include "openvino/core/except.hpp" +#include "openvino/core/shape.hpp" namespace ov { namespace reference { @@ -140,7 +140,7 @@ void run_philox(uint64_t key, uint64_t counter, uint64_t n, size_t n_rounds, std template void convert_to_output_type(const std::vector& res, size_t step, - const ngraph::element::Type& elem_type, + const element::Type& elem_type, const char* min_val, const char* max_val, char* out, @@ -185,7 +185,7 @@ std::pair random_uniform(const uint64_t* out_shape, const char* max_val, char* out, const Shape& out_shape_shape, - const ngraph::element::Type& elem_type, + const element::Type& elem_type, uint64_t seed, uint64_t seed2, std::pair prev_state) { @@ -229,11 +229,11 @@ std::pair random_uniform(const uint64_t* out_shape, // convert values to corresponding output_type switch (elem_type) { - case ngraph::element::Type_t::f32: { + case element::Type_t::f32: { convert_to_output_type(res, step, elem_type, min_val, max_val, out, k, elem_count, uint32_to_float); break; } - case ngraph::element::Type_t::f16: { + case element::Type_t::f16: { convert_to_output_type(res, step, elem_type, @@ -245,7 +245,7 @@ std::pair random_uniform(const uint64_t* out_shape, uint32_to_float16); break; } - case ngraph::element::Type_t::bf16: { + case 
element::Type_t::bf16: { convert_to_output_type(res, step, elem_type, @@ -257,7 +257,7 @@ std::pair random_uniform(const uint64_t* out_shape, uint32_to_bfloat16); break; } - case ngraph::element::Type_t::f64: { + case element::Type_t::f64: { convert_to_output_type(res, step, elem_type, @@ -272,7 +272,7 @@ std::pair random_uniform(const uint64_t* out_shape, }); break; } - case ngraph::element::Type_t::i32: { + case element::Type_t::i32: { convert_to_output_type(res, step, elem_type, @@ -288,7 +288,7 @@ std::pair random_uniform(const uint64_t* out_shape, }); break; } - case ngraph::element::Type_t::i64: { + case element::Type_t::i64: { convert_to_output_type(res, step, elem_type, diff --git a/src/core/reference/src/op/rdft.cpp b/src/core/reference/src/op/rdft.cpp index 771b05f73a1a4b..f24eadfa559bc0 100644 --- a/src/core/reference/src/op/rdft.cpp +++ b/src/core/reference/src/op/rdft.cpp @@ -19,7 +19,7 @@ #include #include -#include "ngraph/shape.hpp" +#include "openvino/core/shape.hpp" #include "openvino/reference/fft.hpp" #include "openvino/reference/utils/fft_common.hpp" diff --git a/src/core/reference/src/op/reorg_yolo.cpp b/src/core/reference/src/op/reorg_yolo.cpp index 1bf40680e168c9..68d1936e893577 100644 --- a/src/core/reference/src/op/reorg_yolo.cpp +++ b/src/core/reference/src/op/reorg_yolo.cpp @@ -8,7 +8,7 @@ #include -#include "ngraph/shape.hpp" +#include "openvino/core/shape.hpp" namespace ov { namespace reference { diff --git a/src/core/reference/src/op/reshape.cpp b/src/core/reference/src/op/reshape.cpp index 2da555a542a6fe..dec23afda868d7 100644 --- a/src/core/reference/src/op/reshape.cpp +++ b/src/core/reference/src/op/reshape.cpp @@ -7,7 +7,7 @@ #include #include -#include "ngraph/check.hpp" +#include "openvino/core/except.hpp" #include "openvino/reference/utils/coordinate_range.hpp" #include "openvino/reference/utils/coordinate_transform.hpp" @@ -17,7 +17,7 @@ namespace { std::vector reorder(const std::vector& origin, const AxisVector& order) { std::vector reordered = origin; auto out = begin(reordered); - NGRAPH_CHECK(origin.size() <= order.size()); + OPENVINO_ASSERT(origin.size() <= order.size()); for (size_t i = 0; i < origin.size(); ++i) { *out = origin.at(order[i]); ++out; diff --git a/src/core/reference/src/op/reverse.cpp b/src/core/reference/src/op/reverse.cpp index 0794a81ce95259..e0555cde41ede0 100644 --- a/src/core/reference/src/op/reverse.cpp +++ b/src/core/reference/src/op/reverse.cpp @@ -8,11 +8,9 @@ #include #include -#include "ngraph/check.hpp" +#include "openvino/core/except.hpp" #include "openvino/reference/utils/coordinate_range.hpp" -using namespace ngraph; - namespace ov { namespace reference { void reverse(const char* arg, @@ -21,7 +19,7 @@ void reverse(const char* arg, const Shape& out_shape, const AxisSet& reversed_axes, size_t elem_size) { - NGRAPH_CHECK(shape_size(arg_shape) == shape_size(out_shape)); + OPENVINO_ASSERT(shape_size(arg_shape) == shape_size(out_shape)); const bool nothing_to_revers = reversed_axes.empty(); if (nothing_to_revers) { diff --git a/src/core/reference/src/op/slice.cpp b/src/core/reference/src/op/slice.cpp index 4ea9bc6be23cf8..4f01cbce8a8890 100644 --- a/src/core/reference/src/op/slice.cpp +++ b/src/core/reference/src/op/slice.cpp @@ -6,9 +6,9 @@ #include -#include "ngraph/check.hpp" #include "openvino/core/except.hpp" #include "openvino/reference/utils/coordinate_range.hpp" +#include "openvino/util/common_util.hpp" namespace ov { namespace reference { @@ -64,12 +64,21 @@ void slice(const char* arg, const Strides& strides, 
const Shape& out_shape, size_t elem_size) { - NGRAPH_SUPPRESS_DEPRECATED_START - const CoordinateTransform input_transform(arg_shape, lower_bounds, upper_bounds, strides); + const auto rank = arg_shape.size(); + OPENVINO_ASSERT( + lower_bounds.size() == rank && upper_bounds.size() == rank && strides.size() == rank && + out_shape.size() == rank, + "arg_shape, lower_bounds, upper_bounds, strides and out_shape are expected to have the same rank equal ", + rank); - const CoordinateTransform output_transform(out_shape); - - NGRAPH_CHECK(shape_size(input_transform.get_target_shape()) == shape_size(output_transform.get_target_shape())); + auto expected_out_shape = Shape(arg_shape); + for (size_t i = 0; i < rank; ++i) + expected_out_shape[i] = util::ceil_div(upper_bounds[i] - lower_bounds[i], strides[i]); + OPENVINO_ASSERT(out_shape == expected_out_shape, + "Expected output shape is ", + expected_out_shape, + ". Got ", + out_shape); auto dst_mem = out; @@ -81,7 +90,6 @@ void slice(const char* arg, std::advance(dst_mem, elem_size); } } - NGRAPH_SUPPRESS_DEPRECATED_END } } // namespace reference } // namespace ov diff --git a/src/core/reference/src/op/space_to_depth.cpp b/src/core/reference/src/op/space_to_depth.cpp index 646550bb2a6dd4..247efe39412362 100644 --- a/src/core/reference/src/op/space_to_depth.cpp +++ b/src/core/reference/src/op/space_to_depth.cpp @@ -6,8 +6,8 @@ #include -#include "ngraph/check.hpp" #include "ngraph/runtime/opt_kernel/reshape.hpp" +#include "openvino/core/except.hpp" namespace ov { namespace reference { @@ -34,13 +34,13 @@ void space_to_depth(const char* const in, const size_t spatial_dims = in_shape.size() - spatial_dim_index; for (size_t i = spatial_dim_index; i < in_shape.size(); ++i) { - NGRAPH_CHECK(block_size > 0 && in_shape.at(i) % block_size == 0, - "SpaceToDepth: The dimension on position: ", - i, - " equal to: ", - in_shape.at(i), - " must be a multiple of blocksize: ", - block_size); + OPENVINO_ASSERT(block_size > 0 && in_shape.at(i) % block_size == 0, + "SpaceToDepth: The dimension on position: ", + i, + " equal to: ", + in_shape.at(i), + " must be a multiple of blocksize: ", + block_size); } Shape dispersed_shape{n_dim, c_dim}; diff --git a/src/core/reference/src/op/split.cpp b/src/core/reference/src/op/split.cpp index 41a2dba235a31f..6186bdd5af941d 100644 --- a/src/core/reference/src/op/split.cpp +++ b/src/core/reference/src/op/split.cpp @@ -8,8 +8,6 @@ #include -#include "ngraph/check.hpp" - using namespace ov; void reference::split(const char* data, diff --git a/src/core/reference/src/op/strided_slice.cpp b/src/core/reference/src/op/strided_slice.cpp index 457a65dec5d0c1..2ff07ba8500308 100644 --- a/src/core/reference/src/op/strided_slice.cpp +++ b/src/core/reference/src/op/strided_slice.cpp @@ -8,7 +8,6 @@ #include -#include "ngraph/check.hpp" #include "ngraph/runtime/aligned_buffer.hpp" #include "ngraph/runtime/opt_kernel/reshape.hpp" diff --git a/src/core/reference/src/op/transpose.cpp b/src/core/reference/src/op/transpose.cpp index 1706775f57224e..5b893ccc5697ed 100644 --- a/src/core/reference/src/op/transpose.cpp +++ b/src/core/reference/src/op/transpose.cpp @@ -10,7 +10,7 @@ #include #include "ngraph/runtime/opt_kernel/reshape.hpp" -#include "ngraph/shape.hpp" +#include "openvino/core/shape.hpp" namespace ov { namespace reference { diff --git a/src/core/reference/src/op/utils/fft_common.cpp b/src/core/reference/src/op/utils/fft_common.cpp index ec9c91a1211ba3..6bf248190edc4a 100644 --- a/src/core/reference/src/op/utils/fft_common.cpp +++ 
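The new slice check above derives the expected output extent per axis as ceil_div(upper - lower, stride). A sketch of the assumed ceil_div behaviour for non-negative operands, plus a worked example:

#include <cstddef>

// Assumed behaviour of util::ceil_div: round the quotient up instead of truncating.
std::size_t ceil_div_sketch(std::size_t x, std::size_t y) {
    return x == 0 ? 0 : 1 + (x - 1) / y;
}

// Example: an axis of length 10 sliced with lower = 1, upper = 8, stride = 3
// keeps indices {1, 4, 7}, so the expected extent is ceil_div_sketch(8 - 1, 3) == 3.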
b/src/core/reference/src/op/utils/fft_common.cpp @@ -10,12 +10,10 @@ #include #include -#include "ngraph/check.hpp" - namespace ov { namespace reference { namespace fft_common { -std::vector reverse_shape_of_emulated_complex_tensor(const ngraph::Shape& shape) { +std::vector reverse_shape_of_emulated_complex_tensor(const Shape& shape) { assert(shape.size() >= 2); std::vector reversed_shape(shape.begin(), shape.end() - 1); std::reverse(reversed_shape.begin(), reversed_shape.end()); diff --git a/src/core/reference/src/op/utils/nms_common.cpp b/src/core/reference/src/op/utils/nms_common.cpp index a8dfe16687ba99..427ef985fe8d4d 100644 --- a/src/core/reference/src/op/utils/nms_common.cpp +++ b/src/core/reference/src/op/utils/nms_common.cpp @@ -8,7 +8,7 @@ #include #include -#include "ngraph/check.hpp" +#include "openvino/core/except.hpp" namespace ov { namespace reference { @@ -16,11 +16,11 @@ namespace nms_common { void nms_common_postprocessing(void* prois, void* pscores, void* pselected_num, - const ngraph::element::Type& output_type, + const element::Type& output_type, const std::vector& selected_outputs, const std::vector& selected_indices, const std::vector& valid_outputs, - const ngraph::element::Type& selected_outputs_type) { + const element::Type& selected_outputs_type) { int64_t total_num = std::accumulate(valid_outputs.begin(), valid_outputs.end(), int64_t(0)); switch (selected_outputs_type) { @@ -41,11 +41,11 @@ void nms_common_postprocessing(void* prois, memcpy(ptr, selected_outputs.data(), total_num * sizeof(float) * 6); } break; default: - NGRAPH_UNREACHABLE("unsupported element type, should be [bf16, f16, f32]"); + OPENVINO_THROW("unsupported element type, should be [bf16, f16, f32]"); } if (pscores) { - if (output_type == ngraph::element::i64) { + if (output_type == element::i64) { int64_t* indices_ptr = static_cast(pscores); memcpy(indices_ptr, selected_indices.data(), total_num * sizeof(int64_t)); } else { @@ -57,7 +57,7 @@ void nms_common_postprocessing(void* prois, } if (pselected_num) { - if (output_type == ngraph::element::i64) { + if (output_type == element::i64) { int64_t* valid_outputs_ptr = static_cast(pselected_num); std::copy(valid_outputs.begin(), valid_outputs.end(), valid_outputs_ptr); } else { diff --git a/src/core/reference/src/op/utils/round_guard.cpp b/src/core/reference/src/op/utils/round_guard.cpp deleted file mode 100644 index 565fb2db598020..00000000000000 --- a/src/core/reference/src/op/utils/round_guard.cpp +++ /dev/null @@ -1,11 +0,0 @@ -#include "openvino/reference/round_guard.hpp" - -namespace ov { -RoundGuard::RoundGuard(int mode) : m_prev_round_mode{std::fegetround()} { - std::fesetround(mode); -} - -RoundGuard::~RoundGuard() { - std::fesetround(m_prev_round_mode); -} -} // namespace ov diff --git a/src/core/reference/src/op/utils/rounding_guard.cpp b/src/core/reference/src/op/utils/rounding_guard.cpp new file mode 100644 index 00000000000000..70b0ce5897eb13 --- /dev/null +++ b/src/core/reference/src/op/utils/rounding_guard.cpp @@ -0,0 +1,11 @@ +#include "openvino/reference/rounding_guard.hpp" + +namespace ov { +RoundingGuard::RoundingGuard(int mode) : m_prev_round_mode{std::fegetround()} { + std::fesetround(mode); +} + +RoundingGuard::~RoundingGuard() { + std::fesetround(m_prev_round_mode); +} +} // namespace ov diff --git a/src/core/reference/src/runtime/opt_kernel/reshape.cpp b/src/core/reference/src/runtime/opt_kernel/reshape.cpp index f1bfd265182c91..e0ca720845c3a0 100644 --- a/src/core/reference/src/runtime/opt_kernel/reshape.cpp +++ 
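The renamed RoundingGuard above is a small RAII helper around std::fegetround/std::fesetround. A usage sketch of the same idea with a local guard (illustrative only, not the library class):

#include <cfenv>
#include <cmath>

// Set the FP rounding mode for one scope and restore the previous mode on
// exit, so the change cannot leak into the caller.
float round_half_to_even(float v) {
    struct Guard {
        int prev = std::fegetround();            // saved before the mode changes
        explicit Guard(int mode) { std::fesetround(mode); }
        ~Guard() { std::fesetround(prev); }
    };
    Guard guard(FE_TONEAREST);
    return std::nearbyint(v);                    // honours the guarded rounding mode
}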
b/src/core/reference/src/runtime/opt_kernel/reshape.cpp @@ -7,7 +7,6 @@ #include #include -#include "ngraph/check.hpp" #include "openvino/core/parallel.hpp" #include "openvino/reference/reshape.hpp" diff --git a/src/core/reference/src/utils/coordinate_transform.cpp b/src/core/reference/src/utils/coordinate_transform.cpp index e62f61546529bf..cd97834e6d0245 100644 --- a/src/core/reference/src/utils/coordinate_transform.cpp +++ b/src/core/reference/src/utils/coordinate_transform.cpp @@ -11,17 +11,16 @@ #include #include -#include "ngraph/axis_vector.hpp" -#include "ngraph/coordinate_diff.hpp" -#include "ngraph/except.hpp" -#include "ngraph/shape.hpp" -#include "ngraph/strides.hpp" #include "ngraph/util.hpp" +#include "openvino/core/axis_vector.hpp" +#include "openvino/core/coordinate_diff.hpp" +#include "openvino/core/shape.hpp" +#include "openvino/core/strides.hpp" #include "openvino/reference/utils/coordinate_index.hpp" using namespace ov; -NGRAPH_SUPPRESS_DEPRECATED_START +OPENVINO_SUPPRESS_DEPRECATED_START namespace { Strides default_strides(size_t n_axes) { return Strides(n_axes, 1); @@ -42,6 +41,7 @@ Coordinate default_source_end_corner(const Shape& source_shape) { return source_shape; } } // namespace +OPENVINO_SUPPRESS_DEPRECATED_END CoordinateTransformBasic::CoordinateTransformBasic(const Shape& source_shape) : m_source_shape(source_shape) {} @@ -58,6 +58,7 @@ const CoordinateIterator& CoordinateTransformBasic::end() const noexcept { return CoordinateIterator::end(); } +OPENVINO_SUPPRESS_DEPRECATED_START CoordinateTransform::CoordinateTransform(const Shape& source_shape, const Coordinate& source_start_corner, const Coordinate& source_end_corner, @@ -123,11 +124,9 @@ CoordinateTransform::CoordinateTransform(const Shape& source_shape, std::vector padded_upper_bounds; for (size_t i = 0; i < m_n_axes; i++) { - NGRAPH_SUPPRESS_DEPRECATED_START std::ptrdiff_t padded_upper_bound = ngraph::subtract_or_zero(source_shape[i], size_t(1)) * target_dilation_strides[i] + 1 + target_padding_below[i] + target_padding_above[i]; - NGRAPH_SUPPRESS_DEPRECATED_END if (padded_upper_bound < 0) { std::stringstream ss; @@ -343,6 +342,7 @@ CoordinateIterator CoordinateTransform::begin() const noexcept { const CoordinateIterator& CoordinateTransform::end() const noexcept { return CoordinateIterator::end(); } +OPENVINO_SUPPRESS_DEPRECATED_END // The "is_end" parameter is true if we want the "end()" iterator. 
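For context on the iterators mentioned above: the traversal CoordinateIterator is expected to perform is a row-major walk over all coordinates of a shape. A rough stand-in sketch of that stepping logic:

#include <cstddef>
#include <vector>

// Advance the innermost axis first and carry into outer axes; returns false
// once iteration wraps past the last coordinate.
bool next_coordinate_sketch(std::vector<std::size_t>& coord, const std::vector<std::size_t>& shape) {
    for (std::size_t i = coord.size(); i-- > 0;) {
        if (++coord[i] < shape[i])
            return true;   // no carry needed
        coord[i] = 0;      // carry into the next (outer) axis
    }
    return false;
}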
CoordinateIterator::CoordinateIterator(const Shape& target_shape, bool is_end) diff --git a/src/core/src/op/interpolate.cpp b/src/core/src/op/interpolate.cpp index d541cc9ed373bf..47cfe4e169fc2b 100644 --- a/src/core/src/op/interpolate.cpp +++ b/src/core/src/op/interpolate.cpp @@ -174,30 +174,6 @@ std::vector get_scales_vector(const ov::TensorVector& args, } } // namespace -static void pad_input_data(const uint8_t* data_ptr, - uint8_t* padded_data_ptr, - size_t type_size, - const ov::Shape& input_shape, - const ov::Shape& padded_input_shape, - const std::vector& pads_begin) { - NGRAPH_SUPPRESS_DEPRECATED_START - ov::CoordinateTransform input_transform(input_shape); - ov::CoordinateTransform padded_transform(padded_input_shape); - - for (const ngraph::Coordinate& input_coord : input_transform) { - auto padded_coord = input_coord; - size_t i = 0; - for (size_t pad : pads_begin) { - padded_coord[i] += pad; - ++i; - } - uint8_t* dst_ptr = padded_data_ptr + type_size * padded_transform.index(padded_coord); - const uint8_t* src_ptr = data_ptr + type_size * input_transform.index(input_coord); - memcpy(dst_ptr, src_ptr, type_size); - } - NGRAPH_SUPPRESS_DEPRECATED_END -} - bool ov::op::v4::Interpolate::evaluate_interpolate(TensorVector& outputs, const TensorVector& inputs) const { auto input_shapes = std::vector(); const auto inputs_num = inputs.size(); @@ -229,7 +205,12 @@ bool ov::op::v4::Interpolate::evaluate_interpolate(TensorVector& outputs, const auto* data_ptr = static_cast(inputs[data_port].data()); auto* padded_data_ptr = padded_input_data.data(); - pad_input_data(data_ptr, padded_data_ptr, type_size, inputs[data_port].get_shape(), padded_input_shape, pads_begin); + reference::pad_input_data(data_ptr, + padded_data_ptr, + type_size, + inputs[data_port].get_shape(), + padded_input_shape, + pads_begin); switch (input_et) { case element::Type_t::f32: diff --git a/src/core/src/op/topk.cpp b/src/core/src/op/topk.cpp index 36ba74a7977cd0..485dc4e91fea5a 100644 --- a/src/core/src/op/topk.cpp +++ b/src/core/src/op/topk.cpp @@ -8,26 +8,25 @@ #include #include "itt.hpp" -#include "ngraph/attribute_visitor.hpp" -#include "ngraph/axis_vector.hpp" -#include "ngraph/op/constant.hpp" -#include "ngraph/op/util/op_types.hpp" -#include "ngraph/runtime/host_tensor.hpp" -#include "ngraph/shape.hpp" -#include "ngraph/validation_util.hpp" +#include "openvino/core/attribute_visitor.hpp" +#include "openvino/core/axis_vector.hpp" #include "openvino/core/dimension_tracker.hpp" +#include "openvino/core/shape.hpp" +#include "openvino/core/validation_util.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/util/op_types.hpp" #include "openvino/reference/topk.hpp" using namespace std; -using namespace ngraph; +namespace ov { OPENVINO_SUPPRESS_DEPRECATED_START namespace topk { namespace { template -inline bool evaluate_execute(const HostTensorPtr& arg0, - const HostTensorPtr& out_indices, - const HostTensorPtr& out_values, +inline bool evaluate_execute(const ngraph::HostTensorPtr& arg0, + const ngraph::HostTensorPtr& out_indices, + const ngraph::HostTensorPtr& out_values, const ov::Shape out_shape, const size_t axis, const size_t k, @@ -61,9 +60,9 @@ inline bool evaluate_execute(const HostTensorPtr& arg0, } break template -bool evaluate(const HostTensorPtr& arg, - const HostTensorPtr& out_indices, - const HostTensorPtr& out_values, +bool evaluate(const ngraph::HostTensorPtr& arg, + const ngraph::HostTensorPtr& out_indices, + const ngraph::HostTensorPtr& out_values, const ov::Shape out_shape, const size_t 
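The static pad_input_data helper deleted above is replaced by a call into the reference library. Its job is a coordinate-shifted copy: every source element lands at the same coordinate offset by pads_begin, leaving the padded border untouched. A simplified one-dimensional sketch of that copy:

#include <cstddef>
#include <cstdint>
#include <cstring>

// 1-D illustration only; the real helper walks full N-D coordinates.
void pad_1d_sketch(const uint8_t* src, uint8_t* dst, std::size_t type_size,
                   std::size_t len, std::size_t pad_begin) {
    for (std::size_t i = 0; i < len; ++i)
        std::memcpy(dst + (i + pad_begin) * type_size, src + i * type_size, type_size);
}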
axis, const size_t k, @@ -81,9 +80,9 @@ bool evaluate(const HostTensorPtr& arg, return rc; } -bool evaluate_topk(const HostTensorPtr& arg, - const HostTensorPtr& out_indices, - const HostTensorPtr& out_values, +bool evaluate_topk(const ngraph::HostTensorPtr& arg, + const ngraph::HostTensorPtr& out_indices, + const ngraph::HostTensorPtr& out_values, const ov::Shape out_shape, const size_t axis, const size_t k, @@ -185,12 +184,12 @@ bool op::v1::TopK::has_evaluate() const { OV_OP_SCOPE(v1_TopK_has_evaluate); switch (get_input_element_type(0)) { - case ngraph::element::i32: - case ngraph::element::i64: - case ngraph::element::u32: - case ngraph::element::u64: - case ngraph::element::f16: - case ngraph::element::f32: + case element::i32: + case element::i64: + case element::u32: + case element::u64: + case element::f16: + case element::f32: break; default: return false; @@ -198,23 +197,23 @@ bool op::v1::TopK::has_evaluate() const { if (op::util::is_constant(input_value(1).get_node())) { switch (get_input_element_type(1)) { - case ngraph::element::i8: - case ngraph::element::i32: - case ngraph::element::i64: + case element::i8: + case element::i32: + case element::i64: break; default: return false; } } else { switch (get_input_element_type(1)) { - case ngraph::element::i8: - case ngraph::element::i16: - case ngraph::element::i32: - case ngraph::element::i64: - case ngraph::element::u8: - case ngraph::element::u16: - case ngraph::element::u32: - case ngraph::element::u64: + case element::i8: + case element::i16: + case element::i32: + case element::i64: + case element::u8: + case element::u16: + case element::u32: + case element::u64: break; default: return false; @@ -258,12 +257,12 @@ bool op::v3::TopK::has_evaluate() const { OV_OP_SCOPE(v3_TopK_has_evaluate); switch (get_input_element_type(0)) { - case ngraph::element::i32: - case ngraph::element::i64: - case ngraph::element::u32: - case ngraph::element::u64: - case ngraph::element::f16: - case ngraph::element::f32: + case element::i32: + case element::i64: + case element::u32: + case element::u64: + case element::f16: + case element::f32: break; default: return false; @@ -271,23 +270,23 @@ bool op::v3::TopK::has_evaluate() const { if (op::util::is_constant(input_value(1).get_node())) { switch (get_input_element_type(1)) { - case ngraph::element::i8: - case ngraph::element::i32: - case ngraph::element::i64: + case element::i8: + case element::i32: + case element::i64: break; default: return false; } } else { switch (get_input_element_type(1)) { - case ngraph::element::i8: - case ngraph::element::i16: - case ngraph::element::i32: - case ngraph::element::i64: - case ngraph::element::u8: - case ngraph::element::u16: - case ngraph::element::u32: - case ngraph::element::u64: + case element::i8: + case element::i16: + case element::i32: + case element::i64: + case element::u8: + case element::u16: + case element::u32: + case element::u64: break; default: return false; @@ -360,15 +359,16 @@ bool ov::op::v11::TopK::has_evaluate() const { OV_OP_SCOPE(v11_TopK_has_evaluate); switch (get_input_element_type(0)) { - case ngraph::element::i32: - case ngraph::element::i64: - case ngraph::element::u32: - case ngraph::element::u64: - case ngraph::element::f16: - case ngraph::element::f32: + case element::i32: + case element::i64: + case element::u32: + case element::u64: + case element::f16: + case element::f32: break; default: return false; } return true; } +} // namespace ov diff --git a/src/plugins/template/backend/ops/interpolate.cpp 
b/src/plugins/template/backend/ops/interpolate.cpp index 1d1cc4caced291..180488da0ffb9d 100644 --- a/src/plugins/template/backend/ops/interpolate.cpp +++ b/src/plugins/template/backend/ops/interpolate.cpp @@ -170,30 +170,6 @@ std::vector get_scales_vector(const ov::TensorVector& args, return scales; } -static void pad_input_data(const uint8_t* data_ptr, - uint8_t* padded_data_ptr, - size_t type_size, - const ov::Shape& input_shape, - const ov::Shape& padded_input_shape, - const std::vector& pads_begin) { - OPENVINO_SUPPRESS_DEPRECATED_START - ov::CoordinateTransform input_transform(input_shape); - ov::CoordinateTransform padded_transform(padded_input_shape); - - for (const ov::Coordinate& input_coord : input_transform) { - auto padded_coord = input_coord; - size_t i = 0; - for (size_t pad : pads_begin) { - padded_coord[i] += pad; - ++i; - } - uint8_t* dst_ptr = padded_data_ptr + type_size * padded_transform.index(padded_coord); - const uint8_t* src_ptr = data_ptr + type_size * input_transform.index(input_coord); - memcpy(dst_ptr, src_ptr, type_size); - } - OPENVINO_SUPPRESS_DEPRECATED_END -} - namespace v11 { bool evaluate_interpolate(const std::shared_ptr& op, ov::TensorVector& outputs, @@ -236,12 +212,12 @@ bool evaluate_interpolate(const std::shared_ptr& op, const uint8_t* data_ptr = static_cast(inputs[0].data()); uint8_t* padded_data_ptr = padded_input_data.data(); - pad_input_data(data_ptr, - padded_data_ptr, - type_size, - input_shape.to_shape(), - padded_input_shape, - m_attrs.pads_begin); + reference::pad_input_data(data_ptr, + padded_data_ptr, + type_size, + input_shape.to_shape(), + padded_input_shape, + m_attrs.pads_begin); switch (input_et) { case element::f32: diff --git a/src/plugins/template/tests/functional/op_reference/avg_pool.cpp b/src/plugins/template/tests/functional/op_reference/avg_pool.cpp index 7bf582184f317f..fdd5205f99337e 100644 --- a/src/plugins/template/tests/functional/op_reference/avg_pool.cpp +++ b/src/plugins/template/tests/functional/op_reference/avg_pool.cpp @@ -60,7 +60,7 @@ struct AvgPoolParams { class ReferenceAvgPoolLayerTest : public testing::TestWithParam, public CommonReferenceTest { public: void SetUp() override { - auto params = GetParam(); + const auto& params = GetParam(); function = CreateFunction(params.m_input_shape, params.m_input_type, params.m_strides, @@ -75,11 +75,11 @@ class ReferenceAvgPoolLayerTest : public testing::TestWithParam, } static std::string getTestCaseName(const testing::TestParamInfo& obj) { - auto params = obj.param; + const auto& params = obj.param; std::ostringstream result; result << "iShape=" << params.m_input_shape << "_"; result << "iType=" << params.m_input_type << "_"; - result << "iShape=" << params.m_output_shape << "_"; + result << "oShape=" << params.m_output_shape << "_"; result << "oType=" << params.m_output_type << "_"; result << "excludePad=" << params.m_exclude_pad << "_"; result << "roundingType=" << params.m_rounding_type << "_"; @@ -126,6 +126,32 @@ std::vector generateParamsForAvgPool() { using T = typename element_type_traits::value_type; std::vector params{ + AvgPoolParams(ov::Shape{1, 1, 5}, + ov::Shape{1, 1, 5}, + IN_ET, + IN_ET, + std::vector{1, 2, 3, 4, 5}, + std::vector{1.5, 2.5, 3.5, 4.5, 5}, + Strides{1}, + Shape{0}, + Shape{1}, + Shape{2}, + true, + op::RoundingType::FLOOR, + op::PadType::EXPLICIT), + AvgPoolParams(ov::Shape{1, 1, 8}, + ov::Shape{1, 1, 4}, + IN_ET, + IN_ET, + std::vector{1, 2, 3, 4, 5, 6, 7, 8}, + std::vector{2, 4, 6, 7.5}, + Strides{2}, + Shape{0}, + Shape{0}, + Shape{3}, 
+ false, + op::RoundingType::CEIL, + op::PadType::EXPLICIT), AvgPoolParams(ov::Shape{1, 1, 3, 3}, ov::Shape{1, 1, 2, 2}, IN_ET, From 876237be916d623832ced12cc6956a26c202b8c7 Mon Sep 17 00:00:00 2001 From: Oleksii Khovan Date: Thu, 5 Oct 2023 08:36:58 +0200 Subject: [PATCH 070/257] [GPU] ScatterElementsUpdate-12 (#19254) * GPU primitive and kernel changes to support ScatterElementsUpdate-12 * Add single-layer test for ScatterElementsUpdate-12 * Unit tests: - fix formats test - add separate data type for indices; - add tests for Reduction. --- .../intel_gpu/plugin/primitives_list.hpp | 1 + .../primitives/scatter_elements_update.hpp | 23 +- .../impls/ocl/scatter_elements_update.cpp | 22 ++ .../cl_kernels/scatter_elements_update_ref.cl | 73 +++++ .../src/kernel_selector/common_types.h | 11 + .../scatter_elements_update_kernel_ref.cpp | 5 + .../scatter_elements_update_kernel_ref.h | 6 +- .../plugin/ops/scatter_elements_update.cpp | 14 +- .../scatter_elements_update.cpp | 30 ++ .../scatter_elements_update_gpu_test.cpp | 306 +++++++++++++++--- .../scatter_elements_update.hpp | 4 + .../single_layer/scatter_elements_update.hpp | 17 + .../single_layer/scatter_elements_update.cpp | 55 ++++ 13 files changed, 523 insertions(+), 44 deletions(-) diff --git a/src/plugins/intel_gpu/include/intel_gpu/plugin/primitives_list.hpp b/src/plugins/intel_gpu/include/intel_gpu/plugin/primitives_list.hpp index 7dd03dc685e02a..1ae7ef4f76618e 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/plugin/primitives_list.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/plugin/primitives_list.hpp @@ -258,6 +258,7 @@ REGISTER_FACTORY(v11, TopK); // ------------------------------ Supported v12 ops ----------------------------- // REGISTER_FACTORY(v12, Pad); +REGISTER_FACTORY(v12, ScatterElementsUpdate); // --------------------------- Supported internal ops --------------------------- // REGISTER_FACTORY(internal, NonMaxSuppressionIEInternal); diff --git a/src/plugins/intel_gpu/include/intel_gpu/primitives/scatter_elements_update.hpp b/src/plugins/intel_gpu/include/intel_gpu/primitives/scatter_elements_update.hpp index 4b823a165c0749..4ad578a841a214 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/primitives/scatter_elements_update.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/primitives/scatter_elements_update.hpp @@ -4,9 +4,12 @@ #pragma once #include "primitive.hpp" +#include "openvino/op/scatter_elements_update.hpp" namespace cldnn { +using ScatterElementsUpdateOp = ov::op::v12::ScatterElementsUpdate; + /// @brief /// @details struct scatter_elements_update : public primitive_base { @@ -20,20 +23,29 @@ struct scatter_elements_update : public primitive_base /// @param idx Input indexes primitive id. /// @param idupd Input updates primitive id. /// @param axis Gathering axis. + /// @param mode Reduction mode. 
scatter_elements_update(const primitive_id& id, const input_info& data, const input_info& idx, const input_info& idupd, const int64_t axis, + const ScatterElementsUpdateOp::Reduction mode = ScatterElementsUpdateOp::Reduction::NONE, + const bool use_init_val = true, const padding& output_padding = padding()) - : primitive_base(id, {data, idx, idupd}, {output_padding}), axis(axis) {} + : primitive_base(id, {data, idx, idupd}, {output_padding}), axis(axis), mode(mode), use_init_val(use_init_val) {} /// @brief ScatterElementsUpdate axis - int64_t axis = 0; + int64_t axis{0}; + /// @brief Reduction mode + ScatterElementsUpdateOp::Reduction mode{ScatterElementsUpdateOp::Reduction::NONE}; + /// @brief Use initial value for reduction + bool use_init_val{true}; size_t hash() const override { size_t seed = primitive::hash(); seed = hash_combine(seed, axis); + seed = hash_combine(seed, mode); + seed = hash_combine(seed, use_init_val); return seed; } @@ -43,17 +55,22 @@ struct scatter_elements_update : public primitive_base auto rhs_casted = downcast(rhs); - return axis == rhs_casted.axis; + return axis == rhs_casted.axis && mode == rhs_casted.mode + && use_init_val == rhs_casted.use_init_val; } void save(BinaryOutputBuffer& ob) const override { primitive_base::save(ob); ob << axis; + ob << make_data(&mode, sizeof(ScatterElementsUpdateOp::Reduction)); + ob << use_init_val; } void load(BinaryInputBuffer& ib) override { primitive_base::load(ib); ib >> axis; + ib >> make_data(&mode, sizeof(ScatterElementsUpdateOp::Reduction)); + ib >> use_init_val; } }; } // namespace cldnn diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/scatter_elements_update.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/scatter_elements_update.cpp index 251d494034d3b1..daf3b4953014b9 100644 --- a/src/plugins/intel_gpu/src/graph/impls/ocl/scatter_elements_update.cpp +++ b/src/plugins/intel_gpu/src/graph/impls/ocl/scatter_elements_update.cpp @@ -35,6 +35,26 @@ kernel_selector::scatter_update_axis convert_axis(int64_t axis, size_t rank) { } return kernel_selector::scatter_update_axis::X; } + +kernel_selector::ScatterUpdateReduction convert_reduction_mode(const ScatterElementsUpdateOp::Reduction mode) { + switch (mode) { + case ScatterElementsUpdateOp::Reduction::NONE: + return kernel_selector::ScatterUpdateReduction::NONE; + case ScatterElementsUpdateOp::Reduction::SUM: + return kernel_selector::ScatterUpdateReduction::SUM; + case ScatterElementsUpdateOp::Reduction::PROD: + return kernel_selector::ScatterUpdateReduction::PROD; + case ScatterElementsUpdateOp::Reduction::MIN: + return kernel_selector::ScatterUpdateReduction::MIN; + case ScatterElementsUpdateOp::Reduction::MAX: + return kernel_selector::ScatterUpdateReduction::MAX; + case ScatterElementsUpdateOp::Reduction::MEAN: + return kernel_selector::ScatterUpdateReduction::MEAN; + default: + OPENVINO_ASSERT(false, "[GPU] Invalid ScatterElementsUpdate::Reduction enum value"); + } + return kernel_selector::ScatterUpdateReduction::NONE; +} } // namespace struct scatter_elements_update_impl : typed_primitive_impl_ocl { @@ -55,6 +75,8 @@ struct scatter_elements_update_impl : typed_primitive_impl_ocl(impl_param.get_program()); params.axis = convert_axis(primitive->axis, impl_param.get_input_layout(0).get_rank()); + params.mode = convert_reduction_mode(primitive->mode); + params.use_init_val = primitive->use_init_val; params.inputs.push_back(convert_data_tensor(impl_param.get_input_layout(1))); params.inputs.push_back(convert_data_tensor(impl_param.get_input_layout(2))); diff --git 
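To summarize the Reduction attribute added to the primitive above: with NONE the update simply overwrites the destination element (the v3 behaviour); with any other mode the destination and the update are combined, and use_init_val decides whether the original destination value takes part. An illustrative sketch for a single update per destination element, mirroring the OpenCL kernel further below (names are illustrative, not the plugin API):

#include <algorithm>
#include <limits>

enum class Reduction { NONE, SUM, PROD, MIN, MAX, MEAN };

float fold_update_sketch(Reduction mode, float dst, float upd, bool use_init_val) {
    if (mode == Reduction::NONE)
        return upd;                                    // plain overwrite
    if (!use_init_val) {                               // ignore the original value
        switch (mode) {
            case Reduction::PROD: dst = 1.0f; break;
            case Reduction::MIN:  dst = std::numeric_limits<float>::max(); break;
            case Reduction::MAX:  dst = std::numeric_limits<float>::lowest(); break;
            default:              dst = 0.0f; break;   // SUM / MEAN
        }
    }
    switch (mode) {
        case Reduction::SUM:  return dst + upd;
        case Reduction::PROD: return dst * upd;
        case Reduction::MIN:  return std::min(dst, upd);
        case Reduction::MAX:  return std::max(dst, upd);
        case Reduction::MEAN: return (dst + upd) / (use_init_val ? 2.0f : 1.0f);
        default:              return upd;
    }
}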
a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/scatter_elements_update_ref.cl b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/scatter_elements_update_ref.cl index 4486bcfb006748..1a239e1b5db58b 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/scatter_elements_update_ref.cl +++ b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/scatter_elements_update_ref.cl @@ -22,6 +22,47 @@ #error "OUTPUT_DIMS is supposed to be same as INPUT2_DIMS" #endif +#ifdef REDUCE_MODE + #define SUM 1 + #define PROD 2 + #define MIN 3 + #define MAX 4 + #define MEAN 5 + + #if USE_INIT_VAL == 0 + #if REDUCE_MODE == SUM + #define REDUCTION_NEUTRAL_VALUE INPUT0_VAL_ZERO + #elif REDUCE_MODE == PROD + #define REDUCTION_NEUTRAL_VALUE INPUT0_VAL_ONE + #elif REDUCE_MODE == MIN + #define REDUCTION_NEUTRAL_VALUE INPUT0_VAL_MAX + #elif REDUCE_MODE == MAX + #define REDUCTION_NEUTRAL_VALUE INPUT0_VAL_MIN + #elif REDUCE_MODE == MEAN + #define REDUCTION_NEUTRAL_VALUE INPUT0_VAL_ZERO + #else + #error "Invalid REDUCE_MODE value" + #endif + #endif + + inline INPUT2_TYPE FUNC(reduce)(INPUT2_TYPE a, INPUT2_TYPE b) + { + #if REDUCE_MODE == SUM + return a + b; + #elif REDUCE_MODE == PROD + return a * b; + #elif REDUCE_MODE == MIN + return min(a, b); + #elif REDUCE_MODE == MAX + return max(a, b); + #elif REDUCE_MODE == MEAN + return (a + b) / (INPUT2_TYPE)(1 + USE_INIT_VAL); + #else + #error "Invalid REDUCE_MODE value" + #endif + } +#endif + KERNEL(scatter_elements_update_ref)(const __global INPUT0_TYPE* data, const __global INPUT1_TYPE* indices, const __global INPUT2_TYPE* updates, @@ -92,38 +133,53 @@ KERNEL(scatter_elements_update_ref)(const __global INPUT0_TYPE* data, #if OUTPUT_DIMS == 4 #if AXIS_VALUE == 0 + if (index < 0) { index += INPUT0_BATCH_NUM; } const uint x = idx_x; const uint y = idx_y; const uint f = idx_f; const uint b = index; #elif AXIS_VALUE == 1 + if (index < 0) { index += INPUT0_FEATURE_NUM; } const uint x = idx_x; const uint y = idx_y; const uint f = index; const uint b = idx_b; #elif AXIS_VALUE == 2 + if (index < 0) { index += INPUT0_SIZE_Y; } const uint x = idx_x; const uint y = index; const uint f = idx_f; const uint b = idx_b; #elif AXIS_VALUE == 3 + if (index < 0) { index += INPUT0_SIZE_X; } const uint x = index; const uint y = idx_y; const uint f = idx_f; const uint b = idx_b; #endif // AXIS_VALUE #elif OUTPUT_DIMS == 5 #if AXIS_VALUE == 0 + if (index < 0) { index += INPUT0_BATCH_NUM; } const uint x = idx_x; const uint y = idx_y; const uint z = idx_z; const uint f = idx_f; const uint b = index; #elif AXIS_VALUE == 1 + if (index < 0) { index += INPUT0_FEATURE_NUM; } const uint x = idx_x; const uint y = idx_y; const uint z = idx_z; const uint f = index; const uint b = idx_b; #elif AXIS_VALUE == 2 + if (index < 0) { index += INPUT0_SIZE_Z; } const uint x = idx_x; const uint y = idx_y; const uint z = index; const uint f = idx_f; const uint b = idx_b; #elif AXIS_VALUE == 3 + if (index < 0) { index += INPUT0_SIZE_Y; } const uint x = idx_x; const uint y = index; const uint z = idx_z; const uint f = idx_f; const uint b = idx_b; #elif AXIS_VALUE == 4 + if (index < 0) { index += INPUT0_SIZE_X; } const uint x = index; const uint y = idx_y; const uint z = idx_z; const uint f = idx_f; const uint b = idx_b; #endif // AXIS_VALUE #elif OUTPUT_DIMS == 6 #if AXIS_VALUE == 0 + if (index < 0) { index += INPUT0_BATCH_NUM; } const uint x = idx_x; const uint y = idx_y; const uint z = idx_z; const uint w = idx_w; const uint f = idx_f; const uint b = index; #elif AXIS_VALUE == 1 + if (index < 0) { 
index += INPUT0_FEATURE_NUM; } const uint x = idx_x; const uint y = idx_y; const uint z = idx_z; const uint w = idx_w; const uint f = index; const uint b = idx_b; #elif AXIS_VALUE == 2 + if (index < 0) { index += INPUT0_SIZE_W; } const uint x = idx_x; const uint y = idx_y; const uint z = idx_z; const uint w = index; const uint f = idx_f; const uint b = idx_b; #elif AXIS_VALUE == 3 + if (index < 0) { index += INPUT0_SIZE_Z; } const uint x = idx_x; const uint y = idx_y; const uint z = index; const uint w = idx_w; const uint f = idx_f; const uint b = idx_b; #elif AXIS_VALUE == 4 + if (index < 0) { index += INPUT0_SIZE_Y; } const uint x = idx_x; const uint y = index; const uint z = idx_z; const uint w = idx_w; const uint f = idx_f; const uint b = idx_b; #elif AXIS_VALUE == 5 + if (index < 0) { index += INPUT0_SIZE_X; } const uint x = index; const uint y = idx_y; const uint z = idx_z; const uint w = idx_w; const uint f = idx_f; const uint b = idx_b; #endif // AXIS_VALUE #endif @@ -131,6 +187,14 @@ KERNEL(scatter_elements_update_ref)(const __global INPUT0_TYPE* data, const uint updates_idx = GET_UPDATES_INDEX(IDX_ORDER); INPUT2_TYPE val = updates[(int)updates_idx]; + + #ifdef REDUCE_MODE + #if USE_INIT_VAL == 0 + output[output_idx] = REDUCTION_NEUTRAL_VALUE; + #endif + val = FUNC_CALL(reduce)(output[output_idx], val); + #endif + #if HAS_FUSED_OPS FUSED_OPS_SECOND_KERNEL; output[output_idx] = TO_OUTPUT_TYPE(FUSED_OPS_RESULT_SECOND_KERNEL); @@ -140,6 +204,15 @@ KERNEL(scatter_elements_update_ref)(const __global INPUT0_TYPE* data, #endif } +#ifdef REDUCE_MODE + #undef SUM + #undef PROD + #undef MIN + #undef MAX + #undef MEAN + #undef REDUCTION_NEUTRAL_VALUE +#endif + #undef GET_INDICES_INDEX #undef GET_UPDATES_INDEX #undef GET_OUTPUT_INDEX diff --git a/src/plugins/intel_gpu/src/kernel_selector/common_types.h b/src/plugins/intel_gpu/src/kernel_selector/common_types.h index 464877342ea6a0..148b6c10e39183 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/common_types.h +++ b/src/plugins/intel_gpu/src/kernel_selector/common_types.h @@ -491,6 +491,17 @@ enum class ScatterUpdateAxis { FEATURE, BATCH, }; +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// +// ScatterUpdateReduction +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// +enum class ScatterUpdateReduction { + NONE = 0, + SUM, + PROD, + MIN, + MAX, + MEAN +}; //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // ReduceMode diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/scatter_update/scatter_elements_update_kernel_ref.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/scatter_update/scatter_elements_update_kernel_ref.cpp index e7a5a9ca69abff..aa2df575e29c6b 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/scatter_update/scatter_elements_update_kernel_ref.cpp +++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/scatter_update/scatter_elements_update_kernel_ref.cpp @@ -133,6 +133,11 @@ JitConstants ScatterElementsUpdateKernelRef::GetJitConstants(const scatter_eleme jit.AddConstant(MakeJitConstant("AXIS_VALUE", GetScatterElementsUpdateChannelIndex(params))); + if (params.mode != ScatterUpdateReduction::NONE) { + jit.AddConstant(MakeJitConstant("REDUCE_MODE", static_cast(params.mode))); + jit.AddConstant(MakeJitConstant("USE_INIT_VAL", params.use_init_val)); + } + if (!params.fused_ops.empty()) { 
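The repeated "if (index < 0) { index += ... }" insertions in the kernel above implement Python-style negative indexing along the scatter axis. The equivalent host-side rule, as a one-line sketch:

#include <cstdint>

// e.g. index -1 on an axis of size 8 addresses element 7.
int64_t normalize_index_sketch(int64_t index, int64_t axis_size) {
    return index < 0 ? index + axis_size : index;
}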
FusedOpsConfiguration conf1 = { "_FIRST_KERNEL", GetDefaultOrder(params.outputs[0].GetDims().size()), "val", params.inputs[0].GetDType() }; FusedOpsConfiguration conf2 = { "_SECOND_KERNEL", GetDefaultOrder(params.outputs[0].GetDims().size()), "val", params.inputs[0].GetDType() }; diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/scatter_update/scatter_elements_update_kernel_ref.h b/src/plugins/intel_gpu/src/kernel_selector/kernels/scatter_update/scatter_elements_update_kernel_ref.h index 23581c4b31ad0c..74058a63a88067 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/scatter_update/scatter_elements_update_kernel_ref.h +++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/scatter_update/scatter_elements_update_kernel_ref.h @@ -11,9 +11,11 @@ namespace kernel_selector { // scatter_elements_update_params //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// struct scatter_elements_update_params : public base_params { - scatter_elements_update_params() : base_params(KernelType::SCATTER_ELEMENTS_UPDATE), axis(ScatterUpdateAxis::BATCH) {} + scatter_elements_update_params() : base_params(KernelType::SCATTER_ELEMENTS_UPDATE) {} - ScatterUpdateAxis axis; + ScatterUpdateAxis axis{ScatterUpdateAxis::BATCH}; + ScatterUpdateReduction mode{ScatterUpdateReduction::NONE}; + bool use_init_val{true}; }; //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/src/plugins/intel_gpu/src/plugin/ops/scatter_elements_update.cpp b/src/plugins/intel_gpu/src/plugin/ops/scatter_elements_update.cpp index 75ea1491675bff..46fca3439360f8 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/scatter_elements_update.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/scatter_elements_update.cpp @@ -13,7 +13,7 @@ namespace ov { namespace intel_gpu { -static void CreateScatterElementsUpdateOp(ProgramBuilder& p, const std::shared_ptr& op) { +static void CreateScatterElementsUpdateOp(ProgramBuilder& p, const std::shared_ptr& op) { validate_inputs_count(op, {4}); auto inputs = p.GetInputInfo(op); std::string layerName = layer_type_name_ID(op); @@ -26,16 +26,26 @@ static void CreateScatterElementsUpdateOp(ProgramBuilder& p, const std::shared_p int64_t axis = ov::normalize_axis(op.get(), axes_constant->cast_vector()[0], op->get_input_partial_shape(0).rank()); OPENVINO_SUPPRESS_DEPRECATED_END + auto mode = cldnn::ScatterElementsUpdateOp::Reduction::NONE; + auto use_init_val = true; + if (const auto op_v12 = std::dynamic_pointer_cast(op)) { + mode = op_v12->get_reduction(); + use_init_val = op_v12->get_use_init_val(); + } + auto primitive = cldnn::scatter_elements_update(layerName, inputs[0], inputs[1], inputs[2], - axis); + axis, + mode, + use_init_val); p.add_primitive(*op, primitive); } REGISTER_FACTORY_IMPL(v3, ScatterElementsUpdate); +REGISTER_FACTORY_IMPL(v12, ScatterElementsUpdate); } // namespace intel_gpu } // namespace ov diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/scatter_elements_update.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/scatter_elements_update.cpp index 0e4b1bfdf75b71..952cb4c82dc0f6 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/scatter_elements_update.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/scatter_elements_update.cpp @@ -44,4 +44,34 @@ 
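For reference, constructing the v12 operation that the plugin code above consumes looks roughly as follows, assuming the standard ov::op::v12 constructor takes (data, indices, updates, axis, reduction, use_init_val), which is what the get_reduction()/get_use_init_val() calls above suggest:

#include <memory>

#include "openvino/op/scatter_elements_update.hpp"

// Hypothetical helper; the inputs are whatever nodes produce data/indices/updates/axis.
std::shared_ptr<ov::op::v12::ScatterElementsUpdate> make_seu_sketch(const ov::Output<ov::Node>& data,
                                                                    const ov::Output<ov::Node>& indices,
                                                                    const ov::Output<ov::Node>& updates,
                                                                    const ov::Output<ov::Node>& axis) {
    using SEU = ov::op::v12::ScatterElementsUpdate;
    return std::make_shared<SEU>(data, indices, updates, axis, SEU::Reduction::SUM, /*use_init_val=*/false);
}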
INSTANTIATE_TEST_SUITE_P( ::testing::ValuesIn(idxPrecisions), ::testing::Values(ov::test::utils::DEVICE_GPU)), ScatterElementsUpdateLayerTest::getTestCaseName); + + +const std::vector reduceModes{ + // Reduction::NONE is omitted intentionally, because v12 with Reduction::NONE is converted to v3, + // and v3 is already tested by smoke_ScatterEltsUpdate testsuite. It doesn't make sense to test the same code twice. + // Don't forget to add Reduction::NONE when/if ConvertScatterElementsUpdate12ToScatterElementsUpdate3 + // transformation will be disabled (in common transforamtions pipeline or for GPU only). + ov::op::v12::ScatterElementsUpdate::Reduction::SUM, + ov::op::v12::ScatterElementsUpdate::Reduction::PROD, + ov::op::v12::ScatterElementsUpdate::Reduction::MIN, + ov::op::v12::ScatterElementsUpdate::Reduction::MAX, + ov::op::v12::ScatterElementsUpdate::Reduction::MEAN +}; + +const std::vector> idxWithNegativeValues = { + {1, 0, 4, 6, 2, 3, 7, 5}, + {-1, 0, -4, -6, -2, -3, -7, -5}, +}; + +INSTANTIATE_TEST_SUITE_P( + smoke_ScatterEltsUpdate12, + ScatterElementsUpdate12LayerTest, + ::testing::Combine(::testing::ValuesIn(ScatterElementsUpdateLayerTest::combineShapes(axesShapeInShape)), + ::testing::ValuesIn(idxWithNegativeValues), + ::testing::ValuesIn(reduceModes), + ::testing::ValuesIn({true, false}), + ::testing::Values(inputPrecisions[0]), + ::testing::Values(idxPrecisions[0]), + ::testing::Values(ov::test::utils::DEVICE_GPU)), + ScatterElementsUpdate12LayerTest::getTestCaseName); } // namespace diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/scatter_elements_update_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/scatter_elements_update_gpu_test.cpp index a66202b116652c..e2e9f1bb144833 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/scatter_elements_update_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/scatter_elements_update_gpu_test.cpp @@ -15,6 +15,26 @@ using namespace cldnn; using namespace ::tests; +#ifdef OPENVINO_STATIC_LIBRARY +namespace ov { +namespace reference { +using Reduction = ov::op::v12::ScatterElementsUpdate::Reduction; + +template +void scatter_elem_update(const DataType *input_data, + const IndicesType *indices, + const DataType *updates, + const int64_t axis, + DataType *out_buf, + const Shape &data_shape, + const Shape &indices_shape, + const Reduction reduction_type = Reduction::NONE, + const bool use_init_val = true); +} +} +#else +#include "openvino/reference/scatter_elements_update.hpp" +#endif template void test_d2411_axisF(bool is_caching_test) { @@ -97,26 +117,34 @@ TEST(scatter_elements_update_gpu_fp16, d2411_axisF) { } namespace { -template +template struct ScatterElementsUpdateParams { int64_t axis; tensor data_tensor; std::vector data; tensor indices_tensor; - std::vector indices; + std::vector indices; std::vector updates; std::vector expected; }; -template +template using ScatterElementsUpdateParamsWithFormat = std::tuple< - ScatterElementsUpdateParams, + ScatterElementsUpdateParams, format::type, // source (plain) layout format::type, // target (blocked) data layout format::type, // target (blocked) indices layout format::type // target (blocked) updates layout >; +template +using ScatterElementsUpdateReduceParamsWithFormat = std::tuple< + ScatterElementsUpdateParams, + ScatterElementsUpdateOp::Reduction, + bool, // use_init_value + format::type +>; + const std::vector formats2D{ format::bfyx, format::b_fs_yx_fsv16, @@ -143,14 +171,14 @@ std::vector getValues(const std::vector &values) { return result; } 
-template -std::vector> generateScatterElementsUpdateParams2D() { - const std::vector> result = { +template +std::vector> generateScatterElementsUpdateParams2D() { + const std::vector > result = { { 1, tensor{2, 4, 1, 1}, getValues({ 0, 1, 2, 3, 4, 5, 6, 7 }), tensor{2, 2, 1, 1}, - getValues({ 0, 1, 2, 3 }), + getValues({ 0, 1, 2, 3 }), getValues({ -10, -11, -12, -13 }), getValues({ -10, -11, 2, 3, 4, 5, -12, -13 }) }, @@ -158,7 +186,7 @@ std::vector> generateScatterElementsUpdateParams2 tensor{2, 1, 2, 2}, getValues({ 0, 1, 2, 3, 4, 5, 6, 7 }), tensor{2, 1, 2, 1}, - getValues({ 0, 1, 0, 1 }), + getValues({ 0, 1, 0, 1 }), getValues({ -10, -11, -12, -13 }), getValues({ -10, 1, 2, -11, -12, 5, 6, -13 }) }, @@ -166,7 +194,7 @@ std::vector> generateScatterElementsUpdateParams2 tensor{2, 1, 2, 2}, getValues({ 0, 1, 2, 3, 4, 5, 6, 7 }), tensor{2, 1, 1, 2}, - getValues({ 0, 1, 0, 1 }), + getValues({ 0, 1, 0, 1 }), getValues({ -10, -11, -12, -13 }), getValues({ -10, 1, 2, -11, -12, 5, 6, -13 }) }, @@ -175,14 +203,15 @@ std::vector> generateScatterElementsUpdateParams2 return result; } -template -std::vector> generateScatterElementsUpdateParams3D() { - const std::vector> result = { + +template +std::vector> generateScatterElementsUpdateParams3D() { + const std::vector> result = { { 1, tensor{2, 4, 1, 1, 3}, getValues({ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 }), tensor{2, 1, 1, 1, 2}, - getValues({ 0, 3, 1, 2 }), + getValues({ 0, 3, 1, 2 }), getValues({ -100, -110, -120, -130 }), getValues({ -100, 1, 2, 3, 4, 5, 6, 7, 8, 9, -110, 11, 12, 13, 14, -120, 16, 17, 18, -130, 20, 21, 22, 23 }) }, @@ -190,7 +219,7 @@ std::vector> generateScatterElementsUpdateParams3 tensor{2, 4, 1, 1, 3}, getValues({ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 }), tensor{2, 1, 1, 1, 2}, - getValues({ 0, 1, 0, 1 }), + getValues({ 0, 1, 0, 1 }), getValues({ -100, -110, -120, -130 }), getValues({ -100, 1, -110, 3, 4, 5, 6, 7, 8, 9, 10, 11, -120, 13, -130, 15, 16, 17, 18, 19, 20, 21, 22, 23 }) }, @@ -199,15 +228,15 @@ std::vector> generateScatterElementsUpdateParams3 return result; } -template -std::vector> generateScatterElementsUpdateParams4D() { - const std::vector> result = { +template +std::vector> generateScatterElementsUpdateParams4D() { + const std::vector> result = { { 5, tensor{2, 4, 2, 1, 1, 3}, getValues({0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47}), tensor{2, 1, 1, 1, 1, 2}, - getValues({2, 1, 1, 1, 2}), + getValues({2, 1, 1, 1, 2}), getValues({-100, -110, -120, -130}), getValues({0, 1, -100, -110, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, -120, 26, -130, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47}), @@ -233,10 +262,10 @@ float getError() { } struct PrintToStringParamName { - template - std::string operator()(const testing::TestParamInfo > ¶m) { + template + std::string operator()(const testing::TestParamInfo > ¶m) { std::stringstream buf; - ScatterElementsUpdateParams p; + ScatterElementsUpdateParams p; format::type plain_format; format::type target_data_format; format::type target_indices_format; @@ -251,16 +280,34 @@ struct PrintToStringParamName { << "_targetUpdatesFormat=" << fmt_to_str(target_updates_format); return buf.str(); } + + template + std::string operator()(const testing::TestParamInfo > ¶m) { + 
std::stringstream buf; + ScatterElementsUpdateParams p; + ScatterElementsUpdateOp::Reduction mode; + bool use_init_value; + format::type plain_format; + std::tie(p, mode, use_init_value, plain_format) = param.param; + using ov::op::operator<<; + buf << "_axis=" << p.axis + << "_mode=" << mode + << "_use_init_value=" << use_init_value + << "_data=" << p.data_tensor.to_string() + << "_indices=" << p.indices_tensor.to_string() + << "_plainFormat=" << fmt_to_str(plain_format); + return buf.str(); + } }; -}; // namespace -template +template struct scatter_elements_update_gpu_formats_test - : public ::testing::TestWithParam > { + : public ::testing::TestWithParam > { public: void test(bool is_caching_test) { const auto data_type = ov::element::from(); - ScatterElementsUpdateParams params; + const auto indices_type = ov::element::from(); + ScatterElementsUpdateParams params; format::type plain_format; format::type target_data_format; format::type target_indices_format; @@ -276,7 +323,7 @@ struct scatter_elements_update_gpu_formats_test auto& engine = get_test_engine(); const auto data = engine.allocate_memory({data_type, plain_format, params.data_tensor}); - const auto indices = engine.allocate_memory({data_type, plain_format, params.indices_tensor}); + const auto indices = engine.allocate_memory({indices_type, plain_format, params.indices_tensor}); const auto updates = engine.allocate_memory({data_type, plain_format, params.indices_tensor}); set_values(data, params.data); @@ -288,7 +335,7 @@ struct scatter_elements_update_gpu_formats_test topology.add(input_layout("Indices", indices->get_layout())); topology.add(input_layout("Updates", updates->get_layout())); topology.add(reorder("DataReordered", input_info("Data"), target_data_format, data_type)); - topology.add(reorder("IndicesReordered", input_info("Indices"), target_indices_format, data_type)); + topology.add(reorder("IndicesReordered", input_info("Indices"), target_indices_format, indices_type)); topology.add(reorder("UpdatesReordered", input_info("Updates"), target_updates_format, data_type)); topology.add( scatter_elements_update("ScatterEelementsUpdate", input_info("DataReordered"), input_info("IndicesReordered"), @@ -315,9 +362,94 @@ struct scatter_elements_update_gpu_formats_test } }; -using scatter_elements_update_gpu_formats_test_f32 = scatter_elements_update_gpu_formats_test; -using scatter_elements_update_gpu_formats_test_f16 = scatter_elements_update_gpu_formats_test; -using scatter_elements_update_gpu_formats_test_i32 = scatter_elements_update_gpu_formats_test; +using scatter_elements_update_gpu_formats_test_f32 = scatter_elements_update_gpu_formats_test; +using scatter_elements_update_gpu_formats_test_f16 = scatter_elements_update_gpu_formats_test; +using scatter_elements_update_gpu_formats_test_i32 = scatter_elements_update_gpu_formats_test; + +template +struct scatter_elements_update_gpu_reduction_test + : public ::testing::TestWithParam > { +public: + void test(bool is_caching_test) { + const auto data_type = ov::element::from(); + const auto indices_type = ov::element::from(); + ScatterElementsUpdateParams params; + format::type plain_format; + ScatterElementsUpdateOp::Reduction mode; + bool use_init_value; + std::tie(params, mode, use_init_value, plain_format) = this->GetParam(); + params.expected = generateReferenceOutput(plain_format, params, mode, use_init_value); + + auto& engine = get_test_engine(); + const auto data = engine.allocate_memory({data_type, plain_format, params.data_tensor}); + const auto indices = 
engine.allocate_memory({indices_type, plain_format, params.indices_tensor}); + const auto updates = engine.allocate_memory({data_type, plain_format, params.indices_tensor}); + + set_values(data, params.data); + set_values(indices, params.indices); + set_values(updates, params.updates); + + topology topology; + topology.add(input_layout("Data", data->get_layout())); + topology.add(input_layout("Indices", indices->get_layout())); + topology.add(input_layout("Updates", updates->get_layout())); + topology.add( + scatter_elements_update("ScatterElementsUpdate", + input_info("Data"), + input_info("Indices"), + input_info("Updates"), + params.axis, + mode, + use_init_value) + ); + cldnn::network::ptr network = get_network(engine, topology, get_test_default_config(engine), get_test_stream_ptr(), is_caching_test); + + network->set_input_data("Data", data); + network->set_input_data("Indices", indices); + network->set_input_data("Updates", updates); + + const auto outputs = network->execute(); + const auto output = outputs.at("ScatterElementsUpdate").get_memory(); + const cldnn::mem_lock output_ptr(output, get_test_stream()); + + ASSERT_EQ(params.data.size(), output_ptr.size()); + ASSERT_EQ(params.expected.size(), output_ptr.size()); + for (uint32_t i = 0; i < output_ptr.size(); i++) { + ASSERT_NEAR(output_ptr[i], params.expected[i], getError()) << ", i=" << i; + } + } +private: + static ov::Shape tensorToShape(const tensor &t, const format f) { + std::vector vec(cldnn::format::dimension(f)); + for (size_t i = 0; i < vec.size(); ++i) { + vec[i] = t.sizes()[i]; + } + std::reverse(vec.begin() + 2, vec.end()); + + return ov::Shape(vec.begin(), vec.end()); + } + + static std::vector generateReferenceOutput(const format fmt, + const ScatterElementsUpdateParams& p, + const ScatterElementsUpdateOp::Reduction mode, + const bool use_init_value) { + std::vector out(p.data_tensor.count()); + const auto data_shape = tensorToShape(p.data_tensor, fmt); + const auto indices_shape = tensorToShape(p.indices_tensor, fmt); + + ov::reference::scatter_elem_update(p.data.data(), + p.indices.data(), + p.updates.data(), + p.axis, + out.data(), + data_shape, + indices_shape, + mode, + use_init_value); + return out; + } +}; +}; // namespace TEST_P(scatter_elements_update_gpu_formats_test_f32, basic) { ASSERT_NO_FATAL_FAILURE(test(false)); @@ -335,7 +467,7 @@ TEST_P(scatter_elements_update_gpu_formats_test_i32, basic) { INSTANTIATE_TEST_SUITE_P(scatter_elements_update_gpu_formats_test_f32_2d, scatter_elements_update_gpu_formats_test_f32, ::testing::Combine( - ::testing::ValuesIn(generateScatterElementsUpdateParams2D()), + ::testing::ValuesIn(generateScatterElementsUpdateParams2D()), ::testing::Values(format::bfyx), ::testing::ValuesIn(formats2D), ::testing::Values(format::any), @@ -346,7 +478,7 @@ INSTANTIATE_TEST_SUITE_P(scatter_elements_update_gpu_formats_test_f32_2d, INSTANTIATE_TEST_SUITE_P(scatter_elements_update_gpu_formats_test_f16_2d, scatter_elements_update_gpu_formats_test_f16, ::testing::Combine( - ::testing::ValuesIn(generateScatterElementsUpdateParams2D()), + ::testing::ValuesIn(generateScatterElementsUpdateParams2D()), ::testing::Values(format::bfyx), ::testing::ValuesIn(formats2D), ::testing::Values(format::any), @@ -357,7 +489,7 @@ INSTANTIATE_TEST_SUITE_P(scatter_elements_update_gpu_formats_test_f16_2d, INSTANTIATE_TEST_SUITE_P(scatter_elements_update_gpu_formats_test_i32_2d, scatter_elements_update_gpu_formats_test_i32, ::testing::Combine( - ::testing::ValuesIn(generateScatterElementsUpdateParams2D()), + 
::testing::ValuesIn(generateScatterElementsUpdateParams2D()), ::testing::Values(format::bfyx), ::testing::ValuesIn(formats2D), ::testing::Values(format::any), @@ -368,7 +500,7 @@ INSTANTIATE_TEST_SUITE_P(scatter_elements_update_gpu_formats_test_i32_2d, INSTANTIATE_TEST_SUITE_P(scatter_elements_update_gpu_formats_test_f32_3d, scatter_elements_update_gpu_formats_test_f32, ::testing::Combine( - ::testing::ValuesIn(generateScatterElementsUpdateParams3D()), + ::testing::ValuesIn(generateScatterElementsUpdateParams3D()), ::testing::Values(format::bfzyx), ::testing::ValuesIn(formats3D), ::testing::Values(format::any), @@ -379,7 +511,7 @@ INSTANTIATE_TEST_SUITE_P(scatter_elements_update_gpu_formats_test_f32_3d, INSTANTIATE_TEST_SUITE_P(scatter_elements_update_gpu_formats_test_f32_4d, scatter_elements_update_gpu_formats_test_f32, ::testing::Combine( - ::testing::ValuesIn(generateScatterElementsUpdateParams4D()), + ::testing::ValuesIn(generateScatterElementsUpdateParams4D()), ::testing::Values(format::bfwzyx), ::testing::ValuesIn(formats4D), ::testing::ValuesIn(formats4D), @@ -390,7 +522,7 @@ INSTANTIATE_TEST_SUITE_P(scatter_elements_update_gpu_formats_test_f32_4d, INSTANTIATE_TEST_SUITE_P(scatter_elements_update_gpu_formats_test_mixed_inputs, scatter_elements_update_gpu_formats_test_f32, ::testing::Combine( - ::testing::ValuesIn(generateScatterElementsUpdateParams2D()), + ::testing::ValuesIn(generateScatterElementsUpdateParams2D()), ::testing::Values(format::bfyx), ::testing::ValuesIn({format::b_fs_yx_fsv16, format::b_fs_yx_fsv32}), ::testing::ValuesIn({format::bs_fs_yx_bsv16_fsv16, format::bs_fs_yx_bsv32_fsv16}), @@ -398,6 +530,101 @@ INSTANTIATE_TEST_SUITE_P(scatter_elements_update_gpu_formats_test_mixed_inputs, ), PrintToStringParamName()); +using scatter_elements_update_gpu_reduction_test_f32 = scatter_elements_update_gpu_reduction_test; +using scatter_elements_update_gpu_reduction_test_i32 = scatter_elements_update_gpu_reduction_test; + +TEST_P(scatter_elements_update_gpu_reduction_test_f32, basic) { + ASSERT_NO_FATAL_FAILURE(test(false)); +} + +TEST_P(scatter_elements_update_gpu_reduction_test_i32, basic) { + ASSERT_NO_FATAL_FAILURE(test(false)); +} + +const std::vector reduce_modes{ + ov::op::v12::ScatterElementsUpdate::Reduction::SUM, + ov::op::v12::ScatterElementsUpdate::Reduction::PROD, + ov::op::v12::ScatterElementsUpdate::Reduction::MIN, + // MAX mode omitted intentionally - see dedicated MAX tests below + ov::op::v12::ScatterElementsUpdate::Reduction::MEAN +}; + + +INSTANTIATE_TEST_SUITE_P(scatter_elements_update_gpu_reduction_test_f32_2d, + scatter_elements_update_gpu_reduction_test_f32, + ::testing::Combine( + ::testing::ValuesIn(generateScatterElementsUpdateParams2D()), + ::testing::ValuesIn(reduce_modes), + ::testing::ValuesIn({true, false}), + ::testing::Values(format::bfyx) + ), + PrintToStringParamName()); + +INSTANTIATE_TEST_SUITE_P(scatter_elements_update_gpu_reduction_test_i32_2d, + scatter_elements_update_gpu_reduction_test_i32, + ::testing::Combine( + ::testing::ValuesIn(generateScatterElementsUpdateParams2D()), + ::testing::ValuesIn(reduce_modes), + ::testing::ValuesIn({true, false}), + ::testing::Values(format::bfyx) + ), + PrintToStringParamName()); + +INSTANTIATE_TEST_SUITE_P(scatter_elements_update_gpu_reduction_test_f32_3d, + scatter_elements_update_gpu_reduction_test_f32, + ::testing::Combine( + ::testing::ValuesIn(generateScatterElementsUpdateParams3D()), + ::testing::ValuesIn(reduce_modes), + ::testing::ValuesIn({true, false}), + ::testing::Values(format::bfzyx) + 
), + PrintToStringParamName()); + +INSTANTIATE_TEST_SUITE_P(scatter_elements_update_gpu_reduction_test_f32_4d, + scatter_elements_update_gpu_reduction_test_f32, + ::testing::Combine( + ::testing::ValuesIn(generateScatterElementsUpdateParams4D()), + ::testing::ValuesIn(reduce_modes), + ::testing::ValuesIn({true, false}), + ::testing::Values(format::bfwzyx) + ), + PrintToStringParamName()); + +INSTANTIATE_TEST_SUITE_P(scatter_elements_update_gpu_reduction_none_test_f32_2d, + scatter_elements_update_gpu_reduction_test_f32, + ::testing::Combine( + ::testing::ValuesIn(generateScatterElementsUpdateParams2D()), + ::testing::Values(ov::op::v12::ScatterElementsUpdate::Reduction::NONE), + ::testing::Values(true), + ::testing::Values(format::bfyx) + ), + PrintToStringParamName()); + +INSTANTIATE_TEST_SUITE_P(scatter_elements_update_gpu_max_reduction_use_init, + scatter_elements_update_gpu_reduction_test_f32, + ::testing::Combine( + ::testing::ValuesIn(generateScatterElementsUpdateParams2D()), + ::testing::Values(ov::op::v12::ScatterElementsUpdate::Reduction::MAX), + ::testing::Values(true), + ::testing::Values(format::bfyx) + ), + PrintToStringParamName()); + +// Disabled due to bug in reference implementation - see function reduction_neutral_value() +// in core/reference/include/ngraph/runtime/reference/scatter_elements_update.hpp. +// For MAX reduction it returns numeric_limits::min() which is minimal *positive*, not truly minimal value of type T, +// which causes wrong result on negative input/update values. +// Enable when/if reference implementation is fixed. +INSTANTIATE_TEST_SUITE_P(DISABLED_scatter_elements_update_gpu_max_reduction_dont_use_init, + scatter_elements_update_gpu_reduction_test_f32, + ::testing::Combine( + ::testing::ValuesIn(generateScatterElementsUpdateParams2D()), + ::testing::Values(ov::op::v12::ScatterElementsUpdate::Reduction::MAX), + ::testing::Values(false), + ::testing::Values(format::bfyx) + ), + PrintToStringParamName()); + #ifdef RUN_ALL_MODEL_CACHING_TESTS TEST_P(scatter_elements_update_gpu_formats_test_f32, basic_cached) { ASSERT_NO_FATAL_FAILURE(test(true)); @@ -414,3 +641,8 @@ TEST_P(scatter_elements_update_gpu_formats_test_i32, basic_cached) { TEST(scatter_elements_update_gpu_fp16, d2411_axisF_cached) { test_d2411_axisF(true); } + +TEST_P(scatter_elements_update_gpu_reduction_test_f32, cached) { + ASSERT_NO_FATAL_FAILURE(test(true)); +} + diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/scatter_elements_update.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/scatter_elements_update.hpp index 6175776cd8a3b0..bb3b3ad153d5fe 100644 --- a/src/tests/functional/plugin/shared/include/single_layer_tests/scatter_elements_update.hpp +++ b/src/tests/functional/plugin/shared/include/single_layer_tests/scatter_elements_update.hpp @@ -12,4 +12,8 @@ TEST_P(ScatterElementsUpdateLayerTest, CompareWithRefs) { Run(); }; +TEST_P(ScatterElementsUpdate12LayerTest, CompareWithRefs) { + Run(); +}; + } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/scatter_elements_update.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/scatter_elements_update.hpp index dd4be07851afc0..46c5b0d3c42d51 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/scatter_elements_update.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/scatter_elements_update.hpp @@ 
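The DISABLED_ MAX-reduction instantiation above is blocked on the neutral-value choice described in its comment. A small stand-alone illustration of that pitfall (independent of the actual reference implementation code):

#include <iostream>
#include <limits>

int main() {
    // For floating-point types, numeric_limits<T>::min() is the smallest
    // *positive* normal value, while lowest() is the most negative one.
    std::cout << std::numeric_limits<float>::min()    << "\n";  // ~1.17549e-38
    std::cout << std::numeric_limits<float>::lowest() << "\n";  // ~-3.40282e+38
    // Using min() as the "neutral" element of a MAX reduction means
    // max(neutral, x) == neutral for every negative x, so negative
    // inputs/updates produce wrong results when use_init_val == false;
    // lowest() would be the correct neutral element.
    return 0;
}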
-24,6 +24,15 @@ using scatterElementsUpdateParamsTuple = typename std::tuple< InferenceEngine::Precision, // indices precision std::string>; // Device name +using scatterElementsUpdate12ParamsTuple = typename std::tuple< + axisShapeInShape, // shape description + std::vector, // indices value + ov::op::v12::ScatterElementsUpdate::Reduction, // Reduce mode + bool, // Use init value + InferenceEngine::Precision, // Network precision + InferenceEngine::Precision, // indices precision + std::string>; // Device name + class ScatterElementsUpdateLayerTest : public testing::WithParamInterface, virtual public LayerTestsUtils::LayerTestsCommon { public: @@ -34,4 +43,12 @@ class ScatterElementsUpdateLayerTest : public testing::WithParamInterface, + virtual public LayerTestsUtils::LayerTestsCommon { +public: + static std::string getTestCaseName(const testing::TestParamInfo &obj); +protected: + void SetUp() override; +}; } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/scatter_elements_update.cpp b/src/tests/functional/shared_test_classes/src/single_layer/scatter_elements_update.cpp index 440e8ef543faa5..c2541ca4cd4aba 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/scatter_elements_update.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/scatter_elements_update.cpp @@ -5,6 +5,9 @@ #include "ov_models/builders.hpp" #include "shared_test_classes/single_layer/scatter_elements_update.hpp" +#include "openvino/op/scatter_elements_update.hpp" +using ov::op::operator<<; + namespace LayerTestsDefinitions { std::string ScatterElementsUpdateLayerTest::getTestCaseName(const testing::TestParamInfo &obj) { @@ -59,4 +62,56 @@ void ScatterElementsUpdateLayerTest::SetUp() { ngraph::ResultVector results{std::make_shared(s2d)}; function = std::make_shared(results, paramVector, "ScatterElementsUpdate"); } + +std::string ScatterElementsUpdate12LayerTest::getTestCaseName(const testing::TestParamInfo &obj) { + axisShapeInShape shapeDescript; + std::vector indicesValue; + ov::op::v12::ScatterElementsUpdate::Reduction reduceMode; + bool useInitVal; + InferenceEngine::Precision inputPrecision; + InferenceEngine::Precision indicesPrecision; + std::string targetName; + std::tie(shapeDescript, indicesValue, reduceMode, useInitVal, inputPrecision, indicesPrecision, targetName) = obj.param; + std::ostringstream result; + result << "InputShape=" << ov::test::utils::vec2str(std::get<0>(shapeDescript)) << "_"; + result << "IndicesShape=" << ov::test::utils::vec2str(std::get<1>(shapeDescript)) << "_"; + result << "Axis=" << std::get<2>(shapeDescript) << "_"; + result << "ReduceMode=" << reduceMode << "_"; + result << "UseInitVal=" << useInitVal << "_"; + result << "Indices=" << ov::test::utils::vec2str(indicesValue) << "_"; + result << "inPrc=" << inputPrecision.name() << "_"; + result << "idxPrc=" << indicesPrecision.name() << "_"; + result << "targetDevice=" << targetName << "_"; + return result.str(); +} + +void ScatterElementsUpdate12LayerTest::SetUp() { + InferenceEngine::SizeVector inShape; + InferenceEngine::SizeVector indicesShape; + int axis; + ov::op::v12::ScatterElementsUpdate::Reduction reduceMode; + bool useInitVal; + axisShapeInShape shapeDescript; + std::vector indicesValue; + InferenceEngine::Precision inputPrecision; + InferenceEngine::Precision indicesPrecision; + std::tie(shapeDescript, indicesValue, reduceMode, useInitVal, inputPrecision, indicesPrecision, targetDevice) = this->GetParam(); + std::tie(inShape, 
indicesShape, axis) = shapeDescript; + const auto inPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecision); + const auto idxPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(indicesPrecision); + ov::ParameterVector paramVector; + const auto inputParams = std::make_shared(inPrc, ov::Shape(inShape)); + paramVector.push_back(inputParams); + const auto updateParams = std::make_shared(inPrc, ov::Shape(indicesShape)); + paramVector.push_back(updateParams); + + const auto indicesNode = std::make_shared(idxPrc, indicesShape, indicesValue); + const auto axisNode = std::make_shared(ov::element::Type_t::i32, ov::Shape{}, + std::vector{axis}); + const auto seuNode = std::make_shared(paramVector[0], indicesNode, + paramVector[1], axisNode, reduceMode, useInitVal); + + ov::ResultVector results{std::make_shared(seuNode)}; + function = std::make_shared(results, paramVector, "ScatterElementsUpdate"); +} } // namespace LayerTestsDefinitions From 77cde47801930578e1fe4d906a3eb0fc6b4a58d1 Mon Sep 17 00:00:00 2001 From: Maciej Smyk Date: Thu, 5 Oct 2023 09:01:40 +0200 Subject: [PATCH 071/257] [DOCS] Inference with OpenVINO Runtime update for master (#20237) * Update openvino_intro.md * Update docs/articles_en/openvino_workflow/openvino_intro.md Co-authored-by: Karol Blaszczak * Update docs/articles_en/openvino_workflow/openvino_intro.md Co-authored-by: Karol Blaszczak --------- Co-authored-by: Karol Blaszczak --- .../openvino_workflow/openvino_intro.md | 26 +++++++++---------- 1 file changed, 12 insertions(+), 14 deletions(-) diff --git a/docs/articles_en/openvino_workflow/openvino_intro.md b/docs/articles_en/openvino_workflow/openvino_intro.md index 79395a748b4ce5..40db0d15b52bd5 100644 --- a/docs/articles_en/openvino_workflow/openvino_intro.md +++ b/docs/articles_en/openvino_workflow/openvino_intro.md @@ -22,27 +22,25 @@ on different platforms. -OpenVINO Runtime is a set of C++ libraries with C and Python bindings providing a common API to deliver inference solutions on the platform of your choice. Use the OpenVINO Runtime API to read an Intermediate Representation (IR), TensorFlow, TensorFlow Lite, ONNX, or PaddlePaddle model and execute it on preferred devices. - -OpenVINO Runtime uses a plugin architecture. Its plugins are software components that contain complete implementation for inference on a particular Intel® hardware device: CPU, GPU, GNA, etc. Each plugin implements the unified API and provides additional hardware-specific APIs for configuring devices or API interoperability between OpenVINO Runtime and underlying plugin backend. - -The scheme below illustrates the typical workflow for deploying a trained deep learning model: +OpenVINO Runtime is a set of C++ libraries with C and Python bindings providing a common API to deliver inference solutions on the platform of your choice. Use the OpenVINO Runtime API to read PyTorch, TensorFlow, TensorFlow Lite, ONNX, and PaddlePaddle models and execute them on preferred devices. OpenVINO gives you the option to use these models directly or convert them to the OpenVINO IR (Intermediate Representation) format explicitly, for maximum performance. -.. image:: _static/images/BASIC_FLOW_IE_C.svg +.. note:: + For more detailed information on how to convert, read, and compile supported model formats + see the :doc:`Supported Formats article `. + + Note that TensorFlow models can be run using the + :doc:`torch.compile feature `, as well as the standard ways of + :doc:`converting TensorFlow ` + or reading them directly. 
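As a companion to the updated intro above, a minimal sketch of the read-and-compile flow it describes, using the C++ Runtime API (the model file name and device string are placeholders):

#include <openvino/openvino.hpp>

int main() {
    ov::Core core;
    // Read a model directly in a supported format (IR, ONNX, TFLite, PaddlePaddle, ...),
    // or convert it to OpenVINO IR beforehand for maximum performance.
    std::shared_ptr<ov::Model> model = core.read_model("model.onnx");
    // Compile it for the preferred device ("CPU", "GPU", "AUTO", ...).
    ov::CompiledModel compiled = core.compile_model(model, "CPU");
    ov::InferRequest request = compiled.create_infer_request();
    // ... fill the request's input tensors, then run inference:
    request.infer();
    return 0;
}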
-Video -#################### +OpenVINO Runtime uses a plugin architecture. Its plugins are software components that contain complete implementation for inference on a particular Intel® hardware device: CPU, GPU, GNA, etc. Each plugin implements the unified API and provides additional hardware-specific APIs for configuring devices or API interoperability between OpenVINO Runtime and underlying plugin backend. +The scheme below illustrates the typical workflow for deploying a trained deep learning model: -.. list-table:: - * - .. raw:: html +.. image:: _static/images/BASIC_FLOW_IE_C.svg - - * - **OpenVINO Runtime Concept**. Duration: 3:43 @endsphinxdirective From 397c4fac98f9e07e35ebb7fff5d476686e8cc27e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 5 Oct 2023 11:32:40 +0400 Subject: [PATCH 072/257] Bump actions/checkout from 3 to 4 (#20249) Bumps [actions/checkout](https://github.com/actions/checkout) from 3 to 4. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/v3...v4) --- updated-dependencies: - dependency-name: actions/checkout dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/linux_cuda.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/linux_cuda.yml b/.github/workflows/linux_cuda.yml index 9e74ec11ec6be2..517b34fb573f5b 100644 --- a/.github/workflows/linux_cuda.yml +++ b/.github/workflows/linux_cuda.yml @@ -57,13 +57,13 @@ jobs: apt install -y git curl git git-lfs unzip wget - name: Clone OpenVINO - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: path: ${{ env.OPENVINO_REPO }} submodules: 'true' - name: Clone OpenVINO Contrib - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: repository: 'openvinotoolkit/openvino_contrib' path: ${{ env.OPENVINO_CONTRIB_REPO }} From 747a799e8b1a0e6cb9cd3cd2ffc410363b488abd Mon Sep 17 00:00:00 2001 From: Roman Kazantsev Date: Thu, 5 Oct 2023 11:53:06 +0400 Subject: [PATCH 073/257] [PyTorch FE] Set upper-bound for PyTorch (#20251) Signed-off-by: Kazantsev, Roman --- tests/constraints.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/constraints.txt b/tests/constraints.txt index 97c88475f178f7..671c60bc937c83 100644 --- a/tests/constraints.txt +++ b/tests/constraints.txt @@ -22,3 +22,4 @@ pytest-html==3.2.0 pytest-timeout==2.1.0 jax<=0.4.14 jaxlib<=0.4.14 +torch<2.1.0 \ No newline at end of file From 0583b9424d08e63c12844c91ca1a19d5126f973c Mon Sep 17 00:00:00 2001 From: Sebastian Golebiewski Date: Thu, 5 Oct 2023 10:37:55 +0200 Subject: [PATCH 074/257] [DOCS] Removing Media Processing and CV Libraries (#20235) * Removing Media Processing and CV Libraries Addresses Jira Ticket: 122095 * update refs --- docs/articles_en/documentation.md | 7 - .../media_processing_cv_libraries.md | 19 - .../dlstreamer.md | 28 - .../gapi_intro.md | 78 --- .../gapi_intro/face_beautification.md | 514 ------------------ .../gapi_face_analytics_pipeline.md | 372 ------------- .../gapi_intro/kernel_api.md | 215 -------- .../deployment_intro/local-distribution.md | 9 - 8 files changed, 1242 deletions(-) delete mode 100644 docs/articles_en/documentation/media_processing_cv_libraries.md delete mode 100644 
docs/articles_en/documentation/media_processing_cv_libraries/dlstreamer.md delete mode 100644 docs/articles_en/documentation/media_processing_cv_libraries/gapi_intro.md delete mode 100644 docs/articles_en/documentation/media_processing_cv_libraries/gapi_intro/face_beautification.md delete mode 100644 docs/articles_en/documentation/media_processing_cv_libraries/gapi_intro/gapi_face_analytics_pipeline.md delete mode 100644 docs/articles_en/documentation/media_processing_cv_libraries/gapi_intro/kernel_api.md diff --git a/docs/articles_en/documentation.md b/docs/articles_en/documentation.md index 276e4e6e0930a6..1a512d2a38f61a 100644 --- a/docs/articles_en/documentation.md +++ b/docs/articles_en/documentation.md @@ -16,7 +16,6 @@ Legacy Features Tool Ecosystem OpenVINO Extensibility - Media Processing and CV Libraries OpenVINO™ Security @@ -31,12 +30,6 @@ This section provides reference documents that guide you through the OpenVINO to | :doc:`OpenVINO Extensibility Mechanism ` | The Intel® Distribution of OpenVINO™ toolkit supports neural network models trained with various frameworks, including TensorFlow, PyTorch, ONNX, TensorFlow Lite, and PaddlePaddle (OpenVINO support for Apache MXNet, Caffe, and Kaldi is being deprecated and will be removed in the future). Learn how to extend OpenVINO functionality with custom settings. -| :doc:`Media Processing and Computer Vision Libraries ` -| The OpenVINO™ toolkit also works with the following media processing frameworks and libraries: -| • Intel® Deep Learning Streamer (Intel® DL Streamer) — A streaming media analytics framework based on GStreamer, for creating complex media analytics pipelines optimized for Intel hardware platforms. Go to the Intel® DL Streamer documentation website to learn more. -| • Intel® oneAPI Video Processing Library (oneVPL) — A programming interface for video decoding, encoding, and processing to build portable media pipelines on CPUs, GPUs, and other accelerators. -| You can also add computer vision capabilities to your application using optimized versions of OpenCV. - | :doc:`OpenVINO™ Security ` | Learn how to use OpenVINO securely and protect your data to meet specific security and privacy requirements. diff --git a/docs/articles_en/documentation/media_processing_cv_libraries.md b/docs/articles_en/documentation/media_processing_cv_libraries.md deleted file mode 100644 index 250660402b0173..00000000000000 --- a/docs/articles_en/documentation/media_processing_cv_libraries.md +++ /dev/null @@ -1,19 +0,0 @@ -# Media Processing and CV Libraries {#media_processing_cv_libraries} - -@sphinxdirective - -.. meta:: - :description: Explore OpenCV Graph API and other media processing frameworks - used for development of computer vision solutions. - -.. toctree:: - :maxdepth: 1 - - Intel® Deep Learning Streamer - openvino_docs_gapi_gapi_intro - OpenCV Developer Guide - OpenCL™ Developer Guide - OneVPL Developer Guide - -@endsphinxdirective - diff --git a/docs/articles_en/documentation/media_processing_cv_libraries/dlstreamer.md b/docs/articles_en/documentation/media_processing_cv_libraries/dlstreamer.md deleted file mode 100644 index 8f5a86cc4d1583..00000000000000 --- a/docs/articles_en/documentation/media_processing_cv_libraries/dlstreamer.md +++ /dev/null @@ -1,28 +0,0 @@ -# Intel® Deep Learning Streamer (Intel® DL Streamer) {#openvino_docs_dlstreamer} - -@sphinxdirective - -.. 
meta:: - :description: Explore Intel® DL Streamer - a framework used to analyze audio - and video streams to detect, classify, track, identify and count - objects, events and people. - -Intel® DL Streamer is a streaming media analytics framework, based on GStreamer* multimedia framework, for creating complex media analytics pipelines. - -Intel® DL Streamer makes Media analytics easy: - -* Write less code and get better performance -* Quickly develop, optimize, benchmark, and deploy video & audio analytics pipelines in the Cloud and at the Edge -* Analyze video and audio streams, create actionable results, capture results, and send them to the cloud -* Leverage the efficiency and computational power of Intel hardware platforms - -Go to `Intel® DL Streamer documentation website `__ for information on how to download, install, and use. - -**Media analytics** is the analysis of audio & video streams to detect, classify, track, identify and count objects, events and people. The analyzed results can be used to take actions, coordinate events, identify patterns and gain insights across multiple domains. - -**Media analytics pipelines** transform media streams into insights through audio / video processing, inference, and analytics operations across multiple IP blocks. - -\* Other names and brands may be claimed as the property of others. - -@endsphinxdirective - diff --git a/docs/articles_en/documentation/media_processing_cv_libraries/gapi_intro.md b/docs/articles_en/documentation/media_processing_cv_libraries/gapi_intro.md deleted file mode 100644 index 3abd4cdf999c01..00000000000000 --- a/docs/articles_en/documentation/media_processing_cv_libraries/gapi_intro.md +++ /dev/null @@ -1,78 +0,0 @@ -# Introduction to OpenCV Graph API (G-API) {#openvino_docs_gapi_gapi_intro} - -@sphinxdirective - -.. meta:: - :description: Learn about OpenCV Graph API (G-API) which is used to accelerate - regular image and video processing and ensure its portability. - - -.. toctree:: - :maxdepth: 1 - :hidden: - - openvino_docs_gapi_kernel_api - openvino_docs_gapi_face_beautification - openvino_docs_gapi_gapi_face_analytics_pipeline - -OpenCV Graph API (G-API) is an OpenCV module targeted to make regular image and video processing fast and portable. G-API is a special module in OpenCV – in contrast with the majority of other main modules, this one acts as a framework rather than some specific CV algorithm. - -G-API is positioned as a next level optimization enabler for computer vision, focusing not on particular CV functions but on the whole algorithm optimization. - -G-API provides means to define CV operations, construct graphs (in form of expressions) using it, and finally implement and run the operations for a particular backend. - -The idea behind G-API is that if an algorithm can be expressed in a special embedded language (currently in C++), the framework can catch its sense and apply a number of optimizations to the whole thing automatically. Particular optimizations are selected based on which :doc:`kernels ` and `backends `__ are involved in the graph compilation process, for example, the graph can be offloaded to GPU via the OpenCL backend, or optimized for memory consumption with the Fluid backend. Kernels, backends, and their settings are parameters to the graph compilation, so the graph itself does not depend on any platform-specific details and can be ported easily. - -.. note:: - Graph API (G-API) was introduced in the most recent major OpenCV 4.0 release and now is being actively developed. 
The API is volatile at the moment and there may be minor but compatibility-breaking changes in the future. - -G-API Concepts -############## - -* *Graphs* are built by applying operations to data objects. - - * API itself has no "graphs", it is expression-based instead. - -* *Data objects* do not hold actual data, only capture dependencies. -* *Operations* consume and produce data objects. -* A graph is defined by specifying its boundaries with data objects: - - * What data objects are inputs to the graph? - * What are its outputs? - -The paragraphs below explain the G-API programming model and development workflow. - -Programming Model -################# - -Building graphs is easy with G-API. In fact, there is no notion of graphs exposed in the API, so the user doesn’t need to operate in terms of “nodes” and “edges” — instead, graphs are constructed implicitly via expressions in a "functional" way. Expression-based graphs are built using two major concepts: :doc:`operations ` and `data objects `__ . - -In G-API, every graph begins and ends with data objects; data objects are passed to operations which produce (“return”) their results — new data objects, which are then passed to other operations, and so on. You can declare their own operations, G-API does not distinguish user-defined operations from its own predefined ones in any way. - -After the graph is defined, it needs to be compiled for execution. During the compilation, G-API figures out what the graph looks like, which kernels are available to run the operations in the graph, how to manage heterogeneity and to optimize the execution path. The result of graph compilation is a so-called “compiled” object. This object encapsulates the execution sequence for the graph inside and operates on real image data. You can set up the compilation process using various `compilation arguments `__. Backends expose some of their options as these arguments; also, actual kernels and DL network settings are passed into the framework this way. - -G-API supports graph compilation for two execution modes, *regular* and *streaming*, producing different types of compiled objects as the result. - -* **Regular** compiled objects are represented with class GCompiled, which follows functor-like semantics and has an overloaded operator(). When called for execution on the given input data, the GCompiled functor blocks the current thread and processes the data immediately — like a regular C++ function. By default, G-API tries to optimize the execution time for latency in this compilation mode. -* Starting with OpenCV 4.2, G-API can also produce GStreamingCompiled objects that better fit the asynchronous pipelined execution model. This compilation mode is called **streaming mode**, and G-API tries to optimize the overall throughput by implementing the pipelining technique as described above. We will use both in our example. - -The overall process for the regular case is summarized in the diagram below: - -.. image:: _static/images/gapi_programming_model.png - -The graph is built with operations so having operations defined (**0**) is a basic prerequisite; a constructed expression graph (**1**) forms a ``cv::GComputation`` object; kernels (**2**) which implement operations are the basic requirement to the graph compilation (**3**); the actual execution (**4**) is handled by a ``cv::GCompiled`` object with takes input and produces output data. 
- -Development Workflow -#################### - -One of the ways to organize a G-API development workflow is presented in the diagram below: - -.. image:: _static/images/gapi_development_workflow.png - -Basically, it is a derivative from the programming model illustrated in the previous chapter. You start with an algorithm or a data flow in mind (**0**), mapping it to a graph model (**1**), then identifying what operations you need (**2**) to construct this graph. These operations may already exist in G-API or be missing, in the latter case we implement the missing ones as kernels (**3**). Then decide which execution model fits our case better, pass kernels and DL networks as arguments to the compilation process (**4**), and finally switch to the execution (**5**). The process is iterative, so if you want to change anything based on the execution results, get back to steps (**0**) or (**1**) (a dashed line). - - -@endsphinxdirective - - - diff --git a/docs/articles_en/documentation/media_processing_cv_libraries/gapi_intro/face_beautification.md b/docs/articles_en/documentation/media_processing_cv_libraries/gapi_intro/face_beautification.md deleted file mode 100644 index 5b11a5c45e69d1..00000000000000 --- a/docs/articles_en/documentation/media_processing_cv_libraries/gapi_intro/face_beautification.md +++ /dev/null @@ -1,514 +0,0 @@ -# Implementing a Face Beautification Algorithm {#openvino_docs_gapi_face_beautification} - -@sphinxdirective - -.. meta:: - :description: Learn how to use a simple face beautification algorithm in a - pipeline with G-API that can be run on a video stream. - -Introduction -############ - -In this tutorial you will learn: - -* Basics of a sample face beautification algorithm; -* How to infer different networks inside a pipeline with G-API; -* How to run a G-API pipeline on a video stream. - -Prerequisites -############# - -This sample requires: - -* PC with GNU/Linux or Microsoft Windows (Apple macOS is supported but was not tested) -* OpenCV 4.2 or higher built with `Intel® Distribution of OpenVINO™ Toolkit `__ (building with `Intel® TBB `__ is a plus) -* The following pre-trained models from the :doc:`Open Model Zoo ` - - * `face-detection-adas-0001 `__ - * `facial-landmarks-35-adas-0002 `__ - -To download the models from the Open Model Zoo, use the :doc:`Model Downloader ` tool. - -Face Beautification Algorithm -############################# - -We will implement a simple face beautification algorithm using a combination of modern Deep Learning techniques and traditional Computer Vision. The general idea behind the algorithm is to make face skin smoother while preserving face features like eyes or a mouth contrast. The algorithm identifies parts of the face using a DNN inference, applies different filters to the parts found, and then combines it into the final result using basic image arithmetics: - -.. 
image:: _static/images/gapi_face_beautification_algorithm.png - -Briefly the algorithm is described as follows: - -- Input image :math:`I` is passed to unsharp mask and bilateral filters - (\f$U\f$ and :math:`L` respectively); -- Input image :math:`I` is passed to an SSD-based face detector; -- SSD result (a :math:`[1 \times 1 \times 200 \times 7]` blob) is parsed and converted to an array of faces; -- Every face is passed to a landmarks detector; -- Based on landmarks found for every face, three image masks are generated: - - - A background mask :math:`b` -- indicating which areas from the original image to keep as-is; - - A face part mask :math:`p` -- identifying regions to preserve (sharpen). - - A face skin mask :math:`s` -- identifying regions to blur; -- The final result :math:`O` is a composition of features above calculated as :math:`O = b\*I + p\*U + s\*L`. - -Generating face element masks based on a limited set of features (just 35 per face, including all its parts) is not very trivial and is described in the sections below. - -Constructing a G-API Pipeline -############################# - -Declare Deep Learning Topologies -++++++++++++++++++++++++++++++++ - -This sample is using two DNN detectors. Every network takes one input and produces one output. In G-API, networks are defined with macro G_API_NET(): - -.. code-block:: cpp - - G_API_NET(FaceDetector, , "face_detector"); - G_API_NET(LandmDetector, , "landm_detector"); - -To get more information, see Declaring Deep Learning topologies described in the "Face Analytics pipeline" tutorial. - -Describe the Processing Graph -+++++++++++++++++++++++++++++ - -The code below generates a graph for the algorithm above: - -.. code-block:: cpp - - cv::GComputation pipeline([=]() - { - cv::GMat gimgIn; // input - cv::GMat faceOut = cv::gapi::infer(gimgIn); - GArrayROI garRects = custom::GFacePostProc::on(faceOut, gimgIn, config::kConfThresh); // post-proc - cv::GArray landmOut = cv::gapi::infer(garRects, gimgIn); - cv::GArray garElems; // | - cv::GArray garJaws; // |output arrays - std::tie(garElems, garJaws) = custom::GLandmPostProc::on(landmOut, garRects); // post-proc - cv::GArray garElsConts; // face elements - cv::GArray garFaceConts; // whole faces - std::tie(garElsConts, garFaceConts) = custom::GGetContours::on(garElems, garJaws); // interpolation - cv::GMat mskSharp = custom::GFillPolyGContours::on(gimgIn, garElsConts); // | - cv::GMat mskSharpG = cv::gapi::gaussianBlur(mskSharp, config::kGKernelSize, // | - config::kGSigma); // | - cv::GMat mskBlur = custom::GFillPolyGContours::on(gimgIn, garFaceConts); // | - cv::GMat mskBlurG = cv::gapi::gaussianBlur(mskBlur, config::kGKernelSize, // | - config::kGSigma); // |draw masks - // The first argument in mask() is Blur as we want to subtract from // | - // BlurG the next step: // | - cv::GMat mskBlurFinal = mskBlurG - cv::gapi::mask(mskBlurG, mskSharpG); // | - cv::GMat mskFacesGaussed = mskBlurFinal + mskSharpG; // | - cv::GMat mskFacesWhite = cv::gapi::threshold(mskFacesGaussed, 0, 255, cv::THRESH_BINARY); // | - cv::GMat mskNoFaces = cv::gapi::bitwise_not(mskFacesWhite); // | - cv::GMat gimgBilat = custom::GBilatFilter::on(gimgIn, config::kBSize, - config::kBSigmaCol, config::kBSigmaSp); - cv::GMat gimgSharp = custom::unsharpMask(gimgIn, config::kUnshSigma, - config::kUnshStrength); - // Applying the masks - // Custom function mask3C() should be used instead of just gapi::mask() - // as mask() provides CV_8UC1 source only (and we have CV_8U3C) - cv::GMat gimgBilatMasked = 
custom::mask3C(gimgBilat, mskBlurFinal); - cv::GMat gimgSharpMasked = custom::mask3C(gimgSharp, mskSharpG); - cv::GMat gimgInMasked = custom::mask3C(gimgIn, mskNoFaces); - cv::GMat gimgBeautif = gimgBilatMasked + gimgSharpMasked + gimgInMasked; - return cv::GComputation(cv::GIn(gimgIn), cv::GOut(gimgBeautif, - cv::gapi::copy(gimgIn), - garFaceConts, - garElsConts, - garRects)); - }); - - -The resulting graph is a mixture of G-API's standard operations, user-defined operations (namespace custom::), and DNN inference. The generic function ``cv::gapi::infer<>()`` allows you to trigger inference within the pipeline; networks to infer are specified as template parameters. The sample code is using two versions of ``cv::gapi::infer<>()``: - -* A frame-oriented one is used to detect faces on the input frame. -* An ROI-list oriented one is used to run landmarks inference on a list of faces – this version produces an array of landmarks per every face. More on this in "Face Analytics pipeline" (:ref:`Building a GComputation ` section). - -Unsharp mask in G-API -+++++++++++++++++++++ - -The unsharp mask :math:`U` for image :math:`I` is defined as: - -.. math:: - - U = I - s \* L(M(I)) - -where :math:`M()` is a median filter, :math:`L()` is the Laplace operator, and :math:`s` is a strength coefficient. While G-API doesn't provide this function out-of-the-box, it is expressed naturally with the existing G-API operations: - -.. code-block:: cpp - - inline cv::GMat custom::unsharpMask(const cv::GMat &src, - const int sigma, - const float strength) - { - cv::GMat blurred = cv::gapi::medianBlur(src, sigma); - cv::GMat laplacian = custom::GLaplacian::on(blurred, CV_8U); - return (src - (laplacian \* strength)); - } - -Note that the code snipped above is a regular C++ function defined with G-API types. Users can write functions like this to simplify graph construction; when called, this function just puts the relevant nodes to the pipeline it is used in. - -Custom Operations -################# - -The face beautification graph is using custom operations extensively. This chapter focuses on the most interesting kernels, refer to G-API Kernel API for general information on defining operations and implementing kernels in G-API. - -Face detector post-processing -+++++++++++++++++++++++++++++ - -A face detector output is converted to an array of faces with the following kernel: - -.. code-block:: cpp - - using VectorROI = std::vector; - GAPI_OCV_KERNEL(GCPUFacePostProc, GFacePostProc) - { - static void run(const cv::Mat &inDetectResult, - const cv::Mat &inFrame, - const float faceConfThreshold, - VectorROI &outFaces) - { - const int kObjectSize = 7; - const int imgCols = inFrame.size().width; - const int imgRows = inFrame.size().height; - const cv::Rect borders({0, 0}, inFrame.size()); - outFaces.clear(); - const int numOfDetections = inDetectResult.size[2]; - const float \*data = inDetectResult.ptr(); - for (int i = 0; i < numOfDetections; i++) - { - const float faceId = data[i \* kObjectSize + 0]; - if (faceId < 0.f) // indicates the end of detections - { - break; - } - const float faceConfidence = data[i \* kObjectSize + 2]; - // We can cut detections by the `conf` field - // to avoid mistakes of the detector. 
- if (faceConfidence > faceConfThreshold) - { - const float left = data[i \* kObjectSize + 3]; - const float top = data[i \* kObjectSize + 4]; - const float right = data[i \* kObjectSize + 5]; - const float bottom = data[i \* kObjectSize + 6]; - // These are normalized coordinates and are between 0 and 1; - // to get the real pixel coordinates we should multiply it by - // the image sizes respectively to the directions: - cv::Point tl(toIntRounded(left \* imgCols), - toIntRounded(top \* imgRows)); - cv::Point br(toIntRounded(right \* imgCols), - toIntRounded(bottom \* imgRows)); - outFaces.push_back(cv::Rect(tl, br) & borders); - } - } - } - }; - -Facial Landmarks Post-Processing -++++++++++++++++++++++++++++++++ - -The algorithm infers locations of face elements (like the eyes, the mouth and the head contour itself) using a generic facial landmarks detector (details) from OpenVINO™ Open Model Zoo. However, the detected landmarks as-is are not enough to generate masks — this operation requires regions of interest on the face represented by closed contours, so some interpolation is applied to get them. This landmarks processing and interpolation is performed by the following kernel: - -.. code-block:: cpp - - GAPI_OCV_KERNEL(GCPUGetContours, GGetContours) - { - static void run(const std::vector &vctPtsFaceElems, // 18 landmarks of the facial elements - const std::vector &vctCntJaw, // 17 landmarks of a jaw - std::vector &vctElemsContours, - std::vector &vctFaceContours) - { - size_t numFaces = vctCntJaw.size(); - CV_Assert(numFaces == vctPtsFaceElems.size()); - CV_Assert(vctElemsContours.size() == 0ul); - CV_Assert(vctFaceContours.size() == 0ul); - // vctFaceElemsContours will store all the face elements' contours found - // in an input image, namely 4 elements (two eyes, nose, mouth) for every detected face: - vctElemsContours.reserve(numFaces \* 4); - // vctFaceElemsContours will store all the faces' contours found in an input image: - vctFaceContours.reserve(numFaces); - Contour cntFace, cntLeftEye, cntRightEye, cntNose, cntMouth; - cntNose.reserve(4); - for (size_t i = 0ul; i < numFaces; i++) - { - // The face elements contours - // A left eye: - // Approximating the lower eye contour by half-ellipse (using eye points) and storing in cntLeftEye: - cntLeftEye = getEyeEllipse(vctPtsFaceElems[i][1], vctPtsFaceElems[i][0]); - // Pushing the left eyebrow clock-wise: - cntLeftEye.insert(cntLeftEye.end(), {vctPtsFaceElems[i][12], vctPtsFaceElems[i][13], - vctPtsFaceElems[i][14]}); - // A right eye: - // Approximating the lower eye contour by half-ellipse (using eye points) and storing in vctRightEye: - cntRightEye = getEyeEllipse(vctPtsFaceElems[i][2], vctPtsFaceElems[i][3]); - // Pushing the right eyebrow clock-wise: - cntRightEye.insert(cntRightEye.end(), {vctPtsFaceElems[i][15], vctPtsFaceElems[i][16], - vctPtsFaceElems[i][17]}); - // A nose: - // Storing the nose points clock-wise - cntNose.clear(); - cntNose.insert(cntNose.end(), {vctPtsFaceElems[i][4], vctPtsFaceElems[i][7], - vctPtsFaceElems[i][5], vctPtsFaceElems[i][6]}); - // A mouth: - // Approximating the mouth contour by two half-ellipses (using mouth points) and storing in vctMouth: - cntMouth = getPatchedEllipse(vctPtsFaceElems[i][8], vctPtsFaceElems[i][9], - vctPtsFaceElems[i][10], vctPtsFaceElems[i][11]); - // Storing all the elements in a vector: - vctElemsContours.insert(vctElemsContours.end(), {cntLeftEye, cntRightEye, cntNose, cntMouth}); - // The face contour: - // Approximating the forehead contour by half-ellipse (using 
jaw points) and storing in vctFace: - cntFace = getForeheadEllipse(vctCntJaw[i][0], vctCntJaw[i][16], vctCntJaw[i][8]); - // The ellipse is drawn clock-wise, but jaw contour points goes vice versa, so it's necessary to push - // cntJaw from the end to the begin using a reverse iterator: - std::copy(vctCntJaw[i].crbegin(), vctCntJaw[i].crend(), std::back_inserter(cntFace)); - // Storing the face contour in another vector: - vctFaceContours.push_back(cntFace); - } - } - }; - - -The kernel takes two arrays of denormalized landmarks coordinates and returns an array of elements' closed contours and an array of faces' closed contours; in other words, outputs are, the first, an array of contours of image areas to be sharpened and, the second, another one to be smoothed. - -Here and below ``Contour`` is a vector of points. - -Get an Eye Contour ------------------- - -Eye contours are estimated with the following function: - -.. code-block:: cpp - - inline int custom::getLineInclinationAngleDegrees(const cv::Point &ptLeft, const cv::Point &ptRight) - { - const cv::Point residual = ptRight - ptLeft; - if (residual.y == 0 && residual.x == 0) - return 0; - else - return toIntRounded(atan2(toDouble(residual.y), toDouble(residual.x)) \* 180.0 / CV_PI); - } - inline Contour custom::getEyeEllipse(const cv::Point &ptLeft, const cv::Point &ptRight) - { - Contour cntEyeBottom; - const cv::Point ptEyeCenter((ptRight + ptLeft) / 2); - const int angle = getLineInclinationAngleDegrees(ptLeft, ptRight); - const int axisX = toIntRounded(cv::norm(ptRight - ptLeft) / 2.0); - // According to research, in average a Y axis of an eye is approximately - // 1/3 of an X one. - const int axisY = axisX / 3; - // We need the lower part of an ellipse: - static constexpr int kAngEyeStart = 0; - static constexpr int kAngEyeEnd = 180; - cv::ellipse2Poly(ptEyeCenter, cv::Size(axisX, axisY), angle, kAngEyeStart, kAngEyeEnd, config::kAngDelta, - cntEyeBottom); - return cntEyeBottom; - } - -Briefly, this function restores the bottom side of an eye by a half-ellipse based on two points in left and right eye corners. In fact, ``cv::ellipse2Poly()`` is used to approximate the eye region, and the function only defines ellipse parameters based on just two points: - -- The ellipse center and the :math:`X` half-axis calculated by two eye Points. -- The :math:`Y` half-axis calculated according to the assumption that an average eye width is :math:`1/3` of its length. -- The start and the end angles which are 0 and 180 (refer to ``cv::ellipse()`` documentation). -- The angle delta: how much points to produce in the contour. -- The inclination angle of the axes. - -The use of the ``atan2()`` instead of just ``atan()`` in function ``custom::getLineInclinationAngleDegrees()`` is essential as it allows to return a negative value depending on the ``x`` and the ``y`` signs so we can get the right angle even in case of upside-down face arrangement (if we put the points in the right order, of course). - -Get a Forehead Contour ----------------------- - -The function approximates the forehead contour: - -.. code-block:: cpp - - inline Contour custom::getForeheadEllipse(const cv::Point &ptJawLeft, - const cv::Point &ptJawRight, - const cv::Point &ptJawLower) - { - Contour cntForehead; - // The point amid the top two points of a jaw: - const cv::Point ptFaceCenter((ptJawLeft + ptJawRight) / 2); - // This will be the center of the ellipse. 
- // The angle between the jaw and the vertical: - const int angFace = getLineInclinationAngleDegrees(ptJawLeft, ptJawRight); - // This will be the inclination of the ellipse - // Counting the half-axis of the ellipse: - const double jawWidth = cv::norm(ptJawLeft - ptJawRight); - // A forehead width equals the jaw width, and we need a half-axis: - const int axisX = toIntRounded(jawWidth / 2.0); - const double jawHeight = cv::norm(ptFaceCenter - ptJawLower); - // According to research, in average a forehead is approximately 2/3 of - // a jaw: - const int axisY = toIntRounded(jawHeight \* 2 / 3.0); - // We need the upper part of an ellipse: - static constexpr int kAngForeheadStart = 180; - static constexpr int kAngForeheadEnd = 360; - cv::ellipse2Poly(ptFaceCenter, cv::Size(axisX, axisY), angFace, kAngForeheadStart, kAngForeheadEnd, - config::kAngDelta, cntForehead); - return cntForehead; - } - - -As we have only jaw points in our detected landmarks, we have to get a half-ellipse based on three points of a jaw: the leftmost, the rightmost and the lowest one. The jaw width is assumed to be equal to the forehead width and the latter is calculated using the left and the right points. Speaking of the :math:`Y` axis, we have no points to get it directly, and instead assume that the forehead height is about :math:`2/3` of the jaw height, which can be figured out from the face center (the middle between the left and right points) and the lowest jaw point. - -Draw Masks -++++++++++ - -When we have all the contours needed, you are able to draw masks: - -.. code-block:: cpp - - cv::GMat mskSharp = custom::GFillPolyGContours::on(gimgIn, garElsConts); // | - cv::GMat mskSharpG = cv::gapi::gaussianBlur(mskSharp, config::kGKernelSize, // | - config::kGSigma); // | - cv::GMat mskBlur = custom::GFillPolyGContours::on(gimgIn, garFaceConts); // | - cv::GMat mskBlurG = cv::gapi::gaussianBlur(mskBlur, config::kGKernelSize, // | - config::kGSigma); // |draw masks - // The first argument in mask() is Blur as we want to subtract from // | - // BlurG the next step: // | - cv::GMat mskBlurFinal = mskBlurG - cv::gapi::mask(mskBlurG, mskSharpG); // | - cv::GMat mskFacesGaussed = mskBlurFinal + mskSharpG; // | - cv::GMat mskFacesWhite = cv::gapi::threshold(mskFacesGaussed, 0, 255, cv::THRESH_BINARY); // | - cv::GMat mskNoFaces = cv::gapi::bitwise_not(mskFacesWhite); // | - - -The steps to get the masks are: - -* the "sharp" mask calculation: - - * fill the contours that should be sharpened; - * blur that to get the "sharp" mask (``mskSharpG``); -* the "bilateral" mask calculation: - - * fill all the face contours fully; - * blur that; - * subtract areas which intersect with the "sharp" mask --- and get the "bilateral" mask (``mskBlurFinal``); -* the background mask calculation: - - * add two previous masks - * set all non-zero pixels of the result as 255 (by ``cv::gapi::threshold()``) - * revert the output (by ``cv::gapi::bitwise_not``) to get the background mask (``mskNoFaces``). - -Configuring and Running the Pipeline -#################################### - -Once the graph is fully expressed, we can finally compile it and run on real data. G-API graph compilation is the stage where the G-API framework actually understands which kernels and networks to use. This configuration happens via G-API compilation arguments. - -DNN Parameters -++++++++++++++ - -This sample is using OpenVINO™ Toolkit OpenVINO Runtime backend for DL inference, which is configured the following way: - -.. 
code-block:: cpp - - auto faceParams = cv::gapi::ie::Params - { - /\*std::string\*/ faceXmlPath, - /\*std::string\*/ faceBinPath, - /\*std::string\*/ faceDevice - }; - auto landmParams = cv::gapi::ie::Params - { - /\*std::string\*/ landmXmlPath, - /\*std::string\*/ landmBinPath, - /\*std::string\*/ landmDevice - }; - -Every ``cv::gapi::ie::Params<>`` object is related to the network specified in its template argument. We should pass there the network type we have defined in ``G_API_NET()`` in the early beginning of the tutorial. - -Network parameters are then wrapped in ``cv::gapi::NetworkPackage``: - -.. code-block:: cpp - - auto networks = cv::gapi::networks(faceParams, landmParams); - - -More details in "Face Analytics Pipeline" (:ref:`Configuring the Pipeline ` section). - -Kernel Packages -+++++++++++++++ - -In this example we use a lot of custom kernels, in addition to that we use Fluid backend to optimize out memory for G-API's standard kernels where applicable. The resulting kernel package is formed like this: - -.. code-block:: cpp - - auto customKernels = cv::gapi::kernels(); - auto kernels = cv::gapi::combine(cv::gapi::core::fluid::kernels(), - customKernels); - - -Compiling the Streaming Pipeline -++++++++++++++++++++++++++++++++ - -G-API optimizes execution for video streams when compiled in the "Streaming" mode. - -.. code-block:: cpp - - cv::GStreamingCompiled stream = pipeline.compileStreaming(cv::compile_args(kernels, networks)); - -More on this in "Face Analytics Pipeline" (:ref:`Configuring the Pipeline ` section). - -Running the streaming pipeline -++++++++++++++++++++++++++++++ - - -In order to run the G-API streaming pipeline, all we need is to specify the input video source, call ``cv::GStreamingCompiled::start()``, and then fetch the pipeline processing results: - -.. code-block:: cpp - - if (parser.has("input")) - { - stream.setSource(cv::gapi::wip::make_src(parser.get("input"))); - } - auto out_vector = cv::gout(imgBeautif, imgShow, vctFaceConts, - vctElsConts, vctRects); - stream.start(); - avg.start(); - while (stream.running()) - { - if (!stream.try_pull(std::move(out_vector))) - { - // Use a try_pull() to obtain data. - // If there's no data, let UI refresh (and handle keypress) - if (cv::waitKey(1) >= 0) break; - else continue; - } - frames++; - // Drawing face boxes and landmarks if necessary: - if (flgLandmarks == true) - { - cv::polylines(imgShow, vctFaceConts, config::kClosedLine, - config::kClrYellow); - cv::polylines(imgShow, vctElsConts, config::kClosedLine, - config::kClrYellow); - } - if (flgBoxes == true) - for (auto rect : vctRects) - cv::rectangle(imgShow, rect, config::kClrGreen); - cv::imshow(config::kWinInput, imgShow); - cv::imshow(config::kWinFaceBeautification, imgBeautif); - } - - -Once results are ready and can be pulled from the pipeline we display it on the screen and handle GUI events. - -See :ref:`Running the pipeline ` section in the "Face Analytics Pipeline" tutorial for more details. - -Conclusion -########## - -The tutorial has two goals: to show the use of brand new features of G-API introduced in OpenCV 4.2, and give a basic understanding on a sample face beautification algorithm. - -The result of the algorithm application: - -.. 
image:: _static/images/gapi_face_beautification_example.jpg - -On the test machine (Intel® Core™ i7-8700) the G-API-optimized video pipeline outperforms its serial (non-pipelined) version by a factor of 2.7 – meaning that for such a non-trivial graph, the proper pipelining can bring almost 3x increase in performance. - -@endsphinxdirective - diff --git a/docs/articles_en/documentation/media_processing_cv_libraries/gapi_intro/gapi_face_analytics_pipeline.md b/docs/articles_en/documentation/media_processing_cv_libraries/gapi_intro/gapi_face_analytics_pipeline.md deleted file mode 100644 index dc34bcbb03ba7b..00000000000000 --- a/docs/articles_en/documentation/media_processing_cv_libraries/gapi_intro/gapi_face_analytics_pipeline.md +++ /dev/null @@ -1,372 +0,0 @@ -# Building a Face Analytics Pipeline {#openvino_docs_gapi_gapi_face_analytics_pipeline} - -@sphinxdirective - -.. meta:: - :description: Learn how to integrate a deep learning inference in a G-API - graph that can be run on a video stream to obtain data. - - -Overview -######## - -In this tutorial you will learn: - -* How to integrate Deep Learning inference in a G-API graph. -* How to run a G-API graph on a video stream and obtain data from it. - -Prerequisites -############# - -This sample requires: - -* PC with GNU/Linux or Microsoft Windows (Apple macOS is supported but was not tested) -* OpenCV 4.2 or higher built with `Intel® Distribution of OpenVINO™ Toolkit `__ (building with `Intel® TBB `__ is a plus) -* The following pre-trained models from the :doc:`Open Model Zoo ` - - * `face-detection-adas-0001 `__ - * `age-gender-recognition-retail-0013 `__ - * `emotions-recognition-retail-0003 `__ - -To download the models from the Open Model Zoo, use the :doc:`Model Downloader ` tool. - -Introduction: Why G-API -####################### - -Many computer vision algorithms run on a video stream rather than on individual images. Stream processing usually consists of multiple steps – like decode, preprocessing, detection, tracking, classification (on detected objects), and visualization – forming a *video processing pipeline*. Moreover, many these steps of such pipeline can run in parallel – modern platforms have different hardware blocks on the same chip like decoders and GPUs, and extra accelerators can be plugged in as extensions for deep learning offload. - -Given all this manifold of options and a variety in video analytics algorithms, managing such pipelines effectively quickly becomes a problem. For sure it can be done manually, but this approach doesn't scale: if a change is required in the algorithm (e.g. a new pipeline step is added), or if it is ported on a new platform with different capabilities, the whole pipeline needs to be re-optimized. - -Starting with version 4.2, OpenCV offers a solution to this problem. OpenCV G-API now can manage Deep Learning inference (a cornerstone of any modern analytics pipeline) with a traditional Computer Vision as well as video capturing/decoding, all in a single pipeline. G-API takes care of pipelining itself – so if the algorithm or platform changes, the execution model adapts to it automatically. - -Pipeline Overview -################# - -Our sample application is based on `Interactive Face Detection `__ demo from Open Model Zoo. A simplified pipeline consists of the following steps: - -1. Image acquisition and decode -2. Detection with preprocessing -3. Classification with preprocessing for every detected object with two networks -4. Visualization - -.. 
image:: _static/images/gapi_face_analytics_pipeline.png - -.. _gapi_ifd_constructing: - -Construct a pipeline -#################### - -Constructing a G-API graph for a video streaming case does not differ much from a `regular usage `__ of G-API -- it is still about defining graph *data* (with cv::GMat, ``cv::GScalar``, and ``cv::GArray``) and *operations* over it. Inference also becomes an operation in the graph, but is defined in a slightly different way. - -.. _gapi_ifd_declaring_nets: - -Declare Deep Learning topologies -++++++++++++++++++++++++++++++++ - -In contrast with traditional CV functions (see `core `__ and `imgproc `__) where G-API declares distinct operations for every function, inference in G-API is a single generic operation ``cv::gapi::infer<>``. As usual, it is just an interface and it can be implemented in a number of ways under the hood. In OpenCV 4.2, only the OpenVINO™ Runtime-based backend is available, and OpenCV's own DNN module-based backend is to come. - -The ``cv::gapi::infer<>`` is *parametrized* by the details of a topology we are going to execute. Like operations, topologies in G-API are strongly typed and are defined with a special macro ``G_API_NET()``: - -.. code-block:: cpp - - // Face detector: takes one Mat, returns another Mat - G_API_NET(Faces, , "face-detector"); - // Age/Gender recognition - takes one Mat, returns two: - // one for Age and one for Gender. In G-API, multiple-return-value operations - // are defined using std::tuple<>. - using AGInfo = std::tuple; - G_API_NET(AgeGender, , "age-gender-recoginition"); - // Emotion recognition - takes one Mat, returns another. - G_API_NET(Emotions, , "emotions-recognition"); - -Similar to how operations are defined with ``G_API_OP()``, a network description requires three parameters: - -1. A type name. Every defined topology is declared as a distinct C++ type which is used further in the program -- see below. -2. A ``std::function<>``-like API signature. G-API treats networks as regular "functions" which take and return data. Here network ``Faces`` (a detector) takes a ``cv::GMat`` and returns a ``cv::GMat``, while network ``AgeGender`` is known to provide two outputs (age and gender blobs, respectively) -- so it has a ``std::tuple<>`` as its return type. -3. A topology name -- can be any non-empty string; G-API uses these names to distinguish networks internally. Names should be unique in the scope of a single graph. - -.. _gapi_ifd_gcomputation: - -Building a GComputation -####################### - -Now the above pipeline is expressed in G-API like this: - -.. code-block:: cpp - - cv::GComputation pp([]() { - // Declare an empty GMat - the beginning of the pipeline. - cv::GMat in; - // Run face detection on the input frame. Result is a single GMat, - // internally representing a 1x1x200x7 SSD output. - // This is a single-patch version of infer: - // - Inference is running on the whole input image; - // - Image is converted and resized to the network's expected format - // automatically. - cv::GMat detections = cv::gapi::infer(in); - // Parse SSD output to a list of ROI (rectangles) using - // a custom kernel. Note: parsing SSD may become a "standard" kernel. - cv::GArray faces = custom::PostProc::on(detections, in); - // Now run Age/Gender model on every detected face. This model has two - // outputs (for age and gender respectively).
- // A special ROI-list-oriented form of infer<>() is used here: - // - First input argument is the list of rectangles to process, - // - Second one is the image where to take ROI from; - // - Crop/Resize/Layout conversion happens automatically for every image patch - // from the list - // - Inference results are also returned in form of list (GArray<>) - // - Since there're two outputs, infer<> returns two arrays (via std::tuple). - cv::GArray ages; - cv::GArray genders; - std::tie(ages, genders) = cv::gapi::infer(faces, in); - // Recognize emotions on every face. - // ROI-list-oriented infer<>() is used here as well. - // Since custom::Emotions network produces a single output, only one - // GArray<> is returned here. - cv::GArray emotions = cv::gapi::infer(faces, in); - // Return the decoded frame as a result as well. - // Input matrix can't be specified as output one, so use copy() here - // (this copy will be optimized out in the future). - cv::GMat frame = cv::gapi::copy(in); - // Now specify the computation's boundaries - our pipeline consumes - // one image and produces five outputs. - return cv::GComputation(cv::GIn(in), - cv::GOut(frame, faces, ages, genders, emotions)); - }); - -Every pipeline starts with declaring empty data objects – which act as inputs to the pipeline. Then we call a generic ``cv::gapi::infer<>`` specialized to the Faces detection network. ``cv::gapi::infer<>`` inherits its signature from its template parameter – and in this case it expects one input cv::GMat and produces one output cv::GMat. - -In this sample we use a pre-trained SSD-based network and its output needs to be parsed to an array of detections (object regions of interest, ROIs). It is done by a custom operation custom::PostProc, which returns an array of rectangles (of type ``cv::GArray``) back to the pipeline. This operation also filters out results by a confidence threshold – and these details are hidden in the kernel itself. Still, at the moment of graph construction we operate with interfaces only and don't need actual kernels to express the pipeline – so the implementation of this post-processing will be listed later. - -After the detection output is parsed to an array of objects, we can run classification on any of those. G-API doesn't support syntax for in-graph loops like ``for_each()`` yet, but instead ``cv::gapi::infer<>`` comes with a special list-oriented overload. - -A user can call ``cv::gapi::infer<>`` with a ``cv::GArray`` as the first argument, so then G-API assumes it needs to run the associated network on every rectangle from the given list on the given frame (the second argument). The result of such an operation is also a list – a cv::GArray of ``cv::GMat``. - -Since the AgeGender network itself produces two outputs, its output type for a list-based version of ``cv::gapi::infer`` is a tuple of arrays. We use ``std::tie()`` to decompose this tuple into two distinct objects. - -The Emotions network produces a single output, so its list-based inference's return type is ``cv::GArray``. - -.. _gapi_ifd_configuration: - -Configure the Pipeline -###################### - -G-API strictly separates construction from configuration -- with the idea to keep algorithm code itself platform-neutral. In the above listings we only declared our operations and expressed the overall data flow, but didn't even mention that we use OpenVINO™. We only described *what* we do, but not *how* we do it. Keeping these two aspects clearly separated is the design goal for G-API.
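To make this separation more tangible, here is a brief illustrative sketch (not part of the original sample): it assumes the ``kernels`` and ``networks`` packages defined in the next sections, plus an arbitrary 640x480 BGR input only to give ``compile()`` its metadata, and shows that the same ``pp`` graph can be turned into different executable forms purely through compilation arguments (both calls are covered in detail below):

.. code-block:: cpp

   // The graph `pp` above says *what* to compute; the lines below decide *how*.
   // A traditional single-frame executable (input metadata is required here):
   cv::GCompiled cc_single =
       pp.compile(cv::GMatDesc{CV_8U, 3, cv::Size(640, 480)},
                  cv::compile_args(kernels, networks));
   // A streaming executable optimized for video throughput -- same graph, unchanged:
   cv::GStreamingCompiled cc_stream =
       pp.compileStreaming(cv::compile_args(kernels, networks));

Only the compilation arguments differ between the two calls; the algorithm description itself stays untouched, which is exactly the point of the construction/configuration split.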
- -Platform-specific details arise when the pipeline is *compiled* -- i.e. is turned from a declarative to an executable form. The way *how* to run stuff is specified via compilation arguments, and new inference/streaming features are no exception from this rule. - -G-API is built on backends which implement interfaces (see `Architecture `__ and :doc:`Kernels ` for details) thus ``cv::gapi::infer<>`` is a function which can be implemented by different backends. In OpenCV 4.2, only OpenVINO™ Runtime backend for inference is available. Every inference backend in G-API has to provide a special parameterizable structure to express *backend-specific* neural network parameters and in this case, it is ``cv::gapi::ie::Params``: - -.. code-block:: cpp - - auto det_net = cv::gapi::ie::Params { - cmd.get("fdm"), // read cmd args: path to topology IR - cmd.get("fdw"), // read cmd args: path to weights - cmd.get("fdd"), // read cmd args: device specifier - }; - auto age_net = cv::gapi::ie::Params { - cmd.get("agem"), // read cmd args: path to topology IR - cmd.get("agew"), // read cmd args: path to weights - cmd.get("aged"), // read cmd args: device specifier - }.cfgOutputLayers({ "age_conv3", "prob" }); - auto emo_net = cv::gapi::ie::Params { - cmd.get("emom"), // read cmd args: path to topology IR - cmd.get("emow"), // read cmd args: path to weights - cmd.get("emod"), // read cmd args: device specifier - }; - - -Here we define three parameter objects: ``det_net``, ``age_net``, and ``emo_net``. Every object is a ``cv::gapi::ie::Params`` structure parametrization for each particular network we use. On a compilation stage, G-API automatically matches network parameters with their ``cv::gapi::infer<>`` calls in graph using this information. - -Regardless of the topology, every parameter structure is constructed with three string arguments – specific to the OpenVINO™ Runtime: - -* Path to the topology's intermediate representation (.xml file); -* Path to the topology's model weights (.bin file); -* Device where to run – "CPU", "GPU", and others – based on your OpenVINO™ Toolkit installation. These arguments are taken from the command-line parser. - -Once networks are defined and custom kernels are implemented, the pipeline is compiled for streaming: - -.. code-block:: cpp - - // Form a kernel package (with a single OpenCV-based implementation of our - // post-processing) and a network package (holding our three networks). - auto kernels = cv::gapi::kernels(); - auto networks = cv::gapi::networks(det_net, age_net, emo_net); - // Compile our pipeline and pass our kernels & networks as - // parameters. This is the place where G-API learns which - // networks & kernels we're actually operating with (the graph - // description itself known nothing about that). - auto cc = pp.compileStreaming(cv::compile_args(kernels, networks)); - - -The ``cv::GComputation::compileStreaming()`` triggers a special video-oriented form of graph compilation where G-API is trying to optimize throughput. Result of this compilation is an object of special type ``cv::GStreamingCompiled`` – in contrast to a traditional callable ``cv::GCompiled``, these objects are closer to media players in their semantics. - -.. note:: - There is no need to pass metadata arguments describing the format of the input video stream in ``cv::GComputation::compileStreaming()`` – G-API figures automatically what are the formats of the input vector and adjusts the pipeline to these formats on-the-fly. 
User still can pass metadata there as with regular ``cv::GComputation::compile()`` in order to fix the pipeline to the specific input format. - -.. _gapi_ifd_running: - -Running the Pipeline -#################### - -Pipelining optimization is based on processing multiple input video frames simultaneously, running different steps of the pipeline in parallel. This is why it works best when the framework takes full control over the video stream. - -The idea behind streaming API is that user specifies an *input source* to the pipeline and then G-API manages its execution automatically until the source ends or user interrupts the execution. G-API pulls new image data from the source and passes it to the pipeline for processing. - -Streaming sources are represented by the interface ``cv::gapi::wip::IStreamSource``. Objects implementing this interface may be passed to ``GStreamingCompiled`` as regular inputs via ``cv::gin()`` helper function. In OpenCV 4.2, only one streaming source is allowed per pipeline -- this requirement will be relaxed in the future. - -OpenCV comes with a great class cv::VideoCapture and by default G-API ships with a stream source class based on it -- ``cv::gapi::wip::GCaptureSource``. Users can implement their own -streaming sources e.g. using `VAAPI `__ or other Media or Networking APIs. - -Sample application specifies the input source as follows: - -.. code-block:: cpp - - auto in_src = cv::gapi::wip::make_src(input); - cc.setSource(cv::gin(in_src)); - -Please note that a GComputation may still have multiple inputs like ``cv::GMat``, ``cv::GScalar``, or ``cv::GArray`` objects. User can pass their respective host-side types (``cv::Mat``, ``cv::Scalar``, ``std::vector<>``) in the input vector as well, but in Streaming mode these objects will create "endless" constant streams. Mixing a real video source stream and a const data stream is allowed. - -Running a pipeline is easy – just call ``cv::GStreamingCompiled::start()`` and fetch your data with blocking ``cv::GStreamingCompiled::pull()`` or non-blocking ``cv::GStreamingCompiled::try_pull()``; repeat until the stream ends: - -.. code-block:: cpp - - // After data source is specified, start the execution - cc.start(); - // Declare data objects we will be receiving from the pipeline. - cv::Mat frame; // The captured frame itself - std::vector faces; // Array of detected faces - std::vector out_ages; // Array of inferred ages (one blob per face) - std::vector out_genders; // Array of inferred genders (one blob per face) - std::vector out_emotions; // Array of classified emotions (one blob per face) - // Implement different execution policies depending on the display option - // for the best performance. - while (cc.running()) { - auto out_vector = cv::gout(frame, faces, out_ages, out_genders, out_emotions); - if (no_show) { - // This is purely a video processing. No need to balance - // with UI rendering. Use a blocking pull() to obtain - // data. Break the loop if the stream is over. - if (!cc.pull(std::move(out_vector))) - break; - } else if (!cc.try_pull(std::move(out_vector))) { - // Use a non-blocking try_pull() to obtain data. - // If there's no data, let UI refresh (and handle keypress) - if (cv::waitKey(1) >= 0) break; - else continue; - } - // At this point we have data for sure (obtained in either - // blocking or non-blocking way). 
- frames++; - labels::DrawResults(frame, faces, out_ages, out_genders, out_emotions); - labels::DrawFPS(frame, frames, avg.fps(frames)); - if (!no_show) cv::imshow("Out", frame); - } - -The above code may look complex but in fact it handles two modes – with and without graphical user interface (GUI): - -* When a sample is running in a "headless" mode (``--pure`` option is set), this code simply pulls data from the pipeline with the blocking ``pull()`` until it ends. This is the most performant mode of execution. -* When results are also displayed on the screen, the Window System needs to take some time to refresh the window contents and handle GUI events. In this case, the demo pulls data with a non-blocking ``try_pull()`` until there is no more data available (but it does not mark end of the stream – just means new data is not ready yet), and only then displays the latest obtained result and refreshes the screen. Reducing the time spent in GUI with this trick increases the overall performance a little bit. - -Comparison with Serial Mode -########################### - -The sample can also run in a serial mode for a reference and benchmarking purposes. In this case, a regular ``cv::GComputation::compile()`` is used and a regular single-frame ``cv::GCompiled`` object is produced; the pipelining optimization is not applied within G-API; it is the user responsibility to acquire image frames from ``cv::VideoCapture`` object and pass those to G-API. - -.. code-block:: cpp - - cv::VideoCapture cap(input); - cv::Mat in_frame, frame; // The captured frame itself - std::vector faces; // Array of detected faces - std::vector out_ages; // Array of inferred ages (one blob per face) - std::vector out_genders; // Array of inferred genders (one blob per face) - std::vector out_emotions; // Array of classified emotions (one blob per face) - while (cap.read(in_frame)) { - pp.apply(cv::gin(in_frame), - cv::gout(frame, faces, out_ages, out_genders, out_emotions), - cv::compile_args(kernels, networks)); - labels::DrawResults(frame, faces, out_ages, out_genders, out_emotions); - frames++; - if (frames == 1u) { - // Start timer only after 1st frame processed -- compilation - // happens on-the-fly here - avg.start(); - } else { - // Measure & draw FPS for all other frames - labels::DrawFPS(frame, frames, avg.fps(frames-1)); - } - if (!no_show) { - cv::imshow("Out", frame); - if (cv::waitKey(1) >= 0) break; - } - } - -On a test machine (Intel® Core™ i5-6600), with OpenCV built with `Intel® TBB `__ support, detector network assigned to CPU, and classifiers to iGPU, the pipelined sample outperforms the serial one by the factor of 1.36x (thus adding +36% in overall throughput). - -Conclusion -########### - -G-API introduces a technological way to build and optimize hybrid pipelines. Switching to a new execution model does not require changes in the algorithm code expressed with G-API – only the way how graph is triggered differs. - -Listing: Post-Processing Kernel -############################### - -G-API gives an easy way to plug custom code into the pipeline even if it is running in a streaming mode and processing tensor data. Inference results are represented by multi-dimensional ``cv::Mat`` objects so accessing those is as easy as with a regular DNN module. - -The OpenCV-based SSD post-processing kernel is defined and implemented in this sample as follows: - -.. code-block:: cpp - - // SSD Post-processing function - this is not a network but a kernel. 
- // The kernel body is declared separately, this is just an interface. - // This operation takes two Mats (detections and the source image), - // and returns a vector of ROI (filtered by a default threshold). - // Threshold (or a class to select) may become a parameter, but since - // this kernel is custom, it doesn't make a lot of sense. - G_API_OP(PostProc, (cv::GMat, cv::GMat)>, "custom.fd_postproc") { - static cv::GArrayDesc outMeta(const cv::GMatDesc &, const cv::GMatDesc &) { - // This function is required for G-API engine to figure out - // what the output format is, given the input parameters. - // Since the output is an array (with a specific type), - // there's nothing to describe. - return cv::empty_array_desc(); - } - }; - // OpenCV-based implementation of the above kernel. - GAPI_OCV_KERNEL(OCVPostProc, PostProc) { - static void run(const cv::Mat &in_ssd_result, - const cv::Mat &in_frame, - std::vector &out_faces) { - const int MAX_PROPOSALS = 200; - const int OBJECT_SIZE = 7; - const cv::Size upscale = in_frame.size(); - const cv::Rect surface({0,0}, upscale); - out_faces.clear(); - const float \*data = in_ssd_result.ptr(); - for (int i = 0; i < MAX_PROPOSALS; i++) { - const float image_id = data[i \* OBJECT_SIZE + 0]; // batch id - const float confidence = data[i \* OBJECT_SIZE + 2]; - const float rc_left = data[i \* OBJECT_SIZE + 3]; - const float rc_top = data[i \* OBJECT_SIZE + 4]; - const float rc_right = data[i \* OBJECT_SIZE + 5]; - const float rc_bottom = data[i \* OBJECT_SIZE + 6]; - if (image_id < 0.f) { // indicates end of detections - break; - } - if (confidence < 0.5f) { // a hard-coded snapshot - continue; - } - // Convert floating-point coordinates to the absolute image - // frame coordinates; clip by the source image boundaries. - cv::Rect rc; - rc.x = static_cast(rc_left \* upscale.width); - rc.y = static_cast(rc_top \* upscale.height); - rc.width = static_cast(rc_right \* upscale.width) - rc.x; - rc.height = static_cast(rc_bottom \* upscale.height) - rc.y; - out_faces.push_back(rc & surface); - } - } - }; - -@endsphinxdirective - diff --git a/docs/articles_en/documentation/media_processing_cv_libraries/gapi_intro/kernel_api.md b/docs/articles_en/documentation/media_processing_cv_libraries/gapi_intro/kernel_api.md deleted file mode 100644 index 05e3bc188f00f1..00000000000000 --- a/docs/articles_en/documentation/media_processing_cv_libraries/gapi_intro/kernel_api.md +++ /dev/null @@ -1,215 +0,0 @@ -# Graph API Kernel API {#openvino_docs_gapi_kernel_api} - -@sphinxdirective - -.. meta:: - :description: Learn how to build a pipeline with Graph API (G-API) and ensure its portability, using custom kernel interfaces. - -The core idea behind Graph API (G-API) is portability – a pipeline built with G-API must be portable (or at least able to be portable). It means that either it works out-of-the box when compiled for new platform, or G-API provides necessary tools to make it running there, with little-to-no changes in the algorithm itself. - -This idea can be achieved by separating kernel interface from its implementation. Once a pipeline is built using kernel interfaces, it becomes implementation-neutral – the implementation details (i.e. which kernels to use) are passed on a separate stage (graph compilation). - -Kernel-implementation hierarchy may look like: - -.. 
image:: _static/images/gapi_kernel_implementation_hierarchy.png - -A pipeline itself then can be expressed only in terms of ``A``, ``B``, and so on, and choosing which implementation to use in execution becomes an external parameter. - -Define a Kernel -############### - -G-API provides a macro to define a new kernel interface ``G_TYPED_KERNEL()``: - -.. code-block:: cpp - - #include - G_TYPED_KERNEL(GFilter2D, - , - "org.opencv.imgproc.filters.filter2D") - { - static cv::GMatDesc // outMeta's return value type - outMeta(cv::GMatDesc in , // descriptor of input GMat - int ddepth , // depth parameter - cv::Mat /\* coeffs \*/, // (unused) - cv::Point /\* anchor \*/, // (unused) - double /\* scale \*/, // (unused) - int /\* border \*/, // (unused) - cv::Scalar /\* bvalue \*/ ) // (unused) - { - return in.withDepth(ddepth); - } - }; - - -This macro is a shortcut to a new type definition. It takes three arguments to register a new type, and requires type body to be present (see below). The macro arguments are: - -* Kernel interface name -- Also serves as a name of new type defined with this macro; -* Kernel signature -- An ``std::function<>``-like signature which defines API of the kernel; -* Kernel's unique name -- Used to identify kernel when its type information is stripped within the system. -* Kernel declaration may be seen as function declaration -- In both cases a new entity must be used then according to the way it was defined. - -Kernel signature defines kernel's usage syntax which parameters it takes during graph construction. Implementations can also use this signature to derive it into backend-specific callback signatures (see next chapter). - -Kernel may accept values of any type, and G-API dynamic types are handled in a special way. All other types are opaque to G-API and passed to kernel in ``outMeta()`` or in execution callbacks as-is. - -Kernel's return value can only be of G-API dynamic type – ``cv::GMat``, ``cv::GScalar``, or ``cv::GArray``. If an operation has more than one output, it should be wrapped into an ``std::tuple<>`` (which can contain only mentioned G-API types). Arbitrary-output-number operations are not supported. - -Once a kernel is defined, it can be used in pipelines with special, G-API-supplied method ``on()``. This method has the same signature as defined in kernel, so the following code is a perfectly legal construction: - -.. code-block:: cpp - - cv::GMat in; - cv::GMat out = GFilter2D::on(/\* GMat \*/ in, - /\* int \*/ -1, - /\* Mat \*/ conv_kernel_mat, - /\* Point \*/ cv::Point(-1,-1), - /\* double \*/ 0., - /\* int \*/ cv::BORDER_DEFAULT, - /\* Scalar \*/ cv::Scalar(0)); - - -This example has some verbosity, though, so usually a kernel declaration comes with a C++ function wrapper ("factory method") which enables optional parameters, more compact syntax, Doxygen comments, etc.: - -.. code-block:: cpp - - cv::GMat filter2D(cv::GMat in, - int ddepth, - cv::Mat k, - cv::Point anchor = cv::Point(-1,-1), - double scale = 0., - int border = cv::BORDER_DEFAULT, - cv::Scalar bval = cv::Scalar(0)) - { - return GFilter2D::on(in, ddepth, k, anchor, scale, border, bval); - } - - -So now it can be used like: - -.. code-block:: cpp - - cv::GMat in; - cv::GMat out = filter2D(in, -1, conv_kernel_mat); - - -Extra information -+++++++++++++++++ - -In the current version, kernel declaration body (everything within the curly braces) must contain a static function ``outMeta()``. 
This function establishes a functional dependency between operation's input and output metadata. - -Metadata is an information about data kernel operates on. Since non-G-API types are opaque to G-API, G-API cares only about G* data descriptors (i.e. dimensions and format of ``cv::GMat``, etc). - -The ``outMeta()`` is also an example of how kernel's signature can be transformed into a derived callback – note that in this example, outMeta() signature exactly follows the kernel signature (defined within the macro) but is different – where kernel expects ``cv::GMat``, ``outMeta()`` takes and returns ``cv::GMatDesc`` (a G-API structure metadata for ``cv::GMat``). - -The point of ``outMeta()`` is to propagate metadata information within computation from inputs to outputs and infer metadata of internal (intermediate, temporary) data objects. This information is required for further pipeline optimizations, memory allocation, and other operations done by G-API framework during graph compilation. - -Implement a Kernel -################## - -Once a kernel is declared, its interface can be used to implement versions of this kernel in different backends. This concept is naturally projected from object-oriented programming "Interface/Implementation" idiom: an interface can be implemented multiple times, and different implementations of a kernel should be substitutable with each other without breaking the algorithm (pipeline) logic (Liskov Substitution Principle). - -Every backend defines its own way to implement a kernel interface. This way is regular, though – whatever plugin is, its kernel implementation must be "derived" from a kernel interface type. - -Kernel implementation are then organized into kernel packages. Kernel packages are passed to ``cv::GComputation::compile()`` as compile arguments, with some hints to G-API on how to select proper kernels. - -For example, the aforementioned Filter2D is implemented in "reference" CPU (OpenCV) plugin this way (NOTE – this is a simplified form with improper border handling): - -.. code-block:: cpp - - #include // GAPI_OCV_KERNEL() - #include // cv::filter2D() - GAPI_OCV_KERNEL(GCPUFilter2D, GFilter2D) - { - static void - run(const cv::Mat &in, // in - derived from GMat - const int ddepth, // opaque (passed as-is) - const cv::Mat &k, // opaque (passed as-is) - const cv::Point &anchor, // opaque (passed as-is) - const double delta, // opaque (passed as-is) - const int border, // opaque (passed as-is) - const cv::Scalar &, // opaque (passed as-is) - cv::Mat &out) // out - derived from GMat (retval) - { - cv::filter2D(in, out, ddepth, k, anchor, delta, border); - } - }; - - -Note how CPU (OpenCV) plugin has transformed the original kernel signature: - -* Input ``cv::GMat`` has been substituted with ``cv::Mat``, holding actual input data for the underlying OpenCV function call; -* Output ``cv::GMat`` has been transformed into extra output parameter, thus ``GCPUFilter2D::run()`` takes one argument more than the original kernel signature. - -The basic intuition for kernel developer here is not to care where that cv::Mat objects come from instead of the original ``cv::GMat`` – and just follow the signature conventions defined by the plugin. G-API will call this method during execution and supply all the necessary information (and forward the original opaque data as-is). - -Compound Kernels -################ - -Sometimes kernel is a single thing only on API level. 
It is convenient for users, but on a particular implementation side it would be better to have multiple kernels (a subgraph) doing the thing instead. An example is ``goodFeaturesToTrack()`` – while in OpenCV backend it may remain a single kernel, with Fluid it becomes compound – Fluid can handle Harris response calculation but can't do sparse non-maxima suppression and point extraction to an STL vector: - -A compound kernel implementation can be defined using a generic macro ``GAPI_COMPOUND_KERNEL()``: - -.. code-block:: cpp - - #include // GAPI_COMPOUND_KERNEL() - using PointArray2f = cv::GArray; - G_TYPED_KERNEL(HarrisCorners, - , - "org.opencv.imgproc.harris_corner") - { - static cv::GArrayDesc outMeta(const cv::GMatDesc &, - int, - double, - double, - int, - double) - { - // No special metadata for arrays in G-API (yet) - return cv::empty_array_desc(); - } - }; - // Define Fluid-backend-local kernels which form GoodFeatures - G_TYPED_KERNEL(HarrisResponse, - , - "org.opencv.fluid.harris_response") - { - static cv::GMatDesc outMeta(const cv::GMatDesc &in, - double, - int, - double) - { - return in.withType(CV_32F, 1); - } - }; - G_TYPED_KERNEL(ArrayNMS, - , - "org.opencv.cpu.nms_array") - { - static cv::GArrayDesc outMeta(const cv::GMatDesc &, - int, - double) - { - return cv::empty_array_desc(); - } - }; - GAPI_COMPOUND_KERNEL(GFluidHarrisCorners, HarrisCorners) - { - static PointArray2f - expand(cv::GMat in, - int maxCorners, - double quality, - double minDist, - int blockSize, - double k) - { - cv::GMat response = HarrisResponse::on(in, quality, blockSize, k); - return ArrayNMS::on(response, maxCorners, minDist); - } - }; - // Then implement HarrisResponse as Fluid kernel and NMSresponse - // as a generic (OpenCV) kernel - -It is important to distinguish a compound kernel from G-API high-order function, i.e. a C++ function which looks like a kernel but in fact generates a subgraph. The core difference is that a compound kernel is an *implementation detail* and a kernel implementation may be either compound or not (depending on backend capabilities), while a high-order function is a "macro" in terms of G-API and so cannot act as an interface which then needs to be implemented by a backend. - -@endsphinxdirective - diff --git a/docs/articles_en/openvino_workflow/deployment_intro/local-distribution.md b/docs/articles_en/openvino_workflow/deployment_intro/local-distribution.md index 7dfa3fea0f1dcf..530072404c29bc 100644 --- a/docs/articles_en/openvino_workflow/deployment_intro/local-distribution.md +++ b/docs/articles_en/openvino_workflow/deployment_intro/local-distribution.md @@ -145,15 +145,6 @@ Depending on the model format types that are used in the application in `ov::Cor To optimize the size of the final distribution package, it is recommended to convert models to OpenVINO IR by using :doc:`model conversion API `. This way you do not have to keep TensorFlow, TensorFlow Lite, ONNX, PaddlePaddle, and other frontend libraries in the distribution package. -(Legacy) Preprocessing via G-API -++++++++++++++++++++++++++++++++ - -.. note:: - - :doc:`G-API ` preprocessing is a legacy functionality, use :doc:`preprocessing capabilities from OpenVINO 2.0 ` which do not require any additional libraries. - -If the application uses `InferenceEngine::PreProcessInfo::setColorFormat `__ or `InferenceEngine::PreProcessInfo::setResizeAlgorithm `__ methods, OpenVINO Runtime dynamically loads `openvino_gapi_preproc` plugin to perform preprocessing via G-API. 
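For reference, the OpenVINO 2.0 preprocessing mentioned in the note above covers the same use case without any extra runtime library. Below is a minimal, hypothetical sketch of that replacement; the model path, layouts, and color formats are placeholders, not values taken from this document:

.. code-block:: cpp

   #include <openvino/openvino.hpp>
   #include <openvino/core/preprocess/pre_post_process.hpp>

   int main() {
       ov::Core core;
       auto model = core.read_model("model.xml");  // placeholder model path

       ov::preprocess::PrePostProcessor ppp(model);
       // Describe the tensor the application will actually provide:
       // U8 data, NHWC layout, BGR channel order, arbitrary spatial size.
       ppp.input().tensor()
          .set_element_type(ov::element::u8)
          .set_layout("NHWC")
          .set_color_format(ov::preprocess::ColorFormat::BGR)
          .set_spatial_dynamic_shape();
       // Let the runtime convert color and resize to whatever the model expects.
       ppp.input().preprocess()
          .convert_color(ov::preprocess::ColorFormat::RGB)
          .resize(ov::preprocess::ResizeAlgorithm::RESIZE_LINEAR);
       ppp.input().model().set_layout("NCHW");
       model = ppp.build();

       ov::CompiledModel compiled = core.compile_model(model, "CPU");
       return 0;
   }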
- Examples #################### From b20afe850e22cc089b3982541a4be97419b165ba Mon Sep 17 00:00:00 2001 From: Aleksandr Voron Date: Thu, 5 Oct 2023 11:51:12 +0200 Subject: [PATCH 075/257] init (#20260) --- README.md | 6 +++--- .../compatibility_and_support/Supported_Devices.md | 2 +- .../openvino_workflow/openvino_intro/Device_Plugins.md | 1 + .../openvino_workflow/openvino_intro/Device_Plugins/CPU.md | 2 +- 4 files changed, 6 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 844e44a0ace216..adc6f9f2b965ea 100644 --- a/README.md +++ b/README.md @@ -73,9 +73,9 @@ The OpenVINO™ Runtime can infer models on different hardware devices. This sec Intel Xeon with Intel® Advanced Vector Extensions 2 (Intel® AVX2), Intel® Advanced Vector Extensions 512 (Intel® AVX-512), and AVX512_BF16, Intel Core Processors with Intel AVX2, Intel Atom Processors with Intel® Streaming SIMD Extensions (Intel® SSE) - ARM CPU - openvino_arm_cpu_plugin - Raspberry Pi™ 4 Model B, Apple® Mac mini with M1 chip, NVIDIA® Jetson Nano™, Android™ devices + ARM CPU + openvino_arm_cpu_plugin + Raspberry Pi™ 4 Model B, Apple® Mac mini with Apple silicon GPU diff --git a/docs/articles_en/about_openvino/compatibility_and_support/Supported_Devices.md b/docs/articles_en/about_openvino/compatibility_and_support/Supported_Devices.md index 8f4f1833914a0f..9d85463af542c2 100644 --- a/docs/articles_en/about_openvino/compatibility_and_support/Supported_Devices.md +++ b/docs/articles_en/about_openvino/compatibility_and_support/Supported_Devices.md @@ -30,7 +30,7 @@ Currently, processors of the 11th generation and later (up to the 13th generatio || | Intel® Core™ Processors with Intel® AVX2, | || | Intel® Atom® Processors with Intel® Streaming SIMD Extensions (Intel® SSE) | || | | -|| (Arm®) | Raspberry Pi™ 4 Model B, Apple® Mac mini with M1 chip, NVIDIA® Jetson Nano™, Android™ devices | +|| (Arm®) | Raspberry Pi™ 4 Model B, Apple® Mac mini with Apple silicon | || | | +---------------------------------------------------------------------+------------------------------------------------------------------------------------------------------+ || :doc:`GPU ` | Intel® Processor Graphics including Intel® HD Graphics and Intel® Iris® Graphics, | diff --git a/docs/articles_en/openvino_workflow/openvino_intro/Device_Plugins.md b/docs/articles_en/openvino_workflow/openvino_intro/Device_Plugins.md index 53778ab9b7f45a..3849109927cbd5 100644 --- a/docs/articles_en/openvino_workflow/openvino_intro/Device_Plugins.md +++ b/docs/articles_en/openvino_workflow/openvino_intro/Device_Plugins.md @@ -24,6 +24,7 @@ OpenVINO™ Runtime can infer deep learning models using the following device ty * :doc:`CPU ` * :doc:`GPU ` * :doc:`GNA ` +* :doc:`Arm® CPU ` For a more detailed list of hardware, see :doc:`Supported Devices `. 
diff --git a/docs/articles_en/openvino_workflow/openvino_intro/Device_Plugins/CPU.md b/docs/articles_en/openvino_workflow/openvino_intro/Device_Plugins/CPU.md index 04ecd9b2222cb8..320cec2583e030 100644 --- a/docs/articles_en/openvino_workflow/openvino_intro/Device_Plugins/CPU.md +++ b/docs/articles_en/openvino_workflow/openvino_intro/Device_Plugins/CPU.md @@ -52,7 +52,7 @@ CPU plugin supports the following data types as inference precision of internal - Floating-point data types: - ``f32`` (Intel® x86-64, Arm®) - - ``bf16``(Intel® x86-64) + - ``bf16`` (Intel® x86-64) - Integer data types: - ``i32`` (Intel® x86-64, Arm®) From 4ad68e8ff250ca6108628c21cba9b94c1ecc92f3 Mon Sep 17 00:00:00 2001 From: Andrey Kashchikhin Date: Thu, 5 Oct 2023 11:39:28 +0100 Subject: [PATCH 076/257] [CI] [GHA] Introduce conformance tests (#19841) * check dirs * cmake, build, instlll * use make * use build dir * use target * add missing * execute conformance tests * correct path for requirements * setupvars * add API conformance * conformance as a separate job; install all necessary files * uncomment * merge * install deps * use matrix, upload expected failures to gh cache * use 8-core * use the same paths * uncomment * comment * change * use csv * add exit if there are failed tests * always upload logs * check dir * use another dir for expected_failures * upload always * rm * new key * rm unused * change * update * update * rm unused * do not exit if update is set * copy file * do not fail in failures check * use specific branch * run clean * add expected failures * uncomment * comment out * correctly add failed tests to fix_priority; check for unexpected failures in case of an update * use azure runners for conformance * use 4-core * uncomment * comment out * split deps installation * add missing deps for setup-python * print error * install certs * do not verify ssl * add ca-certificates install * uncomment * comment * pack artifacts, rm unused deps * rm unused dep * always pack artifacts * rm for func tests * use less cores * use var * store int * do not exit if non-zero code * only 4 core * use gh runner * use sudo * add missing sudo * use expected failures * uncomment * comment * create fresh expected failures * use expected failures * use specific ref and repo * use expected failures * uncomment * comment out * check caches * fix str * rm unused * restore by restore key * create dir * use array * use diff path * mv after each download * add sleeping and more logs * add saving of hash table * change * uncomment * comment * download cache entries * check dir * use better dir * uncomment * rm unused * add skip configs * update lists * rm unused dir; add docs * rm unused * rm hardcoded repo ref * rm unused options; use better name for job * rm unnecessary dir creation --- .github/workflows/linux.yml | 105 +- .../plugin/conformance/test_runner/README.md | 22 +- .../functional_test_utils/CMakeLists.txt | 4 +- .../layer_tests_summary/run_conformance.py | 107 +- .../layer_tests_summary/run_parallel.py | 6 +- .../CPU/expected_failures_API.csv | 3760 +++++++++++++++++ .../skip_configs/CPU/expected_failures_OP.csv | 1131 +++++ 7 files changed, 5074 insertions(+), 61 deletions(-) create mode 100644 src/tests/test_utils/functional_test_utils/layer_tests_summary/skip_configs/CPU/expected_failures_API.csv create mode 100644 src/tests/test_utils/functional_test_utils/layer_tests_summary/skip_configs/CPU/expected_failures_OP.csv diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 
052f8ebfcd6081..48d2f75d150d7f 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -10,16 +10,12 @@ on: - 'docs/**' - '**/**.md' - '**.md' - - '**/layer_tests_summary/**' - - '**/conformance/**' push: paths-ignore: - '**/docs/**' - 'docs/**' - '**/**.md' - '**.md' - - '**/layer_tests_summary/**' - - '**/conformance/**' branches: - master - 'releases/**' @@ -384,6 +380,101 @@ jobs: path: ${{ env.INSTALL_TEST_DIR }}/TEST*.xml if-no-files-found: 'error' + Conformance: + needs: Build + defaults: + run: + shell: bash + runs-on: ubuntu-20.04-8-cores + strategy: + max-parallel: 2 + fail-fast: false + matrix: + include: + # 'OP' for Opset, 'API' for API + - TEST_TYPE: 'OP' + - TEST_TYPE: 'API' + env: + DEBIAN_FRONTEND: noninteractive # to prevent apt-get from waiting user input + INSTALL_DIR: ${{ github.workspace }}/install + INSTALL_TEST_DIR: ${{ github.workspace }}/install/tests + CONFORMANCE_TOOLS_DIR: ${{ github.workspace }}/install/tests/functional_test_utils/layer_tests_summary + CONFORMANCE_ARTIFACTS_DIR: ${{ github.workspace }}/install/conformance_artifacts + TEST_DEVICE: 'CPU' + + steps: + + - name: Create Directories + run: | + mkdir -p ${CONFORMANCE_ARTIFACTS_DIR} + + # + # Dependencies + # + + - name: Download OpenVINO package + uses: actions/download-artifact@v3 + with: + name: openvino_package + path: ${{ env.INSTALL_DIR }} + + - name: Download OpenVINO tests package + uses: actions/download-artifact@v3 + with: + name: openvino_tests + path: ${{ env.INSTALL_TEST_DIR }} + + - name: Extract OpenVINO packages + run: | + pushd ${INSTALL_DIR} + tar -xzf openvino_package.tar.gz -C ${INSTALL_DIR} + popd + pushd ${INSTALL_TEST_DIR} + tar -xzf openvino_tests.tar.gz -C ${INSTALL_DIR} + popd + + - uses: actions/setup-python@v4 + with: + python-version: ${{ env.PYTHON_VERSION }} + + - name: Install Dependencies + run: | + sudo -E ${INSTALL_DIR}/install_dependencies/install_openvino_dependencies.sh -c=core -y + + # Needed for downloading IRs from storage.openvinotoolkit with Python urllib + sudo apt-get update && sudo apt-get install --assume-yes --no-install-recommends ca-certificates + + python3 -m pip install -r ${CONFORMANCE_TOOLS_DIR}/requirements.txt + + # + # Tests + # + + - name: Conformance Tests + run: | + source ${INSTALL_DIR}/setupvars.sh + + python3 ${CONFORMANCE_TOOLS_DIR}/run_conformance.py -ov=${INSTALL_DIR}/tests \ + -d=${TEST_DEVICE} \ + -t=${{ matrix.TEST_TYPE }} \ + -w=${CONFORMANCE_ARTIFACTS_DIR} \ + -f=${CONFORMANCE_TOOLS_DIR}/skip_configs/${TEST_DEVICE}/expected_failures_${{ matrix.TEST_TYPE }}.csv + + - name: Pack Conformance Artifacts + if: ${{ always() }} + run: | + pushd ${CONFORMANCE_ARTIFACTS_DIR} + tar -czvf ${CONFORMANCE_ARTIFACTS_DIR}/conformance_artifacts.tar.gz * + popd + + - name: Upload Conformance Artifacts + if: ${{ always() }} + uses: actions/upload-artifact@v3 + with: + name: conformance_artifacts_${{ matrix.TEST_TYPE }}-${{ env.TEST_DEVICE }} + path: ${{ env.CONFORMANCE_ARTIFACTS_DIR }}/conformance_artifacts.tar.gz + if-no-files-found: 'error' + ONNX_Runtime: needs: Build defaults: @@ -735,7 +826,7 @@ jobs: path: 'openvino' # - # Initilaize OpenVINO + # Initialize OpenVINO # - uses: actions/setup-python@v4 @@ -938,7 +1029,7 @@ jobs: env: INSTALL_DIR: /__w/openvino/openvino/install INSTALL_TEST_DIR: /__w/openvino/openvino/install/tests - PARALLEL_TEST_SCRIPT: /__w/openvino/openvino/install/tests/functional_test_utils/run_parallel.py + PARALLEL_TEST_SCRIPT: 
/__w/openvino/openvino/install/tests/functional_test_utils/layer_tests_summary/run_parallel.py PARALLEL_TEST_CACHE: /__w/openvino/openvino/install/tests/test_cache.lst steps: @@ -977,7 +1068,7 @@ jobs: python-version: ${{ env.PYTHON_VERSION }} - name: Install python dependencies for run_parallel.py - run: python3 -m pip install -r ${INSTALL_TEST_DIR}/functional_test_utils/requirements.txt + run: python3 -m pip install -r ${INSTALL_TEST_DIR}/functional_test_utils/layer_tests_summary/requirements.txt - name: Restore tests execution time uses: actions/cache/restore@v3 diff --git a/src/tests/functional/plugin/conformance/test_runner/README.md b/src/tests/functional/plugin/conformance/test_runner/README.md index 90735507f7359d..ccdbe634052919 100644 --- a/src/tests/functional/plugin/conformance/test_runner/README.md +++ b/src/tests/functional/plugin/conformance/test_runner/README.md @@ -50,7 +50,7 @@ Run the following commands in the build directory: make --jobs=$(nproc --all) lib_plugin_name ``` -## How to run using [simple conformance runner](./../../../../ie_test_utils/functional_test_utils/layer_tests_summary/run_conformance.py) +## How to run using [simple conformance runner](./../../../../../tests/test_utils/functional_test_utils/layer_tests_summary/run_conformance.py) There is a simple python runner to complete the whole conformance pipeline locally. Some steps could be excluded from the pipeline by command-line parameter configuration. @@ -89,7 +89,7 @@ The script has the following optional arguments: * `p PARALLEL_DEVICES, --parallel_devices PARALLEL_DEVICES` Parallel over HW devices. For example run tests over `GPU.0` and `GPU.1` in case when device are the same * `f EXPECTED_FAILURES, --expected_failures EXPECTED_FAILURES` - Excepted failures list file path as csv + Excepted failures list file path as csv. See more in the [Working with expected failures](#working-with-expected-failures) section. * `u EXPECTED_FAILURES_UPDATE, --expected_failures_update EXPECTED_FAILURES_UPDATE` Overwrite expected failures list in case same failures were fixed * `-cache_path CACHE_PATH` @@ -155,7 +155,7 @@ The target is able to take the following command-line arguments: > **NOTE**: > -> Using [`parallel_runner`](./../../../../ie_test_utils/functional_test_utils/layer_tests_summary/run_parallel.py) tool to run a conformance suite helps to report crashed tests and collect correct statistics after unexpected crashes. +> Using [`parallel_runner`](./../../../../../tests/test_utils/functional_test_utils/layer_tests_summary/run_parallel.py) tool to run a conformance suite helps to report crashed tests and collect correct statistics after unexpected crashes. > The tool is able to work in two modes: > * one test is run in a separate thread (first run, as the output the cache will be saved as a custom file). > * similar load time per one worker based on test execution time. May contain different test count per worker. @@ -169,16 +169,26 @@ The target is able to take the following command-line arguments: > All arguments after `--` symbol is forwarding to `conformanceTests` target. > > If you use the `--report_unique_name` argument, run -> [the merge xml script](./../../../../ie_test_utils/functional_test_utils/layer_tests_summary/merge_xmls.py) +> [the merge xml script](./../../../../../tests/test_utils/functional_test_utils/layer_tests_summary/merge_xmls.py) > to aggregate the results to one *xml* file. Check command-line arguments with `--help` before running the command. 
> The example of usage is: > ``` > python3 merge_xmls.py --input_folders=/path/to/temp_output_report_folder --output_folder=/path/to/output_report_folder --output_filename=report_aggregated > ``` + +## Working with expected failures + +The `run_conformance.py` script has an optional `--expected_failures` argument which accepts a path to a csv file with a list of tests that should not be run. + +You can find the files with the most up-to-date expected failures for different devices and conformance types [here](./../../../../../tests/test_utils/functional_test_utils/layer_tests_summary/skip_configs). + +These files are used in [the Linux GitHub workflow](./../../../../../../.github/workflows/linux.yml) for skipping tests. + +You can update the file(s) you need with either new passing tests, i.e., when something is fixed, or with new failing tests to skip them. The changes will be reflected in the GitHub actions pipeline, in the `Conformance_Tests` job. + ## How to create a conformance report -Run [the summarize script](./../../../../ie_test_utils/functional_test_utils/layer_tests_summary/summarize.py) to generate `html` and `csv` report. Check command-line arguments with `--help` before running the command. +Run [the summarize script](./../../../../../tests/test_utils/functional_test_utils/layer_tests_summary/summarize.py) to generate `html` and `csv` reports. Check command-line arguments with `--help` before running the command. The example of using the script is: ``` python3 summarize.py --xml /opt/repo/infrastructure-master/thirdparty/gtest-parallel/report_opset.xml --out /opt/repo/infrastructure-master/thirdparty/gtest-parallel/ -t OP ``` ``` python3 summarize.py --xml /opt/repo/infrastructure-master/thirdparty/gtest-parallel/report_api.xml --out /opt/repo/infrastructure-master/thirdparty/gtest-parallel/ -t API ``` -> **NOTE**: Remember to copy [styles folder](./../../../../ie_test_utils/functional_test_utils/layer_tests_summary/template) to the output directory. It helps to provide a report with filters and other useful features. +> **NOTE**: Remember to copy the [styles folder](./../../../../../tests/test_utils/functional_test_utils/layer_tests_summary/template) to the output directory. It helps to provide a report with filters and other useful features. The report contains statistics based on conformance results and filter fields at the top of the page.
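As a complement to the expected-failures workflow described above, here is an illustrative pair of invocations (paths are placeholders, and the commands are assumed to be run from the `layer_tests_summary` folder). The first run skips the known failures stored for CPU Opset conformance; the second additionally refreshes that list once failures are fixed:
```
python3 run_conformance.py -d=CPU -t=OP -w=/path/to/working_dir -f=skip_configs/CPU/expected_failures_OP.csv
python3 run_conformance.py -d=CPU -t=OP -w=/path/to/working_dir -f=skip_configs/CPU/expected_failures_OP.csv -u
```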
diff --git a/src/tests/test_utils/functional_test_utils/CMakeLists.txt b/src/tests/test_utils/functional_test_utils/CMakeLists.txt index 5fb2fd6e7d2725..ba3f83db88a185 100644 --- a/src/tests/test_utils/functional_test_utils/CMakeLists.txt +++ b/src/tests/test_utils/functional_test_utils/CMakeLists.txt @@ -29,9 +29,7 @@ addIeTarget( $ ) -install(PROGRAMS layer_tests_summary/run_parallel.py DESTINATION tests/functional_test_utils COMPONENT tests EXCLUDE_FROM_ALL) -install(FILES layer_tests_summary/requirements.txt DESTINATION tests/functional_test_utils COMPONENT tests EXCLUDE_FROM_ALL) -install(DIRECTORY layer_tests_summary/utils DESTINATION tests/functional_test_utils COMPONENT tests EXCLUDE_FROM_ALL) +install(DIRECTORY layer_tests_summary DESTINATION tests/functional_test_utils COMPONENT tests EXCLUDE_FROM_ALL) ov_build_target_faster(${TARGET_NAME} PCH PRIVATE "src/precomp.hpp" diff --git a/src/tests/test_utils/functional_test_utils/layer_tests_summary/run_conformance.py b/src/tests/test_utils/functional_test_utils/layer_tests_summary/run_conformance.py index 8c64a0eeb9d3d7..d3e128e7b89a1e 100644 --- a/src/tests/test_utils/functional_test_utils/layer_tests_summary/run_conformance.py +++ b/src/tests/test_utils/functional_test_utils/layer_tests_summary/run_conformance.py @@ -1,23 +1,23 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +import csv +import os +import urllib.request as ur from argparse import ArgumentParser -from subprocess import Popen -from shutil import copytree, rmtree -from summarize import create_summary, create_api_summary -from merge_xmls import merge_xml -from run_parallel import TestParallelRunner from pathlib import Path +from shutil import copytree, rmtree, copyfile +from subprocess import Popen +from urllib.parse import urlparse import defusedxml.ElementTree as ET -from urllib.parse import urlparse -import os -import csv -import urllib.request as ur +from merge_xmls import merge_xml +from run_parallel import TestParallelRunner +from summarize import create_summary, create_api_summary from utils import constants -from utils.conformance_utils import get_logger from utils import file_utils +from utils.conformance_utils import get_logger logger = get_logger('conformance_runner') has_python_api = True @@ -34,10 +34,12 @@ SCRIPT_DIR_PATH, SCRIPT_NAME = os.path.split(os.path.abspath(__file__)) NO_MODEL_CONSTANT = os.path.join(SCRIPT_DIR_PATH, "data", "models.lst") + def get_default_working_dir(): path = Path(__file__).parent.resolve() return os.path.join(path, "temp") + def parse_arguments(): parser = ArgumentParser() @@ -56,27 +58,31 @@ def parse_arguments(): cache_path_help = "Path to the cache file with test_name list sorted by execution time as `.lst` file!" 
expected_failures_update_help = "Overwrite expected failures list in case same failures were fixed" - parser.add_argument("-d", "--device", help= device_help, type=str, required=False, default="CPU") + parser.add_argument("-d", "--device", help=device_help, type=str, required=False, default="CPU") parser.add_argument("-t", "--type", help=type_help, type=str, required=False, default=constants.OP_CONFORMANCE) parser.add_argument("--gtest_filter", help=gtest_filter_helper, type=str, required=False, default="*") - parser.add_argument("-w", "--working_dir", help=working_dir_help, type=str, required=False, default=get_default_working_dir()) - parser.add_argument("-m", "--models_path", help=models_path_help, type=str, required=False, default=NO_MODEL_CONSTANT) + parser.add_argument("-w", "--working_dir", help=working_dir_help, type=str, required=False, + default=get_default_working_dir()) + parser.add_argument("-m", "--models_path", help=models_path_help, type=str, required=False, + default=NO_MODEL_CONSTANT) parser.add_argument("-ov", "--ov_path", help=ov_help, type=str, required=False, default="") - parser.add_argument("-j", "--workers", help=workers_help, type=int, required=False, default=os.cpu_count()-1) + parser.add_argument("-j", "--workers", help=workers_help, type=int, required=False, default=os.cpu_count() - 1) parser.add_argument("-c", "--ov_config_path", help=ov_config_path_helper, type=str, required=False, default="") parser.add_argument("-s", "--dump_graph", help=dump_graph_help, type=int, required=False, default=0) parser.add_argument("-sm", "--special_mode", help=special_mode_help, type=str, required=False, default="") parser.add_argument("-p", "--parallel_devices", help=parallel_help, type=bool, required=False, default=False) parser.add_argument("-f", "--expected_failures", help=expected_failures_help, type=str, required=False, default="") - parser.add_argument("-u", "--expected_failures_update", help=expected_failures_update_help, type=bool, required=False, default=False) + parser.add_argument("-u", "--expected_failures_update", help=expected_failures_update_help, required=False, + default=False, action='store_true') parser.add_argument("--cache_path", help=cache_path_help, type=str, required=False, default="") return parser.parse_args() + class Conformance: - def __init__(self, device:str, model_path:os.path, ov_path:os.path, type:str, workers:int, - gtest_filter:str, working_dir:os.path, ov_config_path:os.path, special_mode:str, - cache_path:str, parallel_devices:bool, expected_failures_file: str, + def __init__(self, device: str, model_path: os.path, ov_path: os.path, type: str, workers: int, + gtest_filter: str, working_dir: os.path, ov_config_path: os.path, special_mode: str, + cache_path: str, parallel_devices: bool, expected_failures_file: str, expected_failures_update: bool): self._device = device self._model_path = model_path @@ -107,7 +113,8 @@ def __init__(self, device:str, model_path:os.path, ov_path:os.path, type:str, wo logger.error(f'Incorrect value to set API scope: {special_mode}. Please check to get possible values') exit(-1) else: - logger.error(f"Incorrect conformance type: {type}. Please use '{constants.OP_CONFORMANCE}' or '{constants.API_CONFORMANCE}'") + logger.error( + f"Incorrect conformance type: {type}. 
Please use '{constants.OP_CONFORMANCE}' or '{constants.API_CONFORMANCE}'") exit(-1) self._type = type self._workers = workers @@ -117,6 +124,7 @@ def __init__(self, device:str, model_path:os.path, ov_path:os.path, type:str, wo self._ov_config_path = ov_config_path self._is_parallel_over_devices = parallel_devices self._expected_failures = set() + self._unexpected_failures = set() self._expected_failures_file = expected_failures_file if os.path.isfile(expected_failures_file): self._expected_failures = self.__get_failed_test_from_csv(expected_failures_file) @@ -124,25 +132,27 @@ def __init__(self, device:str, model_path:os.path, ov_path:os.path, type:str, wo logger.warning(f"Expected failures testlist `{self._expected_failures_file}` does not exist!") self._expected_failures_update = expected_failures_update + self.is_successful_run = False + def __download_models(self, url_to_download, path_to_save): _, file_name = os.path.split(urlparse(url_to_download).path) download_path = os.path.join(path_to_save, file_name) try: logger.info(f"Conformance IRs will be downloaded from {url_to_download} to {download_path}") ur.urlretrieve(url_to_download, filename=download_path) - except: - logger.error(f"Please verify URL: {url_to_download}. Looks like that is incorrect") + except Exception as exc: + logger.error(f"Please verify URL: {url_to_download}. It might be incorrect. See below for the full error.") + logger.exception(f'FULL ERROR: {exc}') exit(-1) logger.info(f"Conformance IRs were downloaded from {url_to_download} to {download_path}") if not os.path.isfile(download_path): logger.error(f"{download_path} is not a file. Exit!") exit(-1) if file_utils.is_archieve(download_path): - logger.info(f"The file {download_path} is archieve. Should be unzip to {path_to_save}") + logger.info(f"The file {download_path} is archived. Should be unzipped to {path_to_save}") return file_utils.unzip_archieve(download_path, path_to_save) return download_path - def __dump_subgraph(self): subgraph_dumper_path = os.path.join(self._ov_path, f'{SUBGRAPH_DUMPER_BIN_NAME}{constants.OS_BIN_FILE_EXT}') if not os.path.isfile(subgraph_dumper_path): @@ -153,7 +163,9 @@ def __dump_subgraph(self): logger.info(f"Remove directory {conformance_ir_path}") rmtree(conformance_ir_path) os.mkdir(conformance_ir_path) - self._model_path = file_utils.prepare_filelist(self._model_path, ["*.onnx", "*.pdmodel", "*.__model__", "*.pb", "*.xml", "*.tflite"]) + self._model_path = file_utils.prepare_filelist(self._model_path, + ["*.onnx", "*.pdmodel", "*.__model__", "*.pb", "*.xml", + "*.tflite"]) logger.info(f"Stating model dumping from {self._model_path}") cmd = f'{subgraph_dumper_path} --input_folders="{self._model_path}" --output_folder="{conformance_ir_path}"' process = Popen(cmd, shell=True) @@ -172,11 +184,12 @@ def __dump_subgraph(self): save_rel_weights(Path(self._model_path), op_rel_weight) logger.info(f"All conformance IRs in {self._model_path} were renamed based on hash") else: - logger.warning("The OV Python was not built or Environment was not updated to requirments. Skip the step to rename Conformance IR based on a hash") - + logger.warning( + "The OV Python was not built or Environment was not updated to requirements. 
" + "Skip the step to rename Conformance IR based on a hash") @staticmethod - def __get_failed_test_from_csv(csv_file:str): + def __get_failed_test_from_csv(csv_file: str): failures = set() with open(csv_file, "r") as failures_file: for row in csv.reader(failures_file, delimiter=','): @@ -195,14 +208,20 @@ def __check_expected_failures(self): diff = this_run_failures.difference(self._expected_failures) if len(diff) > 0: logger.error(f"Unexpected failures: {diff}") - exit(-1) - - intersection = self._expected_failures.intersection(this_run_failures) - if this_run_failures != self._expected_failures and self._expected_failures_update: - logger.info(f"Expected failures file {self._expected_failures} will be updated!!!") + self._unexpected_failures = diff + self.is_successful_run = False + + # we do not want to update the expected failures file if there are failures that were not present + # in the passed expected failures file, i.e. if len(self._unexpected_failures) > 0 + if this_run_failures != self._expected_failures and self._expected_failures_update and \ + not len(self._unexpected_failures): + logger.info(f"Expected failures file {self._expected_failures_file} will be updated! " + f"The following will be deleted as they are passing now: " + f"{self._expected_failures.difference(this_failures_file)}") os.remove(self._expected_failures_file) - this_failures_file = Path(this_failures_file) - this_failures_file.rename(self._expected_failures_file) + copyfile(this_failures_file, self._expected_failures_file) + + self.is_successful_run = True def __run_conformance(self): conformance_path = None @@ -212,7 +231,7 @@ def __run_conformance(self): conformance_path = os.path.join(self._ov_path, f'{API_CONFORMANCE_BIN_NAME}{constants.OS_BIN_FILE_EXT}') if not os.path.isfile(conformance_path): - logger.error(f"{conformance_path} is not exist!") + logger.error(f"{conformance_path} does not exist!") exit(-1) logs_dir = os.path.join(self._working_dir, f'{self._device}_logs') @@ -241,7 +260,7 @@ def __run_conformance(self): is_parallel_devices=self._is_parallel_over_devices, excluded_tests=self._expected_failures if not self._expected_failures_update else set()) conformance.run() - conformance.postprocess_logs() + self.is_successful_run = conformance.postprocess_logs() if os.path.isfile(self._expected_failures_file): self.__check_expected_failures() @@ -249,13 +268,15 @@ def __run_conformance(self): final_report_name = f'report_{self._type.lower()}' merge_xml([parallel_report_dir], report_dir, final_report_name, self._type, True) - logger.info(f"Conformance is successful. 
XML reportwas saved to {report_dir}") - return (os.path.join(report_dir, final_report_name + ".xml"), report_dir) + logger.info(f"XML report was saved to {report_dir}") + return os.path.join(report_dir, final_report_name + ".xml"), report_dir - def __summarize(self, xml_report_path:os.path, report_dir: os.path): + def __summarize(self, xml_report_path: os.path, report_dir: os.path): if self._type == constants.OP_CONFORMANCE: summary_root = ET.parse(xml_report_path).getroot() - rel_weights_path = os.path.join(self._model_path, constants.REL_WEIGHTS_FILENAME.replace(constants.REL_WEIGHTS_REPLACE_STR, self._special_mode)) + rel_weights_path = os.path.join(self._model_path, + constants.REL_WEIGHTS_FILENAME.replace(constants.REL_WEIGHTS_REPLACE_STR, + self._special_mode)) create_summary(summary_root, report_dir, [], "", "", True, True, rel_weights_path) else: create_api_summary([xml_report_path], report_dir, [], "", "") @@ -303,7 +324,7 @@ def run(self, dump_models: bool): if dump_models: self.__dump_subgraph() if not os.path.exists(self._model_path): - logger.error(f"The model direstory {self._model_path} does not exist!") + logger.error(f"The model directory {self._model_path} does not exist!") exit(-1) if not os.path.exists(self._model_path): logger.error(f"Directory {self._model_path} does not exist") @@ -311,6 +332,7 @@ def run(self, dump_models: bool): xml_report, report_dir = self.__run_conformance() self.__summarize(xml_report, report_dir) + if __name__ == "__main__": args = parse_arguments() conformance = Conformance(args.device, args.models_path, @@ -321,4 +343,5 @@ def run(self, dump_models: bool): args.parallel_devices, args.expected_failures, args.expected_failures_update) conformance.run(args.dump_graph) - + if not conformance.is_successful_run: + exit(-1) diff --git a/src/tests/test_utils/functional_test_utils/layer_tests_summary/run_parallel.py b/src/tests/test_utils/functional_test_utils/layer_tests_summary/run_parallel.py index 67077c9d055d4f..9ca56067b3b851 100644 --- a/src/tests/test_utils/functional_test_utils/layer_tests_summary/run_parallel.py +++ b/src/tests/test_utils/functional_test_utils/layer_tests_summary/run_parallel.py @@ -331,7 +331,7 @@ def __get_test_list_by_runtime(self, test_unit = constants.TEST_UNIT_NAME): if constants.DISABLED_PREFIX in real_test_name: self._disabled_tests.append(real_test_name) elif test_unit == constants.TEST_UNIT_NAME: - tests_dict[real_test_name] = 1 + tests_dict[real_test_name] = -1 self._total_test_cnt += 1 elif test_unit == constants.SUITE_UNIT_NAME: tests_dict[test_suite] = tests_dict.get(test_suite, 0) + 1 @@ -643,8 +643,8 @@ def __save_log(logs_dir, dir, test_name): test_results[dir] += 1 else: test_results[dir] = 1 - if dir != "passed" and ref_k != None: - fix_priority.append((ref_k, test_name)) + if dir != "passed": + fix_priority.append((ref_k or 0, test_name)) ref_k = None test_cnt_real_saved_now += 1 test_name = None diff --git a/src/tests/test_utils/functional_test_utils/layer_tests_summary/skip_configs/CPU/expected_failures_API.csv b/src/tests/test_utils/functional_test_utils/layer_tests_summary/skip_configs/CPU/expected_failures_API.csv new file mode 100644 index 00000000000000..7e956981216699 --- /dev/null +++ b/src/tests/test_utils/functional_test_utils/layer_tests_summary/skip_configs/CPU/expected_failures_API.csv @@ -0,0 +1,3760 @@ +Test Name,Fix Priority +ov_plugin_mandatory/OVPropertiesTests.canSetPropertyAndCheckGetProperty/target_device=HETERO_properties={PERF_COUNT:YES},1.0 
+ov_plugin_mandatory/OVPropertiesTests.canSetPropertyAndCheckGetProperty/target_device=BATCH_properties={PERF_COUNT:YES},1.0 +ov_plugin_mandatory/OVPropertiesTests.SetCorrectProperties/target_device=HETERO_properties={PERF_COUNT:YES},1.0 +ov_plugin_mandatory/OVPropertiesTests.SetCorrectProperties/target_device=BATCH_properties={PERF_COUNT:YES},1.0 +ov_plugin_mandatory/OVHoldersTestOnImportedNetwork.LoadedTensor/target_device=MULTI.CPU,1.0 +ov_plugin_mandatory/OVHoldersTestOnImportedNetwork.LoadedTensor/target_device=BATCH.CPU,1.0 +ov_plugin_mandatory/OVHoldersTestOnImportedNetwork.LoadedTensor/target_device=AUTO.CPU,1.0 +ov_plugin_mandatory/OVHoldersTestOnImportedNetwork.CreateRequestWithCoreRemoved/target_device=MULTI.CPU,1.0 +ov_plugin_mandatory/OVHoldersTestOnImportedNetwork.CreateRequestWithCoreRemoved/target_device=BATCH.CPU,1.0 +ov_plugin_mandatory/OVHoldersTestOnImportedNetwork.CreateRequestWithCoreRemoved/target_device=AUTO.CPU,1.0 +ov_plugin_mandatory/OVHoldersTest.Orders/target_device=BATCH.CPU,1.0 +ov_plugin_mandatory/OVHoldersTest.LoadedTensor/target_device=BATCH.CPU,1.0 +ov_plugin_mandatory/OVHoldersTest.LoadedState/target_device=BATCH.CPU,1.0 +ov_plugin_mandatory/OVHoldersTest.LoadedRemoteContext/target_device=BATCH.CPU,1.0 +ov_plugin_mandatory/OVHoldersTest.LoadedAny/target_device=BATCH.CPU,1.0 +ov_plugin_mandatory/OVGetMetricPropsTest.GetMetricAndPrintNoThrow_OPTIMIZATION_CAPABILITIES/3,1.0 +ov_plugin_mandatory/OVGetMetricPropsTest.GetMetricAndPrintNoThrow_AVAILABLE_DEVICES/4,1.0 +ov_plugin_mandatory/OVGetMetricPropsTest.GetMetricAndPrintNoThrow_AVAILABLE_DEVICES/3,1.0 +ov_plugin_mandatory/OVGetMetricPropsTest.GetMetricAndPrintNoThrow_AVAILABLE_DEVICES/2,1.0 +ov_plugin_mandatory/OVGetMetricPropsTest.GetMetricAndPrintNoThrow_AVAILABLE_DEVICES/1,1.0 +ov_plugin_mandatory/OVGetAvailableDevicesPropsTest.GetAvailableDevicesNoThrow/4,1.0 +ov_plugin_mandatory/OVGetAvailableDevicesPropsTest.GetAvailableDevicesNoThrow/3,1.0 +ov_plugin_mandatory/OVGetAvailableDevicesPropsTest.GetAvailableDevicesNoThrow/2,1.0 +ov_plugin_mandatory/OVGetAvailableDevicesPropsTest.GetAvailableDevicesNoThrow/1,1.0 +ov_plugin_mandatory/OVClassQueryModelTest.QueryModelWithMatMul/4,1.0 +ov_plugin_mandatory/OVClassQueryModelTest.QueryModelWithMatMul/3,1.0 +ov_plugin_mandatory/OVClassQueryModelTest.QueryModelWithMatMul/2,1.0 +ov_plugin_mandatory/OVClassQueryModelTest.QueryModelWithMatMul/1,1.0 +ov_plugin_mandatory/OVClassQueryModelTest.QueryModelWithInvalidDeviceIDThrows/4,1.0 +ov_plugin_mandatory/OVClassQueryModelTest.QueryModelWithInvalidDeviceIDThrows/2,1.0 +ov_plugin_mandatory/OVClassQueryModelTest.QueryModelWithBigDeviceIDThrows/4,1.0 +ov_plugin_mandatory/OVClassQueryModelTest.QueryModelWithBigDeviceIDThrows/2,1.0 +ov_plugin_mandatory/OVClassQueryModelTest.QueryModelHETEROWithDeviceIDNoThrow/4,1.0 +ov_plugin_mandatory/OVClassQueryModelTest.QueryModelHETEROWithDeviceIDNoThrow/3,1.0 +ov_plugin_mandatory/OVClassQueryModelTest.QueryModelHETEROWithDeviceIDNoThrow/2,1.0 +ov_plugin_mandatory/OVClassQueryModelTest.QueryModelHETEROWithDeviceIDNoThrow/1,1.0 +ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=MULTI.CPU_properties={NUM_STREAMS:3},1.0 +ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={PERF_COUNT:YES},1.0 +ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={PERF_COUNT:NO},1.0 
+ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={PERFORMANCE_HINT_NUM_REQUESTS:1},1.0 +ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={PERFORMANCE_HINT:THROUGHPUT},1.0 +ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={PERFORMANCE_HINT:LATENCY},1.0 +ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={PERFORMANCE_HINT:CUMULATIVE_THROUGHPUT},1.0 +ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={NUM_STREAMS:3},1.0 +ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={LOG_LEVEL:LOG_WARNING},1.0 +ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={LOG_LEVEL:LOG_TRACE},1.0 +ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={LOG_LEVEL:LOG_NONE},1.0 +ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={LOG_LEVEL:LOG_INFO},1.0 +ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={LOG_LEVEL:LOG_ERROR},1.0 +ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={LOG_LEVEL:LOG_DEBUG},1.0 +ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={EXECUTION_MODE_HINT:PERFORMANCE},1.0 +ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={EXECUTION_MODE_HINT:ACCURACY},1.0 +ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={LOG_LEVEL:LOG_WARNING},1.0 +ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={LOG_LEVEL:LOG_TRACE},1.0 +ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={LOG_LEVEL:LOG_NONE},1.0 +ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={LOG_LEVEL:LOG_INFO},1.0 +ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={LOG_LEVEL:LOG_ERROR},1.0 +ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={LOG_LEVEL:LOG_DEBUG},1.0 +ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={PERF_COUNT:YES},1.0 +ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={PERF_COUNT:NO},1.0 +ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={PERFORMANCE_HINT_NUM_REQUESTS:1},1.0 +ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={PERFORMANCE_HINT:THROUGHPUT},1.0 +ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={PERFORMANCE_HINT:LATENCY},1.0 
+ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={PERFORMANCE_HINT:CUMULATIVE_THROUGHPUT},1.0 +ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={NUM_STREAMS:3},1.0 +ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={LOG_LEVEL:LOG_WARNING},1.0 +ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={LOG_LEVEL:LOG_TRACE},1.0 +ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={LOG_LEVEL:LOG_NONE},1.0 +ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={LOG_LEVEL:LOG_INFO},1.0 +ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={LOG_LEVEL:LOG_ERROR},1.0 +ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={LOG_LEVEL:LOG_DEBUG},1.0 +ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={EXECUTION_MODE_HINT:PERFORMANCE},1.0 +ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={EXECUTION_MODE_HINT:ACCURACY},1.0 +ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=AUTO.CPU_properties={NUM_STREAMS:3},1.0 +ov_plugin_mandatory/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=MULTI.CPU_properties={EXECUTION_DEVICES:},1.0 +ov_plugin_mandatory/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=MULTI.CPU_properties={DEVICE_TYPE:},1.0 +ov_plugin_mandatory/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=MULTI.CPU_properties={DEVICE_ARCHITECTURE:},1.0 +ov_plugin_mandatory/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={EXECUTION_DEVICES:},1.0 +ov_plugin_mandatory/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={DEVICE_TYPE:},1.0 +ov_plugin_mandatory/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={DEVICE_ARCHITECTURE:},1.0 +ov_plugin_mandatory/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={EXECUTION_DEVICES:},1.0 +ov_plugin_mandatory/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={DEVICE_TYPE:},1.0 +ov_plugin_mandatory/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={DEVICE_ARCHITECTURE:},1.0 +ov_plugin_mandatory/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={EXECUTION_DEVICES:},1.0 +ov_plugin_mandatory/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={DEVICE_TYPE:},1.0 +ov_plugin_mandatory/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={DEVICE_ARCHITECTURE:},1.0 +ov_plugin_mandatory/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=AUTO.CPU_properties={EXECUTION_DEVICES:},1.0 
+ov_plugin_mandatory/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=AUTO.CPU_properties={DEVICE_TYPE:},1.0 +ov_plugin_mandatory/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=AUTO.CPU_properties={DEVICE_ARCHITECTURE:},1.0 +ov_plugin_mandatory/OVCheckChangePropComplieModleGetPropTests_InferencePrecision.ChangeCorrectProperties/target_device=MULTI.CPU_,1.0 +ov_plugin_mandatory/OVCheckChangePropComplieModleGetPropTests_InferencePrecision.ChangeCorrectProperties/target_device=HETERO.CPU_,1.0 +ov_plugin_mandatory/OVCheckChangePropComplieModleGetPropTests_InferencePrecision.ChangeCorrectProperties/target_device=CPU_,1.0 +ov_plugin_mandatory/OVCheckChangePropComplieModleGetPropTests_InferencePrecision.ChangeCorrectProperties/target_device=BATCH.CPU_,1.0 +ov_plugin_mandatory/OVCheckChangePropComplieModleGetPropTests_InferencePrecision.ChangeCorrectProperties/target_device=AUTO.CPU_,1.0 +ov_plugin_mandatory/OVCheckChangePropComplieModleGetPropTests_DEVICE_ID.ChangeCorrectDeviceProperties/target_device=MULTI.CPU_,1.0 +ov_plugin_mandatory/OVCheckChangePropComplieModleGetPropTests_DEVICE_ID.ChangeCorrectDeviceProperties/target_device=HETERO.CPU_,1.0 +ov_plugin_mandatory/OVCheckChangePropComplieModleGetPropTests_DEVICE_ID.ChangeCorrectDeviceProperties/target_device=BATCH.CPU_,1.0 +ov_plugin_mandatory/OVCheckChangePropComplieModleGetPropTests_DEVICE_ID.ChangeCorrectDeviceProperties/target_device=AUTO.CPU_,1.0 +ov_infer_request_mandatory/OVInferenceChainingStatic.StaticOutputToStaticInput/targetDevice=BATCH.CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestPerfCountersTest.NotEmptyAfterSyncInfer/targetDevice=BATCH.CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestPerfCountersTest.NotEmptyAfterAsyncInfer/targetDevice=BATCH.CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestPerfCountersTest.CheckOperationInProfilingInfo/targetDevice=HETERO.CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestPerfCountersTest.CheckOperationInProfilingInfo/targetDevice=BATCH.CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestMultithreadingTests.canRun3SyncRequestsConsistentlyFromThreads/targetDevice=BATCH.CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestMultithreadingTests.canRun3AsyncRequestsParallelWithWait/targetDevice=BATCH.CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestMultithreadingTests.canRun3AsyncRequestsConsistentlyWithWait/targetDevice=BATCH.CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestMultithreadingTests.canRun3AsyncRequestsConsistentlyFromThreadsWithoutWait/targetDevice=BATCH.CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestIOTensorTest.secondCallGetOutputDoNotReAllocateData/targetDevice=BATCH.CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestIOTensorTest.secondCallGetOutputAfterInferSync/targetDevice=BATCH.CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestIOTensorTest.secondCallGetInputDoNotReAllocateData/targetDevice=BATCH.CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestIOTensorTest.secondCallGetInputAfterInferSync/targetDevice=BATCH.CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestIOTensorTest.failToSetTensorWithIncorrectName/targetDevice=BATCH.CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestIOTensorTest.failToSetOutputWithIncorrectSizes/targetDevice=BATCH.CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestIOTensorTest.failToSetNullptrForOutput/targetDevice=BATCH.CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestIOTensorTest.failToSetNullptrForInput/targetDevice=BATCH.CPU_,1.0 
+ov_infer_request_mandatory/OVInferRequestIOTensorTest.failToSetInputWithIncorrectSizes/targetDevice=BATCH.CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestIOTensorTest.canSetAndGetOutput/targetDevice=BATCH.CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestIOTensorTest.canSetAndGetInput/targetDevice=BATCH.CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestIOTensorTest.canInferWithoutSetAndGetInOutSync/targetDevice=BATCH.CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestIOTensorTest.canInferWithoutSetAndGetInOutAsync/targetDevice=BATCH.CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestIOTensorTest.canInferWithSetInOutBlobs/targetDevice=BATCH.CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestIOTensorTest.canInferWithGetIn/targetDevice=BATCH.CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestIOTensorTest.canInferAfterIOBlobReallocation/targetDevice=BATCH.CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestIOTensorTest.InferStaticNetworkSetChangedOutputTensorThrow/targetDevice=BATCH.CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestIOTensorTest.InferStaticNetworkSetChangedInputTensorThrow/targetDevice=BATCH.CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetOutBlobWithDifferentPrecision/type=u8_target_device=BATCH.CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetOutBlobWithDifferentPrecision/type=u64_target_device=BATCH.CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetOutBlobWithDifferentPrecision/type=u32_target_device=BATCH.CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetOutBlobWithDifferentPrecision/type=u16_target_device=BATCH.CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetOutBlobWithDifferentPrecision/type=i8_target_device=BATCH.CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetOutBlobWithDifferentPrecision/type=i64_target_device=BATCH.CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetOutBlobWithDifferentPrecision/type=i32_target_device=BATCH.CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetOutBlobWithDifferentPrecision/type=i16_target_device=BATCH.CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetOutBlobWithDifferentPrecision/type=f64_target_device=BATCH.CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetOutBlobWithDifferentPrecision/type=f32_target_device=BATCH.CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetOutBlobWithDifferentPrecision/type=f16_target_device=BATCH.CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetOutBlobWithDifferentPrecision/type=boolean_target_device=BATCH.CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetInBlobWithDifferentPrecision/type=u8_target_device=BATCH.CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetInBlobWithDifferentPrecision/type=u64_target_device=BATCH.CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetInBlobWithDifferentPrecision/type=u32_target_device=BATCH.CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetInBlobWithDifferentPrecision/type=u16_target_device=BATCH.CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetInBlobWithDifferentPrecision/type=i8_target_device=BATCH.CPU_,1.0 
+ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetInBlobWithDifferentPrecision/type=i64_target_device=BATCH.CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetInBlobWithDifferentPrecision/type=i32_target_device=BATCH.CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetInBlobWithDifferentPrecision/type=i16_target_device=BATCH.CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetInBlobWithDifferentPrecision/type=f64_target_device=BATCH.CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetInBlobWithDifferentPrecision/type=f32_target_device=BATCH.CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetInBlobWithDifferentPrecision/type=f16_target_device=BATCH.CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetInBlobWithDifferentPrecision/type=boolean_target_device=BATCH.CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromSplitFunctionWithSeveralOutputs/type=u8_target_device=BATCH:CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromSplitFunctionWithSeveralOutputs/type=u64_target_device=BATCH:CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromSplitFunctionWithSeveralOutputs/type=u32_target_device=BATCH:CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromSplitFunctionWithSeveralOutputs/type=u16_target_device=BATCH:CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromSplitFunctionWithSeveralOutputs/type=i8_target_device=BATCH:CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromSplitFunctionWithSeveralOutputs/type=i64_target_device=BATCH:CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromSplitFunctionWithSeveralOutputs/type=i32_target_device=BATCH:CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromSplitFunctionWithSeveralOutputs/type=i16_target_device=BATCH:CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromSplitFunctionWithSeveralOutputs/type=f64_target_device=BATCH:CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromSplitFunctionWithSeveralOutputs/type=f32_target_device=BATCH:CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromSplitFunctionWithSeveralOutputs/type=f16_target_device=BATCH:CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromSplitFunctionWithSeveralOutputs/type=boolean_target_device=BATCH:CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromFunctionWithSeveralOutputs/type=u8_target_device=BATCH:CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromFunctionWithSeveralOutputs/type=u64_target_device=BATCH:CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromFunctionWithSeveralOutputs/type=u32_target_device=BATCH:CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromFunctionWithSeveralOutputs/type=u16_target_device=BATCH:CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromFunctionWithSeveralOutputs/type=i8_target_device=BATCH:CPU_,1.0 
+ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromFunctionWithSeveralOutputs/type=i64_target_device=BATCH:CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromFunctionWithSeveralOutputs/type=i32_target_device=BATCH:CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromFunctionWithSeveralOutputs/type=i16_target_device=BATCH:CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromFunctionWithSeveralOutputs/type=f64_target_device=BATCH:CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromFunctionWithSeveralOutputs/type=f32_target_device=BATCH:CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromFunctionWithSeveralOutputs/type=f16_target_device=BATCH:CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromFunctionWithSeveralOutputs/type=boolean_target_device=BATCH:CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputFromFunctionWithSingleInput/type=u8_target_device=BATCH:CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputFromFunctionWithSingleInput/type=u64_target_device=BATCH:CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputFromFunctionWithSingleInput/type=u32_target_device=BATCH:CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputFromFunctionWithSingleInput/type=u16_target_device=BATCH:CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputFromFunctionWithSingleInput/type=i8_target_device=BATCH:CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputFromFunctionWithSingleInput/type=i64_target_device=BATCH:CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputFromFunctionWithSingleInput/type=i32_target_device=BATCH:CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputFromFunctionWithSingleInput/type=i16_target_device=BATCH:CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputFromFunctionWithSingleInput/type=f64_target_device=BATCH:CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputFromFunctionWithSingleInput/type=f32_target_device=BATCH:CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputFromFunctionWithSingleInput/type=f16_target_device=BATCH:CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputFromFunctionWithSingleInput/type=boolean_target_device=BATCH:CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputsFromFunctionWithSeveralInputs/type=u8_target_device=BATCH:CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputsFromFunctionWithSeveralInputs/type=u64_target_device=BATCH:CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputsFromFunctionWithSeveralInputs/type=u32_target_device=BATCH:CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputsFromFunctionWithSeveralInputs/type=u16_target_device=BATCH:CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputsFromFunctionWithSeveralInputs/type=i8_target_device=BATCH:CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputsFromFunctionWithSeveralInputs/type=i64_target_device=BATCH:CPU_,1.0 
+ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputsFromFunctionWithSeveralInputs/type=i32_target_device=BATCH:CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputsFromFunctionWithSeveralInputs/type=i16_target_device=BATCH:CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputsFromFunctionWithSeveralInputs/type=f64_target_device=BATCH:CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputsFromFunctionWithSeveralInputs/type=f32_target_device=BATCH:CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputsFromFunctionWithSeveralInputs/type=f16_target_device=BATCH:CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputsFromFunctionWithSeveralInputs/type=boolean_target_device=BATCH:CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputFromFunctionWithSingleInput/type=u8_target_device=BATCH:CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputFromFunctionWithSingleInput/type=u64_target_device=BATCH:CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputFromFunctionWithSingleInput/type=u32_target_device=BATCH:CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputFromFunctionWithSingleInput/type=u16_target_device=BATCH:CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputFromFunctionWithSingleInput/type=i8_target_device=BATCH:CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputFromFunctionWithSingleInput/type=i64_target_device=BATCH:CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputFromFunctionWithSingleInput/type=i32_target_device=BATCH:CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputFromFunctionWithSingleInput/type=i16_target_device=BATCH:CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputFromFunctionWithSingleInput/type=f64_target_device=BATCH:CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputFromFunctionWithSingleInput/type=f32_target_device=BATCH:CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputFromFunctionWithSingleInput/type=f16_target_device=BATCH:CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputFromFunctionWithSingleInput/type=boolean_target_device=BATCH:CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestCancellationTests.canCancelInferRequest/targetDevice=BATCH.CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestCancellationTests.canCancelBeforeAsyncRequest/targetDevice=BATCH.CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestCancellationTests.canCancelAsyncRequest/targetDevice=BATCH.CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestCancellationTests.CanResetAfterCancelAsyncRequest/targetDevice=BATCH.CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestCallbackTests.syncInferDoesNotCallCompletionCallback/targetDevice=BATCH.CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestCallbackTests.returnGeneralErrorIfCallbackThrowException/targetDevice=BATCH.CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestCallbackTests.canStartSeveralAsyncInsideCompletionCallbackWithSafeDtor/targetDevice=BATCH.CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestCallbackTests.canCallAsyncWithCompletionCallback/targetDevice=BATCH.CPU_,1.0 
+ov_infer_request_mandatory/OVInferRequestCallbackTests.ReturnResultNotReadyFromWaitInAsyncModeForTooSmallTimeout/targetDevice=BATCH.CPU_,1.0 +ov_infer_request_mandatory/OVInferRequestCallbackTests.ImplDoesNotCopyCallback/targetDevice=BATCH.CPU_,1.0 +ov_compiled_model_mandatory/OVCompiledModelPropertiesDefaultSupportedTests.CanCompileWithDefaultValueFromPlugin/target_device=MULTI:CPU,1.0 +ov_compiled_model_mandatory/OVCompiledModelPropertiesDefaultSupportedTests.CanCompileWithDefaultValueFromPlugin/target_device=HETERO:CPU,1.0 +ov_compiled_model_mandatory/OVCompiledModelPropertiesDefaultSupportedTests.CanCompileWithDefaultValueFromPlugin/target_device=BATCH:CPU,1.0 +ov_compiled_model_mandatory/OVCompiledModelPropertiesDefaultSupportedTests.CanCompileWithDefaultValueFromPlugin/target_device=AUTO:CPU,1.0 +ov_compiled_model_mandatory/OVCompiledModelBaseTest.precisionsAsInOriginalFunction/targetDevice=BATCH.CPU_,1.0 +ov_compiled_model_mandatory/OVCompiledModelBaseTest.loadIncorrectV11Model/targetDevice=BATCH.CPU_,1.0 +ov_compiled_model_mandatory/OVCompiledModelBaseTest.getOutputsFromSplitFunctionWithSeveralOutputs/targetDevice=BATCH.CPU_,1.0 +ov_compiled_model_mandatory/OVCompiledModelBaseTest.getOutputsFromFunctionWithSeveralOutputs/targetDevice=BATCH.CPU_,1.0 +ov_compiled_model_mandatory/OVCompiledModelBaseTest.getOutputFromFunctionWithSingleInput/targetDevice=BATCH.CPU_,1.0 +ov_compiled_model_mandatory/OVCompiledModelBaseTest.getInputsFromFunctionWithSeveralInputs/targetDevice=BATCH.CPU_,1.0 +ov_compiled_model_mandatory/OVCompiledModelBaseTest.getInputFromFunctionWithSingleInput/targetDevice=BATCH.CPU_,1.0 +ov_compiled_model_mandatory/OVCompiledModelBaseTest.canCreateTwoCompiledModel/targetDevice=BATCH.CPU_,1.0 +ov_compiled_model_mandatory/OVCompiledModelBaseTest.canCompileModelAndCreateInferRequest/targetDevice=BATCH.CPU_,1.0 +ov_compiled_model_mandatory/OVCompiledModelBaseTest.canCompileModel/targetDevice=BATCH.CPU_,1.0 +ov_compiled_model_mandatory/OVCompiledModelBaseTest.CanGetOutputsInfoAndCheck/targetDevice=BATCH.CPU_,1.0 +ov_compiled_model_mandatory/OVCompiledModelBaseTest.CanGetOutputsInfo/targetDevice=BATCH.CPU_,1.0 +ov_compiled_model_mandatory/OVCompiledModelBaseTest.CanGetInputsInfoAndCheck/targetDevice=BATCH.CPU_,1.0 +ov_compiled_model_mandatory/OVCompiledModelBaseTest.CanGetInputsInfo/targetDevice=BATCH.CPU_,1.0 +ov_compiled_model_mandatory/OVClassCompiledModelPropertiesTests.canCompileModelWithPropertiesAndCheckGetProperty/targetDevice=BATCH.CPU_properties={PERF_COUNT:NO},1.0 +ov_compiled_model_mandatory/OVClassCompiledModelPropertiesTests.CanUseCache/targetDevice=BATCH.CPU_properties={PERF_COUNT:NO},1.0 +ov_compiled_model_mandatory/OVClassCompiledModelPropertiesDefaultTests.CheckDefaultValues/targetDevice=MULTI.CPU_properties={PERF_COUNT:NO},1.0 +ov_compiled_model_mandatory/OVClassCompiledModelPropertiesDefaultTests.CheckDefaultValues/targetDevice=HETERO.CPU_properties={PERF_COUNT:NO},1.0 +ov_compiled_model_mandatory/OVClassCompiledModelPropertiesDefaultTests.CheckDefaultValues/targetDevice=BATCH.CPU_properties={PERF_COUNT:NO},1.0 +ov_compiled_model_mandatory/OVClassCompiledModelPropertiesDefaultTests.CheckDefaultValues/targetDevice=AUTO.CPU_properties={PERF_COUNT:NO},1.0 +ov_compiled_model_mandatory/OVClassCompiledModelGetPropertyTest_MODEL_PRIORITY.GetMetricNoThrow/7,1.0 +ov_compiled_model_mandatory/OVClassCompiledModelGetPropertyTest_MODEL_PRIORITY.GetMetricNoThrow/6,1.0 +ov_compiled_model_mandatory/OVClassCompiledModelGetPropertyTest_MODEL_PRIORITY.GetMetricNoThrow/5,1.0 
+ov_compiled_model_mandatory/OVClassCompiledModelGetPropertyTest_MODEL_PRIORITY.GetMetricNoThrow/4,1.0 +ov_compiled_model_mandatory/OVClassCompiledModelGetPropertyTest_MODEL_PRIORITY.GetMetricNoThrow/3,1.0 +ov_compiled_model_mandatory/OVClassCompiledModelGetPropertyTest_MODEL_PRIORITY.GetMetricNoThrow/2,1.0 +ov_compiled_model_mandatory/OVClassCompiledModelGetPropertyTest_MODEL_PRIORITY.GetMetricNoThrow/15,1.0 +ov_compiled_model_mandatory/OVClassCompiledModelGetPropertyTest_MODEL_PRIORITY.GetMetricNoThrow/14,1.0 +ov_compiled_model_mandatory/OVClassCompiledModelGetPropertyTest_MODEL_PRIORITY.GetMetricNoThrow/13,1.0 +ov_compiled_model_mandatory/OVClassCompiledModelGetPropertyTest_MODEL_PRIORITY.GetMetricNoThrow/12,1.0 +ov_compiled_model_mandatory/OVClassCompiledModelGetPropertyTest_MODEL_PRIORITY.GetMetricNoThrow/1,1.0 +ov_compiled_model_mandatory/OVClassCompiledModelGetPropertyTest_MODEL_PRIORITY.GetMetricNoThrow/0,1.0 +ov_compiled_model_mandatory/OVClassCompiledModelGetPropertyTest.GetMetricNoThrow_SUPPORTED_CONFIG_KEYS/3,1.0 +ov_compiled_model_mandatory/OVClassCompiledModelGetPropertyTest.GetMetricNoThrow_OPTIMAL_NUMBER_OF_INFER_REQUESTS/3,1.0 +ov_compiled_model_mandatory/OVClassCompiledModelGetPropertyTest.GetMetricNoThrow_NETWORK_NAME/3,1.0 +ov_compiled_model_mandatory/OVClassCompiledModelGetIncorrectPropertyTest.GetConfigThrows/3,1.0 +ov_compiled_model_mandatory/OVClassCompiledModelGetConfigTest.GetConfigNoThrow/3,1.0 +ov_compiled_model_mandatory/OVClassCompiledModelGetConfigTest.GetConfigFromCoreAndFromCompiledModel/4,1.0 +ov_compiled_model_mandatory/OVClassCompiledModelGetConfigTest.GetConfigFromCoreAndFromCompiledModel/3,1.0 +ov_compiled_model_mandatory/OVClassCompiledModelGetConfigTest.GetConfigFromCoreAndFromCompiledModel/2,1.0 +ov_compiled_model_mandatory/OVClassCompiledModelGetConfigTest.GetConfigFromCoreAndFromCompiledModel/1,1.0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u8_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u8_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u8_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u8_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u8_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u8_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u8_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u8_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u64_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u64_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u64_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u64_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u64_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u64_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u64_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u64_batch1_AUTO.CPU,0 
+ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u32_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u32_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u32_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u32_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u32_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u32_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u32_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u32_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u16_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u16_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u16_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u16_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u16_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u16_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u16_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u16_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i8_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i8_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i8_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i8_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i8_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i8_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i8_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i8_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i64_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i64_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i64_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i64_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i64_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i64_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i64_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i64_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i32_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i32_batch2_HETERO.CPU,0 
+ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i32_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i32_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i32_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i32_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i32_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i32_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i16_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i16_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i16_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i16_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i16_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i16_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i16_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i16_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_f64_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_f64_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_f64_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_f64_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_f64_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_f64_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_f64_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_f64_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_f32_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_f32_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_f32_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_f32_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_f32_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_f32_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_f32_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_f32_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_f16_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_f16_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_f16_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_f16_batch2_AUTO.CPU,0 
+ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_f16_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_f16_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_f16_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_f16_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u8_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u8_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u8_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u8_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u8_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u8_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u8_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u8_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u64_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u64_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u64_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u64_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u64_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u64_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u64_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u64_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u32_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u32_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u32_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u32_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u32_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u32_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u32_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u32_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u16_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u16_batch2_HETERO.CPU,0 
+ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u16_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u16_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u16_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u16_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u16_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u16_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i8_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i8_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i8_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i8_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i8_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i8_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i8_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i8_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i64_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i64_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i64_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i64_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i64_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i64_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i64_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i64_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i32_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i32_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i32_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i32_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i32_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i32_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i32_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i32_batch1_AUTO.CPU,0 
+ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i16_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i16_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i16_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i16_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i16_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i16_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i16_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i16_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f64_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f64_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f64_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f64_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f64_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f64_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f64_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f64_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f32_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f32_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f32_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f32_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f32_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f32_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f32_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f32_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f16_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f16_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f16_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f16_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f16_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f16_batch1_HETERO.CPU,0 
+ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f16_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f16_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u8_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u8_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u8_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u8_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u8_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u8_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u8_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u8_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u64_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u64_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u64_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u64_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u64_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u64_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u64_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u64_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u32_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u32_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u32_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u32_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u32_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u32_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u32_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u32_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u16_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u16_batch2_HETERO.CPU,0 
+ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u16_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u16_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u16_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u16_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u16_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u16_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i8_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i8_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i8_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i8_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i8_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i8_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i8_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i8_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i64_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i64_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i64_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i64_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i64_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i64_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i64_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i64_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i32_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i32_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i32_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i32_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i32_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i32_batch1_HETERO.CPU,0 
+ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i32_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i32_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i16_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i16_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i16_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i16_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i16_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i16_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i16_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i16_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f64_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f64_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f64_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f64_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f64_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f64_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f64_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f64_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f32_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f32_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f32_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f32_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f32_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f32_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f32_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f32_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f16_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f16_batch2_HETERO.CPU,0 
+ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f16_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f16_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f16_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f16_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f16_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f16_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u8_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u8_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u8_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u8_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u8_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u8_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u8_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u8_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u64_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u64_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u64_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u64_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u64_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u64_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u64_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u64_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u32_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u32_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u32_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u32_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u32_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u32_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u32_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u32_batch1_AUTO.CPU,0 
+ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u16_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u16_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u16_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u16_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u16_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u16_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u16_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u16_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i8_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i8_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i8_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i8_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i8_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i8_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i8_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i8_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i64_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i64_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i64_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i64_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i64_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i64_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i64_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i64_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i32_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i32_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i32_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i32_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i32_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i32_batch1_HETERO.CPU,0 
+ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i32_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i32_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i16_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i16_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i16_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i16_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i16_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i16_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i16_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i16_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f64_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f64_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f64_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f64_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f64_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f64_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f64_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f64_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f32_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f32_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f32_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f32_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f32_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f32_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f32_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f32_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f16_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f16_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f16_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f16_batch2_AUTO.CPU,0 
+ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f16_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f16_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f16_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f16_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u8_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u8_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u8_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u8_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u8_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u8_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u8_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u8_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u64_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u64_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u64_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u64_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u64_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u64_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u64_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u64_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u32_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u32_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u32_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u32_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u32_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u32_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u32_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u32_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u16_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u16_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u16_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u16_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u16_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u16_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u16_batch1_BATCH.CPU,0 
+ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u16_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i8_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i8_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i8_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i8_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i8_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i8_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i8_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i8_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i64_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i64_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i64_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i64_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i64_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i64_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i64_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i64_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i32_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i32_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i32_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i32_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i32_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i32_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i32_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i32_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i16_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i16_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i16_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i16_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i16_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i16_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i16_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i16_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_f64_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_f64_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_f64_batch2_BATCH.CPU,0 
+ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_f64_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_f64_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_f64_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_f64_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_f64_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_f32_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_f32_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_f32_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_f32_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_f32_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_f32_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_f32_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_f32_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_f16_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_f16_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_f16_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_f16_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_f16_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_f16_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_f16_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_f16_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u8_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u8_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u8_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u8_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u8_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u8_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u8_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u8_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u64_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u64_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u64_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u64_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u64_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u64_batch1_HETERO.CPU,0 
+ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u64_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u64_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u32_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u32_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u32_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u32_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u32_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u32_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u32_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u32_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u16_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u16_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u16_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u16_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u16_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u16_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u16_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u16_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i8_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i8_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i8_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i8_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i8_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i8_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i8_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i8_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i64_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i64_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i64_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i64_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i64_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i64_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i64_batch1_BATCH.CPU,0 
+ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i64_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i32_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i32_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i32_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i32_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i32_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i32_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i32_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i32_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i16_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i16_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i16_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i16_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i16_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i16_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i16_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i16_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f64_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f64_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f64_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f64_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f64_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f64_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f64_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f64_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f32_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f32_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f32_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f32_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f32_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f32_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f32_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f32_batch1_AUTO.CPU,0 
+ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f16_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f16_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f16_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f16_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f16_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f16_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f16_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f16_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u8_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u8_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u8_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u8_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u8_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u8_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u8_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u8_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u64_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u64_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u64_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u64_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u64_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u64_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u64_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u64_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u32_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u32_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u32_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u32_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u32_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u32_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u32_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u32_batch1_AUTO.CPU,0 
+ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u16_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u16_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u16_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u16_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u16_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u16_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u16_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u16_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i8_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i8_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i8_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i8_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i8_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i8_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i8_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i8_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i64_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i64_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i64_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i64_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i64_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i64_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i64_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i64_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i32_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i32_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i32_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i32_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i32_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i32_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i32_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i32_batch1_AUTO.CPU,0 
+ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i16_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i16_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i16_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i16_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i16_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i16_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i16_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i16_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f64_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f64_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f64_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f64_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f64_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f64_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f64_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f64_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f32_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f32_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f32_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f32_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f32_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f32_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f32_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f32_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f16_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f16_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f16_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f16_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f16_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f16_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f16_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f16_batch1_AUTO.CPU,0 
+ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u8_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u8_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u8_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u8_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u8_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u8_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u8_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u8_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u64_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u64_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u64_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u64_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u64_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u64_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u64_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u64_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u32_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u32_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u32_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u32_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u32_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u32_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u32_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u32_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u16_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u16_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u16_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u16_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u16_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u16_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u16_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u16_batch1_AUTO.CPU,0 
+ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i8_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i8_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i8_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i8_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i8_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i8_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i8_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i8_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i64_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i64_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i64_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i64_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i64_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i64_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i64_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i64_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i32_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i32_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i32_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i32_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i32_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i32_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i32_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i32_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i16_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i16_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i16_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i16_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i16_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i16_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i16_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i16_batch1_AUTO.CPU,0 
+ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f64_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f64_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f64_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f64_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f64_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f64_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f64_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f64_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f32_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f32_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f32_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f32_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f32_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f32_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f32_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f32_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f16_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f16_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f16_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f16_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f16_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f16_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f16_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f16_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u8_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u8_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u8_batch2_CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u8_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u8_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u8_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u8_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u8_batch1_CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u8_batch1_BATCH.CPU,0 
+ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u8_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u64_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u64_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u64_batch2_CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u64_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u64_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u64_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u64_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u64_batch1_CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u64_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u64_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u32_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u32_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u32_batch2_CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u32_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u32_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u32_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u32_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u32_batch1_CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u32_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u32_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u16_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u16_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u16_batch2_CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u16_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u16_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u16_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u16_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u16_batch1_CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u16_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u16_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i8_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i8_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i8_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i8_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i8_batch1_MULTI.CPU,0 
+ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i8_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i8_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i8_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i64_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i64_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i64_batch2_CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i64_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i64_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i64_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i64_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i64_batch1_CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i64_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i64_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i32_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i32_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i32_batch2_CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i32_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i32_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i32_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i32_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i32_batch1_CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i32_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i32_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i16_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i16_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i16_batch2_CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i16_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i16_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i16_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i16_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i16_batch1_CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i16_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i16_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_f64_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_f64_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_f64_batch2_BATCH.CPU,0 
+ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_f64_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_f64_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_f64_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_f64_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_f64_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_f32_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_f32_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_f32_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_f32_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_f32_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_f32_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_f32_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_f32_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_f16_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_f16_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_f16_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_f16_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_f16_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_f16_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_f16_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_f16_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u8_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u8_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u8_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u8_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u8_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u8_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u8_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u8_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u64_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u64_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u64_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u64_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u64_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u64_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u64_batch1_BATCH.CPU,0 
+ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u64_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u32_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u32_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u32_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u32_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u32_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u32_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u32_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u32_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u16_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u16_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u16_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u16_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u16_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u16_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u16_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u16_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i8_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i8_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i8_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i8_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i8_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i8_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i8_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i8_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i64_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i64_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i64_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i64_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i64_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i64_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i64_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i64_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i32_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i32_batch2_HETERO.CPU,0 
+ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i32_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i32_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i32_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i32_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i32_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i32_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i16_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i16_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i16_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i16_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i16_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i16_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i16_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i16_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_f64_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_f64_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_f64_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_f64_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_f64_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_f64_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_f64_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_f64_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_f32_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_f32_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_f32_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_f32_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_f32_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_f32_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_f32_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_f32_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_f16_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_f16_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_f16_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_f16_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_f16_batch1_MULTI.CPU,0 
+ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_f16_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_f16_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_f16_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u8_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u8_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u8_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u8_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u8_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u8_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u8_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u8_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u64_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u64_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u64_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u64_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u64_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u64_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u64_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u64_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u32_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u32_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u32_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u32_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u32_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u32_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u32_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u32_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u16_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u16_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u16_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u16_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u16_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u16_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u16_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u16_batch1_AUTO.CPU,0 
+ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i8_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i8_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i8_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i8_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i8_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i8_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i8_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i8_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i64_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i64_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i64_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i64_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i64_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i64_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i64_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i64_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i32_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i32_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i32_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i32_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i32_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i32_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i32_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i32_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i16_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i16_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i16_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i16_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i16_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i16_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i16_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i16_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f64_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f64_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f64_batch2_BATCH.CPU,0 
+ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f64_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f64_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f64_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f64_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f64_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f32_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f32_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f32_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f32_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f32_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f32_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f32_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f32_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f16_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f16_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f16_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f16_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f16_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f16_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f16_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f16_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u8_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u8_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u8_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u8_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u8_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u8_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u8_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u8_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u64_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u64_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u64_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u64_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u64_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u64_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u64_batch1_BATCH.CPU,0 
+ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u64_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u32_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u32_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u32_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u32_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u32_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u32_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u32_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u32_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u16_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u16_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u16_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u16_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u16_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u16_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u16_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u16_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i8_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i8_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i8_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i8_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i8_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i8_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i8_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i8_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i64_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i64_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i64_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i64_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i64_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i64_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i64_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i64_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i32_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i32_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i32_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i32_batch2_AUTO.CPU,0 
+ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i32_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i32_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i32_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i32_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i16_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i16_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i16_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i16_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i16_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i16_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i16_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i16_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_f64_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_f64_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_f64_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_f64_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_f64_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_f64_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_f64_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_f64_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_f32_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_f32_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_f32_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_f32_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_f32_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_f32_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_f32_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_f32_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_f16_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_f16_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_f16_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_f16_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_f16_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_f16_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_f16_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_f16_batch1_AUTO.CPU,0 
+ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u8_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u8_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u8_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u8_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u8_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u8_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u8_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u8_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u64_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u64_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u64_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u64_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u64_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u64_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u64_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u64_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u32_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u32_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u32_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u32_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u32_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u32_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u32_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u32_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u16_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u16_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u16_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u16_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u16_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u16_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u16_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u16_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i8_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i8_batch2_HETERO.CPU,0 
+ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i8_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i8_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i8_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i8_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i8_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i8_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i64_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i64_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i64_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i64_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i64_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i64_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i64_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i64_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i32_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i32_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i32_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i32_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i32_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i32_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i32_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i32_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i16_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i16_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i16_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i16_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i16_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i16_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i16_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i16_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_f64_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_f64_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_f64_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_f64_batch2_AUTO.CPU,0 
+ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_f64_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_f64_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_f64_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_f64_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_f32_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_f32_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_f32_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_f32_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_f32_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_f32_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_f32_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_f32_batch1_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_f16_batch2_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_f16_batch2_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_f16_batch2_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_f16_batch2_AUTO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_f16_batch1_MULTI.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_f16_batch1_HETERO.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_f16_batch1_BATCH.CPU,0 +ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_f16_batch1_AUTO.CPU,0 +ov_plugin_floating_point/CompileModelCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f64_batch2_MULTI.CPU,0 +ov_plugin_floating_point/CompileModelCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f64_batch2_HETERO.CPU,0 +ov_plugin_floating_point/CompileModelCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f64_batch2_BATCH.CPU,0 +ov_plugin_floating_point/CompileModelCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f64_batch2_AUTO.CPU,0 +ov_plugin_floating_point/CompileModelCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f64_batch1_MULTI.CPU,0 +ov_plugin_floating_point/CompileModelCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f64_batch1_HETERO.CPU,0 +ov_plugin_floating_point/CompileModelCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f64_batch1_BATCH.CPU,0 +ov_plugin_floating_point/CompileModelCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f64_batch1_AUTO.CPU,0 +ov_plugin_floating_point/CompileModelCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f32_batch2_MULTI.CPU,0 +ov_plugin_floating_point/CompileModelCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f32_batch2_HETERO.CPU,0 +ov_plugin_floating_point/CompileModelCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f32_batch2_BATCH.CPU,0 +ov_plugin_floating_point/CompileModelCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f32_batch2_AUTO.CPU,0 +ov_plugin_floating_point/CompileModelCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f32_batch1_MULTI.CPU,0 
+ov_plugin_floating_point/CompileModelCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f32_batch1_HETERO.CPU,0 +ov_plugin_floating_point/CompileModelCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f32_batch1_BATCH.CPU,0 +ov_plugin_floating_point/CompileModelCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f32_batch1_AUTO.CPU,0 +ov_plugin_floating_point/CompileModelCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f16_batch2_MULTI.CPU,0 +ov_plugin_floating_point/CompileModelCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f16_batch2_HETERO.CPU,0 +ov_plugin_floating_point/CompileModelCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f16_batch2_BATCH.CPU,0 +ov_plugin_floating_point/CompileModelCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f16_batch2_AUTO.CPU,0 +ov_plugin_floating_point/CompileModelCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f16_batch1_MULTI.CPU,0 +ov_plugin_floating_point/CompileModelCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f16_batch1_HETERO.CPU,0 +ov_plugin_floating_point/CompileModelCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f16_batch1_BATCH.CPU,0 +ov_plugin_floating_point/CompileModelCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f16_batch1_AUTO.CPU,0 +ov_plugin/OVGetMetricPropsOptionalTest.GetMetricAndPrintNoThrow_RANGE_FOR_STREAMS/4,0 +ov_plugin/OVGetMetricPropsOptionalTest.GetMetricAndPrintNoThrow_RANGE_FOR_STREAMS/3,0 +ov_plugin/OVGetMetricPropsOptionalTest.GetMetricAndPrintNoThrow_RANGE_FOR_STREAMS/2,0 +ov_plugin/OVGetMetricPropsOptionalTest.GetMetricAndPrintNoThrow_RANGE_FOR_STREAMS/1,0 +ov_plugin/OVGetMetricPropsOptionalTest.GetMetricAndPrintNoThrow_RANGE_FOR_ASYNC_INFER_REQUESTS/4,0 +ov_plugin/OVGetMetricPropsOptionalTest.GetMetricAndPrintNoThrow_RANGE_FOR_ASYNC_INFER_REQUESTS/3,0 +ov_plugin/OVGetMetricPropsOptionalTest.GetMetricAndPrintNoThrow_RANGE_FOR_ASYNC_INFER_REQUESTS/2,0 +ov_plugin/OVGetMetricPropsOptionalTest.GetMetricAndPrintNoThrow_RANGE_FOR_ASYNC_INFER_REQUESTS/1,0 +ov_plugin/OVClassModelOptionalTestP.getVersionsNonEmpty/4,0 +ov_plugin/OVClassModelOptionalTestP.getVersionsNonEmpty/3,0 +ov_plugin/OVClassModelOptionalTestP.getVersionsNonEmpty/2,0 +ov_plugin/OVClassModelOptionalTestP.getVersionsNonEmpty/1,0 +ov_plugin/OVClassModelOptionalTestP.CompileModelCreateDefaultExecGraphResult/3,0 +ov_plugin/OVClassModelOptionalTestP.CompileModelActualHeteroDeviceUsingDevicePropertiesNoThrow/4,0 +ov_plugin/OVClassModelOptionalTestP.CompileModelActualHeteroDeviceNoThrow/4,0 +ov_plugin/OVClassModelOptionalTestP.CompileModelActualHeteroDeviceNoThrow/3,0 +ov_plugin/OVClassModelOptionalTestP.CompileModelActualHeteroDevice2NoThrow/4,0 +ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=MULTI.CPU_properties={SCHEDULING_CORE_TYPE:PCORE_ONLY},0 +ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=MULTI.CPU_properties={SCHEDULING_CORE_TYPE:ECORE_ONLY},0 +ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=MULTI.CPU_properties={SCHEDULING_CORE_TYPE:ANY_CORE},0 +ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=MULTI.CPU_properties={INFERENCE_NUM_THREADS:1},0 +ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=MULTI.CPU_properties={ENABLE_MMAP:YES},0 +ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=MULTI.CPU_properties={ENABLE_MMAP:NO},0 
+ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=MULTI.CPU_properties={ENABLE_HYPER_THREADING:YES},0 +ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=MULTI.CPU_properties={ENABLE_HYPER_THREADING:NO},0 +ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=MULTI.CPU_properties={ENABLE_CPU_PINNING:YES},0 +ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=MULTI.CPU_properties={ENABLE_CPU_PINNING:NO},0 +ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=MULTI.CPU_properties={COMPILATION_NUM_THREADS:1},0 +ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=MULTI.CPU_properties={AFFINITY:NUMA},0 +ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=MULTI.CPU_properties={AFFINITY:NONE},0 +ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=MULTI.CPU_properties={AFFINITY:HYBRID_AWARE},0 +ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=MULTI.CPU_properties={AFFINITY:CORE},0 +ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={SCHEDULING_CORE_TYPE:PCORE_ONLY},0 +ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={SCHEDULING_CORE_TYPE:ECORE_ONLY},0 +ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={SCHEDULING_CORE_TYPE:ANY_CORE},0 +ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={INFERENCE_NUM_THREADS:1},0 +ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={ENABLE_MMAP:YES},0 +ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={ENABLE_MMAP:NO},0 +ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={ENABLE_HYPER_THREADING:YES},0 +ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={ENABLE_HYPER_THREADING:NO},0 +ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={ENABLE_CPU_PINNING:YES},0 +ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={ENABLE_CPU_PINNING:NO},0 +ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={COMPILATION_NUM_THREADS:1},0 +ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={AFFINITY:NUMA},0 +ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={AFFINITY:NONE},0 +ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={AFFINITY:HYBRID_AWARE},0 +ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={AFFINITY:CORE},0 +ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={ENABLE_MMAP:YES},0 +ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={ENABLE_MMAP:NO},0 
+ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={COMPILATION_NUM_THREADS:1},0 +ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={SCHEDULING_CORE_TYPE:PCORE_ONLY},0 +ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={SCHEDULING_CORE_TYPE:ECORE_ONLY},0 +ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={SCHEDULING_CORE_TYPE:ANY_CORE},0 +ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={INFERENCE_NUM_THREADS:1},0 +ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={ENABLE_MMAP:YES},0 +ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={ENABLE_MMAP:NO},0 +ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={ENABLE_HYPER_THREADING:YES},0 +ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={ENABLE_HYPER_THREADING:NO},0 +ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={ENABLE_CPU_PINNING:YES},0 +ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={ENABLE_CPU_PINNING:NO},0 +ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={COMPILATION_NUM_THREADS:1},0 +ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={AFFINITY:NUMA},0 +ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={AFFINITY:NONE},0 +ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={AFFINITY:HYBRID_AWARE},0 +ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={AFFINITY:CORE},0 +ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=AUTO.CPU_properties={SCHEDULING_CORE_TYPE:PCORE_ONLY},0 +ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=AUTO.CPU_properties={SCHEDULING_CORE_TYPE:ECORE_ONLY},0 +ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=AUTO.CPU_properties={SCHEDULING_CORE_TYPE:ANY_CORE},0 +ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=AUTO.CPU_properties={INFERENCE_NUM_THREADS:1},0 +ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=AUTO.CPU_properties={ENABLE_MMAP:YES},0 +ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=AUTO.CPU_properties={ENABLE_MMAP:NO},0 +ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=AUTO.CPU_properties={ENABLE_HYPER_THREADING:YES},0 +ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=AUTO.CPU_properties={ENABLE_HYPER_THREADING:NO},0 +ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=AUTO.CPU_properties={ENABLE_CPU_PINNING:YES},0 +ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=AUTO.CPU_properties={ENABLE_CPU_PINNING:NO},0 
+ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=AUTO.CPU_properties={COMPILATION_NUM_THREADS:1},0 +ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=AUTO.CPU_properties={AFFINITY:NUMA},0 +ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=AUTO.CPU_properties={AFFINITY:NONE},0 +ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=AUTO.CPU_properties={AFFINITY:HYBRID_AWARE},0 +ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=AUTO.CPU_properties={AFFINITY:CORE},0 +ov_plugin/OVCheckMetricsPropsTests_ModelDependceProps.ChangeCorrectDeviceProperties/target_device=MULTI.CPU_properties={OPTIMAL_BATCH_SIZE:},0 +ov_plugin/OVCheckMetricsPropsTests_ModelDependceProps.ChangeCorrectDeviceProperties/target_device=MULTI.CPU_properties={MAX_BATCH_SIZE:},0 +ov_plugin/OVCheckMetricsPropsTests_ModelDependceProps.ChangeCorrectDeviceProperties/target_device=HETERO.CPU_properties={OPTIMAL_BATCH_SIZE:},0 +ov_plugin/OVCheckMetricsPropsTests_ModelDependceProps.ChangeCorrectDeviceProperties/target_device=HETERO.CPU_properties={MAX_BATCH_SIZE:},0 +ov_plugin/OVCheckMetricsPropsTests_ModelDependceProps.ChangeCorrectDeviceProperties/target_device=CPU_properties={OPTIMAL_BATCH_SIZE:},0 +ov_plugin/OVCheckMetricsPropsTests_ModelDependceProps.ChangeCorrectDeviceProperties/target_device=CPU_properties={MAX_BATCH_SIZE:},0 +ov_plugin/OVCheckMetricsPropsTests_ModelDependceProps.ChangeCorrectDeviceProperties/target_device=BATCH.CPU_properties={OPTIMAL_BATCH_SIZE:},0 +ov_plugin/OVCheckMetricsPropsTests_ModelDependceProps.ChangeCorrectDeviceProperties/target_device=BATCH.CPU_properties={MAX_BATCH_SIZE:},0 +ov_plugin/OVCheckMetricsPropsTests_ModelDependceProps.ChangeCorrectDeviceProperties/target_device=AUTO.CPU_properties={OPTIMAL_BATCH_SIZE:},0 +ov_plugin/OVCheckMetricsPropsTests_ModelDependceProps.ChangeCorrectDeviceProperties/target_device=AUTO.CPU_properties={MAX_BATCH_SIZE:},0 +ov_plugin/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=MULTI.CPU_properties={LOADED_FROM_CACHE:},0 +ov_plugin/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=MULTI.CPU_properties={DEVICE_UUID:},0 +ov_plugin/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=MULTI.CPU_properties={DEVICE_THERMAL:},0 +ov_plugin/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=MULTI.CPU_properties={DEVICE_LUID:},0 +ov_plugin/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=MULTI.CPU_properties={DEVICE_GOPS:},0 +ov_plugin/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={LOADED_FROM_CACHE:},0 +ov_plugin/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={DEVICE_UUID:},0 +ov_plugin/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={DEVICE_THERMAL:},0 +ov_plugin/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={DEVICE_LUID:},0 +ov_plugin/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={DEVICE_GOPS:},0 +ov_plugin/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={LOADED_FROM_CACHE:},0 
+ov_plugin/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={DEVICE_UUID:},0 +ov_plugin/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={DEVICE_THERMAL:},0 +ov_plugin/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={DEVICE_LUID:},0 +ov_plugin/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={DEVICE_GOPS:},0 +ov_plugin/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={LOADED_FROM_CACHE:},0 +ov_plugin/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={DEVICE_UUID:},0 +ov_plugin/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={DEVICE_THERMAL:},0 +ov_plugin/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={DEVICE_LUID:},0 +ov_plugin/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={DEVICE_GOPS:},0 +ov_plugin/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=AUTO.CPU_properties={LOADED_FROM_CACHE:},0 +ov_plugin/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=AUTO.CPU_properties={DEVICE_UUID:},0 +ov_plugin/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=AUTO.CPU_properties={DEVICE_THERMAL:},0 +ov_plugin/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=AUTO.CPU_properties={DEVICE_LUID:},0 +ov_plugin/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=AUTO.CPU_properties={DEVICE_GOPS:},0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u8_batch2_MULTI.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u8_batch2_HETERO.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u8_batch2_BATCH.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u8_batch2_AUTO.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u8_batch1_MULTI.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u8_batch1_HETERO.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u8_batch1_BATCH.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u8_batch1_AUTO.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u64_batch2_MULTI.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u64_batch2_HETERO.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u64_batch2_BATCH.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u64_batch2_AUTO.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u64_batch1_MULTI.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u64_batch1_HETERO.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u64_batch1_BATCH.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u64_batch1_AUTO.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u32_batch2_MULTI.CPU,0 
+ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u32_batch2_HETERO.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u32_batch2_BATCH.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u32_batch2_AUTO.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u32_batch1_MULTI.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u32_batch1_HETERO.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u32_batch1_BATCH.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u32_batch1_AUTO.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u16_batch2_MULTI.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u16_batch2_HETERO.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u16_batch2_BATCH.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u16_batch2_AUTO.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u16_batch1_MULTI.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u16_batch1_HETERO.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u16_batch1_BATCH.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u16_batch1_AUTO.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i8_batch2_MULTI.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i8_batch2_HETERO.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i8_batch2_BATCH.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i8_batch2_AUTO.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i8_batch1_MULTI.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i8_batch1_HETERO.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i8_batch1_BATCH.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i8_batch1_AUTO.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i64_batch2_MULTI.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i64_batch2_HETERO.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i64_batch2_BATCH.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i64_batch2_AUTO.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i64_batch1_MULTI.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i64_batch1_HETERO.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i64_batch1_BATCH.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i64_batch1_AUTO.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i32_batch2_MULTI.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i32_batch2_HETERO.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i32_batch2_BATCH.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i32_batch2_AUTO.CPU,0 
+ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i32_batch1_MULTI.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i32_batch1_HETERO.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i32_batch1_BATCH.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i32_batch1_AUTO.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i16_batch2_MULTI.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i16_batch2_HETERO.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i16_batch2_BATCH.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i16_batch2_AUTO.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i16_batch1_MULTI.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i16_batch1_HETERO.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i16_batch1_BATCH.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i16_batch1_AUTO.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_f64_batch2_MULTI.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_f64_batch2_HETERO.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_f64_batch2_BATCH.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_f64_batch2_AUTO.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_f64_batch1_MULTI.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_f64_batch1_HETERO.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_f64_batch1_BATCH.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_f64_batch1_AUTO.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_f32_batch2_MULTI.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_f32_batch2_HETERO.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_f32_batch2_BATCH.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_f32_batch2_AUTO.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_f32_batch1_MULTI.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_f32_batch1_HETERO.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_f32_batch1_BATCH.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_f32_batch1_AUTO.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_f16_batch2_MULTI.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_f16_batch2_HETERO.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_f16_batch2_BATCH.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_f16_batch2_AUTO.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_f16_batch1_MULTI.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_f16_batch1_HETERO.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_f16_batch1_BATCH.CPU,0 
+ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_f16_batch1_AUTO.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_boolean_batch2_MULTI.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_boolean_batch2_HETERO.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_boolean_batch2_BATCH.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_boolean_batch2_AUTO.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_boolean_batch1_MULTI.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_boolean_batch1_HETERO.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_boolean_batch1_BATCH.CPU,0 +ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_boolean_batch1_AUTO.CPU,0 +ov_infer_request_2/OVInferRequestDynamicTests.InferUpperBoundNetworkWithGetTensor/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=BATCH.CPU_,0 +ov_infer_request_2/OVInferRequestDynamicTests.InferUpperBoundNetworkAfterIOTensorsReshaping/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=BATCH.CPU_,0 +ov_infer_request_2/OVInferRequestDynamicTests.InferOutOfRangeShapeNetworkWithGetTensorUpper/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=BATCH.CPU_,0 +ov_infer_request_2/OVInferRequestDynamicTests.InferOutOfRangeShapeNetworkWithGetTensorLower/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=BATCH.CPU_,0 +ov_infer_request_2/OVInferRequestDynamicTests.InferFullyDynamicNetworkWithSetTensor/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=MULTI.CPU_,0 +ov_infer_request_2/OVInferRequestDynamicTests.InferFullyDynamicNetworkWithSetTensor/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=HETERO.CPU_,0 +ov_infer_request_2/OVInferRequestDynamicTests.InferFullyDynamicNetworkWithSetTensor/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=CPU_,0 +ov_infer_request_2/OVInferRequestDynamicTests.InferFullyDynamicNetworkWithSetTensor/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=BATCH.CPU_,0 +ov_infer_request_2/OVInferRequestDynamicTests.InferFullyDynamicNetworkWithSetTensor/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=AUTO.CPU_,0 +ov_infer_request_2/OVInferRequestDynamicTests.InferFullyDynamicNetworkWithGetTensor/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=MULTI.CPU_,0 +ov_infer_request_2/OVInferRequestDynamicTests.InferFullyDynamicNetworkWithGetTensor/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=HETERO.CPU_,0 +ov_infer_request_2/OVInferRequestDynamicTests.InferFullyDynamicNetworkWithGetTensor/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=CPU_,0 +ov_infer_request_2/OVInferRequestDynamicTests.InferFullyDynamicNetworkWithGetTensor/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=BATCH.CPU_,0 
+ov_infer_request_2/OVInferRequestDynamicTests.InferFullyDynamicNetworkWithGetTensor/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=AUTO.CPU_,0 +ov_infer_request_2/OVInferRequestDynamicTests.InferDynamicNetworkWithoutSetShape/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=BATCH.CPU_,0 +ov_infer_request_2/OVInferRequestDynamicTests.InferDynamicNetworkWithSetTensor2times/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=BATCH.CPU_,0 +ov_infer_request_2/OVInferRequestDynamicTests.InferDynamicNetworkWithSetTensor/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=BATCH.CPU_,0 +ov_infer_request_2/OVInferRequestDynamicTests.InferDynamicNetworkWithLocalCore/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=BATCH.CPU_,0 +ov_infer_request_2/OVInferRequestDynamicTests.InferDynamicNetworkWithGetTensor2times/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=BATCH.CPU_,0 +ov_infer_request_2/OVInferRequestDynamicTests.InferDynamicNetworkWithGetTensor/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=BATCH.CPU_,0 +ov_infer_request_2/OVInferRequestDynamicTests.InferDynamicNetworkSetUnexpectedOutputTensorBeforeInfer/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=BATCH.CPU_,0 +ov_infer_request_2/OVInferRequestDynamicTests.InferDynamicNetworkSetOutputTensorPreAllocatedMemoryBeforeInfer/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=BATCH.CPU_,0 +ov_infer_request_2/OVInferRequestDynamicTests.InferDynamicNetworkSetOutputShapeBeforeInfer/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=BATCH.CPU_,0 +ov_infer_request_2/OVInferRequestDynamicTests.InferDynamicNetworkGetOutputThenSetOutputTensorPreAllocatedMemoryBeforeInfer/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=BATCH.CPU_,0 +ov_infer_request_2/OVInferRequestDynamicTests.InferDynamicNetworkBoundWithoutSetShape/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=BATCH.CPU_,0 +ov_infer_request_2/OVInferRequestDynamicTests.InferDynamicNetwork/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=BATCH.CPU_,0 +ov_infer_request_2/OVInferRequestDynamicTests.GetSameTensor2times/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=BATCH.CPU_,0 +ov_infer_request_1/OVInferRequestDynamicTests.InferUpperBoundNetworkWithGetTensor/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=BATCH.CPU_,0 +ov_infer_request_1/OVInferRequestDynamicTests.InferUpperBoundNetworkAfterIOTensorsReshaping/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=BATCH.CPU_,0 +ov_infer_request_1/OVInferRequestDynamicTests.InferOutOfRangeShapeNetworkWithGetTensorUpper/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=BATCH.CPU_,0 
+ov_infer_request_1/OVInferRequestDynamicTests.InferOutOfRangeShapeNetworkWithGetTensorLower/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=BATCH.CPU_,0 +ov_infer_request_1/OVInferRequestDynamicTests.InferFullyDynamicNetworkWithSetTensor/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=MULTI.CPU_,0 +ov_infer_request_1/OVInferRequestDynamicTests.InferFullyDynamicNetworkWithSetTensor/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=HETERO.CPU_,0 +ov_infer_request_1/OVInferRequestDynamicTests.InferFullyDynamicNetworkWithSetTensor/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=CPU_,0 +ov_infer_request_1/OVInferRequestDynamicTests.InferFullyDynamicNetworkWithSetTensor/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=BATCH.CPU_,0 +ov_infer_request_1/OVInferRequestDynamicTests.InferFullyDynamicNetworkWithSetTensor/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=AUTO.CPU_,0 +ov_infer_request_1/OVInferRequestDynamicTests.InferFullyDynamicNetworkWithGetTensor/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=MULTI.CPU_,0 +ov_infer_request_1/OVInferRequestDynamicTests.InferFullyDynamicNetworkWithGetTensor/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=HETERO.CPU_,0 +ov_infer_request_1/OVInferRequestDynamicTests.InferFullyDynamicNetworkWithGetTensor/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=CPU_,0 +ov_infer_request_1/OVInferRequestDynamicTests.InferFullyDynamicNetworkWithGetTensor/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=BATCH.CPU_,0 +ov_infer_request_1/OVInferRequestDynamicTests.InferFullyDynamicNetworkWithGetTensor/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=AUTO.CPU_,0 +ov_infer_request_1/OVInferRequestDynamicTests.InferDynamicNetworkWithoutSetShape/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=BATCH.CPU_,0 +ov_infer_request_1/OVInferRequestDynamicTests.InferDynamicNetworkWithSetTensor2times/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=BATCH.CPU_,0 +ov_infer_request_1/OVInferRequestDynamicTests.InferDynamicNetworkWithSetTensor/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=BATCH.CPU_,0 +ov_infer_request_1/OVInferRequestDynamicTests.InferDynamicNetworkWithLocalCore/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=BATCH.CPU_,0 +ov_infer_request_1/OVInferRequestDynamicTests.InferDynamicNetworkWithGetTensor2times/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=BATCH.CPU_,0 +ov_infer_request_1/OVInferRequestDynamicTests.InferDynamicNetworkWithGetTensor/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=BATCH.CPU_,0 
+ov_infer_request_1/OVInferRequestDynamicTests.InferDynamicNetworkSetUnexpectedOutputTensorBeforeInfer/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=BATCH.CPU_,0 +ov_infer_request_1/OVInferRequestDynamicTests.InferDynamicNetworkSetOutputTensorPreAllocatedMemoryBeforeInfer/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=BATCH.CPU_,0 +ov_infer_request_1/OVInferRequestDynamicTests.InferDynamicNetworkSetOutputShapeBeforeInfer/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=BATCH.CPU_,0 +ov_infer_request_1/OVInferRequestDynamicTests.InferDynamicNetworkGetOutputThenSetOutputTensorPreAllocatedMemoryBeforeInfer/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=BATCH.CPU_,0 +ov_infer_request_1/OVInferRequestDynamicTests.InferDynamicNetworkBoundWithoutSetShape/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=BATCH.CPU_,0 +ov_infer_request_1/OVInferRequestDynamicTests.InferDynamicNetwork/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=BATCH.CPU_,0 +ov_infer_request_1/OVInferRequestDynamicTests.GetSameTensor2times/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=BATCH.CPU_,0 +ov_infer_request/OVInferenceChaining.StaticOutputToStaticInput/targetDevice=BATCH.CPU_,0 +ov_infer_request/OVInferenceChaining.StaticOutputToDynamicInput/targetDevice=BATCH.CPU_,0 +ov_infer_request/OVInferenceChaining.DynamicOutputToDynamicInput/targetDevice=MULTI.CPU_,0 +ov_infer_request/OVInferenceChaining.DynamicOutputToDynamicInput/targetDevice=HETERO.CPU_,0 +ov_infer_request/OVInferenceChaining.DynamicOutputToDynamicInput/targetDevice=CPU_,0 +ov_infer_request/OVInferenceChaining.DynamicOutputToDynamicInput/targetDevice=BATCH.CPU_,0 +ov_infer_request/OVInferenceChaining.DynamicOutputToDynamicInput/targetDevice=AUTO.CPU_,0 +ov_infer_request/OVInferenceChaining.DynamicInputToDynamicOutput/targetDevice=MULTI.CPU_,0 +ov_infer_request/OVInferenceChaining.DynamicInputToDynamicOutput/targetDevice=HETERO.CPU_,0 +ov_infer_request/OVInferenceChaining.DynamicInputToDynamicOutput/targetDevice=CPU_,0 +ov_infer_request/OVInferenceChaining.DynamicInputToDynamicOutput/targetDevice=BATCH.CPU_,0 +ov_infer_request/OVInferenceChaining.DynamicInputToDynamicOutput/targetDevice=AUTO.CPU_,0 +ov_infer_request/OVInferRequestIOTensorSetPrecisionTest.CanSetOutBlobWithDifferentPrecision/type=bf16_target_device=BATCH.CPU_,0 +ov_infer_request/OVInferRequestIOTensorSetPrecisionTest.CanSetInBlobWithDifferentPrecision/type=bf16_target_device=BATCH.CPU_,0 +ov_infer_request/OVInferRequestCheckTensorPrecision.getOutputsFromSplitFunctionWithSeveralOutputs/type=bf16_target_device=BATCH:CPU_,0 +ov_infer_request/OVInferRequestCheckTensorPrecision.getOutputsFromFunctionWithSeveralOutputs/type=bf16_target_device=BATCH:CPU_,0 +ov_infer_request/OVInferRequestCheckTensorPrecision.getOutputFromFunctionWithSingleInput/type=bf16_target_device=BATCH:CPU_,0 +ov_infer_request/OVInferRequestCheckTensorPrecision.getInputsFromFunctionWithSeveralInputs/type=bf16_target_device=BATCH:CPU_,0 +ov_infer_request/OVInferRequestCheckTensorPrecision.getInputFromFunctionWithSingleInput/type=bf16_target_device=BATCH:CPU_,0 
+"ov_compiled_model_AutoBatch/OVClassCompiledModelPropertiesTests.canCompileModelWithPropertiesAndCheckGetProperty/targetDevice=BATCH_properties={AUTO_BATCH_DEVICE_CONFIG:CPU,AUTO_BATCH_TIMEOUT:1}",0 +"ov_compiled_model_AutoBatch/OVClassCompiledModelPropertiesTests.canCompileModelWithPropertiesAndCheckGetProperty/targetDevice=BATCH_properties={AUTO_BATCH_DEVICE_CONFIG:CPU,AUTO_BATCH_TIMEOUT:10}",0 +"ov_compiled_model_AutoBatch/OVClassCompiledModelPropertiesTests.CanUseCache/targetDevice=BATCH_properties={AUTO_BATCH_DEVICE_CONFIG:CPU,AUTO_BATCH_TIMEOUT:1}",0 +"ov_compiled_model_AutoBatch/OVClassCompiledModelPropertiesTests.CanUseCache/targetDevice=BATCH_properties={AUTO_BATCH_DEVICE_CONFIG:CPU,AUTO_BATCH_TIMEOUT:10}",0 +ov_compiled_model/OVCompiledModelBaseTestOptional.checkGetExecGraphInfoIsNotNullptr/targetDevice=BATCH.CPU_,0 +ov_compiled_model/OVCompiledModelBaseTestOptional.CheckExecGraphInfoBeforeExecution/targetDevice=BATCH.CPU_,0 +ov_compiled_model/OVCompiledModelBaseTestOptional.CheckExecGraphInfoAfterExecution/targetDevice=BATCH.CPU_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=MULTI.CPU_elementType=u8_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=MULTI.CPU_elementType=u64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=MULTI.CPU_elementType=u32_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=MULTI.CPU_elementType=u16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=MULTI.CPU_elementType=i8_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=MULTI.CPU_elementType=i64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=MULTI.CPU_elementType=i32_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=MULTI.CPU_elementType=i16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=MULTI.CPU_elementType=f64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=MULTI.CPU_elementType=f32_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=MULTI.CPU_elementType=f16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=MULTI.CPU_elementType=boolean_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=MULTI.CPU_elementType=bf16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=BATCH.CPU_elementType=u8_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=BATCH.CPU_elementType=u64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=BATCH.CPU_elementType=u32_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=BATCH.CPU_elementType=u16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=BATCH.CPU_elementType=i8_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=BATCH.CPU_elementType=i64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=BATCH.CPU_elementType=i32_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=BATCH.CPU_elementType=i16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=BATCH.CPU_elementType=f64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=BATCH.CPU_elementType=f32_,0 
+ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=BATCH.CPU_elementType=f16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=BATCH.CPU_elementType=boolean_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=BATCH.CPU_elementType=bf16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=AUTO.CPU_elementType=u8_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=AUTO.CPU_elementType=u64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=AUTO.CPU_elementType=u32_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=AUTO.CPU_elementType=u16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=AUTO.CPU_elementType=i8_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=AUTO.CPU_elementType=i64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=AUTO.CPU_elementType=i32_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=AUTO.CPU_elementType=i16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=AUTO.CPU_elementType=f64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=AUTO.CPU_elementType=f32_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=AUTO.CPU_elementType=f16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=AUTO.CPU_elementType=boolean_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=AUTO.CPU_elementType=bf16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=MULTI.CPU_elementType=u8_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=MULTI.CPU_elementType=u64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=MULTI.CPU_elementType=u32_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=MULTI.CPU_elementType=u16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=MULTI.CPU_elementType=i8_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=MULTI.CPU_elementType=i64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=MULTI.CPU_elementType=i32_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=MULTI.CPU_elementType=i16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=MULTI.CPU_elementType=f64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=MULTI.CPU_elementType=f32_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=MULTI.CPU_elementType=f16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=MULTI.CPU_elementType=boolean_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=MULTI.CPU_elementType=bf16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=HETERO.CPU_elementType=boolean_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=CPU_elementType=boolean_,0 
+ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=BATCH.CPU_elementType=u8_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=BATCH.CPU_elementType=u64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=BATCH.CPU_elementType=u32_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=BATCH.CPU_elementType=u16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=BATCH.CPU_elementType=i8_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=BATCH.CPU_elementType=i64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=BATCH.CPU_elementType=i32_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=BATCH.CPU_elementType=i16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=BATCH.CPU_elementType=f64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=BATCH.CPU_elementType=f32_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=BATCH.CPU_elementType=f16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=BATCH.CPU_elementType=boolean_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=BATCH.CPU_elementType=bf16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=AUTO.CPU_elementType=u8_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=AUTO.CPU_elementType=u64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=AUTO.CPU_elementType=u32_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=AUTO.CPU_elementType=u16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=AUTO.CPU_elementType=i8_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=AUTO.CPU_elementType=i64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=AUTO.CPU_elementType=i32_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=AUTO.CPU_elementType=i16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=AUTO.CPU_elementType=f64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=AUTO.CPU_elementType=f32_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=AUTO.CPU_elementType=f16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=AUTO.CPU_elementType=boolean_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=AUTO.CPU_elementType=bf16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=MULTI.CPU_elementType=u8_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=MULTI.CPU_elementType=u64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=MULTI.CPU_elementType=u32_,0 
+ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=MULTI.CPU_elementType=u16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=MULTI.CPU_elementType=i8_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=MULTI.CPU_elementType=i64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=MULTI.CPU_elementType=i32_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=MULTI.CPU_elementType=i16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=MULTI.CPU_elementType=f64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=MULTI.CPU_elementType=f32_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=MULTI.CPU_elementType=f16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=MULTI.CPU_elementType=boolean_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=MULTI.CPU_elementType=bf16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=BATCH.CPU_elementType=u8_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=BATCH.CPU_elementType=u64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=BATCH.CPU_elementType=u32_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=BATCH.CPU_elementType=u16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=BATCH.CPU_elementType=i8_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=BATCH.CPU_elementType=i64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=BATCH.CPU_elementType=i32_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=BATCH.CPU_elementType=i16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=BATCH.CPU_elementType=f64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=BATCH.CPU_elementType=f32_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=BATCH.CPU_elementType=f16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=BATCH.CPU_elementType=boolean_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=BATCH.CPU_elementType=bf16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=AUTO.CPU_elementType=u8_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=AUTO.CPU_elementType=u64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=AUTO.CPU_elementType=u32_,0 
+ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=AUTO.CPU_elementType=u16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=AUTO.CPU_elementType=i8_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=AUTO.CPU_elementType=i64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=AUTO.CPU_elementType=i32_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=AUTO.CPU_elementType=i16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=AUTO.CPU_elementType=f64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=AUTO.CPU_elementType=f32_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=AUTO.CPU_elementType=f16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=AUTO.CPU_elementType=boolean_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=AUTO.CPU_elementType=bf16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=MULTI.CPU_elementType=u8_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=MULTI.CPU_elementType=u64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=MULTI.CPU_elementType=u32_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=MULTI.CPU_elementType=u16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=MULTI.CPU_elementType=i8_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=MULTI.CPU_elementType=i64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=MULTI.CPU_elementType=i32_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=MULTI.CPU_elementType=i16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=MULTI.CPU_elementType=f64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=MULTI.CPU_elementType=f32_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=MULTI.CPU_elementType=f16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=MULTI.CPU_elementType=boolean_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=MULTI.CPU_elementType=bf16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=BATCH.CPU_elementType=u8_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=BATCH.CPU_elementType=u64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=BATCH.CPU_elementType=u32_,0 
+ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=BATCH.CPU_elementType=u16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=BATCH.CPU_elementType=i8_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=BATCH.CPU_elementType=i64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=BATCH.CPU_elementType=i32_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=BATCH.CPU_elementType=i16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=BATCH.CPU_elementType=f64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=BATCH.CPU_elementType=f32_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=BATCH.CPU_elementType=f16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=BATCH.CPU_elementType=boolean_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=BATCH.CPU_elementType=bf16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=AUTO.CPU_elementType=u8_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=AUTO.CPU_elementType=u64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=AUTO.CPU_elementType=u32_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=AUTO.CPU_elementType=u16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=AUTO.CPU_elementType=i8_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=AUTO.CPU_elementType=i64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=AUTO.CPU_elementType=i32_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=AUTO.CPU_elementType=i16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=AUTO.CPU_elementType=f64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=AUTO.CPU_elementType=f32_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=AUTO.CPU_elementType=f16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=AUTO.CPU_elementType=boolean_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=AUTO.CPU_elementType=bf16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=MULTI.CPU_elementType=u8_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=MULTI.CPU_elementType=u64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=MULTI.CPU_elementType=u32_,0 
+ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=MULTI.CPU_elementType=u16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=MULTI.CPU_elementType=i8_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=MULTI.CPU_elementType=i64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=MULTI.CPU_elementType=i32_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=MULTI.CPU_elementType=i16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=MULTI.CPU_elementType=f64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=MULTI.CPU_elementType=f32_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=MULTI.CPU_elementType=f16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=MULTI.CPU_elementType=boolean_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=MULTI.CPU_elementType=bf16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=HETERO.CPU_elementType=boolean_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=CPU_elementType=boolean_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=BATCH.CPU_elementType=u8_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=BATCH.CPU_elementType=u64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=BATCH.CPU_elementType=u32_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=BATCH.CPU_elementType=u16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=BATCH.CPU_elementType=i8_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=BATCH.CPU_elementType=i64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=BATCH.CPU_elementType=i32_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=BATCH.CPU_elementType=i16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=BATCH.CPU_elementType=f64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=BATCH.CPU_elementType=f32_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=BATCH.CPU_elementType=f16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=BATCH.CPU_elementType=boolean_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=BATCH.CPU_elementType=bf16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=AUTO.CPU_elementType=u8_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=AUTO.CPU_elementType=u64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=AUTO.CPU_elementType=u32_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=AUTO.CPU_elementType=u16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=AUTO.CPU_elementType=i8_,0 
+ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=AUTO.CPU_elementType=i64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=AUTO.CPU_elementType=i32_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=AUTO.CPU_elementType=i16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=AUTO.CPU_elementType=f64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=AUTO.CPU_elementType=f32_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=AUTO.CPU_elementType=f16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=AUTO.CPU_elementType=boolean_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=AUTO.CPU_elementType=bf16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=MULTI.CPU_elementType=u8_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=MULTI.CPU_elementType=u64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=MULTI.CPU_elementType=u32_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=MULTI.CPU_elementType=u16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=MULTI.CPU_elementType=i8_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=MULTI.CPU_elementType=i64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=MULTI.CPU_elementType=i32_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=MULTI.CPU_elementType=i16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=MULTI.CPU_elementType=f64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=MULTI.CPU_elementType=f32_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=MULTI.CPU_elementType=f16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=MULTI.CPU_elementType=boolean_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=MULTI.CPU_elementType=bf16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=BATCH.CPU_elementType=u8_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=BATCH.CPU_elementType=u64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=BATCH.CPU_elementType=u32_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=BATCH.CPU_elementType=u16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=BATCH.CPU_elementType=i8_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=BATCH.CPU_elementType=i64_,0 
+ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=BATCH.CPU_elementType=i32_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=BATCH.CPU_elementType=i16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=BATCH.CPU_elementType=f64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=BATCH.CPU_elementType=f32_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=BATCH.CPU_elementType=f16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=BATCH.CPU_elementType=boolean_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=BATCH.CPU_elementType=bf16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=AUTO.CPU_elementType=u8_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=AUTO.CPU_elementType=u64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=AUTO.CPU_elementType=u32_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=AUTO.CPU_elementType=u16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=AUTO.CPU_elementType=i8_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=AUTO.CPU_elementType=i64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=AUTO.CPU_elementType=i32_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=AUTO.CPU_elementType=i16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=AUTO.CPU_elementType=f64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=AUTO.CPU_elementType=f32_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=AUTO.CPU_elementType=f16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=AUTO.CPU_elementType=boolean_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=AUTO.CPU_elementType=bf16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=MULTI.CPU_elementType=u8_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=MULTI.CPU_elementType=u64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=MULTI.CPU_elementType=u32_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=MULTI.CPU_elementType=u16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=MULTI.CPU_elementType=i8_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=MULTI.CPU_elementType=i64_,0 
+ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=MULTI.CPU_elementType=i32_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=MULTI.CPU_elementType=i16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=MULTI.CPU_elementType=f64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=MULTI.CPU_elementType=f32_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=MULTI.CPU_elementType=f16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=MULTI.CPU_elementType=boolean_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=MULTI.CPU_elementType=bf16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=BATCH.CPU_elementType=u8_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=BATCH.CPU_elementType=u64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=BATCH.CPU_elementType=u32_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=BATCH.CPU_elementType=u16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=BATCH.CPU_elementType=i8_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=BATCH.CPU_elementType=i64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=BATCH.CPU_elementType=i32_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=BATCH.CPU_elementType=i16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=BATCH.CPU_elementType=f64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=BATCH.CPU_elementType=f32_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=BATCH.CPU_elementType=f16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=BATCH.CPU_elementType=boolean_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=BATCH.CPU_elementType=bf16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=AUTO.CPU_elementType=u8_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=AUTO.CPU_elementType=u64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=AUTO.CPU_elementType=u32_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=AUTO.CPU_elementType=u16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=AUTO.CPU_elementType=i8_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=AUTO.CPU_elementType=i64_,0 
+ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=AUTO.CPU_elementType=i32_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=AUTO.CPU_elementType=i16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=AUTO.CPU_elementType=f64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=AUTO.CPU_elementType=f32_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=AUTO.CPU_elementType=f16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=AUTO.CPU_elementType=boolean_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=AUTO.CPU_elementType=bf16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=MULTI.CPU_elementType=u8_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=MULTI.CPU_elementType=u64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=MULTI.CPU_elementType=u32_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=MULTI.CPU_elementType=u16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=MULTI.CPU_elementType=i8_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=MULTI.CPU_elementType=i64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=MULTI.CPU_elementType=i32_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=MULTI.CPU_elementType=i16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=MULTI.CPU_elementType=f64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=MULTI.CPU_elementType=f32_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=MULTI.CPU_elementType=f16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=MULTI.CPU_elementType=boolean_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=MULTI.CPU_elementType=bf16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=HETERO.CPU_elementType=boolean_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=CPU_elementType=boolean_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=BATCH.CPU_elementType=u8_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=BATCH.CPU_elementType=u64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=BATCH.CPU_elementType=u32_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=BATCH.CPU_elementType=u16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=BATCH.CPU_elementType=i8_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=BATCH.CPU_elementType=i64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=BATCH.CPU_elementType=i32_,0 
+ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=BATCH.CPU_elementType=i16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=BATCH.CPU_elementType=f64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=BATCH.CPU_elementType=f32_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=BATCH.CPU_elementType=f16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=BATCH.CPU_elementType=boolean_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=BATCH.CPU_elementType=bf16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=AUTO.CPU_elementType=u8_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=AUTO.CPU_elementType=u64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=AUTO.CPU_elementType=u32_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=AUTO.CPU_elementType=u16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=AUTO.CPU_elementType=i8_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=AUTO.CPU_elementType=i64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=AUTO.CPU_elementType=i32_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=AUTO.CPU_elementType=i16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=AUTO.CPU_elementType=f64_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=AUTO.CPU_elementType=f32_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=AUTO.CPU_elementType=f16_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=AUTO.CPU_elementType=boolean_,0 +ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=AUTO.CPU_elementType=bf16_,0 +ov_compiled_model/OVClassCompiledModelSetIncorrectConfigTest.canNotSetConfigToCompiledModelWithIncorrectConfig/3,0 +ov_compiled_model/OVClassCompiledModelImportExportTestP.smoke_ImportNetworkNoThrowWithDeviceName/4,0 +ov_compiled_model/OVClassCompiledModelImportExportTestP.smoke_ImportNetworkNoThrowWithDeviceName/3,0 +ov_compiled_model/OVClassCompiledModelImportExportTestP.smoke_ImportNetworkNoThrowWithDeviceName/2,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_u8_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_u8_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_u8_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_u8_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_u8_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_u8_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_u8_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_u8_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_u64_batch2_MULTI.CPU,0 
+ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_u64_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_u64_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_u64_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_u64_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_u64_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_u64_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_u64_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_u32_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_u32_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_u32_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_u32_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_u32_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_u32_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_u32_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_u32_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_u16_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_u16_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_u16_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_u16_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_u16_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_u16_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_u16_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_u16_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_i8_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_i8_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_i8_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_i8_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_i8_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_i8_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_i8_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_i8_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_i64_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_i64_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_i64_batch2_BATCH.CPU,0 
+ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_i64_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_i64_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_i64_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_i64_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_i64_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_i32_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_i32_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_i32_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_i32_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_i32_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_i32_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_i32_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_i32_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_i16_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_i16_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_i16_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_i16_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_i16_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_i16_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_i16_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_i16_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_f64_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_f64_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_f64_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_f64_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_f64_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_f64_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_f64_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_f64_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_f32_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_f32_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_f32_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_f32_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_f32_batch1_MULTI.CPU,0 
+ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_f32_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_f32_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_f32_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_f16_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_f16_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_f16_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_f16_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_f16_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_f16_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_f16_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_f16_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u8_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u8_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u8_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u8_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u8_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u8_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u8_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u8_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u64_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u64_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u64_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u64_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u64_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u64_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u64_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u64_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u32_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u32_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u32_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u32_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u32_batch1_MULTI.CPU,0 
+ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u32_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u32_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u32_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u16_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u16_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u16_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u16_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u16_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u16_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u16_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u16_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i8_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i8_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i8_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i8_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i8_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i8_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i8_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i8_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i64_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i64_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i64_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i64_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i64_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i64_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i64_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i64_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i32_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i32_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i32_batch2_BATCH.CPU,0 
+ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i32_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i32_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i32_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i32_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i32_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i16_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i16_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i16_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i16_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i16_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i16_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i16_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i16_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f64_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f64_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f64_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f64_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f64_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f64_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f64_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f64_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f32_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f32_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f32_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f32_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f32_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f32_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f32_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f32_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f16_batch2_MULTI.CPU,0 
+ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f16_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f16_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f16_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f16_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f16_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f16_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f16_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u8_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u8_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u8_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u8_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u8_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u8_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u8_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u8_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u64_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u64_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u64_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u64_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u64_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u64_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u64_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u64_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u32_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u32_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u32_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u32_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u32_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u32_batch1_HETERO.CPU,0 
+ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u32_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u32_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u16_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u16_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u16_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u16_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u16_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u16_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u16_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u16_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i8_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i8_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i8_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i8_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i8_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i8_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i8_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i8_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i64_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i64_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i64_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i64_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i64_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i64_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i64_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i64_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i32_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i32_batch2_HETERO.CPU,0 
+ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i32_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i32_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i32_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i32_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i32_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i32_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i16_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i16_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i16_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i16_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i16_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i16_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i16_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i16_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f64_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f64_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f64_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f64_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f64_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f64_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f64_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f64_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f32_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f32_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f32_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f32_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f32_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f32_batch1_HETERO.CPU,0 
+ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f32_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f32_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f16_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f16_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f16_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f16_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f16_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f16_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f16_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f16_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u8_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u8_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u8_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u8_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u8_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u8_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u8_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u8_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u64_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u64_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u64_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u64_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u64_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u64_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u64_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u64_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u32_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u32_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u32_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u32_batch2_AUTO.CPU,0 
+ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u32_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u32_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u32_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u32_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u16_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u16_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u16_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u16_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u16_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u16_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u16_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u16_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i8_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i8_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i8_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i8_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i8_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i8_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i8_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i8_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i64_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i64_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i64_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i64_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i64_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i64_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i64_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i64_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i32_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i32_batch2_HETERO.CPU,0 
+ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i32_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i32_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i32_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i32_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i32_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i32_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i16_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i16_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i16_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i16_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i16_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i16_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i16_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i16_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f64_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f64_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f64_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f64_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f64_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f64_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f64_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f64_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f32_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f32_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f32_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f32_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f32_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f32_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f32_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f32_batch1_AUTO.CPU,0 
+ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f16_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f16_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f16_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f16_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f16_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f16_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f16_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f16_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_u8_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_u8_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_u8_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_u8_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_u8_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_u8_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_u8_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_u8_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_u64_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_u64_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_u64_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_u64_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_u64_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_u64_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_u64_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_u64_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_u32_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_u32_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_u32_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_u32_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_u32_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_u32_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_u32_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_u32_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_u16_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_u16_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_u16_batch2_BATCH.CPU,0 
+ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_u16_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_u16_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_u16_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_u16_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_u16_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_i8_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_i8_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_i8_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_i8_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_i8_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_i8_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_i8_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_i8_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_i64_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_i64_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_i64_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_i64_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_i64_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_i64_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_i64_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_i64_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_i32_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_i32_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_i32_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_i32_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_i32_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_i32_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_i32_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_i32_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_i16_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_i16_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_i16_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_i16_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_i16_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_i16_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_i16_batch1_BATCH.CPU,0 
+ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_i16_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_f64_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_f64_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_f64_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_f64_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_f64_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_f64_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_f64_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_f64_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_f32_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_f32_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_f32_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_f32_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_f32_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_f32_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_f32_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_f32_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_f16_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_f16_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_f16_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_f16_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_f16_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_f16_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_f16_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_f16_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u8_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u8_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u8_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u8_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u8_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u8_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u8_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u8_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u64_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u64_batch2_HETERO.CPU,0 
+ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u64_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u64_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u64_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u64_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u64_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u64_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u32_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u32_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u32_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u32_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u32_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u32_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u32_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u32_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u16_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u16_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u16_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u16_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u16_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u16_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u16_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u16_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i8_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i8_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i8_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i8_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i8_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i8_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i8_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i8_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i64_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i64_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i64_batch2_BATCH.CPU,0 
+ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i64_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i64_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i64_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i64_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i64_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i32_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i32_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i32_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i32_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i32_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i32_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i32_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i32_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i16_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i16_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i16_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i16_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i16_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i16_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i16_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i16_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f64_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f64_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f64_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f64_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f64_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f64_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f64_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f64_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f32_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f32_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f32_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f32_batch2_AUTO.CPU,0 
+ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f32_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f32_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f32_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f32_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f16_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f16_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f16_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f16_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f16_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f16_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f16_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f16_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u8_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u8_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u8_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u8_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u8_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u8_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u8_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u8_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u64_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u64_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u64_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u64_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u64_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u64_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u64_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u64_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u32_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u32_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u32_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u32_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u32_batch1_MULTI.CPU,0 
+ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u32_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u32_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u32_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u16_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u16_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u16_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u16_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u16_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u16_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u16_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u16_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i8_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i8_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i8_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i8_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i8_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i8_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i8_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i8_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i64_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i64_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i64_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i64_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i64_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i64_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i64_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i64_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i32_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i32_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i32_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i32_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i32_batch1_MULTI.CPU,0 
+ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i32_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i32_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i32_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i16_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i16_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i16_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i16_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i16_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i16_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i16_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i16_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f64_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f64_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f64_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f64_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f64_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f64_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f64_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f64_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f32_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f32_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f32_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f32_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f32_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f32_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f32_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f32_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f16_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f16_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f16_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f16_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f16_batch1_MULTI.CPU,0 
+ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f16_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f16_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f16_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u8_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u8_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u8_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u8_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u8_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u8_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u8_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u8_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u64_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u64_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u64_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u64_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u64_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u64_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u64_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u64_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u32_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u32_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u32_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u32_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u32_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u32_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u32_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u32_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u16_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u16_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u16_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u16_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u16_batch1_MULTI.CPU,0 
+ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u16_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u16_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u16_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i8_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i8_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i8_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i8_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i8_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i8_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i8_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i8_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i64_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i64_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i64_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i64_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i64_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i64_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i64_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i64_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i32_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i32_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i32_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i32_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i32_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i32_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i32_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i32_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i16_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i16_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i16_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i16_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i16_batch1_MULTI.CPU,0 
+ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i16_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i16_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i16_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f64_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f64_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f64_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f64_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f64_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f64_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f64_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f64_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f32_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f32_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f32_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f32_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f32_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f32_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f32_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f32_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f16_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f16_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f16_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f16_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f16_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f16_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f16_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f16_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u8_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u8_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u8_batch2_CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u8_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u8_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u8_batch1_MULTI.CPU,0 
+ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u8_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u8_batch1_CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u8_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u8_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u64_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u64_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u64_batch2_CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u64_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u64_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u64_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u64_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u64_batch1_CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u64_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u64_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u32_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u32_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u32_batch2_CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u32_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u32_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u32_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u32_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u32_batch1_CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u32_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u32_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u16_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u16_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u16_batch2_CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u16_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u16_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u16_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u16_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u16_batch1_CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u16_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u16_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i8_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i8_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i8_batch2_BATCH.CPU,0 
+ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i8_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i8_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i8_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i8_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i8_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i64_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i64_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i64_batch2_CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i64_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i64_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i64_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i64_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i64_batch1_CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i64_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i64_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i32_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i32_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i32_batch2_CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i32_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i32_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i32_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i32_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i32_batch1_CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i32_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i32_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i16_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i16_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i16_batch2_CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i16_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i16_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i16_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i16_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i16_batch1_CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i16_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i16_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_f64_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_f64_batch2_HETERO.CPU,0 
+ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_f64_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_f64_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_f64_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_f64_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_f64_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_f64_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_f32_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_f32_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_f32_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_f32_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_f32_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_f32_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_f32_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_f32_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_f16_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_f16_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_f16_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_f16_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_f16_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_f16_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_f16_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_f16_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_u8_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_u8_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_u8_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_u8_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_u8_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_u8_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_u8_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_u8_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_u64_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_u64_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_u64_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_u64_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_u64_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_u64_batch1_HETERO.CPU,0 
+ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_u64_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_u64_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_u32_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_u32_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_u32_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_u32_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_u32_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_u32_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_u32_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_u32_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_u16_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_u16_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_u16_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_u16_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_u16_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_u16_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_u16_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_u16_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_i8_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_i8_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_i8_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_i8_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_i8_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_i8_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_i8_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_i8_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_i64_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_i64_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_i64_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_i64_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_i64_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_i64_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_i64_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_i64_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_i32_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_i32_batch2_HETERO.CPU,0 
+ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_i32_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_i32_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_i32_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_i32_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_i32_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_i32_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_i16_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_i16_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_i16_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_i16_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_i16_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_i16_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_i16_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_i16_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_f64_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_f64_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_f64_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_f64_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_f64_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_f64_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_f64_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_f64_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_f32_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_f32_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_f32_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_f32_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_f32_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_f32_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_f32_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_f32_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_f16_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_f16_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_f16_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_f16_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_f16_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_f16_batch1_HETERO.CPU,0 
+ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_f16_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_f16_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u8_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u8_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u8_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u8_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u8_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u8_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u8_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u8_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u64_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u64_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u64_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u64_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u64_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u64_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u64_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u64_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u32_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u32_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u32_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u32_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u32_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u32_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u32_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u32_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u16_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u16_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u16_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u16_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u16_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u16_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u16_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u16_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i8_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i8_batch2_HETERO.CPU,0 
+ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i8_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i8_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i8_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i8_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i8_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i8_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i64_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i64_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i64_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i64_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i64_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i64_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i64_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i64_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i32_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i32_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i32_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i32_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i32_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i32_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i32_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i32_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i16_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i16_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i16_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i16_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i16_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i16_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i16_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i16_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f64_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f64_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f64_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f64_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f64_batch1_MULTI.CPU,0 
+ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f64_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f64_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f64_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f32_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f32_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f32_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f32_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f32_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f32_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f32_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f32_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f16_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f16_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f16_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f16_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f16_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f16_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f16_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f16_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_u8_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_u8_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_u8_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_u8_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_u8_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_u8_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_u8_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_u8_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_u64_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_u64_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_u64_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_u64_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_u64_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_u64_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_u64_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_u64_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_u32_batch2_MULTI.CPU,0 
+ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_u32_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_u32_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_u32_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_u32_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_u32_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_u32_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_u32_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_u16_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_u16_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_u16_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_u16_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_u16_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_u16_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_u16_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_u16_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_i8_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_i8_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_i8_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_i8_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_i8_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_i8_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_i8_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_i8_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_i64_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_i64_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_i64_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_i64_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_i64_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_i64_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_i64_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_i64_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_i32_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_i32_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_i32_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_i32_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_i32_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_i32_batch1_HETERO.CPU,0 
+ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_i32_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_i32_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_i16_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_i16_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_i16_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_i16_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_i16_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_i16_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_i16_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_i16_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_f64_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_f64_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_f64_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_f64_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_f64_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_f64_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_f64_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_f64_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_f32_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_f32_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_f32_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_f32_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_f32_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_f32_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_f32_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_f32_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_f16_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_f16_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_f16_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_f16_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_f16_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_f16_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_f16_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_f16_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_u8_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_u8_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_u8_batch2_BATCH.CPU,0 
+ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_u8_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_u8_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_u8_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_u8_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_u8_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_u64_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_u64_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_u64_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_u64_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_u64_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_u64_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_u64_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_u64_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_u32_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_u32_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_u32_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_u32_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_u32_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_u32_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_u32_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_u32_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_u16_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_u16_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_u16_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_u16_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_u16_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_u16_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_u16_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_u16_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_i8_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_i8_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_i8_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_i8_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_i8_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_i8_batch1_HETERO.CPU,0 
+ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_i8_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_i8_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_i64_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_i64_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_i64_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_i64_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_i64_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_i64_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_i64_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_i64_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_i32_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_i32_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_i32_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_i32_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_i32_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_i32_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_i32_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_i32_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_i16_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_i16_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_i16_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_i16_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_i16_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_i16_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_i16_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_i16_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_f64_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_f64_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_f64_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_f64_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_f64_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_f64_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_f64_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_f64_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_f32_batch2_MULTI.CPU,0 
+ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_f32_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_f32_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_f32_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_f32_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_f32_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_f32_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_f32_batch1_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_f16_batch2_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_f16_batch2_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_f16_batch2_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_f16_batch2_AUTO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_f16_batch1_MULTI.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_f16_batch1_HETERO.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_f16_batch1_BATCH.CPU,0 +ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_f16_batch1_AUTO.CPU,0 +ie_plugin_float/LoadNetworkCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f64_batch2_MULTI.CPU,0 +ie_plugin_float/LoadNetworkCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f64_batch2_HETERO.CPU,0 +ie_plugin_float/LoadNetworkCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f64_batch2_BATCH.CPU,0 +ie_plugin_float/LoadNetworkCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f64_batch2_AUTO.CPU,0 +ie_plugin_float/LoadNetworkCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f64_batch1_MULTI.CPU,0 +ie_plugin_float/LoadNetworkCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f64_batch1_HETERO.CPU,0 +ie_plugin_float/LoadNetworkCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f64_batch1_BATCH.CPU,0 +ie_plugin_float/LoadNetworkCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f64_batch1_AUTO.CPU,0 +ie_plugin_float/LoadNetworkCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f32_batch2_MULTI.CPU,0 +ie_plugin_float/LoadNetworkCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f32_batch2_HETERO.CPU,0 +ie_plugin_float/LoadNetworkCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f32_batch2_BATCH.CPU,0 +ie_plugin_float/LoadNetworkCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f32_batch2_AUTO.CPU,0 +ie_plugin_float/LoadNetworkCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f32_batch1_MULTI.CPU,0 +ie_plugin_float/LoadNetworkCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f32_batch1_HETERO.CPU,0 +ie_plugin_float/LoadNetworkCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f32_batch1_BATCH.CPU,0 +ie_plugin_float/LoadNetworkCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f32_batch1_AUTO.CPU,0 +ie_plugin_float/LoadNetworkCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f16_batch2_MULTI.CPU,0 +ie_plugin_float/LoadNetworkCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f16_batch2_HETERO.CPU,0 +ie_plugin_float/LoadNetworkCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f16_batch2_BATCH.CPU,0 +ie_plugin_float/LoadNetworkCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f16_batch2_AUTO.CPU,0 
+ie_plugin_float/LoadNetworkCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f16_batch1_MULTI.CPU,0 +ie_plugin_float/LoadNetworkCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f16_batch1_HETERO.CPU,0 +ie_plugin_float/LoadNetworkCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f16_batch1_BATCH.CPU,0 +ie_plugin_float/LoadNetworkCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f16_batch1_AUTO.CPU,0 +ie_plugin_AutoBatch/CorrectConfigTests.CanUseCache/target_device=BATCH_config=(=_AUTO_BATCH_DEVICE_CONFIG=CPU_),0 +ie_plugin_AutoBatch/CorrectConfigTests.CanLoadNetworkWithCorrectConfig/target_device=BATCH_config=(=_AUTO_BATCH_DEVICE_CONFIG=CPU_),0 +ie_plugin_/CoreThreadingTests.smoke_QueryNetwork/targetDevice=BATCH_config=AUTO_BATCH_DEVICE_CONFIG=_,0 +ie_plugin/VersionTest.pluginCurrentVersionIsCorrect/targetDevice=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessTest.SetScalePreProcessSetBlob/netPRC=FP32_targetDevice=BATCH_CPU_,0 +ie_plugin/InferRequestPreprocessTest.SetScalePreProcessSetBlob/netPRC=FP16_targetDevice=BATCH_CPU_,0 +ie_plugin/InferRequestPreprocessTest.SetScalePreProcessGetBlob/netPRC=FP32_targetDevice=BATCH_CPU_,0 +ie_plugin/InferRequestPreprocessTest.SetScalePreProcessGetBlob/netPRC=FP16_targetDevice=BATCH_CPU_,0 +ie_plugin/InferRequestPreprocessTest.SetPreProcessToInputInfo/netPRC=FP32_targetDevice=HETERO_CPU_,0 +ie_plugin/InferRequestPreprocessTest.SetPreProcessToInputInfo/netPRC=FP32_targetDevice=BATCH_CPU_,0 +ie_plugin/InferRequestPreprocessTest.SetPreProcessToInputInfo/netPRC=FP16_targetDevice=HETERO_CPU_,0 +ie_plugin/InferRequestPreprocessTest.SetPreProcessToInputInfo/netPRC=FP16_targetDevice=BATCH_CPU_,0 +ie_plugin/InferRequestPreprocessTest.SetPreProcessToInferRequest/netPRC=FP32_targetDevice=HETERO_CPU_,0 +ie_plugin/InferRequestPreprocessTest.SetPreProcessToInferRequest/netPRC=FP32_targetDevice=BATCH_CPU_,0 +ie_plugin/InferRequestPreprocessTest.SetPreProcessToInferRequest/netPRC=FP16_targetDevice=HETERO_CPU_,0 +ie_plugin/InferRequestPreprocessTest.SetPreProcessToInferRequest/netPRC=FP16_targetDevice=BATCH_CPU_,0 +ie_plugin/InferRequestPreprocessTest.SetMeanValuePreProcessSetBlob/netPRC=FP32_targetDevice=BATCH_CPU_,0 +ie_plugin/InferRequestPreprocessTest.SetMeanValuePreProcessSetBlob/netPRC=FP16_targetDevice=BATCH_CPU_,0 +ie_plugin/InferRequestPreprocessTest.SetMeanValuePreProcessGetBlob/netPRC=FP32_targetDevice=BATCH_CPU_,0 +ie_plugin/InferRequestPreprocessTest.SetMeanValuePreProcessGetBlob/netPRC=FP16_targetDevice=BATCH_CPU_,0 +ie_plugin/InferRequestPreprocessTest.SetMeanImagePreProcessSetBlob/netPRC=FP32_targetDevice=BATCH_CPU_,0 +ie_plugin/InferRequestPreprocessTest.SetMeanImagePreProcessSetBlob/netPRC=FP16_targetDevice=BATCH_CPU_,0 +ie_plugin/InferRequestPreprocessTest.SetMeanImagePreProcessGetBlob/netPRC=FP32_targetDevice=BATCH_CPU_,0 +ie_plugin/InferRequestPreprocessTest.SetMeanImagePreProcessGetBlob/netPRC=FP16_targetDevice=BATCH_CPU_,0 +ie_plugin/InferRequestPreprocessTest.ReverseInputChannelsPreProcessSetBlob/netPRC=FP32_targetDevice=HETERO_CPU_,0 +ie_plugin/InferRequestPreprocessTest.ReverseInputChannelsPreProcessSetBlob/netPRC=FP32_targetDevice=BATCH_CPU_,0 +ie_plugin/InferRequestPreprocessTest.ReverseInputChannelsPreProcessSetBlob/netPRC=FP16_targetDevice=BATCH_CPU_,0 +ie_plugin/InferRequestPreprocessTest.ReverseInputChannelsPreProcessGetBlob/netPRC=FP32_targetDevice=HETERO_CPU_,0 +ie_plugin/InferRequestPreprocessTest.ReverseInputChannelsPreProcessGetBlob/netPRC=FP32_targetDevice=BATCH_CPU_,0 
+ie_plugin/InferRequestPreprocessTest.ReverseInputChannelsPreProcessGetBlob/netPRC=FP16_targetDevice=BATCH_CPU_,0 +ie_plugin/InferRequestPreprocessTest.ReverseInputChannelsPreProcessGetBlob/netPRC=FP16_targetDevice=HETERO_CPU_,0 +ie_plugin/InferRequestPreprocessTest.ReverseInputChannelsPreProcessSetBlob/netPRC=FP32_targetDevice=AUTO_CPU_,0 +ie_plugin/InferRequestPreprocessTest.InferWithRGB2BGRConversion/netPRC=FP32_targetDevice=BATCH_CPU_,0 +ie_plugin/InferRequestPreprocessTest.InferWithRGB2BGRConversion/netPRC=FP16_targetDevice=BATCH_CPU_,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=1_oPRC=1_netLT=NCHW_iLT=1_oLT=1_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=1_oPRC=1_netLT=NCHW_iLT=1_oLT=0_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=1_oPRC=1_netLT=NCHW_iLT=0_oLT=1_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=1_oPRC=1_netLT=NCHW_iLT=0_oLT=0_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=1_oPRC=0_netLT=NCHW_iLT=1_oLT=1_setIBlob=1_setOBlob=1_target_device=MULTI_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=1_oPRC=0_netLT=NCHW_iLT=1_oLT=1_setIBlob=1_setOBlob=1_target_device=HETERO_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=1_oPRC=0_netLT=NCHW_iLT=1_oLT=1_setIBlob=1_setOBlob=1_target_device=CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=1_oPRC=0_netLT=NCHW_iLT=1_oLT=1_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=1_oPRC=0_netLT=NCHW_iLT=1_oLT=1_setIBlob=1_setOBlob=1_target_device=AUTO_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=1_oPRC=0_netLT=NCHW_iLT=1_oLT=0_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=1_oPRC=0_netLT=NCHW_iLT=0_oLT=1_setIBlob=1_setOBlob=1_target_device=MULTI_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=1_oPRC=0_netLT=NCHW_iLT=0_oLT=1_setIBlob=1_setOBlob=1_target_device=HETERO_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=1_oPRC=0_netLT=NCHW_iLT=0_oLT=1_setIBlob=1_setOBlob=1_target_device=CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=1_oPRC=0_netLT=NCHW_iLT=0_oLT=1_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=1_oPRC=0_netLT=NCHW_iLT=0_oLT=1_setIBlob=1_setOBlob=1_target_device=AUTO_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=1_oPRC=0_netLT=NCHW_iLT=0_oLT=0_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=0_oPRC=1_netLT=NCHW_iLT=1_oLT=1_setIBlob=1_setOBlob=1_target_device=MULTI_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=0_oPRC=1_netLT=NCHW_iLT=1_oLT=1_setIBlob=1_setOBlob=1_target_device=HETERO_CPU,0 
+ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=0_oPRC=1_netLT=NCHW_iLT=1_oLT=1_setIBlob=1_setOBlob=1_target_device=CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=0_oPRC=1_netLT=NCHW_iLT=1_oLT=1_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=0_oPRC=1_netLT=NCHW_iLT=1_oLT=1_setIBlob=1_setOBlob=1_target_device=AUTO_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=0_oPRC=1_netLT=NCHW_iLT=1_oLT=0_setIBlob=1_setOBlob=1_target_device=MULTI_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=0_oPRC=1_netLT=NCHW_iLT=1_oLT=0_setIBlob=1_setOBlob=1_target_device=HETERO_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=0_oPRC=1_netLT=NCHW_iLT=1_oLT=0_setIBlob=1_setOBlob=1_target_device=CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=0_oPRC=1_netLT=NCHW_iLT=1_oLT=0_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=0_oPRC=1_netLT=NCHW_iLT=1_oLT=0_setIBlob=1_setOBlob=1_target_device=AUTO_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=0_oPRC=1_netLT=NCHW_iLT=0_oLT=1_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=0_oPRC=1_netLT=NCHW_iLT=0_oLT=0_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=0_oPRC=0_netLT=NCHW_iLT=1_oLT=1_setIBlob=1_setOBlob=1_target_device=MULTI_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=0_oPRC=0_netLT=NCHW_iLT=1_oLT=1_setIBlob=1_setOBlob=1_target_device=HETERO_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=0_oPRC=0_netLT=NCHW_iLT=1_oLT=1_setIBlob=1_setOBlob=1_target_device=CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=0_oPRC=0_netLT=NCHW_iLT=1_oLT=1_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=0_oPRC=0_netLT=NCHW_iLT=1_oLT=1_setIBlob=1_setOBlob=1_target_device=AUTO_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=0_oPRC=0_netLT=NCHW_iLT=1_oLT=0_setIBlob=1_setOBlob=1_target_device=MULTI_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=0_oPRC=0_netLT=NCHW_iLT=1_oLT=0_setIBlob=1_setOBlob=1_target_device=HETERO_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=0_oPRC=0_netLT=NCHW_iLT=1_oLT=0_setIBlob=1_setOBlob=1_target_device=CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=0_oPRC=0_netLT=NCHW_iLT=1_oLT=0_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=0_oPRC=0_netLT=NCHW_iLT=1_oLT=0_setIBlob=1_setOBlob=1_target_device=AUTO_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=0_oPRC=0_netLT=NCHW_iLT=0_oLT=1_setIBlob=1_setOBlob=1_target_device=MULTI_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=0_oPRC=0_netLT=NCHW_iLT=0_oLT=1_setIBlob=1_setOBlob=1_target_device=HETERO_CPU,0 
+ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=0_oPRC=0_netLT=NCHW_iLT=0_oLT=1_setIBlob=1_setOBlob=1_target_device=CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=0_oPRC=0_netLT=NCHW_iLT=0_oLT=1_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=0_oPRC=0_netLT=NCHW_iLT=0_oLT=1_setIBlob=1_setOBlob=1_target_device=AUTO_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=0_oPRC=0_netLT=NCHW_iLT=0_oLT=0_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=1_oPRC=1_netLT=NCHW_iLT=1_oLT=1_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=1_oPRC=1_netLT=NCHW_iLT=1_oLT=0_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=1_oPRC=1_netLT=NCHW_iLT=0_oLT=1_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=1_oPRC=1_netLT=NCHW_iLT=0_oLT=0_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=1_oPRC=0_netLT=NCHW_iLT=1_oLT=1_setIBlob=1_setOBlob=1_target_device=MULTI_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=1_oPRC=0_netLT=NCHW_iLT=1_oLT=1_setIBlob=1_setOBlob=1_target_device=HETERO_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=1_oPRC=0_netLT=NCHW_iLT=1_oLT=1_setIBlob=1_setOBlob=1_target_device=CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=1_oPRC=0_netLT=NCHW_iLT=1_oLT=1_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=1_oPRC=0_netLT=NCHW_iLT=1_oLT=1_setIBlob=1_setOBlob=1_target_device=AUTO_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=1_oPRC=0_netLT=NCHW_iLT=1_oLT=0_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=1_oPRC=0_netLT=NCHW_iLT=0_oLT=1_setIBlob=1_setOBlob=1_target_device=MULTI_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=1_oPRC=0_netLT=NCHW_iLT=0_oLT=1_setIBlob=1_setOBlob=1_target_device=HETERO_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=1_oPRC=0_netLT=NCHW_iLT=0_oLT=1_setIBlob=1_setOBlob=1_target_device=CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=1_oPRC=0_netLT=NCHW_iLT=0_oLT=1_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=1_oPRC=0_netLT=NCHW_iLT=0_oLT=1_setIBlob=1_setOBlob=1_target_device=AUTO_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=1_oPRC=0_netLT=NCHW_iLT=0_oLT=0_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=0_oPRC=1_netLT=NCHW_iLT=1_oLT=1_setIBlob=1_setOBlob=1_target_device=MULTI_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=0_oPRC=1_netLT=NCHW_iLT=1_oLT=1_setIBlob=1_setOBlob=1_target_device=HETERO_CPU,0 
+ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=0_oPRC=1_netLT=NCHW_iLT=1_oLT=1_setIBlob=1_setOBlob=1_target_device=CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=0_oPRC=1_netLT=NCHW_iLT=1_oLT=1_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=0_oPRC=1_netLT=NCHW_iLT=1_oLT=1_setIBlob=1_setOBlob=1_target_device=AUTO_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=0_oPRC=1_netLT=NCHW_iLT=1_oLT=0_setIBlob=1_setOBlob=1_target_device=MULTI_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=0_oPRC=1_netLT=NCHW_iLT=1_oLT=0_setIBlob=1_setOBlob=1_target_device=HETERO_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=0_oPRC=1_netLT=NCHW_iLT=1_oLT=0_setIBlob=1_setOBlob=1_target_device=CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=0_oPRC=1_netLT=NCHW_iLT=1_oLT=0_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=0_oPRC=1_netLT=NCHW_iLT=1_oLT=0_setIBlob=1_setOBlob=1_target_device=AUTO_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=0_oPRC=1_netLT=NCHW_iLT=0_oLT=1_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=0_oPRC=1_netLT=NCHW_iLT=0_oLT=0_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=0_oPRC=0_netLT=NCHW_iLT=1_oLT=1_setIBlob=1_setOBlob=1_target_device=MULTI_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=0_oPRC=0_netLT=NCHW_iLT=1_oLT=1_setIBlob=1_setOBlob=1_target_device=HETERO_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=0_oPRC=0_netLT=NCHW_iLT=1_oLT=1_setIBlob=1_setOBlob=1_target_device=CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=0_oPRC=0_netLT=NCHW_iLT=1_oLT=1_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=0_oPRC=0_netLT=NCHW_iLT=1_oLT=1_setIBlob=1_setOBlob=1_target_device=AUTO_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=0_oPRC=0_netLT=NCHW_iLT=1_oLT=0_setIBlob=1_setOBlob=1_target_device=MULTI_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=0_oPRC=0_netLT=NCHW_iLT=1_oLT=0_setIBlob=1_setOBlob=1_target_device=HETERO_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=0_oPRC=0_netLT=NCHW_iLT=1_oLT=0_setIBlob=1_setOBlob=1_target_device=CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=0_oPRC=0_netLT=NCHW_iLT=1_oLT=0_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=0_oPRC=0_netLT=NCHW_iLT=1_oLT=0_setIBlob=1_setOBlob=1_target_device=AUTO_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=0_oPRC=0_netLT=NCHW_iLT=0_oLT=1_setIBlob=1_setOBlob=1_target_device=MULTI_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=0_oPRC=0_netLT=NCHW_iLT=0_oLT=1_setIBlob=1_setOBlob=1_target_device=HETERO_CPU,0 
+ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=0_oPRC=0_netLT=NCHW_iLT=0_oLT=1_setIBlob=1_setOBlob=1_target_device=CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=0_oPRC=0_netLT=NCHW_iLT=0_oLT=1_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=0_oPRC=0_netLT=NCHW_iLT=0_oLT=1_setIBlob=1_setOBlob=1_target_device=AUTO_CPU,0 +ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=0_oPRC=0_netLT=NCHW_iLT=0_oLT=0_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=U8_oPRC=U8_netLT=NCHW_iLT=NHWC_oLT=NHWC_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=U8_oPRC=U8_netLT=NCHW_iLT=NHWC_oLT=NHWC_setIBlob=1_setOBlob=0_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=U8_oPRC=U8_netLT=NCHW_iLT=NHWC_oLT=NHWC_setIBlob=0_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=U8_oPRC=U8_netLT=NCHW_iLT=NHWC_oLT=NHWC_setIBlob=0_setOBlob=0_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=U8_oPRC=U8_netLT=NCHW_iLT=NHWC_oLT=NCHW_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=U8_oPRC=U8_netLT=NCHW_iLT=NHWC_oLT=NCHW_setIBlob=1_setOBlob=0_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=U8_oPRC=U8_netLT=NCHW_iLT=NHWC_oLT=NCHW_setIBlob=0_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=U8_oPRC=U8_netLT=NCHW_iLT=NHWC_oLT=NCHW_setIBlob=0_setOBlob=0_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=U8_oPRC=U8_netLT=NCHW_iLT=NCHW_oLT=NHWC_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=U8_oPRC=U8_netLT=NCHW_iLT=NCHW_oLT=NHWC_setIBlob=1_setOBlob=0_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=U8_oPRC=U8_netLT=NCHW_iLT=NCHW_oLT=NHWC_setIBlob=0_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=U8_oPRC=U8_netLT=NCHW_iLT=NCHW_oLT=NHWC_setIBlob=0_setOBlob=0_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=U8_oPRC=U8_netLT=NCHW_iLT=NCHW_oLT=NCHW_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=U8_oPRC=U8_netLT=NCHW_iLT=NCHW_oLT=NCHW_setIBlob=1_setOBlob=0_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=U8_oPRC=U8_netLT=NCHW_iLT=NCHW_oLT=NCHW_setIBlob=0_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=U8_oPRC=U8_netLT=NCHW_iLT=NCHW_oLT=NCHW_setIBlob=0_setOBlob=0_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=U8_oPRC=FP32_netLT=NCHW_iLT=NHWC_oLT=NHWC_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=U8_oPRC=FP32_netLT=NCHW_iLT=NHWC_oLT=NHWC_setIBlob=1_setOBlob=0_target_device=BATCH_CPU,0 
+ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=U8_oPRC=FP32_netLT=NCHW_iLT=NHWC_oLT=NHWC_setIBlob=0_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=U8_oPRC=FP32_netLT=NCHW_iLT=NHWC_oLT=NHWC_setIBlob=0_setOBlob=0_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=U8_oPRC=FP32_netLT=NCHW_iLT=NHWC_oLT=NCHW_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=U8_oPRC=FP32_netLT=NCHW_iLT=NHWC_oLT=NCHW_setIBlob=1_setOBlob=0_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=U8_oPRC=FP32_netLT=NCHW_iLT=NHWC_oLT=NCHW_setIBlob=0_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=U8_oPRC=FP32_netLT=NCHW_iLT=NHWC_oLT=NCHW_setIBlob=0_setOBlob=0_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=U8_oPRC=FP32_netLT=NCHW_iLT=NCHW_oLT=NHWC_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=U8_oPRC=FP32_netLT=NCHW_iLT=NCHW_oLT=NHWC_setIBlob=1_setOBlob=0_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=U8_oPRC=FP32_netLT=NCHW_iLT=NCHW_oLT=NHWC_setIBlob=0_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=U8_oPRC=FP32_netLT=NCHW_iLT=NCHW_oLT=NHWC_setIBlob=0_setOBlob=0_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=U8_oPRC=FP32_netLT=NCHW_iLT=NCHW_oLT=NCHW_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=U8_oPRC=FP32_netLT=NCHW_iLT=NCHW_oLT=NCHW_setIBlob=1_setOBlob=0_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=U8_oPRC=FP32_netLT=NCHW_iLT=NCHW_oLT=NCHW_setIBlob=0_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=U8_oPRC=FP32_netLT=NCHW_iLT=NCHW_oLT=NCHW_setIBlob=0_setOBlob=0_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=FP32_oPRC=U8_netLT=NCHW_iLT=NHWC_oLT=NHWC_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=FP32_oPRC=U8_netLT=NCHW_iLT=NHWC_oLT=NHWC_setIBlob=1_setOBlob=0_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=FP32_oPRC=U8_netLT=NCHW_iLT=NHWC_oLT=NHWC_setIBlob=0_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=FP32_oPRC=U8_netLT=NCHW_iLT=NHWC_oLT=NHWC_setIBlob=0_setOBlob=0_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=FP32_oPRC=U8_netLT=NCHW_iLT=NHWC_oLT=NCHW_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=FP32_oPRC=U8_netLT=NCHW_iLT=NHWC_oLT=NCHW_setIBlob=1_setOBlob=0_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=FP32_oPRC=U8_netLT=NCHW_iLT=NHWC_oLT=NCHW_setIBlob=0_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=FP32_oPRC=U8_netLT=NCHW_iLT=NHWC_oLT=NCHW_setIBlob=0_setOBlob=0_target_device=BATCH_CPU,0 
+ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=FP32_oPRC=U8_netLT=NCHW_iLT=NCHW_oLT=NHWC_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=FP32_oPRC=U8_netLT=NCHW_iLT=NCHW_oLT=NHWC_setIBlob=1_setOBlob=0_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=FP32_oPRC=U8_netLT=NCHW_iLT=NCHW_oLT=NHWC_setIBlob=0_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=FP32_oPRC=U8_netLT=NCHW_iLT=NCHW_oLT=NHWC_setIBlob=0_setOBlob=0_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=FP32_oPRC=U8_netLT=NCHW_iLT=NCHW_oLT=NCHW_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=FP32_oPRC=U8_netLT=NCHW_iLT=NCHW_oLT=NCHW_setIBlob=1_setOBlob=0_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=FP32_oPRC=U8_netLT=NCHW_iLT=NCHW_oLT=NCHW_setIBlob=0_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=FP32_oPRC=U8_netLT=NCHW_iLT=NCHW_oLT=NCHW_setIBlob=0_setOBlob=0_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=FP32_oPRC=FP32_netLT=NCHW_iLT=NHWC_oLT=NHWC_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=FP32_oPRC=FP32_netLT=NCHW_iLT=NHWC_oLT=NHWC_setIBlob=1_setOBlob=0_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=FP32_oPRC=FP32_netLT=NCHW_iLT=NHWC_oLT=NHWC_setIBlob=0_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=FP32_oPRC=FP32_netLT=NCHW_iLT=NHWC_oLT=NHWC_setIBlob=0_setOBlob=0_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=FP32_oPRC=FP32_netLT=NCHW_iLT=NHWC_oLT=NCHW_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=FP32_oPRC=FP32_netLT=NCHW_iLT=NHWC_oLT=NCHW_setIBlob=1_setOBlob=0_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=FP32_oPRC=FP32_netLT=NCHW_iLT=NHWC_oLT=NCHW_setIBlob=0_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=FP32_oPRC=FP32_netLT=NCHW_iLT=NHWC_oLT=NCHW_setIBlob=0_setOBlob=0_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=FP32_oPRC=FP32_netLT=NCHW_iLT=NCHW_oLT=NHWC_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=FP32_oPRC=FP32_netLT=NCHW_iLT=NCHW_oLT=NHWC_setIBlob=1_setOBlob=0_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=FP32_oPRC=FP32_netLT=NCHW_iLT=NCHW_oLT=NHWC_setIBlob=0_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=FP32_oPRC=FP32_netLT=NCHW_iLT=NCHW_oLT=NHWC_setIBlob=0_setOBlob=0_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=FP32_oPRC=FP32_netLT=NCHW_iLT=NCHW_oLT=NCHW_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 
+ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=FP32_oPRC=FP32_netLT=NCHW_iLT=NCHW_oLT=NCHW_setIBlob=1_setOBlob=0_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=FP32_oPRC=FP32_netLT=NCHW_iLT=NCHW_oLT=NCHW_setIBlob=0_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=FP32_oPRC=FP32_netLT=NCHW_iLT=NCHW_oLT=NCHW_setIBlob=0_setOBlob=0_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=U8_oPRC=U8_netLT=NCHW_iLT=NHWC_oLT=NHWC_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=U8_oPRC=U8_netLT=NCHW_iLT=NHWC_oLT=NHWC_setIBlob=1_setOBlob=0_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=U8_oPRC=U8_netLT=NCHW_iLT=NHWC_oLT=NHWC_setIBlob=0_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=U8_oPRC=U8_netLT=NCHW_iLT=NHWC_oLT=NHWC_setIBlob=0_setOBlob=0_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=U8_oPRC=U8_netLT=NCHW_iLT=NHWC_oLT=NCHW_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=U8_oPRC=U8_netLT=NCHW_iLT=NHWC_oLT=NCHW_setIBlob=1_setOBlob=0_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=U8_oPRC=U8_netLT=NCHW_iLT=NHWC_oLT=NCHW_setIBlob=0_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=U8_oPRC=U8_netLT=NCHW_iLT=NHWC_oLT=NCHW_setIBlob=0_setOBlob=0_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=U8_oPRC=U8_netLT=NCHW_iLT=NCHW_oLT=NHWC_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=U8_oPRC=U8_netLT=NCHW_iLT=NCHW_oLT=NHWC_setIBlob=1_setOBlob=0_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=U8_oPRC=U8_netLT=NCHW_iLT=NCHW_oLT=NHWC_setIBlob=0_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=U8_oPRC=U8_netLT=NCHW_iLT=NCHW_oLT=NHWC_setIBlob=0_setOBlob=0_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=U8_oPRC=U8_netLT=NCHW_iLT=NCHW_oLT=NCHW_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=U8_oPRC=U8_netLT=NCHW_iLT=NCHW_oLT=NCHW_setIBlob=1_setOBlob=0_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=U8_oPRC=U8_netLT=NCHW_iLT=NCHW_oLT=NCHW_setIBlob=0_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=U8_oPRC=U8_netLT=NCHW_iLT=NCHW_oLT=NCHW_setIBlob=0_setOBlob=0_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=U8_oPRC=FP32_netLT=NCHW_iLT=NHWC_oLT=NHWC_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=U8_oPRC=FP32_netLT=NCHW_iLT=NHWC_oLT=NHWC_setIBlob=1_setOBlob=0_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=U8_oPRC=FP32_netLT=NCHW_iLT=NHWC_oLT=NHWC_setIBlob=0_setOBlob=1_target_device=BATCH_CPU,0 
+ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=U8_oPRC=FP32_netLT=NCHW_iLT=NHWC_oLT=NHWC_setIBlob=0_setOBlob=0_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=U8_oPRC=FP32_netLT=NCHW_iLT=NHWC_oLT=NCHW_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=U8_oPRC=FP32_netLT=NCHW_iLT=NHWC_oLT=NCHW_setIBlob=1_setOBlob=0_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=U8_oPRC=FP32_netLT=NCHW_iLT=NHWC_oLT=NCHW_setIBlob=0_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=U8_oPRC=FP32_netLT=NCHW_iLT=NHWC_oLT=NCHW_setIBlob=0_setOBlob=0_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=U8_oPRC=FP32_netLT=NCHW_iLT=NCHW_oLT=NHWC_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=U8_oPRC=FP32_netLT=NCHW_iLT=NCHW_oLT=NHWC_setIBlob=1_setOBlob=0_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=U8_oPRC=FP32_netLT=NCHW_iLT=NCHW_oLT=NHWC_setIBlob=0_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=U8_oPRC=FP32_netLT=NCHW_iLT=NCHW_oLT=NHWC_setIBlob=0_setOBlob=0_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=U8_oPRC=FP32_netLT=NCHW_iLT=NCHW_oLT=NCHW_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=U8_oPRC=FP32_netLT=NCHW_iLT=NCHW_oLT=NCHW_setIBlob=1_setOBlob=0_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=U8_oPRC=FP32_netLT=NCHW_iLT=NCHW_oLT=NCHW_setIBlob=0_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=U8_oPRC=FP32_netLT=NCHW_iLT=NCHW_oLT=NCHW_setIBlob=0_setOBlob=0_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=FP32_oPRC=U8_netLT=NCHW_iLT=NHWC_oLT=NHWC_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=FP32_oPRC=U8_netLT=NCHW_iLT=NHWC_oLT=NHWC_setIBlob=1_setOBlob=0_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=FP32_oPRC=U8_netLT=NCHW_iLT=NHWC_oLT=NHWC_setIBlob=0_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=FP32_oPRC=U8_netLT=NCHW_iLT=NHWC_oLT=NHWC_setIBlob=0_setOBlob=0_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=FP32_oPRC=U8_netLT=NCHW_iLT=NHWC_oLT=NCHW_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=FP32_oPRC=U8_netLT=NCHW_iLT=NHWC_oLT=NCHW_setIBlob=1_setOBlob=0_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=FP32_oPRC=U8_netLT=NCHW_iLT=NHWC_oLT=NCHW_setIBlob=0_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=FP32_oPRC=U8_netLT=NCHW_iLT=NHWC_oLT=NCHW_setIBlob=0_setOBlob=0_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=FP32_oPRC=U8_netLT=NCHW_iLT=NCHW_oLT=NHWC_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 
+ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=FP32_oPRC=U8_netLT=NCHW_iLT=NCHW_oLT=NHWC_setIBlob=1_setOBlob=0_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=FP32_oPRC=U8_netLT=NCHW_iLT=NCHW_oLT=NHWC_setIBlob=0_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=FP32_oPRC=U8_netLT=NCHW_iLT=NCHW_oLT=NHWC_setIBlob=0_setOBlob=0_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=FP32_oPRC=U8_netLT=NCHW_iLT=NCHW_oLT=NCHW_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=FP32_oPRC=U8_netLT=NCHW_iLT=NCHW_oLT=NCHW_setIBlob=1_setOBlob=0_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=FP32_oPRC=U8_netLT=NCHW_iLT=NCHW_oLT=NCHW_setIBlob=0_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=FP32_oPRC=U8_netLT=NCHW_iLT=NCHW_oLT=NCHW_setIBlob=0_setOBlob=0_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=FP32_oPRC=FP32_netLT=NCHW_iLT=NHWC_oLT=NHWC_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=FP32_oPRC=FP32_netLT=NCHW_iLT=NHWC_oLT=NHWC_setIBlob=1_setOBlob=0_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=FP32_oPRC=FP32_netLT=NCHW_iLT=NHWC_oLT=NHWC_setIBlob=0_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=FP32_oPRC=FP32_netLT=NCHW_iLT=NHWC_oLT=NHWC_setIBlob=0_setOBlob=0_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=FP32_oPRC=FP32_netLT=NCHW_iLT=NHWC_oLT=NCHW_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=FP32_oPRC=FP32_netLT=NCHW_iLT=NHWC_oLT=NCHW_setIBlob=1_setOBlob=0_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=FP32_oPRC=FP32_netLT=NCHW_iLT=NHWC_oLT=NCHW_setIBlob=0_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=FP32_oPRC=FP32_netLT=NCHW_iLT=NHWC_oLT=NCHW_setIBlob=0_setOBlob=0_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=FP32_oPRC=FP32_netLT=NCHW_iLT=NCHW_oLT=NHWC_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=FP32_oPRC=FP32_netLT=NCHW_iLT=NCHW_oLT=NHWC_setIBlob=1_setOBlob=0_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=FP32_oPRC=FP32_netLT=NCHW_iLT=NCHW_oLT=NHWC_setIBlob=0_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=FP32_oPRC=FP32_netLT=NCHW_iLT=NCHW_oLT=NHWC_setIBlob=0_setOBlob=0_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=FP32_oPRC=FP32_netLT=NCHW_iLT=NCHW_oLT=NCHW_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=FP32_oPRC=FP32_netLT=NCHW_iLT=NCHW_oLT=NCHW_setIBlob=1_setOBlob=0_target_device=BATCH_CPU,0 
+ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=FP32_oPRC=FP32_netLT=NCHW_iLT=NCHW_oLT=NCHW_setIBlob=0_setOBlob=1_target_device=BATCH_CPU,0 +ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=FP32_oPRC=FP32_netLT=NCHW_iLT=NCHW_oLT=NCHW_setIBlob=0_setOBlob=0_target_device=BATCH_CPU,0 +ie_plugin/IEClassQueryNetworkTest.QueryNetworkWithInvalidDeviceIDThrows/4,0 +ie_plugin/IEClassQueryNetworkTest.QueryNetworkWithInvalidDeviceIDThrows/3,0 +ie_plugin/IEClassQueryNetworkTest.QueryNetworkWithInvalidDeviceIDThrows/2,0 +ie_plugin/IEClassQueryNetworkTest.QueryNetworkWithInvalidDeviceIDThrows/1,0 +ie_plugin/IEClassQueryNetworkTest.QueryNetworkWithDeviceID/4,0 +ie_plugin/IEClassQueryNetworkTest.QueryNetworkWithDeviceID/3,0 +ie_plugin/IEClassQueryNetworkTest.QueryNetworkWithDeviceID/2,0 +ie_plugin/IEClassQueryNetworkTest.QueryNetworkWithDeviceID/1,0 +ie_plugin/IEClassQueryNetworkTest.QueryNetworkWithBigDeviceIDThrows/4,0 +ie_plugin/IEClassQueryNetworkTest.QueryNetworkWithBigDeviceIDThrows/3,0 +ie_plugin/IEClassQueryNetworkTest.QueryNetworkWithBigDeviceIDThrows/2,0 +ie_plugin/IEClassQueryNetworkTest.QueryNetworkWithBigDeviceIDThrows/1,0 +ie_plugin/IEClassQueryNetworkTest.QueryNetworkHETEROWithDeviceIDNoThrow/4,0 +ie_plugin/IEClassQueryNetworkTest.QueryNetworkHETEROWithDeviceIDNoThrow/3,0 +ie_plugin/IEClassQueryNetworkTest.QueryNetworkHETEROWithDeviceIDNoThrow/2,0 +ie_plugin/IEClassQueryNetworkTest.QueryNetworkHETEROWithDeviceIDNoThrow/1,0 +ie_plugin/IEClassQueryNetworkTest.QueryNetworkHETEROWithBigDeviceIDThrows/4,0 +ie_plugin/IEClassQueryNetworkTest.QueryNetworkHETEROWithBigDeviceIDThrows/3,0 +ie_plugin/IEClassQueryNetworkTest.QueryNetworkHETEROWithBigDeviceIDThrows/2,0 +ie_plugin/IEClassQueryNetworkTest.QueryNetworkHETEROWithBigDeviceIDThrows/1,0 +ie_plugin/IEClassNetworkTestP.SetAffinityWithKSO/3,0 +ie_plugin/IEClassNetworkTestP.SetAffinityWithConstantBranches/3,0 +ie_plugin/IEClassNetworkTestP.SetAffinityWithConstantBranches/1,0 +ie_plugin/IEClassNetworkTestP.LoadNetworkCreateDefaultExecGraphResult/3,0 +ie_plugin/IEClassNetworkTestP.LoadNetworkActualNoThrow/3,0 +ie_plugin/IEClassNetworkTestP.LoadNetworkActualHeteroDeviceNoThrow/4,0 +ie_plugin/IEClassNetworkTestP.LoadNetworkActualHeteroDeviceNoThrow/3,0 +ie_plugin/IEClassNetworkTestP.LoadNetworkActualHeteroDevice2NoThrow/4,0 +ie_plugin/IEClassLoadNetworkTest.QueryNetworkMULTIWithHETERONoThrow_V10/4,0 +ie_plugin/IEClassLoadNetworkTest.QueryNetworkMULTIWithHETERONoThrow_V10/3,0 +ie_plugin/IEClassLoadNetworkTest.QueryNetworkMULTIWithHETERONoThrow_V10/2,0 +ie_plugin/IEClassLoadNetworkTest.QueryNetworkMULTIWithHETERONoThrow_V10/1,0 +ie_plugin/IEClassLoadNetworkTest.QueryNetworkHETEROWithMULTINoThrow_V10/4,0 +ie_plugin/IEClassLoadNetworkTest.QueryNetworkHETEROWithMULTINoThrow_V10/3,0 +ie_plugin/IEClassLoadNetworkTest.QueryNetworkHETEROWithMULTINoThrow_V10/2,0 +ie_plugin/IEClassLoadNetworkTest.QueryNetworkHETEROWithMULTINoThrow_V10/1,0 +ie_plugin/IEClassLoadNetworkTest.LoadNetworkWithInvalidDeviceIDThrows/4,0 +ie_plugin/IEClassLoadNetworkTest.LoadNetworkWithInvalidDeviceIDThrows/3,0 +ie_plugin/IEClassLoadNetworkTest.LoadNetworkWithInvalidDeviceIDThrows/2,0 +ie_plugin/IEClassLoadNetworkTest.LoadNetworkWithInvalidDeviceIDThrows/1,0 +ie_plugin/IEClassLoadNetworkTest.LoadNetworkWithDeviceIDNoThrow/4,0 +ie_plugin/IEClassLoadNetworkTest.LoadNetworkWithDeviceIDNoThrow/3,0 +ie_plugin/IEClassLoadNetworkTest.LoadNetworkWithDeviceIDNoThrow/2,0 +ie_plugin/IEClassLoadNetworkTest.LoadNetworkWithDeviceIDNoThrow/1,0 
+ie_plugin/IEClassLoadNetworkTest.LoadNetworkWithBigDeviceIDThrows/4,0 +ie_plugin/IEClassLoadNetworkTest.LoadNetworkWithBigDeviceIDThrows/3,0 +ie_plugin/IEClassLoadNetworkTest.LoadNetworkWithBigDeviceIDThrows/2,0 +ie_plugin/IEClassLoadNetworkTest.LoadNetworkWithBigDeviceIDThrows/1,0 +ie_plugin/IEClassLoadNetworkTest.LoadNetworkMULTIwithHETERONoThrow/4,0 +ie_plugin/IEClassLoadNetworkTest.LoadNetworkMULTIwithHETERONoThrow/3,0 +ie_plugin/IEClassLoadNetworkTest.LoadNetworkMULTIwithHETERONoThrow/2,0 +ie_plugin/IEClassLoadNetworkTest.LoadNetworkMULTIwithHETERONoThrow/1,0 +ie_plugin/IEClassLoadNetworkTest.LoadNetworkHETEROwithMULTINoThrow/4,0 +ie_plugin/IEClassLoadNetworkTest.LoadNetworkHETEROwithMULTINoThrow/3,0 +ie_plugin/IEClassLoadNetworkTest.LoadNetworkHETEROwithMULTINoThrow/2,0 +ie_plugin/IEClassLoadNetworkTest.LoadNetworkHETEROwithMULTINoThrow/1,0 +ie_plugin/IEClassLoadNetworkTest.LoadNetworkHETEROWithDeviceIDNoThrow/4,0 +ie_plugin/IEClassLoadNetworkTest.LoadNetworkHETEROWithDeviceIDNoThrow/3,0 +ie_plugin/IEClassLoadNetworkTest.LoadNetworkHETEROWithDeviceIDNoThrow/2,0 +ie_plugin/IEClassLoadNetworkTest.LoadNetworkHETEROWithDeviceIDNoThrow/1,0 +ie_plugin/IEClassLoadNetworkTest.LoadNetworkHETEROWithBigDeviceIDThrows/4,0 +ie_plugin/IEClassLoadNetworkTest.LoadNetworkHETEROWithBigDeviceIDThrows/3,0 +ie_plugin/IEClassLoadNetworkTest.LoadNetworkHETEROWithBigDeviceIDThrows/2,0 +ie_plugin/IEClassLoadNetworkTest.LoadNetworkHETEROWithBigDeviceIDThrows/1,0 +ie_plugin/IEClassLoadNetworkTest.LoadNetworkHETEROAndDeviceIDThrows/4,0 +ie_plugin/IEClassLoadNetworkTest.LoadNetworkHETEROAndDeviceIDThrows/3,0 +ie_plugin/IEClassLoadNetworkTest.LoadNetworkHETEROAndDeviceIDThrows/2,0 +ie_plugin/IEClassLoadNetworkTest.LoadNetworkHETEROAndDeviceIDThrows/1,0 +ie_plugin/IEClassGetMetricTest_RANGE_FOR_STREAMS.GetMetricAndPrintNoThrow/4,0 +ie_plugin/IEClassGetMetricTest_RANGE_FOR_STREAMS.GetMetricAndPrintNoThrow/3,0 +ie_plugin/IEClassGetMetricTest_RANGE_FOR_STREAMS.GetMetricAndPrintNoThrow/2,0 +ie_plugin/IEClassGetMetricTest_RANGE_FOR_STREAMS.GetMetricAndPrintNoThrow/1,0 +ie_plugin/IEClassGetMetricTest_RANGE_FOR_ASYNC_INFER_REQUESTS.GetMetricAndPrintNoThrow/4,0 +ie_plugin/IEClassGetMetricTest_RANGE_FOR_ASYNC_INFER_REQUESTS.GetMetricAndPrintNoThrow/3,0 +ie_plugin/IEClassGetMetricTest_RANGE_FOR_ASYNC_INFER_REQUESTS.GetMetricAndPrintNoThrow/2,0 +ie_plugin/IEClassGetMetricTest_RANGE_FOR_ASYNC_INFER_REQUESTS.GetMetricAndPrintNoThrow/1,0 +ie_plugin/IEClassGetMetricTest_OPTIMIZATION_CAPABILITIES.GetMetricAndPrintNoThrow/3,0 +ie_plugin/IEClassGetMetricTest_AVAILABLE_DEVICES.GetMetricAndPrintNoThrow/4,0 +ie_plugin/IEClassGetMetricTest_AVAILABLE_DEVICES.GetMetricAndPrintNoThrow/3,0 +ie_plugin/IEClassGetMetricTest_AVAILABLE_DEVICES.GetMetricAndPrintNoThrow/2,0 +ie_plugin/IEClassGetMetricTest_AVAILABLE_DEVICES.GetMetricAndPrintNoThrow/1,0 +ie_plugin/IEClassGetConfigTest.GetConfigNoThrow/3,0 +ie_plugin/IEClassGetConfigTest.GetConfigHeteroNoThrow/3,0 +ie_plugin/IEClassBasicTestP.getVersionsNonEmpty/1,0 +ie_plugin/IEClassBasicTestP.SetConfigAllNoThrow/3,0 +ie_plugin/IEClassBasicTestP.SetConfigAllNoThrow/1,0 +ie_plugin/DefaultConfigurationTest.checkDeviceDefaultConfigurationValue/configKey=PERF_COUNT_targetDevice=HETERO,0 +ie_plugin/DefaultConfigurationTest.checkDeviceDefaultConfigurationValue/configKey=PERF_COUNT_targetDevice=BATCH,0 +ie_plugin/CorrectConfigTests.CanUseCache/target_device=CPU_config=(=_),0 +ie_plugin/CorrectConfigTests.CanLoadNetworkWithCorrectConfig/target_device=CPU_config=(=_),0 
+ie_plugin/CorrectConfigCheck.canSetConfigTwiceAndCheckGetConfig/target_device=MULTI.CPU_config=(PERFORMANCE_HINT=THROUGHPUT_),0 +ie_plugin/CorrectConfigCheck.canSetConfigTwiceAndCheckGetConfig/target_device=MULTI.CPU_config=(PERFORMANCE_HINT=LATENCY_PERFORMANCE_HINT_NUM_REQUESTS=1_),0 +ie_plugin/CorrectConfigCheck.canSetConfigTwiceAndCheckGetConfig/target_device=MULTI.CPU_config=(PERFORMANCE_HINT=LATENCY_),0 +ie_plugin/CorrectConfigCheck.canSetConfigTwiceAndCheckGetConfig/target_device=MULTI.CPU_,0 +ie_plugin/CorrectConfigCheck.canSetConfigTwiceAndCheckGetConfig/target_device=HETERO.CPU_config=(PERFORMANCE_HINT=THROUGHPUT_),0 +ie_plugin/CorrectConfigCheck.canSetConfigTwiceAndCheckGetConfig/target_device=HETERO.CPU_config=(PERFORMANCE_HINT=LATENCY_PERFORMANCE_HINT_NUM_REQUESTS=1_),0 +ie_plugin/CorrectConfigCheck.canSetConfigTwiceAndCheckGetConfig/target_device=HETERO.CPU_config=(PERFORMANCE_HINT=LATENCY_),0 +ie_plugin/CorrectConfigCheck.canSetConfigTwiceAndCheckGetConfig/target_device=HETERO.CPU_,0 +ie_plugin/CorrectConfigCheck.canSetConfigTwiceAndCheckGetConfig/target_device=BATCH.CPU_config=(PERFORMANCE_HINT=THROUGHPUT_),0 +ie_plugin/CorrectConfigCheck.canSetConfigTwiceAndCheckGetConfig/target_device=BATCH.CPU_config=(PERFORMANCE_HINT=LATENCY_PERFORMANCE_HINT_NUM_REQUESTS=1_),0 +ie_plugin/CorrectConfigCheck.canSetConfigTwiceAndCheckGetConfig/target_device=BATCH.CPU_config=(PERFORMANCE_HINT=LATENCY_),0 +ie_plugin/CorrectConfigCheck.canSetConfigTwiceAndCheckGetConfig/target_device=AUTO.CPU_config=(PERFORMANCE_HINT=THROUGHPUT_),0 +ie_plugin/CorrectConfigCheck.canSetConfigTwiceAndCheckGetConfig/target_device=AUTO.CPU_config=(PERFORMANCE_HINT=LATENCY_PERFORMANCE_HINT_NUM_REQUESTS=1_),0 +ie_plugin/CorrectConfigCheck.canSetConfigTwiceAndCheckGetConfig/target_device=AUTO.CPU_config=(PERFORMANCE_HINT=LATENCY_),0 +ie_plugin/CorrectConfigCheck.canSetConfigTwiceAndCheckGetConfig/target_device=AUTO.CPU_,0 +ie_plugin/CorrectConfigCheck.canSetConfigAndCheckGetConfig/target_device=MULTI.CPU_config=(PERFORMANCE_HINT=THROUGHPUT_),0 +ie_plugin/CorrectConfigCheck.canSetConfigAndCheckGetConfig/target_device=MULTI.CPU_config=(PERFORMANCE_HINT=LATENCY_PERFORMANCE_HINT_NUM_REQUESTS=1_),0 +ie_plugin/CorrectConfigCheck.canSetConfigAndCheckGetConfig/target_device=MULTI.CPU_config=(PERFORMANCE_HINT=LATENCY_),0 +ie_plugin/CorrectConfigCheck.canSetConfigAndCheckGetConfig/target_device=MULTI.CPU_,0 +ie_plugin/CorrectConfigCheck.canSetConfigAndCheckGetConfig/target_device=HETERO.CPU_config=(PERFORMANCE_HINT=THROUGHPUT_),0 +ie_plugin/CorrectConfigCheck.canSetConfigAndCheckGetConfig/target_device=HETERO.CPU_config=(PERFORMANCE_HINT=LATENCY_PERFORMANCE_HINT_NUM_REQUESTS=1_),0 +ie_plugin/CorrectConfigCheck.canSetConfigAndCheckGetConfig/target_device=HETERO.CPU_config=(PERFORMANCE_HINT=LATENCY_),0 +ie_plugin/CorrectConfigCheck.canSetConfigAndCheckGetConfig/target_device=HETERO.CPU_,0 +ie_plugin/CorrectConfigCheck.canSetConfigAndCheckGetConfig/target_device=BATCH.CPU_config=(PERFORMANCE_HINT=THROUGHPUT_),0 +ie_plugin/CorrectConfigCheck.canSetConfigAndCheckGetConfig/target_device=BATCH.CPU_config=(PERFORMANCE_HINT=LATENCY_PERFORMANCE_HINT_NUM_REQUESTS=1_),0 +ie_plugin/CorrectConfigCheck.canSetConfigAndCheckGetConfig/target_device=BATCH.CPU_config=(PERFORMANCE_HINT=LATENCY_),0 +ie_plugin/CorrectConfigCheck.canSetConfigAndCheckGetConfig/target_device=AUTO.CPU_config=(PERFORMANCE_HINT=THROUGHPUT_),0 
+ie_plugin/CorrectConfigCheck.canSetConfigAndCheckGetConfig/target_device=AUTO.CPU_config=(PERFORMANCE_HINT=LATENCY_PERFORMANCE_HINT_NUM_REQUESTS=1_),0 +ie_plugin/CorrectConfigCheck.canSetConfigAndCheckGetConfig/target_device=AUTO.CPU_config=(PERFORMANCE_HINT=LATENCY_),0 +ie_plugin/CorrectConfigCheck.canSetConfigAndCheckGetConfig/target_device=AUTO.CPU_,0 +ie_plugin/CoreThreadingTestsWithIterations.smoke_LoadNetwork_MultipleIECores/targetDevice=MULTI_config=MULTI_DEVICE_PRIORITIES=_numThreads=4_numIter=50,0 +ie_plugin/CoreThreadingTestsWithIterations.smoke_LoadNetwork_MultipleIECores/targetDevice=HETERO_config=TARGET_FALLBACK=_numThreads=4_numIter=50,0 +ie_plugin/CoreThreadingTestsWithIterations.smoke_LoadNetwork_MultipleIECores/targetDevice=BATCH_config=AUTO_BATCH_DEVICE_CONFIG=_numThreads=4_numIter=50,0 +ie_plugin/CoreThreadingTestsWithIterations.smoke_LoadNetworkAccuracy_SingleIECore/targetDevice=MULTI_config=MULTI_DEVICE_PRIORITIES=_numThreads=4_numIter=50,0 +ie_plugin/CoreThreadingTestsWithIterations.smoke_LoadNetworkAccuracy_SingleIECore/targetDevice=HETERO_config=TARGET_FALLBACK=_numThreads=4_numIter=50,0 +ie_plugin/CoreThreadingTestsWithIterations.smoke_LoadNetworkAccuracy_SingleIECore/targetDevice=BATCH_config=AUTO_BATCH_DEVICE_CONFIG=_numThreads=4_numIter=50,0 +ie_plugin/CoreThreadingTestsWithIterations.smoke_LoadNetworkAccuracy/targetDevice=MULTI_config=MULTI_DEVICE_PRIORITIES=_numThreads=4_numIter=50,0 +ie_plugin/CoreThreadingTestsWithIterations.smoke_LoadNetworkAccuracy/targetDevice=HETERO_config=TARGET_FALLBACK=_numThreads=4_numIter=50,0 +ie_plugin/CoreThreadingTestsWithIterations.smoke_LoadNetworkAccuracy/targetDevice=BATCH_config=AUTO_BATCH_DEVICE_CONFIG=_numThreads=4_numIter=50,0 +ie_plugin/CoreThreadingTestsWithIterations.smoke_LoadNetwork/targetDevice=MULTI_config=MULTI_DEVICE_PRIORITIES=_numThreads=4_numIter=50,0 +ie_plugin/CoreThreadingTestsWithIterations.smoke_LoadNetwork/targetDevice=HETERO_config=TARGET_FALLBACK=_numThreads=4_numIter=50,0 +ie_plugin/CoreThreadingTestsWithIterations.smoke_LoadNetwork/targetDevice=BATCH_config=AUTO_BATCH_DEVICE_CONFIG=_numThreads=4_numIter=50,0 +ie_plugin/CoreThreadingTests.smoke_SetConfigPluginExists/targetDevice=HETERO.CPU_config=PERF_COUNT=YES_,0 +ie_plugin/CoreThreadingTests.smoke_SetConfigPluginExists/targetDevice=BATCH.CPU_config=PERF_COUNT=YES_,0 +ie_plugin/CoreThreadingTests.smoke_QueryNetwork/targetDevice=MULTI.CPU_config=PERF_COUNT=YES_,0 +ie_plugin/CoreThreadingTests.smoke_QueryNetwork/targetDevice=HETERO.CPU_config=PERF_COUNT=YES_,0 +ie_plugin/CoreThreadingTests.smoke_QueryNetwork/targetDevice=BATCH.CPU_config=PERF_COUNT=YES_,0 +ie_plugin/CoreThreadingTests.smoke_QueryNetwork/targetDevice=AUTO.CPU_config=PERF_COUNT=YES_,0 +ie_plugin/CoreThreadingTests.smoke_GetMetric/targetDevice=MULTI.CPU_config=PERF_COUNT=YES_,0 +ie_plugin/CoreThreadingTests.smoke_GetMetric/targetDevice=HETERO.CPU_config=PERF_COUNT=YES_,0 +ie_plugin/CoreThreadingTests.smoke_GetMetric/targetDevice=BATCH.CPU_config=PERF_COUNT=YES_,0 +ie_plugin/CoreThreadingTests.smoke_GetMetric/targetDevice=AUTO.CPU_config=PERF_COUNT=YES_,0 +ie_plugin/CoreThreadingTests.smoke_GetConfig/targetDevice=MULTI.CPU_config=PERF_COUNT=YES_,0 +ie_plugin/CoreThreadingTests.smoke_GetConfig/targetDevice=HETERO.CPU_config=PERF_COUNT=YES_,0 +ie_plugin/CoreThreadingTests.smoke_GetConfig/targetDevice=BATCH.CPU_config=PERF_COUNT=YES_,0 +ie_plugin/CoreThreadingTests.smoke_GetConfig/targetDevice=AUTO.CPU_config=PERF_COUNT=YES_,0 
+ie_infer_request/InferRequestWaitTests.returnDeviceBusyOnSetBlobAfterAsyncInfer/targetDevice=BATCH.CPU_,0 +ie_infer_request/InferRequestWaitTests.returnDeviceBusyOnGetBlobAfterAsyncInfer/targetDevice=BATCH.CPU_,0 +ie_infer_request/InferRequestWaitTests.canWaitWithotStartAsync/targetDevice=BATCH.CPU_,0 +ie_infer_request/InferRequestWaitTests.canStartAsyncInferWithGetInOutWithStatusOnlyWait/targetDevice=BATCH.CPU_,0 +ie_infer_request/InferRequestWaitTests.FailedAsyncInferWithNegativeTimeForWait/targetDevice=BATCH.CPU_,0 +ie_infer_request/InferRequestWaitTests.CorrectOneAsyncInferWithGetInOutWithInfWait/targetDevice=BATCH.CPU_,0 +ie_infer_request/InferRequestSetBlobByType.setInputBlobsByType/BlobType=Remote_Device=MULTI.CPU_Config=(),0 +ie_infer_request/InferRequestSetBlobByType.setInputBlobsByType/BlobType=Remote_Device=HETERO.CPU_Config=(),0 +ie_infer_request/InferRequestSetBlobByType.setInputBlobsByType/BlobType=Remote_Device=CPU_Config=(),0 +ie_infer_request/InferRequestSetBlobByType.setInputBlobsByType/BlobType=Remote_Device=BATCH.CPU_Config=(),0 +ie_infer_request/InferRequestSetBlobByType.setInputBlobsByType/BlobType=Remote_Device=AUTO.CPU_Config=(),0 +ie_infer_request/InferRequestSetBlobByType.setInputBlobsByType/BlobType=Batched_Device=MULTI.CPU_Config=(),0 +ie_infer_request/InferRequestSetBlobByType.setInputBlobsByType/BlobType=Batched_Device=HETERO.CPU_Config=(),0 +ie_infer_request/InferRequestSetBlobByType.setInputBlobsByType/BlobType=Batched_Device=BATCH.CPU_Config=(),0 +ie_infer_request/InferRequestSetBlobByType.setInputBlobsByType/BlobType=Batched_Device=AUTO.CPU_Config=(),0 +ie_infer_request/InferRequestPerfCountersTest.NotEmptyAfterSyncInfer/targetDevice=BATCH.CPU_,0 +ie_infer_request/InferRequestPerfCountersTest.NotEmptyAfterAsyncInfer/targetDevice=BATCH.CPU_,0 +ie_infer_request/InferRequestMultithreadingTests.canRun3SyncRequestsConsistentlyFromThreads/targetDevice=BATCH.CPU_,0 +ie_infer_request/InferRequestMultithreadingTests.canRun3AsyncRequestsConsistentlyWithWait/targetDevice=BATCH.CPU_,0 +ie_infer_request/InferRequestMultithreadingTests.canRun3AsyncRequestsConsistentlyFromThreadsWithoutWait/targetDevice=BATCH.CPU_,0 +ie_infer_request/InferRequestIOBBlobTest.setNotAllocatedOutput/targetDevice=BATCH.CPU_,0 +ie_infer_request/InferRequestIOBBlobTest.setNotAllocatedInput/targetDevice=BATCH.CPU_,0 +ie_infer_request/InferRequestIOBBlobTest.secondCallGetOutputDoNotReAllocateData/targetDevice=BATCH.CPU_,0 +ie_infer_request/InferRequestIOBBlobTest.secondCallGetOutputAfterInferSync/targetDevice=BATCH.CPU_,0 +ie_infer_request/InferRequestIOBBlobTest.secondCallGetInputDoNotReAllocateData/targetDevice=BATCH.CPU_,0 +ie_infer_request/InferRequestIOBBlobTest.secondCallGetInputAfterInferSync/targetDevice=BATCH.CPU_,0 +ie_infer_request/InferRequestIOBBlobTest.getAfterSetInputDoNotChangeOutput/targetDevice=BATCH.CPU_,0 +ie_infer_request/InferRequestIOBBlobTest.getAfterSetInputDoNotChangeInput/targetDevice=BATCH.CPU_,0 +ie_infer_request/InferRequestIOBBlobTest.failToSetUninitializedOutputBlob/targetDevice=BATCH.CPU_,0 +ie_infer_request/InferRequestIOBBlobTest.failToSetUninitializedInputBlob/targetDevice=BATCH.CPU_,0 +ie_infer_request/InferRequestIOBBlobTest.failToSetOutputWithIncorrectSizes/targetDevice=BATCH.CPU_,0 +ie_infer_request/InferRequestIOBBlobTest.failToSetNullptrForOutput/targetDevice=BATCH.CPU_,0 +ie_infer_request/InferRequestIOBBlobTest.failToSetNullptrForInput/targetDevice=BATCH.CPU_,0 
+ie_infer_request/InferRequestIOBBlobTest.failToSetInputWithIncorrectSizes/targetDevice=BATCH.CPU_,0 +ie_infer_request/InferRequestIOBBlobTest.failToSetBlobWithIncorrectName/targetDevice=BATCH.CPU_,0 +ie_infer_request/InferRequestIOBBlobTest.canSetOutputBlobForInferRequest/targetDevice=BATCH.CPU_,0 +ie_infer_request/InferRequestIOBBlobTest.canSetInputBlobForInferRequest/targetDevice=BATCH.CPU_,0 +ie_infer_request/InferRequestIOBBlobTest.canReallocateExternalBlobViaGet/targetDevice=BATCH.CPU_,0 +ie_infer_request/InferRequestIOBBlobTest.canProcessDeallocatedOutputBlobAfterSetBlob/targetDevice=BATCH.CPU_,0 +ie_infer_request/InferRequestIOBBlobTest.canProcessDeallocatedOutputBlobAfterGetAndSetBlob/targetDevice=BATCH.CPU_,0 +ie_infer_request/InferRequestIOBBlobTest.canProcessDeallocatedInputBlobAfterSetBlobSync/targetDevice=BATCH.CPU_,0 +ie_infer_request/InferRequestIOBBlobTest.canProcessDeallocatedInputBlobAfterSetBlobAsync/targetDevice=BATCH.CPU_,0 +ie_infer_request/InferRequestIOBBlobTest.canProcessDeallocatedInputBlobAfterGetBlob/targetDevice=BATCH.CPU_,0 +ie_infer_request/InferRequestIOBBlobTest.canProcessDeallocatedInputBlobAfterGetAndSetBlob/targetDevice=BATCH.CPU_,0 +ie_infer_request/InferRequestIOBBlobTest.canInferWithoutSetAndGetInOutSync/targetDevice=BATCH.CPU_,0 +ie_infer_request/InferRequestIOBBlobTest.canInferWithoutSetAndGetInOutAsync/targetDevice=BATCH.CPU_,0 +ie_infer_request/InferRequestIOBBlobTest.canInferWithSetInOutBlobs/targetDevice=BATCH.CPU_,0 +ie_infer_request/InferRequestIOBBlobTest.canInferWithGetOut/targetDevice=BATCH.CPU_,0 +ie_infer_request/InferRequestIOBBlobTest.canInferWithGetIn/targetDevice=BATCH.CPU_,0 +ie_infer_request/InferRequestIOBBlobTest.CanCreateInferRequest/targetDevice=BATCH.CPU_,0 +ie_infer_request/InferRequestCancellationTests.canResetAfterCancelAsyncRequest/targetDevice=BATCH.CPU_,0 +ie_infer_request/InferRequestCancellationTests.canCancelInferRequest/targetDevice=BATCH.CPU_,0 +ie_infer_request/InferRequestCancellationTests.canCancelBeforeAsyncRequest/targetDevice=BATCH.CPU_,0 +ie_infer_request/InferRequestCancellationTests.canCancelAsyncRequest/targetDevice=BATCH.CPU_,0 +ie_infer_request/InferRequestCallbackTests.syncInferDoesNotCallCompletionCallback/targetDevice=BATCH.CPU_,0 +ie_infer_request/InferRequestCallbackTests.returnGeneralErrorIfCallbackThrowException/targetDevice=BATCH.CPU_,0 +ie_infer_request/InferRequestCallbackTests.canStartSeveralAsyncInsideCompletionCallbackWithSafeDtor/targetDevice=BATCH.CPU_,0 +ie_infer_request/InferRequestCallbackTests.canCallAsyncWithCompletionCallback/targetDevice=BATCH.CPU_,0 +ie_infer_request/InferRequestCallbackTests.ReturnResultNotReadyFromWaitInAsyncModeForTooSmallTimeout/targetDevice=BATCH.CPU_,0 +ie_infer_request/InferRequestCallbackTests.LegacyCastAndSetuserDataGetUserData/targetDevice=BATCH.CPU_,0 +ie_infer_request/InferRequestCallbackTests.ImplDoseNotCopyCallback/targetDevice=BATCH.CPU_,0 +ie_executable_network/IEClassImportExportTestP.smoke_ImportNetworkThrowsIfNoDeviceName/4,0 +ie_executable_network/IEClassImportExportTestP.smoke_ImportNetworkThrowsIfNoDeviceName/3,0 +ie_executable_network/IEClassImportExportTestP.smoke_ImportNetworkThrowsIfNoDeviceName/2,0 +ie_executable_network/IEClassImportExportTestP.smoke_ImportNetworkNoThrowWithDeviceName/4,0 +ie_executable_network/IEClassImportExportTestP.smoke_ImportNetworkNoThrowWithDeviceName/3,0 +ie_executable_network/IEClassImportExportTestP.smoke_ImportNetworkNoThrowWithDeviceName/2,0 
+ie_executable_network/IEClassImportExportTestP.smoke_ExportUsingFileNameImportFromStreamNoThrowWithDeviceName/4,0 +ie_executable_network/IEClassImportExportTestP.smoke_ExportUsingFileNameImportFromStreamNoThrowWithDeviceName/3,0 +ie_executable_network/IEClassImportExportTestP.smoke_ExportUsingFileNameImportFromStreamNoThrowWithDeviceName/2,0 +ie_executable_network/IEClassImportExportTestP.smoke_ExportUsingFileNameImportFromStreamNoThrowWithDeviceName/1,0 +ie_executable_network/IEClassExecutableNetworkSetConfigTest.SetConfigThrows/3,0 +ie_executable_network/IEClassExecutableNetworkGetMetricTest_ThrowsUnsupported.GetMetricThrow/3,0 +ie_executable_network/IEClassExecutableNetworkGetMetricTest_SUPPORTED_METRICS.GetMetricNoThrow/3,0 +ie_executable_network/IEClassExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS.GetMetricNoThrow/3,0 +ie_executable_network/IEClassExecutableNetworkGetMetricTest_OPTIMAL_NUMBER_OF_INFER_REQUESTS.GetMetricNoThrow/3,0 +ie_executable_network/IEClassExecutableNetworkGetMetricTest_NETWORK_NAME.GetMetricNoThrow/3,0 +ie_executable_network/IEClassExecutableNetworkGetConfigTest.GetConfigThrows/3,0 +ie_executable_network/IEClassExecutableNetworkGetConfigTest.GetConfigNoThrow/4,0 +ie_executable_network/IEClassExecutableNetworkGetConfigTest.GetConfigNoThrow/3,0 +ie_executable_network/IEClassExecutableNetworkGetConfigTest.GetConfigNoThrow/1,0 +ie_executable_network/IEClassExecutableNetworkGetConfigTest.GetConfigNoEmptyNoThrow/4,0 +ie_executable_network/IEClassExecutableNetworkGetConfigTest.GetConfigNoEmptyNoThrow/3,0 +ie_executable_network/IEClassExecutableNetworkGetConfigTest.GetConfigNoEmptyNoThrow/2,0 +ie_executable_network/IEClassExecutableNetworkGetConfigTest.GetConfigNoEmptyNoThrow/1,0 +ie_executable_network/ExecutableNetworkBaseTest.loadIncorrectV11Model/target_device=BATCH.CPU_,0 +ie_executable_network/ExecutableNetworkBaseTest.loadIncorrectV10Model/target_device=BATCH.CPU_,0 +ie_executable_network/ExecutableNetworkBaseTest.checkGetMetric/target_device=BATCH.CPU_,0 +ie_executable_network/ExecutableNetworkBaseTest.checkGetExecGraphInfoIsNotNullptr/target_device=BATCH.CPU_,0 +ie_executable_network/ExecutableNetworkBaseTest.canSetConfigToExecNetWithIncorrectConfig/target_device=BATCH.CPU_,0 +ie_executable_network/ExecutableNetworkBaseTest.canSetConfigToExecNetAndCheckConfigAndCheck/target_device=MULTI.CPU_,0 +ie_executable_network/ExecutableNetworkBaseTest.canSetConfigToExecNetAndCheckConfigAndCheck/target_device=HETERO.CPU_,0 +ie_executable_network/ExecutableNetworkBaseTest.canSetConfigToExecNetAndCheckConfigAndCheck/target_device=CPU_,0 +ie_executable_network/ExecutableNetworkBaseTest.canSetConfigToExecNetAndCheckConfigAndCheck/target_device=BATCH.CPU_,0 +ie_executable_network/ExecutableNetworkBaseTest.canSetConfigToExecNetAndCheckConfigAndCheck/target_device=AUTO.CPU_,0 +ie_executable_network/ExecutableNetworkBaseTest.canSetConfigToExecNet/target_device=MULTI.CPU_,0 +ie_executable_network/ExecutableNetworkBaseTest.canSetConfigToExecNet/target_device=HETERO.CPU_,0 +ie_executable_network/ExecutableNetworkBaseTest.canSetConfigToExecNet/target_device=CPU_,0 +ie_executable_network/ExecutableNetworkBaseTest.canSetConfigToExecNet/target_device=BATCH.CPU_,0 +ie_executable_network/ExecutableNetworkBaseTest.canSetConfigToExecNet/target_device=AUTO.CPU_,0 +ie_executable_network/ExecutableNetworkBaseTest.canLoadCorrectNetworkToGetExecutableWithIncorrectConfig/target_device=MULTI.CPU_,0 
+ie_executable_network/ExecutableNetworkBaseTest.canLoadCorrectNetworkToGetExecutableWithIncorrectConfig/target_device=HETERO.CPU_,0
+ie_executable_network/ExecutableNetworkBaseTest.canLoadCorrectNetworkToGetExecutableWithIncorrectConfig/target_device=AUTO.CPU_,0
+ie_executable_network/ExecutableNetworkBaseTest.canLoadCorrectNetworkToGetExecutableAndCreateInferRequest/target_device=BATCH.CPU_,0
+ie_executable_network/ExecutableNetworkBaseTest.canLoadCorrectNetworkToGetExecutableAndCheckConfig/target_device=BATCH.CPU_,0
+ie_executable_network/ExecutableNetworkBaseTest.canLoadCorrectNetworkToGetExecutable/target_device=BATCH.CPU_,0
+ie_executable_network/ExecutableNetworkBaseTest.canExport/target_device=MULTI.CPU_,0
+ie_executable_network/ExecutableNetworkBaseTest.canExport/target_device=BATCH.CPU_,0
+ie_executable_network/ExecutableNetworkBaseTest.canExport/target_device=AUTO.CPU_,0
+ie_executable_network/ExecutableNetworkBaseTest.CheckExecGraphInfoSerialization/target_device=BATCH.CPU_,0
+ie_executable_network/ExecutableNetworkBaseTest.CheckExecGraphInfoBeforeExecution/target_device=BATCH.CPU_,0
+ie_executable_network/ExecutableNetworkBaseTest.CheckExecGraphInfoAfterExecution/target_device=BATCH.CPU_,0
+ie_executable_network/ExecutableNetworkBaseTest.CanGetOutputsInfoAndCheck/target_device=BATCH.CPU_,0
+ie_executable_network/ExecutableNetworkBaseTest.CanGetOutputsInfo/target_device=BATCH.CPU_,0
+ie_executable_network/ExecutableNetworkBaseTest.CanGetInputsInfoAndCheck/target_device=BATCH.CPU_,0
+ie_executable_network/ExecutableNetworkBaseTest.CanGetInputsInfo/target_device=BATCH.CPU_,0
+ie_executable_network/ExecutableNetworkBaseTest.CanCreateTwoExeNetworksAndCheckFunction/target_device=BATCH.CPU_,0
+ie_executable_network/ExecutableNetworkBaseTest.CanCreateTwoExeNetworks/target_device=BATCH.CPU_,0
+ie_executable_network/ExecGraphUniqueNodeNames.CheckUniqueNodeNames/IS=(1.2.5.5)_inPRC=UNSPECIFIED_netPRC=FP32_targetDevice=BATCH_CPU,0
+ie_executable_network/ExecGraphSerializationTest.ExecutionGraph/TargetDevice=HETERO.CPU,0
+ie_executable_network/ExecGraphSerializationTest.ExecutionGraph/TargetDevice=BATCH.CPU,0
+ie_executable_network/IEClassImportExportTestP.smoke_ExportUsingFileNameImportFromStreamNoThrowWithDeviceName/0,0
diff --git a/src/tests/test_utils/functional_test_utils/layer_tests_summary/skip_configs/CPU/expected_failures_OP.csv b/src/tests/test_utils/functional_test_utils/layer_tests_summary/skip_configs/CPU/expected_failures_OP.csv
new file mode 100644
index 00000000000000..23d497e22f5d12
--- /dev/null
+++ b/src/tests/test_utils/functional_test_utils/layer_tests_summary/skip_configs/CPU/expected_failures_OP.csv
@@ -0,0 +1,1131 @@
+Test Name,Fix Priority
+conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=i32_IR=673350fadcd262cd7a62cde83e5fd9249c623abced2eb67197b730895bf0767b_Device=CPU_Shape=static_Config=(),1.0
+conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=i32_IR=ee9526ee9d1aaeec99a0c59b65b9ea92c5578739e34ec6337d30472b2c1fc62e_Device=CPU_Shape=static_Config=(),0.999773
+conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_IR=289d303553fbee0e0ab09eb7c9e022257af26a2ee5404009e07495fdce5e5cc5_Device=CPU_Shape=static_Config=(),0.901468
+conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=i32_IR=3b12339cb1eb1fb0f5b717aa08e14ed1035bdee31e925738c09450e8d201f0e4_Device=CPU_Shape=static_Config=(),0.832254
+conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=i64_IR=e6072c00650315f4b1d496473d1aa4cd29d398de13cd06101b48a76585a1ce0d_Device=CPU_Shape=static_Config=(),0.672244 +conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=i64_IR=f16a04a44dad091a2091f9f22697ad9a697490d233e99afa7718cb3b7e8b4a26_Device=CPU_Shape=static_Config=(),0.671828 +conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=i32_IR=641909d407f76c57ef31c99795284255ab110959b2c06d0d0105a40350ab3697_Device=CPU_Shape=static_Config=(),0.665562 +conformance_Squeeze/ReadIRTest.ImportExport/Op=Squeeze.1_Type=i32_IR=23e75ebc73f2be90e07a59af3d3c482c1ffd7b0b6c02d737029ab461e6fb3690_Device=CPU_Shape=static_Config=(),0.665409 +conformance_Relu/ReadIRTest.ImportExport/Op=Relu.1_Type=f32_IR=947d3f49818bd1a73f7a998de4eee0da3d386c740999bce5eba85be60d7a7994_Device=CPU_Shape=static_Config=(),0.581056 +conformance_ShapeOf/ReadIRTest.ImportExport/Op=ShapeOf.3_Type=i64_IR=435723c35b233b67f5af5af4c09172deeb94d45650f3d7dfb43a3f33f594015c_Device=CPU_Shape=static_Config=(),0.378778 +conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_IR=be811dcfd3d370d4bf74c8a96bc4bedc3abb3eadca0269c60d69cff1d86a6c76_Device=CPU_Shape=static_Config=(),0.373302 +conformance_Transpose/ReadIRTest.ImportExport/Op=Transpose.1_Type=i32_IR=6ed49f949dfeb680604d7abf27a330885f7c733692f7747e9457bf7641c61822_Device=CPU_Shape=static_Config=(),0.332705 +conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=i32_IR=db733dcfab287c327cba66fe3d3a3534d8d19ad9ab88e1b501bcefcc6f585ebe_Device=CPU_Shape=static_Config=(),0.332705 +conformance_Split/ReadIRTest.ImportExport/Op=Split.1_Type=i32_IR=eca73c8b50e0d6c12f3b1478ef02df65119bca2277e3d1c15ee61cc666c0fe01_Device=CPU_Shape=static_Config=(),0.332705 +conformance_FloorMod/ReadIRTest.ImportExport/Op=FloorMod.1_Type=i32_IR=95a4e5ec2946cc4464b3d5c079d701277daa69e895e1b21f25833a4d7345f2b8_Device=CPU_Shape=static_Config=(),0.332705 +conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=i32_IR=02070b77af7b11928cb8ccc284b1491b8d51c8479f74b2900246585638456d21_Device=CPU_Shape=static_Config=(),0.332705 +conformance_Pad/ReadIRTest.ImportExport/Op=Pad.1_Type=i64_IR=79d866eba77de1c58b283aa573c9e4dcdbe98baa80e6efd9154e4f4f7ba99e38_Device=CPU_Shape=static_Config=(),0.331205 +conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=i64_IR=a3b348df69cc23edf064b4df507e25fade0ec84e55a19ae4f34f6cbf9d15da0b_Device=CPU_Shape=static_Config=(),0.298373 +conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=i64_IR=75f0ca24cdc5be0b8bfdeed80190dcc9253fa515947aefb8c1d5db0de709a930_Device=CPU_Shape=static_Config=(),0.289196 +conformance_Broadcast/ReadIRTest.ImportExport/Op=Broadcast.3_Type=i64_IR=bc5429e365164c21ef4b57c1198dfb5d387242edb5fade35b7a99ad62747426b_Device=CPU_Shape=static_Config=(),0.274148 +conformance_ShapeOf/ReadIRTest.ImportExport/Op=ShapeOf.3_Type=i64_IR=750445fbeb8ad10861a677ede8b45a09a66924be75ac830e8760e4bfd2d02b21_Device=CPU_Shape=static_Config=(),0.271262 +conformance_Subtract/ReadIRTest.ImportExport/Op=Subtract.1_Type=i64_IR=6b6b7a0e007e741f4ec85184693a560ea80ac146e87e9d3e4cfa41bb2105ed5c_Device=CPU_Shape=static_Config=(),0.246682 +conformance_ShapeOf/ReadIRTest.ImportExport/Op=ShapeOf.3_Type=i64_IR=58fa35690be29b683722f4d37ef0c6da4b201f5693001e9bc5807a8f044dc9f2_Device=CPU_Shape=static_Config=(),0.246682 
+conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_IR=959e7e2e931ab2d4afa5af6dfe811ea8673d66658a3f4f06268a88d5c42ee2e9_Device=CPU_Shape=static_Config=(),0.246669 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=4955b0047c769357dd8dbdf9612cd61c4882443087f0be34936f6feb804d696e_Device=CPU_Shape=static_Config=(),0.219421 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=b07cbf9a298168f96b4e0d0648b6a2dbf28994a0e3f6f925a3f01f5ad86d119b_Device=CPU_Shape=static_Config=(),0.213845 +conformance_ShapeOf/ReadIRTest.ImportExport/Op=ShapeOf.3_Type=i64_IR=4baf3e2f4f21767bc3e2cabdedd5f73bea933198f5856f28a7f271c0d92e0b2e_Device=CPU_Shape=dynamic_Config=(),0.209451 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=7c483857da60c84265f78cd2725fd7517163e3bba61a2735696c19a36610f186_Device=CPU_Shape=static_Config=(),0.204316 +conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=i64_IR=7efcc0b11ccdc19094aaeb153c7b79023783aa683cbe12df25a65c2470dc17c3_Device=CPU_Shape=static_Config=(),0.186312 +conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_IR=81f8c41d64c2b0b3f05f5e279efddcfd2d95baae8bac0ad4a5e60e64b5509e96_Device=CPU_Shape=static_Config=(),0.181998 +conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=i64_IR=3bafd2af6c5aea1c35751e1ef647259f09a3e77d43671074f976da67dfccf0a0_Device=CPU_Shape=static_Config=(),0.180103 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=32de11a5f1655b7c483ab5aaf23a91d5130a3ab5316505b07ac34d6ba5f0dc31_Device=CPU_Shape=static_Config=(),0.178089 +conformance_Subtract/ReadIRTest.ImportExport/Op=Subtract.1_Type=i32_IR=9a463ace58ac105170c8caabfab1686634d342d4cf3cda95a2f8048acc934253_Device=CPU_Shape=static_Config=(),0.166352 +conformance_SpaceToBatch/ReadIRTest.ImportExport/Op=SpaceToBatch.2_Type=f32_IR=45b8453f72bf7feb3851f9a53e4396c782373d48ed6d44c9ba927833f4b4e36e_Device=CPU_Shape=static_Config=(),0.166352 +conformance_BatchToSpace/ReadIRTest.ImportExport/Op=BatchToSpace.2_Type=f32_IR=9f40615a720ffea87e596553df6edf33b0be19d5045366507ea1ae38774e6d9e_Device=CPU_Shape=static_Config=(),0.166352 +conformance_ShapeOf/ReadIRTest.ImportExport/Op=ShapeOf.3_Type=i32_IR=bea6472f16de5cd4d0d2f92c1a6726afd8301ba1a1fd0cd81245a65601e7894a_Device=CPU_Shape=static_Config=(),0.135704 +conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=i32_IR=c655670d7f95e1c1a8687f6dd6d9871119510507ea60d3f767d382bd12683e4c_Device=CPU_Shape=static_Config=(),0.135383 +conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=i32_IR=84f41b6615c2092dc715f00b76538eb2bbf1e948bff938268acc13dfb12c1719_Device=CPU_Shape=static_Config=(),0.134246 +conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_IR=495a58f5c44e76df384c0fd03fcf059b2c855737e50935185d19ea4cb267a68c_Device=CPU_Shape=static_Config=(),0.132957 +conformance_Transpose/ReadIRTest.ImportExport/Op=Transpose.1_Type=f32_IR=3baf0478996bfee873f9347dd16397f469979fa96e9d86d9046f80cc31d8c10b_Device=CPU_Shape=dynamic_Config=(),0.12194 +conformance_ShapeOf/ReadIRTest.ImportExport/Op=ShapeOf.3_Type=i64_IR=015eee57022b8981adcc83819d26d2e380536c2d501a1cb00a52eb22de483773_Device=CPU_Shape=dynamic_Config=(),0.118997 +conformance_Pad/ReadIRTest.ImportExport/Op=Pad.12_Type=f32_IR=8983b94a29122c128467eaeba13701417ba9c5791479bb6a4a3c0ec377dbab9a_Device=CPU_Shape=static_Config=(),0.0986761 
+conformance_BatchNormInference/ReadIRTest.ImportExport/Op=BatchNormInference.5_Type=f32_IR=116e9cb63683baba544cd4250799395a5e1ec9406ade0e893c86b22d9f4bb3fd_Device=CPU_Shape=dynamic_Config=(),0.0957363 +conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_IR=8b5855402774fe64888224c96d51405ba201e85f560c8a945efcf0cc8b2c9270_Device=CPU_Shape=dynamic_Config=(),0.0931045 +conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_IR=8b5855402774fe64888224c96d51405ba201e85f560c8a945efcf0cc8b2c9270_Device=CPU_Shape=dynamic_Config=(),0.0931045 +conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_IR=7757f03df98458ac7581cee89b96c46d0e40e358cce06083cd51abe45ac09aea_Device=CPU_Shape=dynamic_Config=(),0.0931045 +conformance_Subtract/ReadIRTest.ImportExport/Op=Subtract.1_Type=f32_IR=8c0f296e7a87e896962c8bd80ba45b52d06c4003f43014d691747db2261b63f9_Device=CPU_Shape=static_Config=(),0.0880612 +conformance_PRelu/ReadIRTest.ImportExport/Op=PRelu.1_Type=f32_IR=012525d1c0f56e6da11f52eeff3e568dda3f560cca9c722d74ae11dc9aa8d7c1_Device=CPU_Shape=static_Config=(),0.0810536 +conformance_Squeeze/ReadIRTest.ImportExport/Op=Squeeze.1_Type=i64_IR=515cc9ba645c2d222c6aacba8e931f2905ff072365f7e985ebc88a8fbfad45af_Device=CPU_Shape=static_Config=(),0.0795413 +conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=i64_IR=921061115928610953f85bf7f35f39d2625cc3416324eaac1b864785b70ba077_Device=CPU_Shape=static_Config=(),0.0776437 +conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_IR=93da330692e3e2eb8ced7f94ad11c77ed4670da278a163269176fe1b47697e24_Device=CPU_Shape=dynamic_Config=(),0.0744718 +conformance_ReduceMean/ReadIRTest.ImportExport/Op=ReduceMean.1_Type=f32_IR=6c6e5f1549c61483dffdbc6d56b827636b6be45dadaab7b13d73aae27b1419da_Device=CPU_Shape=static_Config=(),0.0707316 +conformance_Relu/ReadIRTest.ImportExport/Op=Relu.1_Type=f32_IR=8ba9d67f24be8d1129847c020cfdfc396f5027c26671ac180cff7b8e10d4fa62_Device=CPU_Shape=dynamic_Config=(),0.0696201 +conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_IR=7a278b3a3acec45758a464c87efa19e2c09fd53c6191d552f45f8b53ed1405ba_Device=CPU_Shape=static_Config=(),0.0688829 +conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=i64_IR=d05229ad04d9027cde364cb214386e8994e3b61253b3a9e38de74615c9b1e014_Device=CPU_Shape=static_Config=(),0.0652603 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=5c97baea94607deb7ec7273ca73be24708bf795cdcdf5e95db5a8b7bb482c781_Device=CPU_Shape=static_Config=(),0.0644895 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=e8f5e44d24a7c48953fab68bc27364ac2c603945ec50dcb127452450a7327aa9_Device=CPU_Shape=static_Config=(),0.0589244 +conformance_Broadcast/ReadIRTest.ImportExport/Op=Broadcast.3_Type=i64_IR=91274090503dee360ed6957185820290ae03c632c3dea6479fbc712af0bc1594_Device=CPU_Shape=static_Config=(),0.0570404 +conformance_Transpose/ReadIRTest.ImportExport/Op=Transpose.1_Type=i64_IR=c2920e603bf0c091c56afa12bd7c50adc2d6d41c0875c65bf0913e5389b9eee4_Device=CPU_Shape=static_Config=(),0.0569168 +conformance_Split/ReadIRTest.ImportExport/Op=Split.1_Type=i64_IR=79292d461bc7d40630332e279109a859a695a4212a04178a217673b63d8dab53_Device=CPU_Shape=static_Config=(),0.0569168 +conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=i64_IR=de3441e1c462b6cb4108e95b475dead8d2560cbafd9c02523ae1af84c276f4e9_Device=CPU_Shape=static_Config=(),0.0569168 
+conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=i64_IR=7f49206436a90afd655edbae79c15cfcb70d1668cab626bffcd722c2db6dc010_Device=CPU_Shape=static_Config=(),0.0569168 +conformance_ReduceMean/ReadIRTest.ImportExport/Op=ReduceMean.1_Type=f32_IR=1deb7c8c4c65eb8bbc6aa29fc35be018f9487ef869eefa144898dbbb0fb85996_Device=CPU_Shape=static_Config=(),0.0563675 +conformance_Slice/ReadIRTest.ImportExport/Op=Slice.8_Type=i64_IR=d8c7ffedc1e0fcfe4fef6b027d51b30f56a9215d356811a858c96fdf80705211_Device=CPU_Shape=static_Config=(),0.0558155 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=96aa325c6b2fd9d3d121c2d5c3b4ece42f387ca9db81acafac9d0d4b50501806_Device=CPU_Shape=static_Config=(),0.0537137 +conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_IR=56c74da508a2e5caa5252406596ee8eb19dde42d4b4d1252a019dca73f7738a0_Device=CPU_Shape=dynamic_Config=(),0.048913 +conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_IR=a42d3e87f577469a5265c21941c8ead0947472bc310a78b91f142521408259bf_Device=CPU_Shape=static_Config=(),0.048853 +conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_IR=da1e0da18417d2ed2ce8b54142bde883d2f9b407cc4cc9a505999ccced3c8fec_Device=CPU_Shape=dynamic_Config=(),0.0477481 +conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_IR=c49f1a91331b478cad0ecdfb910bf25118db70698d78097fa23ae0114924cd64_Device=CPU_Shape=static_Config=(),0.0428098 +conformance_Sigmoid/ReadIRTest.ImportExport/Op=Sigmoid.1_Type=f32_IR=2e413e64305e10fc63031011c51a7b7770e41d7ba73e36c93bb2a324f507a0ee_Device=CPU_Shape=static_Config=(),0.0427882 +conformance_Transpose/ReadIRTest.ImportExport/Op=Transpose.1_Type=f32_IR=c2f6dc18a8a51d936cd53abf903dc784183366a50034b5cd30d4367dc0c190f6_Device=CPU_Shape=static_Config=(),0.042676 +conformance_Divide/ReadIRTest.ImportExport/Op=Divide.1_Type=f32_IR=5922124ba06a65ec3cdda85b1b13802914221acca21c10dafcc6083e48db45df_Device=CPU_Shape=static_Config=(),0.0421306 +conformance_Divide/ReadIRTest.ImportExport/Op=Divide.1_Type=f32_IR=4bd7161dddb8220a2f3c78190cacaf1e7958fa67b45ef8ef823afd56e4e5b3b3_Device=CPU_Shape=static_Config=(),0.0421082 +conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=i64_IR=9b083ad61b4eabec6071d5f747aab982428ba468409571227940d8b21e2b5a68_Device=CPU_Shape=static_Config=(),0.0420677 +conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_IR=156f9ddcae5546475fd827dcbb5fe9bb009fe2e93cc3daa519412ab1de96342a_Device=CPU_Shape=dynamic_Config=(),0.0418705 +conformance_MVN/ReadIRTest.ImportExport/Op=MVN.6_Type=f32_IR=9651b0191204a425cdf8fe72b04e0f4ec201bd9be7b03aeb0dfa28b938482cb4_Device=CPU_Shape=static_Config=(),0.041569 +conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_IR=af7ccfd01fa7510e09b2c59d6a4955f3025b0d3d8881d51b81e17ad511e54365_Device=CPU_Shape=static_Config=(),0.0407652 +conformance_Divide/ReadIRTest.ImportExport/Op=Divide.1_Type=i64_IR=493543671b825e6d048b8ea0af7b49bf2e940f776aa518342e02f95e06da6558_Device=CPU_Shape=static_Config=(),0.0398763 +conformance_Sqrt/ReadIRTest.ImportExport/Op=Sqrt.1_Type=f32_IR=2c36962e7c6fff7a6d504a6a5a8315432d023c5f33954e490d353b34e99a28bd_Device=CPU_Shape=static_Config=(),0.0398188 +conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_IR=a08770a83a9619323ab2db111996acee7f11b247ac4cae9a75b387b7eaa153de_Device=CPU_Shape=static_Config=(),0.0393199 
+conformance_ReduceProd/ReadIRTest.ImportExport/Op=ReduceProd.1_Type=i64_IR=6edaa734e215f7c4ad6ba69798731a96fdf2bab7fe1c50f5bea84c36bb9e9453_Device=CPU_Shape=static_Config=(),0.0387517 +conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_IR=62bf39c23da0f2840ab9e5a91a2c0cf7afba070aa66090bdaaa094b8cae573ea_Device=CPU_Shape=static_Config=(),0.0373961 +conformance_Slice/ReadIRTest.ImportExport/Op=Slice.8_Type=i64_IR=7d99619da324fe7940479a1e74bb78dbff2d978279d4ee97472632fcc0b58903_Device=CPU_Shape=static_Config=(),0.0373756 +conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=i64_IR=92844ccbd8c494e23b839b10339e16f38ea98423989e53a0bfcdc0eff6d640f0_Device=CPU_Shape=static_Config=(),0.0335313 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=8d81a71edde523e9fd3d8a05af75bd2f93ef77372c3eb77f3dcc507625ae929a_Device=CPU_Shape=static_Config=(),0.0328868 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=2f8c29ce317db520ec217ba6bf1903193cf3ad239073528a06b536eb1a0eceba_Device=CPU_Shape=static_Config=(),0.0328868 +conformance_VariadicSplit/ReadIRTest.ImportExport/Op=VariadicSplit.1_Type=i64_IR=effde21c71b10c54b9a75982d2990604839e673e202e0201d3ed1cd98228f452_Device=CPU_Shape=static_Config=(),0.0314886 +conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_IR=4277adf0897c541911c5195f1940aa8cb1c553a0c169796309ba5540ca572f38_Device=CPU_Shape=static_Config=(),0.0314323 +conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.11_Type=f32_IR=6248a7a36da5bf2d22ec987a5a7a9546f99dc7fb105cbe1bdaa9c7f09eac06e3_Device=CPU_Shape=static_Config=(),0.0313963 +conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.11_Type=f32_IR=181677be9e90edcb02f53b9b5bdc3a7b78d05a590bd1794045d6e4fc37827295_Device=CPU_Shape=static_Config=(),0.0305188 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=287cf907287e73809a08e7668b8e8a221b6729c239a61bcdc13e7c2e72466f99_Device=CPU_Shape=static_Config=(),0.0297306 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=18119d6ae879f263f5c7e40c7afa6bdb04604ea91eda8b484c5669493133d951_Device=CPU_Shape=static_Config=(),0.0297306 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=5ba67bedf28f65266a777765f39eae6f23972fb76ecb75ad79b77708e8847a1d_Device=CPU_Shape=dynamic_Config=(),0.0297045 +conformance_Subtract/ReadIRTest.ImportExport/Op=Subtract.1_Type=f32_IR=24e4b673007068a0740c75c5acbb44d3b50b78161a2c63983860ef923c233f2e_Device=CPU_Shape=static_Config=(),0.0281836 +conformance_Sqrt/ReadIRTest.ImportExport/Op=Sqrt.1_Type=f32_IR=35792850a7dd5ad4a02683e1081bb4545a217b763ae1d96a1b23e10824115120_Device=CPU_Shape=static_Config=(),0.0281836 +conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_IR=c1af66622df4ee3bab6949ae7d9997b10279370807ebd36b604c4e39ccedd1f4_Device=CPU_Shape=static_Config=(),0.0281836 +conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_IR=69be0539fcba434eab3fecde8ed60c58ff9484aa9613f3851948080651aeaa26_Device=CPU_Shape=static_Config=(),0.0275631 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=4ee6c09b308afe04fb7463a0fefc5d7213c7ea6d70d2b8c3ada6240567ecd2f3_Device=CPU_Shape=static_Config=(),0.0268406 +conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=f32_IR=662130dbd301b809acaa39a553fdcfaf52d5d67c4e286f3c25e26ceee6bac36b_Device=CPU_Shape=static_Config=(),0.0253782 
+conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_IR=d8188607ce80ab08ebd10fa9538e11d19413aca4db7e3f63b49256d097bfd89f_Device=CPU_Shape=dynamic_Config=(),0.0250005 +conformance_Swish/ReadIRTest.ImportExport/Op=Swish.4_Type=f32_IR=f7ddbfc1322949a31ce9a4577e1019ee5855b47bde5272735093b60e3a4e3c5c_Device=CPU_Shape=static_Config=(),0.0247045 +conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_IR=f9766ba3e833aad7e53270bbf40a31021b9c97cbb58b03d31efef04414978258_Device=CPU_Shape=static_Config=(),0.0246585 +conformance_Subtract/ReadIRTest.ImportExport/Op=Subtract.1_Type=f32_IR=42c894ffdf419c0d71fe6bfa20664d552394d52963b5ae2bca69193867c6731e_Device=CPU_Shape=static_Config=(),0.0242879 +conformance_Divide/ReadIRTest.ImportExport/Op=Divide.1_Type=f32_IR=411ba8c994b992fe5f486e5632010cc64c780b4560623fabdcce7eaeae914a49_Device=CPU_Shape=static_Config=(),0.0242879 +conformance_Softmax/ReadIRTest.ImportExport/Op=Softmax.8_Type=f32_IR=10d10c74f3b358661571be269594f003f240506d0b2df84e20f1181011124010_Device=CPU_Shape=dynamic_Config=(),0.0238858 +conformance_Softmax/ReadIRTest.ImportExport/Op=Softmax.8_Type=f32_IR=Softmax-8_596_Device=CPU_Shape=dynamic_Config=(),0.023874 +conformance_PRelu/ReadIRTest.ImportExport/Op=PRelu.1_Type=f32_IR=9519f0f007d875179d5a4236590f4254f0a2954c8ed453484a475b2b3a9d2d6b_Device=CPU_Shape=static_Config=(),0.0236502 +conformance_VariadicSplit/ReadIRTest.ImportExport/Op=VariadicSplit.1_Type=f32_IR=1e90f15ac4cccc70721a3d177d9a1c54da3d8a9534595d40fdb21c89594b0a8c_Device=CPU_Shape=dynamic_Config=(),0.023276 +conformance_Tanh/ReadIRTest.ImportExport/Op=Tanh.1_Type=f32_IR=5b7ebaab99946e71bcb8aafe28711160144a7aa3c17fdf014304711c7bf62bcb_Device=CPU_Shape=dynamic_Config=(),0.023276 +conformance_Subtract/ReadIRTest.ImportExport/Op=Subtract.1_Type=i64_IR=a21b8641bb78c7aa79914b64afd28d63c5b503bf6ab15d823b280071e5660ff2_Device=CPU_Shape=static_Config=(),0.023276 +conformance_Convert/ReadIRTest.Inference/Op=Convert.1_Type=boolean_IR=90fb30f8c780dbd8137e3aa5e6b5a575bbe658259931f44c7084809804e15b52_Device=CPU_Shape=dynamic_Config=(),0.023276 +conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=boolean_IR=90fb30f8c780dbd8137e3aa5e6b5a575bbe658259931f44c7084809804e15b52_Device=CPU_Shape=dynamic_Config=(),0.023276 +conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_IR=3de649a3fe1e333f3722ec17f8e4f518aa87d7e08a2bfd1ba21399ad3d469888_Device=CPU_Shape=dynamic_Config=(),0.0229607 +conformance_ScatterUpdate/ReadIRTest.ImportExport/Op=ScatterUpdate.3_Type=i64_IR=59cecddb7722c2a12a869435c38863e27e9b3ead7fa47a268241d0cb7b524d9b_Device=CPU_Shape=static_Config=(),0.0228497 +conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_IR=9da6d7f7669f9f09c739496035cc760162d1371e31aec96752e0628f6f80d87a_Device=CPU_Shape=dynamic_Config=(),0.0228497 +conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=f32_IR=621fc8f6cb7ec36474fd43d8658886d20ada78b8ff90f21a1dc5f44ef87a5797_Device=CPU_Shape=static_Config=(),0.0226964 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=c11c12ac7fe4b82c554799ef0a56b4605c3835ee505a5ae4e6f0f79231d43b4f_Device=CPU_Shape=dynamic_Config=(),0.0218955 +conformance_ReduceMean/ReadIRTest.ImportExport/Op=ReduceMean.1_Type=f32_IR=37b3937023f6db3dcb7e0c10452bd06d11bbd6d40e83b89bf04185a347a085a8_Device=CPU_Shape=dynamic_Config=(),0.0218805 
+conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_IR=0f85dcbf41aeeb2642d5084973552e963986ebdf6ff4243b7e2b40465fb07786_Device=CPU_Shape=static_Config=(),0.021815 +conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=f32_IR=a88f475a3b278f5f7af50d8ccca29d647b2d84101fea5f894aa02f6edb7e8fa0_Device=CPU_Shape=static_Config=(),0.0211883 +conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_IR=5933593e09765497380873b7bfe96f7c5f609673d5943c7e810931c1617146e3_Device=CPU_Shape=static_Config=(),0.0206648 +conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_IR=b2b3d853a37428ee35fa950ecc1e6dbd2fb70b62e49dba54329a464953002301_Device=CPU_Shape=static_Config=(),0.0203433 +conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_IR=69288cafa02066780e732eed8dfab50ebc3575e4d57fe4e2e4f55db87decef72_Device=CPU_Shape=static_Config=(),0.0195235 +conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_IR=2eded39dfd7ac5046f51ef6ef6e33e759ce39c06d2641d877895d0a43cb4a6d2_Device=CPU_Shape=dynamic_Config=(),0.0166151 +conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_IR=6c8eed6a6895ce60fa6b86361e8eee4cec9c8f85128ffa2f913df16c48839191_Device=CPU_Shape=dynamic_Config=(),0.0164502 +conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_IR=6c8eed6a6895ce60fa6b86361e8eee4cec9c8f85128ffa2f913df16c48839191_Device=CPU_Shape=dynamic_Config=(),0.0164502 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=c47e995bd374df7c18e3a95107fd4caff3615eda26c422b91ffa96c6418d4006_Device=CPU_Shape=dynamic_Config=(),0.0162285 +conformance_Minimum/ReadIRTest.ImportExport/Op=Minimum.1_Type=f32_IR=aea02f3c3b0404dcab9cdb13a0fe007af01e6be80d807ad870ec241d2d49f65a_Device=CPU_Shape=static_Config=(),0.0159963 +conformance_Maximum/ReadIRTest.ImportExport/Op=Maximum.1_Type=f32_IR=a390802bd8c25df38450ff2c8149a560200a2b125c1fea04b09535f2c71dc012_Device=CPU_Shape=static_Config=(),0.0159963 +conformance_MVN/ReadIRTest.ImportExport/Op=MVN.6_Type=f32_IR=3ae8829c4d6055ffe442b07c447b815ea50238534f49ddb610ce0feaaf769473_Device=CPU_Shape=dynamic_Config=(),0.0159241 +conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_IR=MatMul-1_646_Device=CPU_Shape=static_Config=(),0.0159167 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=f027755690bedff8f2782bb1993d2d92bcec4981b3a0e4c1d9c6dd3b10a00c2f_Device=CPU_Shape=dynamic_Config=(),0.0158331 +conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_IR=37b3060eea0781b1d483e181cccd1a361570fbbdcf0ddbe5c281a7c541aec6db_Device=CPU_Shape=dynamic_Config=(),0.0158015 +conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_IR=25f2e85c4559b20da7e5d98431dea3bbf4afa3cfcea581c25d3f754d3fc01d2f_Device=CPU_Shape=dynamic_Config=(),0.0158015 +conformance_ShapeOf/ReadIRTest.ImportExport/Op=ShapeOf.3_Type=i64_IR=65239e34c3c31c17a90305271e6d182d345b334d3731fd53ef96de1894c6859f_Device=CPU_Shape=static_Config=(),0.0151353 +conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_IR=5ec27755fb04cb98f9e65c0279beb3b390ba06a409e5b117df85052fdf9e9dc5_Device=CPU_Shape=dynamic_Config=(),0.0150812 +conformance_MVN/ReadIRTest.ImportExport/Op=MVN.6_Type=f32_IR=fae78534370da36a4babd3fb038590501102f83d9ef2964e5a07ff15d99e3c3e_Device=CPU_Shape=static_Config=(),0.0149938 +conformance_Power/ReadIRTest.ImportExport/Op=Power.1_Type=f32_IR=681a46c0635072ade7d9d4a91834513ebe215af64b390b1962365ee16bf7c38c_Device=CPU_Shape=dynamic_Config=(),0.0148906 
+conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_IR=000c81a34f81c3efda11ab91a9bc7be609c2f46a8f9e7921f39337666e1ffdd0_Device=CPU_Shape=static_Config=(),0.0147421 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=0d293ddd339f696ae48e8a5fff22c95de3c06b84307f6116a2bb0103905e6b31_Device=CPU_Shape=dynamic_Config=(),0.0146742 +conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_IR=22778eceddc10aa01f8c55f51e290e067b136653b797c0a62986ca31e92e8a36_Device=CPU_Shape=static_Config=(),0.0145297 +conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=i64_IR=04fe7a6d7151d405093cb00d55390a280fa94987869ad5129f01c4b96b28bca1_Device=CPU_Shape=static_Config=(),0.014321 +conformance_Subtract/ReadIRTest.ImportExport/Op=Subtract.1_Type=f32_IR=a81eeecb2ca6b3bea6a3e2e61bbc5c7f07fa3253e9ba19b9b53a7a3b1746cb2a_Device=CPU_Shape=static_Config=(),0.0142721 +conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_IR=448efd2be628c2184cdbc37975b906144ff875bf252ff3218ae85ce3d9525084_Device=CPU_Shape=dynamic_Config=(),0.0142515 +conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_IR=907fd7700664738545f3f2f277c50406d767fc101701ba4d41dc532090114903_Device=CPU_Shape=static_Config=(),0.0142371 +conformance_Slice/ReadIRTest.ImportExport/Op=Slice.8_Type=f32_IR=3a90c9a2040b33603eee7a2b4e03215813089680ca26b7184450b38bfd1b5ca8_Device=CPU_Shape=static_Config=(),0.0142091 +conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_IR=880a326f16f31b6ad99e7e1ebb7b1d68237b626a4d08e9093b830025dacc13c4_Device=CPU_Shape=static_Config=(),0.0134903 +conformance_Broadcast/ReadIRTest.ImportExport/Op=Broadcast.3_Type=i32_IR=ba6cda6f3967c05d846e67774df2b39815a21279658eb64503e8d60379d9b0d5_Device=CPU_Shape=static_Config=(),0.0134014 +conformance_ShapeOf/ReadIRTest.ImportExport/Op=ShapeOf.3_Type=i32_IR=87a6c487fa875ad12aef43b6bf87c0673810054173312e16534b2988fc80cdfb_Device=CPU_Shape=static_Config=(),0.0133972 +conformance_ScatterNDUpdate/ReadIRTest.ImportExport/Op=ScatterNDUpdate.4_Type=i32_IR=a732f273066d374acdca416747507d2faae556883e889e9b722fb66f4ee2b57a_Device=CPU_Shape=static_Config=(),0.0133972 +conformance_Softmax/ReadIRTest.ImportExport/Op=Softmax.8_Type=f32_IR=99034f2132c9d40f5157baf60481ef43c25bdba6b8878541d8bc32d9f4b6b662_Device=CPU_Shape=static_Config=(),0.0130759 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=626feb9f9600951976b771100e07f28dddcf8d13f68b29dc48c707346f9cb698_Device=CPU_Shape=dynamic_Config=(),0.0130047 +conformance_Transpose/ReadIRTest.ImportExport/Op=Transpose.1_Type=f32_IR=affe5eee2813d0c7a9403233d62243b060eecc4b7bd5ee8948cbc7da77c98514_Device=CPU_Shape=static_Config=(),0.0127628 +conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_IR=704ba6fbcc16d8f88a04decf46127516ab809befed2e9cf94bc746322913a961_Device=CPU_Shape=static_Config=(),0.0124724 +conformance_Clamp/ReadIRTest.ImportExport/Op=Clamp.1_Type=f32_IR=56d6a6e8decfcceeb35bc53e605ba6c83594056d26af7ea48f022a1b60fd103b_Device=CPU_Shape=static_Config=(),0.0124537 +conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_IR=98a922389230aac2aff43a0a380e67d9f14b9d091ab61e90a37105529d0a211d_Device=CPU_Shape=static_Config=(),0.0122012 +conformance_Clamp/ReadIRTest.ImportExport/Op=Clamp.1_Type=f32_IR=Clamp-1_31_Device=CPU_Shape=static_Config=(),0.0121839 +conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_IR=07dcad9b29dddad274496815af8a4e58170fb15188d6a6299a4a975f8c03c05b_Device=CPU_Shape=static_Config=(),0.0117967 
+conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=i64_IR=59929a87c852e17a6ae08c2d55a8d2591897c22d08977b9b6d843a3fd90a3908_Device=CPU_Shape=static_Config=(),0.0115026 +conformance_Select/ReadIRTest.ImportExport/Op=Select.1_Type=f32_IR=ed1a9abf3e73e2bc5df473132c64b3ed1795e33d18c76607fe3b5842edd88276_Device=CPU_Shape=dynamic_Config=(),0.0114804 +conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_IR=3b2426ad5064ebc5ffdae90c7f895562ce3d046ecbf6c6fd8733245c6aed2066_Device=CPU_Shape=static_Config=(),0.0113197 +conformance_Softmax/ReadIRTest.ImportExport/Op=Softmax.8_Type=f32_IR=Softmax-8_283_Device=CPU_Shape=static_Config=(),0.0113045 +conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=i64_IR=75318a34c705d8c8f5e4861fdb4e6e9c0834d77dd5ffd3dd55b3f01cac70891f_Device=CPU_Shape=static_Config=(),0.0112084 +conformance_Sqrt/ReadIRTest.ImportExport/Op=Sqrt.1_Type=f32_IR=11cacdd465d7fe0e32568f8a17f8b623f4f2b58de34f30ae709d9643a00afc9b_Device=CPU_Shape=static_Config=(),0.0111659 +conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_IR=95f51a08c808163b0e7c072480dc341da6243e4a7a0dd59af57c5d4f5e24acf9_Device=CPU_Shape=dynamic_Config=(),0.0110416 +conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_IR=0bd257d3c0af5fa7cab5329c930be9f7a4b249b367f1ca78810627caf6ffe230_Device=CPU_Shape=dynamic_Config=(),0.0110416 +conformance_Divide/ReadIRTest.ImportExport/Op=Divide.1_Type=f32_IR=d48fdd131bef036852bc118ebfc179b359e7db7856d6369347588a2a26231204_Device=CPU_Shape=dynamic_Config=(),0.0109442 +conformance_Subtract/ReadIRTest.ImportExport/Op=Subtract.1_Type=f32_IR=e48ed87de5febdc5c1300e68dad4c51d7e97f0ea34b7f9c73949926634b95685_Device=CPU_Shape=dynamic_Config=(),0.0109401 +conformance_Sqrt/ReadIRTest.ImportExport/Op=Sqrt.1_Type=f32_IR=83e5261201c85bdc708176dd4631bd12875f7a197428f4b79e8c9877efe33139_Device=CPU_Shape=dynamic_Config=(),0.0109401 +conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=i64_IR=c87da4885b3edcecc40038d053baf9059f674ba4a9e598f20fa83b61f7e9ec91_Device=CPU_Shape=static_Config=(),0.0107847 +conformance_SoftPlus/ReadIRTest.ImportExport/Op=SoftPlus.4_Type=f32_IR=95dc29ef42853ef728462a5a72b472018c058011b6faa222daed717cd72d7881_Device=CPU_Shape=static_Config=(),0.0107018 +conformance_Divide/ReadIRTest.ImportExport/Op=Divide.1_Type=f32_IR=644ec28c2d27db943c11c8802bf28aaf699eebdd1712f1ab6681d8a0cb0896bf_Device=CPU_Shape=static_Config=(),0.0106568 +conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_IR=0940386130c9b189a4e1cac6c8798e1f6fd05cb2f9098204e74378df9a4c2b5a_Device=CPU_Shape=static_Config=(),0.0106568 +conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.11_Type=f32_IR=64e8a6ee56262645f17af6a3f334bf00e56e3712be5ef66f9f10cd32f80258cb_Device=CPU_Shape=static_Config=(),0.0106118 +conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_IR=150df8d12195c8e144a3eef6338a3ffbd6b6f3fbbebfc66757b1f79270f9bcb3_Device=CPU_Shape=static_Config=(),0.0105383 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=dd5a14fcf166ed7f3e9b173cb3a4e89a48962e865d2a4025a7209b47325ec6c1_Device=CPU_Shape=static_Config=(),0.0104435 +conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_IR=8edd2105c3fb600c9ffdf53b2949d3d98fdf682457508081805faa5b25447be2_Device=CPU_Shape=static_Config=(),0.0104219 +conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_IR=3236962e2389af3c1af3caccbd56411e3b230a6d760ee4ed3c9fbe4daa22625d_Device=CPU_Shape=dynamic_Config=(),0.0102603 
+conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=i64_IR=7f0978a6173bc967f1a56da2aa7ac9a8ea817f6988fb5ed96511af2d799dfd00_Device=CPU_Shape=static_Config=(),0.0102283 +conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_IR=59d92654e12b73b99b4fd5bf34b64dfdbad6d1b34dbb5ad67e5baace21490bb4_Device=CPU_Shape=static_Config=(),0.00987668 +conformance_Transpose/ReadIRTest.ImportExport/Op=Transpose.1_Type=f32_IR=baa686f705a406660a8e77cc04ea3bcd35351489b50b2f8fadbbffa502f694c0_Device=CPU_Shape=static_Config=(),0.00978874 +conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_IR=e00d5b13c12c8d618de0de7a11dadd363e1d47bf3fd8499562cba78ccd5c81c3_Device=CPU_Shape=static_Config=(),0.00978274 +conformance_HardSigmoid/ReadIRTest.ImportExport/Op=HardSigmoid.1_Type=f32_IR=6dc0431d82bc80d48dfc97fbab04605e5cf8ada37f024b101153620df62424b9_Device=CPU_Shape=static_Config=(),0.00976088 +conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_IR=b49875760194a46f0ec6cc449f39e549851b25f14b11fff14c13e8194e05974f_Device=CPU_Shape=dynamic_Config=(),0.00915451 +conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_IR=de1a57ba5fe4c6fcb783470723e90b7fa67c5b91d9fc3d0ac3563ba0ea854eb6_Device=CPU_Shape=static_Config=(),0.00895819 +conformance_Subtract/ReadIRTest.ImportExport/Op=Subtract.1_Type=i64_IR=b564b2276303fad81c752012ef1b3324bb2dd79518cb1512c5e0bf353301ac43_Device=CPU_Shape=static_Config=(),0.00894717 +conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_IR=78120d308d75322fb4a249d40ebde4d59cfaedfc7aa96504ba759ac5e1a76ffe_Device=CPU_Shape=static_Config=(),0.00887317 +conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_IR=48383fceab6e40aa7658c277d564254ebe0dc2ad97add147355963758ab63ccb_Device=CPU_Shape=static_Config=(),0.00883933 +conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_IR=MatMul-1_707_Device=CPU_Shape=static_Config=(),0.00883452 +conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_IR=1598476e13d17642505eaa9c1b8e70883e75a5304abd407434941c9487a96cb3_Device=CPU_Shape=static_Config=(),0.00845929 +conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_IR=33719d2fc9285c40f5a7ef6a679d3d7cd6e1776a2af022bfd37d27fe14cf7711_Device=CPU_Shape=static_Config=(),0.00844865 +conformance_ReduceMean/ReadIRTest.ImportExport/Op=ReduceMean.1_Type=f32_IR=0bde620ed06caaece32792dedc44f685d5e25da07c429196c033637fa2cce99a_Device=CPU_Shape=static_Config=(),0.00825316 +conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_IR=7e14ebac4d39514f21befc50dd0038b8de7a34b27847c80f0dbf802816a425b5_Device=CPU_Shape=static_Config=(),0.00824875 +conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=i32_IR=d2b71d56aa3ceb9e89ae7d8f85d95490898c5d70732f550e9342591928348dc0_Device=CPU_Shape=static_Config=(),0.0081504 +conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_IR=b67a5e877b0a643550bf856cf873a5c49d4b25619052963a168027b55c961df4_Device=CPU_Shape=dynamic_Config=(),0.0081349 +conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_IR=15199c5cf33e6d47dbacc3f6996130e104a06e73fac4635e8ae75c0cc0aeb46f_Device=CPU_Shape=dynamic_Config=(),0.00807368 +conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_IR=5caed40ca7c65cc5ce6f0958876237fefb1ee5e99a97230d52d373fe7deca7c9_Device=CPU_Shape=dynamic_Config=(),0.00807368 
+conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_IR=00712fc27968887da2b8c8305fd95d17ca73243b0a8199786409df03cbe894a8_Device=CPU_Shape=static_Config=(),0.00796827 +conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_IR=61dfdb414561907fe7bbeb6a76956faf210c2e51a7253cc1423220452bf784ce_Device=CPU_Shape=static_Config=(),0.0079656 +conformance_Divide/ReadIRTest.ImportExport/Op=Divide.1_Type=i64_IR=f956cf5fb56bf0ace1f7759a931579cf7857a5672bc0e43876e2640cdeb9c1ee_Device=CPU_Shape=static_Config=(),0.00790587 +conformance_Power/ReadIRTest.ImportExport/Op=Power.1_Type=f32_IR=684bc4ab91e211f69d642714baf6e16602736f54fcb7afa9d03f29e90c41aa92_Device=CPU_Shape=static_Config=(),0.00782781 +conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_IR=f50865599f7260e05c92e4c4af2b23673d9c72228397e36a1e3fb694d01bd2b6_Device=CPU_Shape=static_Config=(),0.00782781 +conformance_Divide/ReadIRTest.ImportExport/Op=Divide.1_Type=f32_IR=cb5b2e21d2218773d120651ce7fa0513cc4cceb20f85eb6a5575a385dc377bb4_Device=CPU_Shape=static_Config=(),0.00782781 +conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_IR=db82fcef6ae0e96d00ff5802813f136082c26c9830cf60131cfc9dd9edf97fd7_Device=CPU_Shape=static_Config=(),0.00782609 +conformance_VariadicSplit/ReadIRTest.ImportExport/Op=VariadicSplit.1_Type=f32_IR=1edf476069174dfd3a7cd3e44accf10c3307ae06a1e79531c87e2847daf3268c_Device=CPU_Shape=static_Config=(),0.00769562 +conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_IR=6e425463657c7ada080914883df93e6f76afb14da959dd90d60ffb32f4e40d30_Device=CPU_Shape=dynamic_Config=(),0.00765294 +conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_IR=6e425463657c7ada080914883df93e6f76afb14da959dd90d60ffb32f4e40d30_Device=CPU_Shape=dynamic_Config=(),0.00765294 +conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=u8_IR=3e5e29399ace21ea387d6e9ef94266b16549271a55badc376ef9a42b024fcf23_Device=CPU_Shape=dynamic_Config=(),0.00761644 +conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=u8_IR=02078fa4aeea941f80b3d41995fe889c3d50696fbb83b00c7c2e1fd26eff0d22_Device=CPU_Shape=dynamic_Config=(),0.00761644 +conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_IR=d02c304f5dc9d0e0e3b7b1bf59d67a2e9b25b55afc4137c38c2da33b6922df77_Device=CPU_Shape=dynamic_Config=(),0.00761644 +conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_IR=7f826e9cff9d0bdfbe349b216151765ebb6bdf40d8f2e443f2d9412a16388af5_Device=CPU_Shape=static_Config=(),0.0073883 +conformance_Gelu/ReadIRTest.ImportExport/Op=Gelu.7_Type=f32_IR=4127903b253038cda36eece48daf11e7255ff9bc6daaad10cf763a0c528f9287_Device=CPU_Shape=static_Config=(),0.00735097 +conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_IR=701c8cac9901694d74715cf8c855a3a399b021a7478e4a4e6c99ae2989b1162a_Device=CPU_Shape=static_Config=(),0.00731518 +conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_IR=1c610a7e945be537f80fb5dae18eccedfb650940acc12469588621ef4b9d46dc_Device=CPU_Shape=static_Config=(),0.00731309 +conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=f32_IR=aa611e5d03e752c161f6b8f51a6fdd5f96870b93b3d6ed4ea3e2c91cf190ef4b_Device=CPU_Shape=static_Config=(),0.0072154 +conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_IR=2aeee98715d34a6d195fb40b33a05a60b14970f766ddfa8436bfb703c24661cc_Device=CPU_Shape=dynamic_Config=(),0.0071632 
+conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.11_Type=f32_IR=fdeaed3237a0794ce3ba6655c8e21590e6aeb35504730b0ea5f5ded0acb73f45_Device=CPU_Shape=dynamic_Config=(),0.00716217 +conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_IR=MatMul-1_823_Device=CPU_Shape=dynamic_Config=(),0.00715919 +conformance_Transpose/ReadIRTest.ImportExport/Op=Transpose.1_Type=f32_IR=15e5b0e0515bc578b9cf0f3b33f1fc2049749b99ffe1525b27f82973f69e8c59_Device=CPU_Shape=static_Config=(),0.00714277 +conformance_ReduceProd/ReadIRTest.ImportExport/Op=ReduceProd.1_Type=i64_IR=d4367bfd7a56153cf1a3ade9af680d7c66dc5296f57a150d47238f0212259ce6_Device=CPU_Shape=static_Config=(),0.00706339 +conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=i64_IR=9728dcf8550f62af4b4908e2467a03606982e94ec695582c10a86d75e5199a3b_Device=CPU_Shape=static_Config=(),0.0068293 +conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=i64_IR=660303cf2026018794a020306c6cdce198d2a7b1297ca36b94dc3de152c2121c_Device=CPU_Shape=static_Config=(),0.0068293 +conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=i64_IR=1b127181c8972ffed5fcb3b6d7bae2b1232ef1d287341c30dd2bca8029ee01e1_Device=CPU_Shape=static_Config=(),0.0068293 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=fd52395f8ed9e7833d5f9d723eafb990fc83ff45577b0df389e3d0975422713f_Device=CPU_Shape=static_Config=(),0.00666364 +conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_IR=1eea2d24d41037f619efe5fd5159170ae7de93a26c9da9add795008cfd2395d9_Device=CPU_Shape=static_Config=(),0.00655082 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=f552ae7f024f140c80fc762b702bbfc97992935834acf40e4e1c1a4634b8166c_Device=CPU_Shape=static_Config=(),0.00644546 +conformance_ShapeOf/ReadIRTest.ImportExport/Op=ShapeOf.3_Type=i32_IR=510005ff9ba3a023fcba5026f3e289f98f4ad7a8ece1f058a573143acccee262_Device=CPU_Shape=dynamic_Config=(),0.00640201 +conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=i64_IR=76b583a30803001c17f27be1594d5f2591b55983398ab16f1ba143cecbf65e7e_Device=CPU_Shape=static_Config=(),0.00638788 +conformance_Divide/ReadIRTest.ImportExport/Op=Divide.1_Type=f32_IR=816214015175abe6cd3aada5e209a5107811de5010bfe8ee1a2ebcd998c0c444_Device=CPU_Shape=static_Config=(),0.00626376 +conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_IR=260d12b9a5e8a71c726df95d4b8fb30bb764e1ba82a50fbe4091e386a280a4ac_Device=CPU_Shape=static_Config=(),0.00626324 +conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_IR=960183c0bc2aaff513e4e49e65f056479572bd2c3a7e5d58a230273a8eb255f0_Device=CPU_Shape=dynamic_Config=(),0.00622342 +conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_IR=d65f51bdade5ded2bbb821d50954070419ca1ade57ebc69efb6e20105cf8707e_Device=CPU_Shape=static_Config=(),0.00607089 +conformance_Range/ReadIRTest.Inference/Op=Range.4_Type=i64_IR=e7b10e4cadc7aac08f4c2275f37ef0aa95351ddceb40e48422ea3c4893f69764_Device=CPU_Shape=static_Config=(),0.00589991 +conformance_Range/ReadIRTest.ImportExport/Op=Range.4_Type=i64_IR=e7b10e4cadc7aac08f4c2275f37ef0aa95351ddceb40e48422ea3c4893f69764_Device=CPU_Shape=static_Config=(),0.00589991 +conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_IR=6f68825fed94645e4a5c23338a98770d770be8f9600566ab93521bc858733911_Device=CPU_Shape=static_Config=(),0.00576172 
+conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_IR=ca090e13d7734505d44fb6281626c442a5740326a0575175de68b1f210bdca07_Device=CPU_Shape=static_Config=(),0.00576154 +conformance_Pad/ReadIRTest.ImportExport/Op=Pad.12_Type=f32_IR=abc38c684591dfbc656b1c9aa0553b107f2b6a593665cfb1d802d1812177518d_Device=CPU_Shape=static_Config=(),0.00573863 +conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_IR=05bc655a96a41f363fff6e5d86bb46b1d81d52aa40eaa494aba10bc94a6ffb12_Device=CPU_Shape=static_Config=(),0.00564171 +conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_IR=ce9b4ef17f734c1e93c026b42e90c7fd23f03bb1b3acaf125a689e9ebc99b910_Device=CPU_Shape=dynamic_Config=(),0.00563516 +conformance_BatchNormInference/ReadIRTest.ImportExport/Op=BatchNormInference.5_Type=f32_IR=674f918d6dc4e58697845798361884f78757bfd77105e77c2ed9a3373bafd26b_Device=CPU_Shape=static_Config=(),0.00561033 +conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_IR=533c387f9897d975697f04ecebefd862bb6d00ae0c0d9e7a44c2e8cd15520358_Device=CPU_Shape=static_Config=(),0.00549911 +conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_IR=e9ecdac1f1bbf6bb39fe2793cc4483eafecb7f19ce9d4cde3aa59b677b70270e_Device=CPU_Shape=static_Config=(),0.00545125 +conformance_Divide/ReadIRTest.ImportExport/Op=Divide.1_Type=f32_IR=3ac96e01f3190a6709a0e4708c2022350d132352cb72c04aaccfe5a2f57a715f_Device=CPU_Shape=static_Config=(),0.00539298 +conformance_Tanh/ReadIRTest.ImportExport/Op=Tanh.1_Type=f32_IR=94fc3ad4faaa80acf61d6ba93e2bb5ebebd64368c70f1e9f000030acac74f428_Device=CPU_Shape=static_Config=(),0.00536907 +conformance_Mish/ReadIRTest.ImportExport/Op=Mish.4_Type=f32_IR=46c99592c04d5338e2fb1133e3523a64116533d5a3bf2861b1c483e5ca1dfd8e_Device=CPU_Shape=static_Config=(),0.00530756 +conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_IR=c3926b158f4f047c751083863a119cf9d9cf7312cbb15622bfe803af9caeab68_Device=CPU_Shape=static_Config=(),0.00521133 +conformance_ROIPooling/ReadIRTest.ImportExport/Op=ROIPooling.2_Type=f32_IR=d963dcc3864672daa6b46f5ca80e74ee69849d1d63cf73a3e68430be6a6873d9_Device=CPU_Shape=static_Config=(),0.00519159 +conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_IR=63cea76f3465d3438fe3fab7c354b67fe089d3587fa65df90ffda23427acb61d_Device=CPU_Shape=static_Config=(),0.00510348 +conformance_Subtract/ReadIRTest.ImportExport/Op=Subtract.1_Type=f32_IR=71051cba4c9a998a5cc0b0b0dcac8c1aaa21cd74ae3922114ec450f9963335d5_Device=CPU_Shape=static_Config=(),0.00509136 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=fe082a13b0104870535aa4114b1e2ad1250f1a7f0eab1a85a0ea1137a3a9560f_Device=CPU_Shape=static_Config=(),0.00508787 +conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_IR=Concat-1_415_Device=CPU_Shape=static_Config=(),0.00508375 +conformance_Divide/ReadIRTest.ImportExport/Op=Divide.1_Type=f32_IR=7cd317501bbb5df948ba37f6938e6f98e79650dbd366568ab48b63c5363de438_Device=CPU_Shape=static_Config=(),0.00507665 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=Convolution-1_691_Device=CPU_Shape=static_Config=(),0.00492155 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=3b455db06e1767c713c41d6a018939f5e54a6e331d12f22a9fc6e6879d5ac733_Device=CPU_Shape=static_Config=(),0.00492155 +conformance_Divide/ReadIRTest.ImportExport/Op=Divide.1_Type=f32_IR=178ef8e30ed115607129b39d0da9f7ba3a3f3b636a720cd69a96ecddce06670f_Device=CPU_Shape=dynamic_Config=(),0.00467613 
+conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_IR=930f47a3f43d08eaa383c40c23d6477de2bacf1017d925516afe545889e5a3be_Device=CPU_Shape=static_Config=(),0.00464813 +conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_IR=5e378ae29726d3ab15b6b1c87d668695d13001fdcac0e4e7b12dcf46382f1aa0_Device=CPU_Shape=static_Config=(),0.00452309 +conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_IR=dbd5259502eb92838d5c9623fa4c498318cb6802bdb56d7dc91d48c144db5ea8_Device=CPU_Shape=dynamic_Config=(),0.00435978 +conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_IR=81a5cba7b8f166a3fe060fa1f663918a91f9c20d222c31e18ca015db4876b07c_Device=CPU_Shape=static_Config=(),0.00433097 +conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_IR=09dc61efcefbafa700aadf5407e1910665d8503804995a4986fcafb5266f5619_Device=CPU_Shape=static_Config=(),0.00418814 +conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=f32_IR=ab6ab6b03327d74e1bd90ec8aef8ecc04ce78003619df2a93e501db9519535ae_Device=CPU_Shape=static_Config=(),0.00411937 +conformance_PRelu/ReadIRTest.ImportExport/Op=PRelu.1_Type=f32_IR=d73496134933cf2b817367232cc3b8d772f8fff2f6c0a3309d669ac946f4a4d2_Device=CPU_Shape=static_Config=(),0.00404628 +conformance_VariadicSplit/ReadIRTest.ImportExport/Op=VariadicSplit.1_Type=f32_IR=458fd4f9fc9c2480c5fd282c704d3fdd67bafeb7e47e44596a1f7bd0d9d6d11f_Device=CPU_Shape=static_Config=(),0.00399299 +conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_IR=18275c839f2d0626c30db50e1dc88ce16387d6585e27fa846e18f06f6a71e95a_Device=CPU_Shape=static_Config=(),0.00397768 +conformance_Slice/ReadIRTest.ImportExport/Op=Slice.8_Type=u8_IR=8e7ad7a4f3edddf289d7e31c4e20b1bd9d5f9908d6b3d1b5ac5560277515d04d_Device=CPU_Shape=dynamic_Config=(),0.00395025 +conformance_Slice/ReadIRTest.ImportExport/Op=Slice.8_Type=u8_IR=0a81684b0e1516061a9da90c8109a3886601b5338e995ac21efce7d56624f552_Device=CPU_Shape=dynamic_Config=(),0.00395025 +conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_IR=a1455b163f00eb1dd765b3a90c98bc2b86671e0a0d0c7bc835aead2db0ba497b_Device=CPU_Shape=static_Config=(),0.00391343 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=f51172a1ec5f3abbd052e4117052d377c5f2be29ab33ba9bec96eae457788e2a_Device=CPU_Shape=static_Config=(),0.00389879 +conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=i64_IR=5369e1bf0be90823d2b1fbe6c33bb53e5e472d9bd468280463ffee061f730242_Device=CPU_Shape=dynamic_Config=(),0.00387919 +conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=i64_IR=5369e1bf0be90823d2b1fbe6c33bb53e5e472d9bd468280463ffee061f730242_Device=CPU_Shape=dynamic_Config=(),0.00387919 +conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_IR=feb78daf38562ff738b472995782cf2029436b9141ae3347267f5036e5dde521_Device=CPU_Shape=static_Config=(),0.00380596 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=b9fb74824bf09cb294553346aea1d0bd832f937710195682483b047548624a02_Device=CPU_Shape=static_Config=(),0.00375229 +conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_IR=MaxPool-8_173_Device=CPU_Shape=static_Config=(),0.00375163 +conformance_Power/ReadIRTest.ImportExport/Op=Power.1_Type=f32_IR=538bfd91d81c5037b702fa4ef55ca80ca95adc806d7d0ef4a156e63b814645f1_Device=CPU_Shape=static_Config=(),0.00373727 
+conformance_ScatterUpdate/ReadIRTest.ImportExport/Op=ScatterUpdate.3_Type=i64_IR=85934ecf09fdd72598fc1990e56ba62425449c729acba542b2fe5c4cf88ae95c_Device=CPU_Shape=static_Config=(),0.00372432 +conformance_Softmax/ReadIRTest.ImportExport/Op=Softmax.8_Type=f32_IR=0665ac1959a4ddeb6837093c940c3a20c999ac5823a5b265da5eab0d7f99f466_Device=CPU_Shape=static_Config=(),0.0036044 +conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=i64_IR=ccd548c5a022f7e8b502e80ed6d1d9e4fb6d890fa965969be13c385bd3a21fdb_Device=CPU_Shape=static_Config=(),0.00358025 +conformance_ReduceSum/ReadIRTest.ImportExport/Op=ReduceSum.1_Type=f32_IR=4e31bc41f44952d1e204d953202075064726498abb59eaa3c5eec875b57d11a8_Device=CPU_Shape=static_Config=(),0.00356738 +conformance_Interpolate/ReadIRTest.Inference/Op=Interpolate.4_Type=f32_IR=861cbf1fb134c16c3e3d261d4bc24623005a9bc5717ca1beb94c59d4d94a7a57_Device=CPU_Shape=dynamic_Config=(),0.00355216 +conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.4_Type=f32_IR=861cbf1fb134c16c3e3d261d4bc24623005a9bc5717ca1beb94c59d4d94a7a57_Device=CPU_Shape=dynamic_Config=(),0.00355216 +conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_IR=68efffe08a572361f909108557384609e81e7af2d16d88ccfab2f0c1e1464e00_Device=CPU_Shape=static_Config=(),0.00352679 +conformance_Relu/ReadIRTest.ImportExport/Op=Relu.1_Type=f32_IR=d99cf84d9427bc9cfb033acaac6483a575ad2ad6223313530830ee62b4fbc71f_Device=CPU_Shape=static_Config=(),0.00349326 +conformance_Tile/ReadIRTest.ImportExport/Op=Tile.1_Type=f32_IR=25341a0e07b6326f7f6f39c2c219e79f5d47107002c8e43081608d4aa0a10d88_Device=CPU_Shape=static_Config=(),0.00347893 +conformance_Broadcast/ReadIRTest.ImportExport/Op=Broadcast.3_Type=f32_IR=43f6a5f5df1d35e809d906ff3e090c1ad553210529150c5abea04ae6b0ad407e_Device=CPU_Shape=static_Config=(),0.00347893 +conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_IR=MaxPool-8_357_Device=CPU_Shape=static_Config=(),0.00342249 +conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_IR=f2ebc2722659946861b815e5297e0fbf9a0ae6371c8ccc19b7e29046d5d8c84f_Device=CPU_Shape=static_Config=(),0.00341457 +conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=f32_IR=db442fe6323dbfc05cea296323949b8501a1d76ea48f923a6c014726c7c335d5_Device=CPU_Shape=static_Config=(),0.00341457 +conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=f32_IR=2eb305b9f755a12e33ae641a197e8c8db0f5c691df1bfd70ad450f49847030a0_Device=CPU_Shape=static_Config=(),0.00341457 +conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_IR=cdd6639b63587f2aa8eab5bb2be5cc035faa5651b9f00abc05bfe5d551785527_Device=CPU_Shape=static_Config=(),0.00336187 +conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_IR=fa67b55cff69e125f4c8428fc96638757022850b5c4400ff9c3ab5c60f4ad2b4_Device=CPU_Shape=static_Config=(),0.00334917 +conformance_Gelu/ReadIRTest.ImportExport/Op=Gelu.7_Type=f32_IR=6f583d3cfb1ca982039b721b059f4ec51a3e6458999021d5c724bbd4a7d6b205_Device=CPU_Shape=static_Config=(),0.00331519 +conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=f32_IR=e600e9ae6b48eb34f84102989a83e27f66a505de2d89029a93106c2f46b65784_Device=CPU_Shape=static_Config=(),0.00315934 +conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_IR=62cc8f60ebb09be607c0a916bb4ae8c0e11e2f760aeae7ed788229b2251e23b4_Device=CPU_Shape=static_Config=(),0.00304211 +conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_IR=AvgPool-1_552_Device=CPU_Shape=static_Config=(),0.00304203 
+conformance_Transpose/ReadIRTest.ImportExport/Op=Transpose.1_Type=f32_IR=9fddcbe0bc871e4a3b396aebb62df0fd1d47add6c71a42de4f0af7981c91d112_Device=CPU_Shape=dynamic_Config=(),0.00304197 +conformance_Squeeze/ReadIRTest.ImportExport/Op=Squeeze.1_Type=i64_IR=da68667f0a4399a6a11d4965f89a43ad427d6a78205c94c80cd80b01f8e3c9fd_Device=CPU_Shape=static_Config=(),0.00301216 +conformance_ReduceSum/ReadIRTest.ImportExport/Op=ReduceSum.1_Type=f32_IR=9fa54c3adb08e981c96e0e55f79617f264b982018cc5d5a93b734cdd9f4b2f3b_Device=CPU_Shape=static_Config=(),0.00295612 +conformance_Abs/ReadIRTest.ImportExport/Op=Abs.1_Type=f32_IR=3c623601469fbdda6999501e41873f23bedad2ddce6c577483133547edf9c67e_Device=CPU_Shape=static_Config=(),0.00295612 +conformance_Abs/ReadIRTest.ImportExport/Op=Abs.1_Type=f32_IR=1e46e99909597b0493b0184f359c42a59a8f5e1f5c0faf0e86a6fb41482fab4c_Device=CPU_Shape=static_Config=(),0.00295612 +conformance_Erf/ReadIRTest.ImportExport/Op=Erf.1_Type=f32_IR=56088c6cb719f2acb0fecc391361473ed932a73d208b8a93d6f28d82bcdb578f_Device=CPU_Shape=dynamic_Config=(),0.00292031 +conformance_Divide/ReadIRTest.ImportExport/Op=Divide.1_Type=f32_IR=db59590787f5c6556484ba1e5f630ec3ab69c9653e1e307c5d47ac823a021ee2_Device=CPU_Shape=dynamic_Config=(),0.00292031 +conformance_Power/ReadIRTest.ImportExport/Op=Power.1_Type=f32_IR=939f70db639ab8103259decc5d4aa610919116bd0b0c6742a2422f5ee0fe801a_Device=CPU_Shape=static_Config=(),0.00289925 +conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_IR=c1b3d6c7677bcbecbf5a9e3563a599f5d36b6113327cf118b48588009b973ede_Device=CPU_Shape=static_Config=(),0.00288947 +conformance_ROIPooling/ReadIRTest.ImportExport/Op=ROIPooling.2_Type=f32_IR=ROIPooling-2_361_Device=CPU_Shape=static_Config=(),0.00288503 +conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_IR=MaxPool-8_218_Device=CPU_Shape=static_Config=(),0.00288503 +conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=i64_IR=7f484b471cd794bc3149e37d69371ac7db90ec0f9c4ad44d337cc20aeebbab36_Device=CPU_Shape=static_Config=(),0.00279896 +conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_IR=b4ff4ca4ddcdbaa749dd4317b457c474cb7f88877dc40464109216441c331187_Device=CPU_Shape=static_Config=(),0.00278428 +conformance_MVN/ReadIRTest.ImportExport/Op=MVN.6_Type=f32_IR=28427b222b58c0a643337e767c42d0a8f8218fc1a1c8f4b4d4831e15a2a304f0_Device=CPU_Shape=static_Config=(),0.0027644 +conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=f32_IR=58cfbdfb631546bf631afe078eb03f065f0bcd4128116ba247237954d05db814_Device=CPU_Shape=static_Config=(),0.00276389 +conformance_Subtract/ReadIRTest.ImportExport/Op=Subtract.1_Type=f32_IR=1509c59ddbc083a013eea8367035705ffa19d64f7d7de0f81ff7a4a33792060d_Device=CPU_Shape=static_Config=(),0.00274947 +conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_IR=fee152f9aa9e58118b3c20338fbd7260a0242669fcdf3e1b5609e5ba59883901_Device=CPU_Shape=dynamic_Config=(),0.00273145 +conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_IR=cde993e07f256cfa6dd9812818758938a8d4988b5f294ef45ee363db1611b51e_Device=CPU_Shape=dynamic_Config=(),0.00273145 +conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_IR=be41be0fb7469fab4d57619133ac8c394851a1d4a2507e15cfcf921a1b6a3476_Device=CPU_Shape=dynamic_Config=(),0.00273145 
+conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_IR=abdcd60fb7cbb8e0b829a58524a530fd79b8267aedc4b991f819892ddd78a837_Device=CPU_Shape=dynamic_Config=(),0.00273145 +conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_IR=86385a3e20e9fedf62fe83adccff86912bf26c0a0b31434ee88e28e173395610_Device=CPU_Shape=dynamic_Config=(),0.00273145 +conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=i64_IR=554183f144c4ebe76374c9f648ad34ee9c05276a19414b1c6566f2e1da3ee643_Device=CPU_Shape=static_Config=(),0.0027285 +conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_IR=4941019c334ec49feaa36ac991e897b45eedbcbdde0f54b81101d27bfc978b6c_Device=CPU_Shape=static_Config=(),0.00270038 +conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=i64_IR=0029548ee6faf2332d990bf282ea8e234afc74a89103071d6b36efa248ff0b29_Device=CPU_Shape=static_Config=(),0.00264197 +conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.11_Type=f32_IR=2822148946a36a04db9a47e2483ca082436672e5413671e8e63740c7945b6789_Device=CPU_Shape=static_Config=(),0.00263962 +conformance_ShapeOf/ReadIRTest.ImportExport/Op=ShapeOf.3_Type=i64_IR=6287113070f51e9657625f6137684c029078e795ff376daced90b412d8b9a6fa_Device=CPU_Shape=dynamic_Config=(),0.0026359 +conformance_Proposal/ReadIRTest.ImportExport/Op=Proposal.4_Type=f32_IR=b3747c5c51a2d8c135479cce6f248fa18f4d0ddf7dbfea0f7fedb4234f821e46_Device=CPU_Shape=static_Config=(),0.00261496 +conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=f32_IR=600d7d5357fa360c5721df3091682a4186c5bb9a4daf62ec08cb4ad53996cbee_Device=CPU_Shape=static_Config=(),0.00255958 +conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=f32_IR=3f5a6ad6cf5a08c7d5c512f503b0887e2d82f1f27207f24b018f6d9036d9c0cd_Device=CPU_Shape=static_Config=(),0.00255106 +conformance_BatchNormInference/ReadIRTest.ImportExport/Op=BatchNormInference.5_Type=f32_IR=0438a6d95025d6988703cb0fa507541364e63315a3808ec28332ae3f4298aed9_Device=CPU_Shape=static_Config=(),0.00242287 +conformance_Softmax/ReadIRTest.ImportExport/Op=Softmax.8_Type=f32_IR=20427a621b7176ec0040abfe0de9e8463aa0630b838a6218036c3e1f8417c86a_Device=CPU_Shape=static_Config=(),0.00241738 +conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=i32_IR=ea04e03272f14274e58391356f85bfca758d0b6b2a3a57bcf43c5fce4faa646d_Device=CPU_Shape=static_Config=(),0.00236311 +conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_IR=54ce3e3e2c10b4a7464ec1cc97b284c6d33da77d2647b722f58712f173e64b75_Device=CPU_Shape=static_Config=(),0.00233153 +conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.4_Type=f32_IR=cef57da724640624033aa9fe59d8112fbb9e0b453f7a76d2476ca4a5b840aa13_Device=CPU_Shape=static_Config=(),0.00230965 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=de3425001e84345da4e6d36f0131bc37961d8e10501615884da15d1841137c0f_Device=CPU_Shape=static_Config=(),0.0023093 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=ead04f1f792dcc7223266f1fd50a315cf3ef9581069a6f7ff058496fc00f67d2_Device=CPU_Shape=static_Config=(),0.00228101 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=b06875f1369c3f5b24934294396dcfa467cc15da4241b76c535063d8d32ff541_Device=CPU_Shape=static_Config=(),0.00227426 
+conformance_VariadicSplit/ReadIRTest.ImportExport/Op=VariadicSplit.1_Type=f32_IR=96607c0244d5ab1e0b8898dac797e275557e6a16f31635ffd6fddf145e4b9bd4_Device=CPU_Shape=static_Config=(),0.00220675 +conformance_Power/ReadIRTest.ImportExport/Op=Power.1_Type=f32_IR=67e6efe4d3f9a640fa481bbbd9ab914f53391c70757a69f82f29ccba42586516_Device=CPU_Shape=static_Config=(),0.00220357 +conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_IR=6371e50a4f696c471b5ae89585bd3f55b01591bdad1f050d3223eca0f8ab8be4_Device=CPU_Shape=static_Config=(),0.00216075 +conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_IR=e09652edfbe32d6b619e1805e0d9eb7ef017137fcc07ee0b9d8ec00387a1386d_Device=CPU_Shape=static_Config=(),0.00216075 +conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_IR=652fd8c7d8c062dc9ea784a8d0e189b1e0cf15d3fe5d03ad31853f83bce829d4_Device=CPU_Shape=static_Config=(),0.00207407 +conformance_ShapeOf/ReadIRTest.ImportExport/Op=ShapeOf.3_Type=i64_IR=0f365811024474cc556b000b46b320e5b6119d0e0d0a3b4ff6ab9f94f863c9a5_Device=CPU_Shape=static_Config=(),0.00206661 +conformance_Softmax/ReadIRTest.ImportExport/Op=Softmax.8_Type=f32_IR=e3c3f39010a384196def7feced98455dc41ad1242974df3dedd428b944f335d1_Device=CPU_Shape=dynamic_Config=(),0.00206538 +conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=f32_IR=aca3fbdcfe4768148cc6ba26d50232f880c1e06bdbcc4156c20c9b1159c72e58_Device=CPU_Shape=dynamic_Config=(),0.00206275 +conformance_ROIPooling/ReadIRTest.ImportExport/Op=ROIPooling.2_Type=f32_IR=ROIPooling-2_359_Device=CPU_Shape=static_Config=(),0.00206206 +conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_IR=fac4323fbc883646eaec76d38ada76e383732b45632cb58c9ab9609f2eea9c9d_Device=CPU_Shape=static_Config=(),0.00206206 +conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_IR=123784c58cdc556baa03367b4693966ab4b260066d5cb3592e9fd96c02daa026_Device=CPU_Shape=static_Config=(),0.00206206 +conformance_Sin/ReadIRTest.ImportExport/Op=Sin.1_Type=f32_IR=efca25341ca2e6f359b9d2fb54aa226439c70e4093c0461c3b06e7159339b109_Device=CPU_Shape=static_Config=(),0.0020382 +conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_IR=d23c45343d51bcafc990238f0326291d893135e55c509dd66388b372353b0f1a_Device=CPU_Shape=static_Config=(),0.0020382 +conformance_CumSum/ReadIRTest.ImportExport/Op=CumSum.3_Type=f32_IR=203272d8b86300e50b9888a77ba70023f4841a46912390e1df1b20c20fa6b17f_Device=CPU_Shape=static_Config=(),0.0020382 +conformance_Cos/ReadIRTest.ImportExport/Op=Cos.1_Type=f32_IR=220eed11dd447f3daa78dae29066e78d8dcd57f55ae05e4e6d8e7e4ace361206_Device=CPU_Shape=static_Config=(),0.0020382 +conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_IR=Concat-1_276_Device=CPU_Shape=static_Config=(),0.0020382 +conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_IR=134e05436470da9789928f69ff10f1b79e579e0a51908590ed827cc0c68c717b_Device=CPU_Shape=static_Config=(),0.00203457 +conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=i64_IR=a8e4944e942b6e72d5aca3dff6c48341017c497953cfe1e076267bfde01e5617_Device=CPU_Shape=static_Config=(),0.00202756 +conformance_Select/ReadIRTest.ImportExport/Op=Select.1_Type=f32_IR=356c2dba567ca294280bd00e21d233c7665f498430d0aee13237d80f687e9f25_Device=CPU_Shape=dynamic_Config=(),0.00201829 +conformance_Power/ReadIRTest.ImportExport/Op=Power.1_Type=f32_IR=34f915c4d9ad464fcaf3d1c7f796907a5fae626812a1f16b817fea9f8799eeea_Device=CPU_Shape=dynamic_Config=(),0.00201829 
+conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_IR=b7fab07303568f60e61fded86f3e62a80ffe344c106fbe53b1a05149ed38e75a_Device=CPU_Shape=dynamic_Config=(),0.00201829 +conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_IR=f2163daa6d032cbf90453d98931ce5161ca11d893faefe94e8c36ca8e8b470d8_Device=CPU_Shape=dynamic_Config=(),0.00199584 +conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_IR=f2163daa6d032cbf90453d98931ce5161ca11d893faefe94e8c36ca8e8b470d8_Device=CPU_Shape=dynamic_Config=(),0.00199584 +conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_IR=66aeab923c3f0a040d2f6ec872f2751ffc9110624bd0cfb41cb1caeaa8d1c45c_Device=CPU_Shape=dynamic_Config=(),0.00196649 +conformance_Softmax/ReadIRTest.ImportExport/Op=Softmax.8_Type=f32_IR=Softmax-8_447_Device=CPU_Shape=static_Config=(),0.00196368 +conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=i64_IR=a7032427adec32a9a4246c6669029be566632be7fe11623777107e2d248769f0_Device=CPU_Shape=dynamic_Config=(),0.00195404 +conformance_Erf/ReadIRTest.ImportExport/Op=Erf.1_Type=f32_IR=045faf67aa648ff30404a7cad86dbe8460848ce7e95376da7ef546d1671081dd_Device=CPU_Shape=static_Config=(),0.00194466 +conformance_Range/ReadIRTest.ImportExport/Op=Range.4_Type=i64_IR=df149b95dfb03c2e4299b6d5eb2b069d3a87781cf75fb2c9e86c9eecc3ec53e6_Device=CPU_Shape=dynamic_Config=(),0.00193951 +conformance_Tile/ReadIRTest.ImportExport/Op=Tile.1_Type=f32_IR=f357e6bfb7a9d33be8f8d5a19a380acae1e10fed80a818335793633a5c6d7e07_Device=CPU_Shape=static_Config=(),0.0019359 +conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_IR=5fea1d99cdd3b53ea9f19ea7eeb47d7f16333ce4057c4ccdccea297e512cea01_Device=CPU_Shape=static_Config=(),0.00193442 +conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_IR=MaxPool-8_661_Device=CPU_Shape=static_Config=(),0.00193224 +conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_IR=MatMul-1_852_Device=CPU_Shape=static_Config=(),0.00187206 +conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_IR=641c0e19add62dea6db1975963ec68dca6a309043eb4651650e6b8920a3babd2_Device=CPU_Shape=static_Config=(),0.00187206 +conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_IR=MaxPool-8_50_Device=CPU_Shape=static_Config=(),0.00185189 +conformance_Einsum/ReadIRTest.ImportExport/Op=Einsum.7_Type=f32_IR=3b642bc0b7f41144ee2f97bfccca523bb892b945055c8f48349ceee1687e4145_Device=CPU_Shape=static_Config=(),0.00183344 +conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_IR=AvgPool-1_635_Device=CPU_Shape=static_Config=(),0.00181953 +conformance_Erf/ReadIRTest.ImportExport/Op=Erf.1_Type=f32_IR=f2865ee0550a6ff85c769deb985feb0c58976d87fc46ddd8fbaba3635c94d2dc_Device=CPU_Shape=static_Config=(),0.00180203 +conformance_Power/ReadIRTest.ImportExport/Op=Power.1_Type=f32_IR=5c9d6dde3c9e9e600ff744ca698cceb3e40d9ddddf4dec674bdece39600cea38_Device=CPU_Shape=dynamic_Config=(),0.00179768 +conformance_ShapeOf/ReadIRTest.ImportExport/Op=ShapeOf.1_Type=i64_IR=d515cb30db019ddd10dfc0eabe8bb48f6564697323f5c6df0a169e56d086dad7_Device=CPU_Shape=static_Config=(),0.00179207 +conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_IR=MaxPool-8_253_Device=CPU_Shape=static_Config=(),0.00178775 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=a53ec485395da84a687a69fa91d8a622e99ec1701d46e500e303ca47d8c6d8a3_Device=CPU_Shape=static_Config=(),0.00178323 
+conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_IR=cb2522d361c805056518707f0ae07309c6bad78389cf9a36170c9afe2e0bed04_Device=CPU_Shape=static_Config=(),0.00177128 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=afdbbf6d31fa18a3db2795bda00b1aa63eabe1239ab56c3fd2794c5343d5da5e_Device=CPU_Shape=static_Config=(),0.0017421 +conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=f32_IR=a75cc90501633eb1dde02ec0cf499c1e17209fb81bac98c98815602538e3e850_Device=CPU_Shape=static_Config=(),0.00173194 +conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=f32_IR=6e77e818e3e26a8d23120ef257d4318a4fa16bb7d4b6c29f20c1b856cf7c78d6_Device=CPU_Shape=static_Config=(),0.00173194 +conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=f32_IR=3078fdc470a094b5feaa5c2499b9248320f7fd2addb4ed0358dd53215e8fba7b_Device=CPU_Shape=static_Config=(),0.00173194 +conformance_Softmax/ReadIRTest.ImportExport/Op=Softmax.8_Type=f32_IR=Softmax-8_616_Device=CPU_Shape=static_Config=(),0.00173194 +conformance_DetectionOutput/ReadIRTest.ImportExport/Op=DetectionOutput.8_Type=f32_IR=b0a0127148b9869a80213c43a1d272656332a96b24dbbb9780e1a6d63d6fa553_Device=CPU_Shape=static_Config=(),0.00173194 +conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_IR=9a84cc33f7c6ae74d44807e271e00cd6761e5286365ac8feae37ac61b5372313_Device=CPU_Shape=static_Config=(),0.00171604 +conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_IR=10e5bfcf914927f70ebfaaea34b4db42054442cf84461442bc8e562d0b232412_Device=CPU_Shape=static_Config=(),0.00169767 +conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_IR=64f87102b398d9ef37ca87e6f68b01662b3a73e64830af196aa241ca3bdb071b_Device=CPU_Shape=static_Config=(),0.00167393 +conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_IR=32a7c4639e898f827c9c946cc80e62d42e9dc2cd70484f96f274c2438aa63445_Device=CPU_Shape=static_Config=(),0.00167313 +conformance_Elu/ReadIRTest.ImportExport/Op=Elu.1_Type=f32_IR=964ee358c09b8802eafae2cbba15bade7b0e2d1b00561505d996edde6eaf2189_Device=CPU_Shape=static_Config=(),0.00165184 +conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_IR=d64fcecd88fb1043823dce2bc3d43bb0dbe7b20cd2c92bf303a7df3577499a07_Device=CPU_Shape=static_Config=(),0.00163771 +conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_IR=739aabae66d0e83243943f0325f4b4a77f277812fc72e516235c457f9a225d0f_Device=CPU_Shape=static_Config=(),0.00163711 +conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_IR=93b9ad574d297981f1756a96ef67b5a1bd61474db4ebdd0a83457e114d332602_Device=CPU_Shape=static_Config=(),0.00152861 +conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_IR=Concat-1_347_Device=CPU_Shape=static_Config=(),0.001525 +conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_IR=4b71ff0f18300d06c8c51fc34b0d02029a1f89622828e1cee4529d1c98e9db1e_Device=CPU_Shape=dynamic_Config=(),0.00152226 +conformance_Divide/ReadIRTest.ImportExport/Op=Divide.1_Type=f32_IR=09284efccf665ceffb2dcf0f852fc0487fb5d67f0db632b588758f53e8c867e9_Device=CPU_Shape=static_Config=(),0.00150378 +conformance_HardSigmoid/ReadIRTest.ImportExport/Op=HardSigmoid.1_Type=f32_IR=bbfbd3996fba98536af25155f2eef85d0a0487b38373a6ccc72b44794929b2c3_Device=CPU_Shape=static_Config=(),0.00148146 
+conformance_Equal/ReadIRTest.ImportExport/Op=Equal.1_Type=boolean_IR=f47e85c7d3e4249ac4a7b5f4797cbf087f988535ab65e38031a1e32c9e31d3b2_Device=CPU_Shape=static_Config=(),0.00147797 +conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_IR=AvgPool-1_786_Device=CPU_Shape=static_Config=(),0.00147797 +conformance_ScatterNDUpdate/ReadIRTest.ImportExport/Op=ScatterNDUpdate.4_Type=f32_IR=5a03de8f6d5cce2605d26c224b95fbf0db576372f23978439ecfbf24bc2a785f_Device=CPU_Shape=static_Config=(),0.00146304 +conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_IR=8c2b69cbcef565a426e65d50dec6a2b054cc89b9db122e136b6eb5309397f853_Device=CPU_Shape=static_Config=(),0.00145598 +conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=f32_IR=c4147b30fdbae4c10e488fdc534e1d3c441c0d2d6b22be772d7312e5eeb02af8_Device=CPU_Shape=static_Config=(),0.00145583 +conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=f32_IR=ae48356df3666dec6a60ee8f49cf62936e4bce68a3854c2f57a7a404a9f9d378_Device=CPU_Shape=static_Config=(),0.00144508 +conformance_HSwish/ReadIRTest.ImportExport/Op=HSwish.4_Type=f32_IR=403fdd72f12115bc7bb4ac9d7802b20ee48a24c5f9864f6aa9f669528c35c2f1_Device=CPU_Shape=static_Config=(),0.00142068 +conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_IR=3b696e9ae674f5a9e011f47a0041f2aa2c435a87014c78a4b895693dfc5bc500_Device=CPU_Shape=static_Config=(),0.00141267 +conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_IR=bf632fb70e238b952f8fb95e75a81cd08742773eca647145ce1c93eccd6d3d7e_Device=CPU_Shape=static_Config=(),0.00138186 +conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=f32_IR=8671d875fff11f60ce7a3897bff8e03f5ed0ed96f4493380530b122073a76384_Device=CPU_Shape=static_Config=(),0.00136261 +conformance_Floor/ReadIRTest.ImportExport/Op=Floor.1_Type=f32_IR=614e0755eb064a22a496f2b4fcf211a6691b6f70c2a870b66eadee2cb084eb7b_Device=CPU_Shape=static_Config=(),0.00136261 +conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_IR=1cc3aea68e6ed44b77c3fbdd16df191781d3af9846b3c2f0b3ebe6f49a378f5f_Device=CPU_Shape=static_Config=(),0.00135254 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=801496d05617ee288fb7b3544f7708845c162a9bea88113b6d31e13ab1ba8c4c_Device=CPU_Shape=static_Config=(),0.00135199 +conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_IR=9d0a442f5a469d186d024ae3f86385a1eef9a0807851c2c9db651e9872aa29f8_Device=CPU_Shape=static_Config=(),0.00134779 +conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_IR=ed07d5352a805e13266b73df0608be7a40eb8fc2b34ea7151248f9a8a0734eae_Device=CPU_Shape=static_Config=(),0.00133205 +conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_IR=121081232c8cc31b4b548e9d87ab6a8a3bdadc29817f21e62ad3fe30b86bddaa_Device=CPU_Shape=static_Config=(),0.00133168 +conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.4_Type=f32_IR=a4a56bdfc2aefd644d2c0731ba1d6ead715f2fbaf7a3a8f2cbdbf7b54c38363e_Device=CPU_Shape=static_Config=(),0.00132733 +conformance_Squeeze/ReadIRTest.ImportExport/Op=Squeeze.1_Type=f32_IR=76621e332c0f1ab69f243793c338c59e8ca06ca4c65efac68d70d19b97ed49ff_Device=CPU_Shape=static_Config=(),0.00131472 +conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_IR=86f11a576f71831388fd8b0c048a8301c64efaa7d425055aff724225ce648fc0_Device=CPU_Shape=static_Config=(),0.00128359 +conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_IR=MaxPool-8_671_Device=CPU_Shape=static_Config=(),0.0012797 
+conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_IR=642f70339e67182227ed09635978146168c2ad9af72c09e529aa53af7d431a84_Device=CPU_Shape=static_Config=(),0.00127853 +conformance_Subtract/ReadIRTest.ImportExport/Op=Subtract.1_Type=i64_IR=1eda6db3e52c5d066c2da1643f862c488e125a64d4188c735d662d12c6ec5725_Device=CPU_Shape=static_Config=(),0.00127395 +conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.11_Type=f32_IR=44dc53add3776593c70c89d823a8f3c4a0af2ff09dda542c2523f3409d749117_Device=CPU_Shape=static_Config=(),0.00126188 +conformance_PRelu/ReadIRTest.ImportExport/Op=PRelu.1_Type=f32_IR=56afbcf969ecf89775ce352d4a892959ad64c97bb99e445eeaf32e4caa7e0918_Device=CPU_Shape=dynamic_Config=(),0.00124781 +conformance_Broadcast/ReadIRTest.ImportExport/Op=Broadcast.3_Type=f32_IR=00bfc760c075c84ac8a9600fc2320bcccde0c56bc784295ddf69a6011c82864f_Device=CPU_Shape=static_Config=(),0.00123639 +conformance_Range/ReadIRTest.ImportExport/Op=Range.4_Type=i32_IR=112647c3c470208a9dd0fc0fed993e322d5532760428d674033f3c26da355992_Device=CPU_Shape=static_Config=(),0.00123391 +conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_IR=43cd567a0b8aeaf8cb7d80f4231a2784af92dbbec7edbbd5e5fdab2e69ca36bb_Device=CPU_Shape=static_Config=(),0.00121789 +conformance_Proposal/ReadIRTest.ImportExport/Op=Proposal.4_Type=f32_IR=Proposal-4_941_Device=CPU_Shape=static_Config=(),0.00121031 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=541ce61ee51c3aec940984d4308b740a812c3948cf64ea2f0976933627cee9d9_Device=CPU_Shape=static_Config=(),0.00120833 +conformance_GroupConvolutionBackpropData/ReadIRTest.ImportExport/Op=GroupConvolutionBackpropData.1_Type=f32_IR=ae148b767c0755950ff8624cd181229a89622fbb48bcf157596b35f92ca8e517_Device=CPU_Shape=static_Config=(),0.00120164 +conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_IR=cb86c4cb1a3c95ed76e841de54dd50432e0a759ad3f2ef20acd6f71f682b781e_Device=CPU_Shape=static_Config=(),0.0011807 +conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_IR=c48021eb54c56dc966a179cca81966eccbd4acf01d1910fa3a9aaa9e479d07f0_Device=CPU_Shape=static_Config=(),0.00117641 +conformance_ShapeOf/ReadIRTest.ImportExport/Op=ShapeOf.3_Type=i32_IR=a7dd6c61f34e039c376899b0c94c4d06c4c14172112ae0e25976876fef781825_Device=CPU_Shape=static_Config=(),0.00115753 +conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_IR=Reshape-1_990_Device=CPU_Shape=static_Config=(),0.00115753 +conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_IR=0309d7155bfc64e1eab204d76dbd6340d3146276ec57db16e59957fbf757eb82_Device=CPU_Shape=static_Config=(),0.00115753 +conformance_ReduceProd/ReadIRTest.ImportExport/Op=ReduceProd.1_Type=i32_IR=b2710dc13ec76c15875b4fcc0231b99d5c59dfa73e7957b726a769b17250f959_Device=CPU_Shape=static_Config=(),0.00115753 +conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=f32_IR=ac860e8f9af8a45c088a6fbaf2aa1e1acfae94f7691543469d6a9cbb85b3e2af_Device=CPU_Shape=static_Config=(),0.00112143 +conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_IR=AvgPool-1_688_Device=CPU_Shape=static_Config=(),0.00111959 +conformance_Broadcast/ReadIRTest.ImportExport/Op=Broadcast.3_Type=f32_IR=bdc91572009f19aa348cb4b2d3e0ab6262061cbc94a4be9c794c9c840ba028b0_Device=CPU_Shape=static_Config=(),0.00109399 +conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_IR=286917561e64e7099017c0977dec3771de03b78b87fcf7370ceb95296f513d35_Device=CPU_Shape=static_Config=(),0.00108758 
+conformance_ShapeOf/ReadIRTest.ImportExport/Op=ShapeOf.3_Type=i64_IR=99f16766bd5311b2da02f2547838b0e19dc7a315866f9fa00ee7abda0a7f2f21_Device=CPU_Shape=static_Config=(),0.00107534 +conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_IR=Concat-1_97_Device=CPU_Shape=static_Config=(),0.0010558 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=668daeca5ceeb15d60fc10bea4f6ccb2e183e52bf6f485efa5f0a5cb8b4ec22d_Device=CPU_Shape=static_Config=(),0.00105317 +conformance_LogicalNot/ReadIRTest.ImportExport/Op=LogicalNot.1_Type=boolean_IR=cf4e48f8253e7128ce7cad1d197a3509ded9dc504bd7a84747a94b8095fc7a65_Device=CPU_Shape=static_Config=(),0.00101901 +conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=boolean_IR=e854a5cf31021d197ff3e77c01d04865864dd64a82473502c32e3d9eb6fb8cc3_Device=CPU_Shape=static_Config=(),0.00101901 +conformance_Convert/ReadIRTest.Inference/Op=Convert.1_Type=boolean_IR=70a5db6d59043db04d67a285c78c664ca1fe7a0715e0174c7454871dadbc5dca_Device=CPU_Shape=static_Config=(),0.00101901 +conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=boolean_IR=70a5db6d59043db04d67a285c78c664ca1fe7a0715e0174c7454871dadbc5dca_Device=CPU_Shape=static_Config=(),0.00101901 +conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_IR=Concat-1_860_Device=CPU_Shape=static_Config=(),0.00101901 +conformance_Relu/ReadIRTest.ImportExport/Op=Relu.1_Type=f32_IR=7105f832c72fd33def244b3670ccee9c7e2031dee5f551a5fb3a28af46e1bfb8_Device=CPU_Shape=static_Config=(),0.00100889 +conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_IR=df0572a295bb708ac5ef18811fe0b04e7c36cbe0b5fcc37e235c2e3f602d6168_Device=CPU_Shape=static_Config=(),0.00098975 +conformance_ReduceMax/ReadIRTest.ImportExport/Op=ReduceMax.1_Type=f32_IR=ff2f03cd964e107514e96ff8f7290272689d0c5439fecdf0cd40d96c642e98d9_Device=CPU_Shape=static_Config=(),0.000958856 +conformance_PriorBoxClustered/ReadIRTest.Inference/Op=PriorBoxClustered.1_Type=f32_IR=2be485f9d7041b425c832a7bac72acecde6b2dad8ca5b4a779ea2cd7e17582e7_Device=CPU_Shape=static_Config=(),0.000917405 +conformance_PriorBoxClustered/ReadIRTest.ImportExport/Op=PriorBoxClustered.1_Type=f32_IR=2be485f9d7041b425c832a7bac72acecde6b2dad8ca5b4a779ea2cd7e17582e7_Device=CPU_Shape=static_Config=(),0.000917405 +conformance_VariadicSplit/ReadIRTest.ImportExport/Op=VariadicSplit.1_Type=f32_IR=99fd0e62c96fb4c7d42d009574da8af983bdc62525cc6d79c4830d95fb0ff2bb_Device=CPU_Shape=static_Config=(),0.000916719 +conformance_ReduceSum/ReadIRTest.ImportExport/Op=ReduceSum.1_Type=f32_IR=962afe68df7ca93e98315b255faab467fd30cd16dddfc9f603a43843466fdd37_Device=CPU_Shape=static_Config=(),0.000916719 +conformance_Einsum/ReadIRTest.ImportExport/Op=Einsum.7_Type=f32_IR=Einsum-7_432_Device=CPU_Shape=static_Config=(),0.000916719 +conformance_Einsum/ReadIRTest.ImportExport/Op=Einsum.7_Type=f32_IR=01d20201de4cc725f09d2b4995114e12213d56c1337f53c42cd4bcf9ffde0467_Device=CPU_Shape=static_Config=(),0.000916719 +conformance_Squeeze/ReadIRTest.ImportExport/Op=Squeeze.1_Type=f32_IR=2e5f0490b6cb1240d0eaba036b4f0d8b4bbda03034f3be3b3c2dba1d10ffaad7_Device=CPU_Shape=static_Config=(),0.000916375 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=246259acfac3eed0ccc776bcbdebff2748e12f2961248fc01c42eda61ec9eb1e_Device=CPU_Shape=static_Config=(),0.000909166 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=ad73c638b382ed50dc3c8bb44cf9c10d3a79b404bb667521aa93e856fcb095f8_Device=CPU_Shape=static_Config=(),0.000836392 
+conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=6a4d890c129ea292c49f6046944f10ae1f16beeefa76be31ef5c09919ac3fc0c_Device=CPU_Shape=static_Config=(),0.000836392 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=f4eca4552a5488fdd594ab673ebf2d1f210b67337f300c48740cb0615fc8b7f0_Device=CPU_Shape=static_Config=(),0.00083459 +conformance_Slice/ReadIRTest.ImportExport/Op=Slice.8_Type=f32_IR=8639142b9cf5b3a191cad6533f04017c8ed33df26bffa1cdc5d53809f2afb0bd_Device=CPU_Shape=static_Config=(),0.000832988 +conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_IR=dcd65481f62b4db1c8c0c0e965d3bedf255a6355e904fc619e94bb9f04b936e1_Device=CPU_Shape=static_Config=(),0.00082884 +conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_IR=da75200540256321df4761c0ed0a302ce92314b2194c47c59b30645b8f014b74_Device=CPU_Shape=static_Config=(),0.00082884 +conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_IR=c437af0c9031dfaec3bc3ce59a4f6a64ddc2595803a713e8ae28431a3e1fcb56_Device=CPU_Shape=static_Config=(),0.00082884 +conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_IR=ab4e17e8b640cae726d4c0dbbe8813188c417cec8031c8671abbc77fdd7be280_Device=CPU_Shape=static_Config=(),0.00082884 +conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_IR=285a48370fe7ef4dadb07d2a34b8ebab3648bd66519707f498238c12d32944cc_Device=CPU_Shape=static_Config=(),0.00082884 +conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_IR=408c0c1b5cfebf617f8d7784d9571bdbf7fdf898c43c8fdf47365148007cabb3_Device=CPU_Shape=static_Config=(),0.000816396 +conformance_Exp/ReadIRTest.ImportExport/Op=Exp.1_Type=f32_IR=cdd1a0d6159d160d51d18f70532cb6000f852703cd3ab7551591eaec32a7ebc5_Device=CPU_Shape=static_Config=(),0.000807557 +conformance_Power/ReadIRTest.ImportExport/Op=Power.1_Type=f32_IR=1f1a50ee1a8e8faeea20a6bd028cbcf47e3eee195157e894a3c1ebe0c5fcb31e_Device=CPU_Shape=static_Config=(),0.000790994 +conformance_Split/ReadIRTest.ImportExport/Op=Split.1_Type=f32_IR=8ab43c6fb2385cd2857489bb1a23e8373982b219d21a8b008b8d918667d361cd_Device=CPU_Shape=static_Config=(),0.000789706 +conformance_ReduceMax/ReadIRTest.ImportExport/Op=ReduceMax.1_Type=f32_IR=93691c8886ef3f112b45bef52825b34eea27fa63d0a3f67660a3d35d27a1f9fe_Device=CPU_Shape=static_Config=(),0.000789706 +conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_IR=4cd45eacad61238eed70a5028193aa42b07f185f4f2854bc4f5bf5d440d528a7_Device=CPU_Shape=static_Config=(),0.000789706 +conformance_Power/ReadIRTest.ImportExport/Op=Power.1_Type=f32_IR=f9240fe29c4597973ebe8f9f9afdbfa8402db46e3392a110bee4eb80577c19b0_Device=CPU_Shape=static_Config=(),0.000787561 +conformance_Exp/ReadIRTest.ImportExport/Op=Exp.1_Type=f32_IR=fdca6a3cfdc4ec92dbb61494041b1a50de26de4422602ecf915ba9244a1ee3c2_Device=CPU_Shape=static_Config=(),0.000775346 +conformance_Slice/ReadIRTest.ImportExport/Op=Slice.8_Type=f32_IR=5778bee9c208564def20231652b21f21ce970821efca07c5960000e8e1b203bc_Device=CPU_Shape=dynamic_Config=(),0.000767222 +conformance_PRelu/ReadIRTest.ImportExport/Op=PRelu.1_Type=f32_IR=8891e077700b85423f6f4d90172fa9e4228490c5bdfb0d69c21dec91b1c36c83_Device=CPU_Shape=static_Config=(),0.000766793 +conformance_Sigmoid/ReadIRTest.ImportExport/Op=Sigmoid.1_Type=f32_IR=2b1bfaf54f3cf8dc0f37ea22e9f72a434251984e94cd0ec9ed8c11e7f2a3c478_Device=CPU_Shape=static_Config=(),0.00075515 
+conformance_FakeQuantize/ReadIRTest.ImportExport/Op=FakeQuantize.1_Type=f32_IR=a713927fb5a7b807cfdeea2e61fef2424083294d7216bfd0734f04c16b9e4061_Device=CPU_Shape=static_Config=(),0.000747913 +conformance_Squeeze/ReadIRTest.ImportExport/Op=Squeeze.1_Type=i32_IR=f69265c471b368ef753c42d4ef6db2fe226ccb02bf7737a1717367fb7599bbe8_Device=CPU_Shape=static_Config=(),0.00074079 +conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=i32_IR=b2925f2bb50555f09e1016521b935243e9823528cdeaf9de34816eefac8eee00_Device=CPU_Shape=static_Config=(),0.00074079 +conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_IR=eee08ab9eb6d2d63076ba113ddb1ff1dc9cb0f15cff063856c6166a58b66cd18_Device=CPU_Shape=static_Config=(),0.00073873 +conformance_If/ReadIRTest.ImportExport/Op=If.8_Type=f32_IR=ca74155424ab492e4023d73c155b7c5d5cdef69750e748b525631e396b88d35c_Device=CPU_Shape=static_Config=(),0.00073873 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=31a8083ea0dbb05e751147fe8b1dc619722f25749cf4ed25d9b2363dfbc94b1c_Device=CPU_Shape=static_Config=(),0.000732437 +conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_IR=010f04872452e7220273dcad65e15739a49244327d6226f8087bfadc4256ae70_Device=CPU_Shape=static_Config=(),0.000729147 +conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_IR=0f498cbacb0a71f82e05e89360b804c32294019db0f606d09ac6f2b173f8fca4_Device=CPU_Shape=static_Config=(),0.000727917 +conformance_ReduceMean/ReadIRTest.ImportExport/Op=ReduceMean.1_Type=f32_IR=6f480e27dfb82f1184f796472baf79edb84968d33bdc9564dcaf5a72b9361f45_Device=CPU_Shape=static_Config=(),0.000723454 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=Convolution-1_60_Device=CPU_Shape=static_Config=(),0.000722768 +conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.11_Type=f32_IR=0501f73ab57313a4fb00d025091c52258f644d85e4b93c73a9b8e805c7e2e477_Device=CPU_Shape=static_Config=(),0.000715216 +conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_IR=MatMul-1_768_Device=CPU_Shape=static_Config=(),0.000712298 +conformance_HSwish/ReadIRTest.ImportExport/Op=HSwish.4_Type=f32_IR=13c8ea70958355a75fa25b4307cc222d544ff8dbaffca01b1fb203278c6d7cd5_Device=CPU_Shape=static_Config=(),0.000702171 +conformance_Broadcast/ReadIRTest.ImportExport/Op=Broadcast.3_Type=f32_IR=ecae8fe0a14fbb0e17dd711a46e05fa4e6901bc3ba360990c54713775074d6b9_Device=CPU_Shape=static_Config=(),0.000699482 +conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_IR=AvgPool-1_403_Device=CPU_Shape=static_Config=(),0.000695592 +conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_IR=2702db0d741d74e9b7a040b9b861f7367f7c69193545cecd8b1acc9ea46bf13b_Device=CPU_Shape=static_Config=(),0.000683663 +conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=f32_IR=9b7a1d1050acbadeb479db70fd51cdb0ccc5ba34aa0d1f7d77e814eca0893a22_Device=CPU_Shape=dynamic_Config=(),0 +conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_IR=830ab428e39124866276e2c0c352123f124a1167a4199c7acb1cc587cd952cf7_Device=CPU_Shape=dynamic_Config=(),0.000675739 +conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_IR=cbffaa2c9b5d9fe202f3f5bfaa43689b38a22adc631604a2c600205907a50655_Device=CPU_Shape=dynamic_Config=(),0 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=830ab428e39124866276e2c0c352123f124a1167a4199c7acb1cc587cd952cf7_Device=CPU_Shape=dynamic_Config=(),0.000675739 
+conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=962075102f4aee372758414155ba95227bbf15a567d8dc02e3b4dbdf5fe913b3_Device=CPU_Shape=static_Config=(),0.000671362 +conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=f32_IR=647d1908985889e0e7bce82731a516b6f920f1773c6f22509d1955de50dbeac5_Device=CPU_Shape=static_Config=(),0.00066238 +conformance_Tanh/ReadIRTest.ImportExport/Op=Tanh.1_Type=f32_IR=061ff5946f7a5f9e40ce7689b2c3e116af129f43ceb1ac786a1406b0abf30ebf_Device=CPU_Shape=static_Config=(),0.000660492 +conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_IR=8214e394631fc0d96a080e8541b828323c69b8d081d449a19acb1eed7f2dddbe_Device=CPU_Shape=static_Config=(),0.000654284 +conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_IR=8328e8a781bd1156b3fc3d483594e6c27578a4b4de7b941f3cd7aafd8ff05bb6_Device=CPU_Shape=static_Config=(),0.00064842 +conformance_Sigmoid/ReadIRTest.ImportExport/Op=Sigmoid.1_Type=f32_IR=d5862889b4e4be84be17bace10560cd83176d11402f0a7ac1fd9b4451b557ba6_Device=CPU_Shape=static_Config=(),0.00064656 +conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=i32_IR=6b8cdc6a29ffdeac883e9f5a04fa196808935ccbff06a4ce4a1c5826c5783584_Device=CPU_Shape=static_Config=(),0.000645588 +conformance_VariadicSplit/ReadIRTest.ImportExport/Op=VariadicSplit.1_Type=f32_IR=5c2ae0b57c4fedf886e428e33677bdd54a65b7a67b215e6b8f59bcae0db0a881_Device=CPU_Shape=static_Config=(),0.000642698 +conformance_Broadcast/ReadIRTest.ImportExport/Op=Broadcast.3_Type=f32_IR=00458bb2b45f7c3058550c78c0dc4c2f19525a8cf2771b62602a37b8f0a1e156_Device=CPU_Shape=static_Config=(),0.000639981 +conformance_Divide/ReadIRTest.ImportExport/Op=Divide.1_Type=f32_IR=1f6080ebf34bf005a256a1841fc8063677103ad8745179ff9b8f10d61ce651b6_Device=CPU_Shape=static_Config=(),0.00063918 +conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=i64_IR=eab33d79c162b6248c9cffdff23a0c48955f9ec1a0ca94408f5e5347d85dce0e_Device=CPU_Shape=static_Config=(),0.00063918 +conformance_ShapeOf/ReadIRTest.ImportExport/Op=ShapeOf.3_Type=i32_IR=50520c3fd16e490767a758125e793a11c4adab8fc75a60152d5c346b8679ecb1_Device=CPU_Shape=static_Config=(),0.000634488 +conformance_Transpose/ReadIRTest.ImportExport/Op=Transpose.1_Type=f32_IR=64788cc4a297a9786df29f718709fafefdb95b2d40eb57c25543a407d2c87b76_Device=CPU_Shape=static_Config=(),0.000628138 +conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=i32_IR=e96df787e0c2200272830e58140ef50288f42f732959d107cdbbf0802b4b6337_Device=CPU_Shape=static_Config=(),0.000626564 +conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_IR=22ae8d3d0f4ba99130074a080593d4bfce691ea1fecc6069063b40aca63cf7b1_Device=CPU_Shape=dynamic_Config=(),0.000624934 +conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=i64_IR=84e7f1f68f735b20bd5ccb15dacda98a8f96410e4fd1d43ead2a1776474894c6_Device=CPU_Shape=static_Config=(),0.000621959 +conformance_Relu/ReadIRTest.ImportExport/Op=Relu.1_Type=f32_IR=c23dbfffae465b4ffaf41bbfb4e306e6f3a1c8809e8ccc477d3c5c48020cacae_Device=CPU_Shape=static_Config=(),0.000619127 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=551b9e0519966cf813111a910a021565dbd4b630822dba6bfacc6cde4c7f59d1_Device=CPU_Shape=static_Config=(),0.000615494 +conformance_Broadcast/ReadIRTest.ImportExport/Op=Broadcast.3_Type=i32_IR=9b63204476c966a4968c13381d07c1f3c456912aa2cfc3e4a87e50fa84b677e5_Device=CPU_Shape=static_Config=(),0.0006088 
+conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_IR=da1d2303796268309dffa2b8a8c01a35b93615057fb8b4da4e1bca1c14366f13_Device=CPU_Shape=static_Config=(),0.000592237 +conformance_ConvolutionBackpropData/ReadIRTest.ImportExport/Op=ConvolutionBackpropData.1_Type=f32_IR=14f7b821b76cfde7d2a57c11e3893796801f3e66937398b7c4bd83765e6b6aed_Device=CPU_Shape=static_Config=(),0.000592237 +conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_IR=a7da11a30afaccbcb8126e1cebe65f8734aad1af688c59a10f6f366c7a2dd897_Device=CPU_Shape=static_Config=(),0.000587059 +conformance_Tile/ReadIRTest.ImportExport/Op=Tile.1_Type=f32_IR=dc0b460b468ef0fffeb9b87c3558d18cd4877ac3b70e86913fb8564a0f31ffe5_Device=CPU_Shape=static_Config=(),0.000578677 +conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_IR=8088c1abf5b3c4d881a12a448637a987d97112bad5019777605008d6ee9ca681_Device=CPU_Shape=static_Config=(),0.000578677 +conformance_Squeeze/ReadIRTest.ImportExport/Op=Squeeze.1_Type=f32_IR=0ea53d2b34565ced48ee8ebe364b1447642cd0ccc7d3374c5ecb5035e55f3598_Device=CPU_Shape=static_Config=(),0.00057719 +conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_IR=AvgPool-1_759_Device=CPU_Shape=static_Config=(),0.000549413 +conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_IR=1d4839f1256644ae80cd916aaa147a57771fb96ec7c69bde5ea24dc96a8942af_Device=CPU_Shape=static_Config=(),0.000544607 +conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_IR=MatMul-1_897_Device=CPU_Shape=static_Config=(),0.000544607 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=bf57b4e9289c0ffde6e49845d76a449bb5b607c68efb33ab6cd16835cd71087b_Device=CPU_Shape=static_Config=(),0.000537399 +conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_IR=MaxPool-8_1046_Device=CPU_Shape=static_Config=(),0.000535911 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=c0902531ada30f4b5374061c2fd290d58039d7dbdfe1b1249f8fdcc50ef99b6b_Device=CPU_Shape=static_Config=(),0.0005265 +conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_IR=70b94cd2a10562313f9d639d00a54f3840a6ad1c0a22f5652212d3575a1d6521_Device=CPU_Shape=static_Config=(),0.000525813 +conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_IR=4cbc7c94cc9f5d5dcb24d9bc6cc649efba704ba21d9520f69c7243100ee750ac_Device=CPU_Shape=static_Config=(),0.000525813 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=2329af04f96c783d2d315c115f2613bc4e2c2a2a3d554a1f9ffb5794bc8e4fe2_Device=CPU_Shape=static_Config=(),0.000522981 +conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.11_Type=f32_IR=1d48f93dcc3cfffbc47832be5c7274da52f64770d341f795db3ff2de82718dc2_Device=CPU_Shape=static_Config=(),0.000515086 +conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_IR=677b7a13707e6bc973cdafd14bbbedc582e4b9697d9b3a35233d505e2cce5e0c_Device=CPU_Shape=static_Config=(),0.000506761 +conformance_Einsum/ReadIRTest.ImportExport/Op=Einsum.7_Type=f32_IR=b25cb8d3723c79b8267499bbba42dc17fe39d542940fe5ead1583c8cc4c47800_Device=CPU_Shape=static_Config=(),0.000495176 +conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_IR=40ca9ed9b04e22a430cbfc79d1e30148450ab2f3a02214f6e2dae3dfc4a9efb7_Device=CPU_Shape=dynamic_Config=(),0.000488081 
+conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=i64_IR=72163ca8c8cac0c54fa744eb28c7bf024b510e1ae5eab238cd176c6b1a80d8e5_Device=CPU_Shape=static_Config=(),0.000483361 +conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_IR=AvgPool-1_504_Device=CPU_Shape=static_Config=(),0.000478956 +conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_IR=7ee4eacfda52ab676947d0168408d9ad03bd161b387e828074f20a329fa10d59_Device=CPU_Shape=static_Config=(),0.000477411 +conformance_GroupNormalization/ReadIRTest.ImportExport/Op=GroupNormalization.12_Type=f32_IR=8a965eb3a8e3a6f552b40228d13e82e580646c6c9d8ea2657e6329a1ac46c59b_Device=CPU_Shape=static_Config=(),0.000473263 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=621317783c379d8d86a795d11b2c0425a5be5f1a624a7a3155df38e1a0b8c910_Device=CPU_Shape=static_Config=(),0.00047046 +conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_IR=AvgPool-1_719_Device=CPU_Shape=static_Config=(),0.00047046 +conformance_ShuffleChannels/ReadIRTest.ImportExport/Op=ShuffleChannels.1_Type=f32_IR=d78c6268923c55edff78ca4364650b2bdd0f15604082b8315dab7cef4e84823f_Device=CPU_Shape=static_Config=(),0.00046943 +conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_IR=Concat-1_749_Device=CPU_Shape=static_Config=(),0.000461592 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=f64cc20fd2689c60b15d687f3c98ce4a284baf86788110e220486143720e7e8e_Device=CPU_Shape=static_Config=(),0.000460505 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=a0939f5059458ce87550050263316a7648ee216ec71af09e67ad2efeecfa703c_Device=CPU_Shape=static_Config=(),0.000460505 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=12a024b3d2381e824ab65a317ccb0663206f09c1437b200b0b66b5d2da27a4fa_Device=CPU_Shape=static_Config=(),0.000460505 +conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_IR=Concat-1_454_Device=CPU_Shape=static_Config=(),0.000460505 +conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=f32_IR=4fcb7c7f3a7fbfec41cb76d79275a66a20457772ef18b59e01def5cc213ce385_Device=CPU_Shape=static_Config=(),0.000458273 +conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_IR=MatMul-1_890_Device=CPU_Shape=static_Config=(),0.000454412 +conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.4_Type=f32_IR=47a56832a86e27182f8ec8b5b48ca6bb403533a8ed06fe472b5f4a9b9078377a_Device=CPU_Shape=static_Config=(),0.000452466 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=9655ec9d7324c92aa5de8d98c5d69fd11ba1579220d7d2874d93ab55eca9381a_Device=CPU_Shape=static_Config=(),0.000444685 +conformance_ReduceMean/ReadIRTest.ImportExport/Op=ReduceMean.1_Type=f32_IR=ed3c88d0209c6e97748fffe50da9971de78bad79cf5242b56d63f7b696534e59_Device=CPU_Shape=static_Config=(),0.000443513 +conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=i64_IR=0a3c2e701575d2475ab18c7debc2f2f0bfae153d00aab98113760acb43399842_Device=CPU_Shape=static_Config=(),0.000436247 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=560d2c1c973533bf122b820b6915dc7939c6886bda0656cf39d31eb4f3d50ab5_Device=CPU_Shape=static_Config=(),0.000435646 +conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_IR=92fe0846ddac9179f9b9d91165c2f031619e6d57742b5daeac23f0e15ec16dab_Device=CPU_Shape=static_Config=(),0.000433272 
+conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=f3c8b0137ff20b748dcb5a258599046e22e81f9d75b50d98b965db993efaaf45_Device=CPU_Shape=static_Config=(),0.000422229 +conformance_ConvolutionBackpropData/ReadIRTest.ImportExport/Op=ConvolutionBackpropData.1_Type=f32_IR=8dc7b330f0d148b3433e309002080bda3ba23f8f9eea73a42e4c5f6c0080341a_Device=CPU_Shape=static_Config=(),0.000421657 +conformance_Einsum/ReadIRTest.ImportExport/Op=Einsum.7_Type=f32_IR=dd86f424d9e476be060057c64d9eeb4b4c99e73a82eb785b6e284939ad8dfa72_Device=CPU_Shape=static_Config=(),0.000421371 +conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_IR=Concat-1_317_Device=CPU_Shape=static_Config=(),0.00042017 +conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_IR=91d53b3a00a0e6988bbd3f9aa8aef53e8d922b2802570b142bd9c157fe49529a_Device=CPU_Shape=static_Config=(),0.000415879 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=56561bd3bbea1fe67a6a6ac11a36708539344dcb79ee605c803a7830f0365685_Device=CPU_Shape=static_Config=(),0.000414935 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=ec95298d085a695e52022ab8345eb5647f088059c20edad38dcfd7bb8b854562_Device=CPU_Shape=static_Config=(),0.000413133 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=6a10a5e0075c066d4526d1dc4d8f8d39b5d73a9c4125dc36bd349afa7d3172a4_Device=CPU_Shape=static_Config=(),0.000412217 +conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_IR=AvgPool-1_762_Device=CPU_Shape=static_Config=(),0.000408327 +conformance_Interpolate/ReadIRTest.Inference/Op=Interpolate.11_Type=f32_IR=c771bce3afe248393d58eabb4a523588e44b53177e53b7002b25ca84d8094b2a_Device=CPU_Shape=dynamic_Config=(),0.000406725 +conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.11_Type=f32_IR=c771bce3afe248393d58eabb4a523588e44b53177e53b7002b25ca84d8094b2a_Device=CPU_Shape=dynamic_Config=(),0.000406725 +conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_IR=e4e1b2348695d533974a4b152c0259188508d71407682171cf8c407496ab3490_Device=CPU_Shape=static_Config=(),0.000405724 +conformance_Subtract/ReadIRTest.ImportExport/Op=Subtract.1_Type=f32_IR=89f0ef44b3130bf3309d8761d095b2cc3ca909532cab7fd7547f0915d5c2e404_Device=CPU_Shape=static_Config=(),0.000403693 +conformance_Log/ReadIRTest.ImportExport/Op=Log.1_Type=f32_IR=ca164c0e2c8f96341bc57628210d5d1cbab6c5adb6adbed7693cdccfbe59c9c1_Device=CPU_Shape=static_Config=(),0.000403693 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=f8c9b4286fd521e95ceac2df65b1669fb6fa508b583bd5bcb1b677fa45e11271_Device=CPU_Shape=static_Config=(),0.000403693 +conformance_RegionYolo/ReadIRTest.ImportExport/Op=RegionYolo.1_Type=f32_IR=cfc66fb0ad297eddea5f5e24d243d4b4fde28919b81adcd1758e54336f3e629e_Device=CPU_Shape=static_Config=(),0.000400002 +conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_IR=8db18ffd723cea90bfedf7d68f08b2c71033b2fb6c53302c77ca41169cddb168_Device=CPU_Shape=static_Config=(),0.000398515 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=56609ec82c3af7c86a1de83a63142bfaf0e18b117953f1504f5e5d1ae1aa7236_Device=CPU_Shape=static_Config=(),0.000395912 +conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_IR=d1739f6c802b95c2864cf3cf181d7f13cab3fa06886508fa04f2a8ed1ed498ab_Device=CPU_Shape=static_Config=(),0.000393795 
+conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=52f27fe4542c1356109ef0becccf728a71703c3eea628b7ab3577cddff680f0f_Device=CPU_Shape=static_Config=(),0.000386014 +conformance_Subtract/ReadIRTest.ImportExport/Op=Subtract.1_Type=f32_IR=3c29b0a89a412615c1d830c9438ead870602337d0594d8d7568f4948bdc9ec88_Device=CPU_Shape=dynamic_Config=(),0.000383525 +conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_IR=MaxPool-8_945_Device=CPU_Shape=dynamic_Config=(),0.000383525 +conformance_Equal/ReadIRTest.ImportExport/Op=Equal.1_Type=boolean_IR=aec39964f15dd1a3114e6b362335950e33e45dc1198759d754610967c50508ec_Device=CPU_Shape=dynamic_Config=(),0.000383525 +conformance_ConvolutionBackpropData/ReadIRTest.ImportExport/Op=ConvolutionBackpropData.1_Type=f32_IR=48caaa749863b7da1913c346efef34f58352c84264aeb35e8f1898a557db3937_Device=CPU_Shape=dynamic_Config=(),0.000383525 +conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_IR=4186c44cc2765159ad9672c627c3ff958bd921c09245c9db6ea6ca1dd228874f_Device=CPU_Shape=dynamic_Config=(),0.000383525 +conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_IR=AvgPool-1_340_Device=CPU_Shape=static_Config=(),0.000379892 +conformance_Tanh/ReadIRTest.ImportExport/Op=Tanh.1_Type=f32_IR=5562799ab3f8d6a09393d5c3d68490369ae7bad1b2a49dbf146bc308d65f90f1_Device=CPU_Shape=static_Config=(),0.000369565 +conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_IR=88dc07aaac63f6117484febae9e5dd33cf08edeb8d76eaf2988bbcea21c13a5e_Device=CPU_Shape=static_Config=(),0.000369251 +conformance_ConvolutionBackpropData/ReadIRTest.ImportExport/Op=ConvolutionBackpropData.1_Type=f32_IR=4715e012355b7124610625b2ea4b21cbf394620e2c4385f9e597c2a333b97613_Device=CPU_Shape=static_Config=(),0.000362814 +conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_IR=a72df254a1280e38288bea31975868d97cefb7f83c316ca5c41c4ca1cb0514bc_Device=CPU_Shape=static_Config=(),0.000359667 +conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_IR=1a1dfb32ab587686a22cb652d5cd5b18659ec7105f1f355844dea8cfeb82fdaa_Device=CPU_Shape=static_Config=(),0.000357121 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=d368661d3c3950c5d56a371fe0eb30511b3d3230dc290a8db492ad6e62e2c7c3_Device=CPU_Shape=static_Config=(),0.000351 +conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.11_Type=f32_IR=ce4703247246707b9d75641a013cdb2daeee225c4ab62fcae0dc69fd496a5c21_Device=CPU_Shape=static_Config=(),0.000349512 +conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_IR=3eaf72e4d66b8bb05d491cfa30f3fbe8e6fcc0a98ff3dfb09b552920e8e5f902_Device=CPU_Shape=static_Config=(),0.000347138 +conformance_ReduceMean/ReadIRTest.ImportExport/Op=ReduceMean.1_Type=f32_IR=8c14eb91c50415fa8537c2e3b537a25748d464a037612705bf3d73f77e4ad155_Device=CPU_Shape=static_Config=(),0.00034688 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=cdcd76daca683207b94b713c1bbfa838f668a51f4ecaf5282f28229c0b7ac58c_Device=CPU_Shape=dynamic_Config=(),0.000340187 +conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_IR=b8f76cdf8664f45fd096d38023d3b4266aaa31c625999ec9542d3935335443e4_Device=CPU_Shape=dynamic_Config=(),0.000340187 +conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_IR=a4624404e0c659c08940baba2b66a40dcd08e33147eb09c508337321c418c968_Device=CPU_Shape=dynamic_Config=(),0.000340187 
+conformance_Subtract/ReadIRTest.ImportExport/Op=Subtract.1_Type=f32_IR=6364c0bf3e5223331912ea8b7924a023779a767c11cbf4f2d6ee314430dc3a6e_Device=CPU_Shape=static_Config=(),0.000337927 +conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_IR=fd373104c5b513c4965fada2486ee2f05263d6cba159c9b07c1e929d207edae2_Device=CPU_Shape=dynamic_Config=(),0.000337784 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=fd373104c5b513c4965fada2486ee2f05263d6cba159c9b07c1e929d207edae2_Device=CPU_Shape=dynamic_Config=(),0.000337784 +conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=f32_IR=a06622f6a28fc09117a3d1ab248564b5d99fa302f9948e81c78c72159351f21b_Device=CPU_Shape=dynamic_Config=(),0.000336239 +conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=f32_IR=4be020bc729de9d030ecf3b5220016b690cb7506b1149cfd0ed73bb6558a8d83_Device=CPU_Shape=static_Config=(),0.00032574 +conformance_ShapeOf/ReadIRTest.ImportExport/Op=ShapeOf.3_Type=i64_IR=972c836e1653f221ec18bd2a2875bb01332cb939b3523e6884ea6c463090d307_Device=CPU_Shape=static_Config=(),0.000324596 +conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_IR=96a70c2f1eb49e348da2b172e948f973510d3bdfd17c747a578f5aa98e48db64_Device=CPU_Shape=static_Config=(),0.000323967 +conformance_Divide/ReadIRTest.ImportExport/Op=Divide.1_Type=f32_IR=98d105d2cfe1892e817fa8918ad3a0187488eb0bfecd9d4e3c99f6eea349adba_Device=CPU_Shape=static_Config=(),0.000323967 +conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=boolean_IR=a0388351376c544acd9d4b28135430ff0be36e74ff5cbddcfa484c5a5a20c903_Device=CPU_Shape=static_Config=(),0.000319504 +conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=boolean_IR=13abc55c839ad896259a0766d0b394ed6921cd5d7b53bd9a4d7d67fc8c221af0_Device=CPU_Shape=static_Config=(),0.000319504 +conformance_Divide/ReadIRTest.ImportExport/Op=Divide.1_Type=f32_IR=2b2e6496b2a4dc6619574cf29d0238090d7444f2a3d42147710d133a4a47183f_Device=CPU_Shape=static_Config=(),0.000319504 +conformance_Convert/ReadIRTest.Inference/Op=Convert.1_Type=boolean_IR=4b165e4f5a31fc0b54e4e915f6abdd02eac89d24637e72e961c506eac340be97_Device=CPU_Shape=static_Config=(),0.000319504 +conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_IR=1083b7c18bf9592652006600446a8f6c94d7c29f6b2cb553f8c606659e79bde5_Device=CPU_Shape=static_Config=(),0.000319504 +conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=boolean_IR=4b165e4f5a31fc0b54e4e915f6abdd02eac89d24637e72e961c506eac340be97_Device=CPU_Shape=static_Config=(),0.000319504 +conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=i64_IR=1dc819c29d5bc4fab0a4d5476af7cc520ff9d45fce2ed8301cfd560ad184cd5e_Device=CPU_Shape=static_Config=(),0.000319504 +conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_IR=1d39365843e3a7e456949cfafc377f2ce1f616bf3ccc12e8f36ee56c9b8e9847_Device=CPU_Shape=static_Config=(),0.000319504 +conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=boolean_IR=4e9e1af1e95fd42b13a376a18989a3dbfa08d06a2aa20f7f1827e5d14cf38f23_Device=CPU_Shape=static_Config=(),0.000319504 +conformance_HSigmoid/ReadIRTest.ImportExport/Op=HSigmoid.5_Type=f32_IR=fb2e627ff021b61561195fe303874f81198ceff114d020bc056d9cc6467df7bd_Device=CPU_Shape=static_Config=(),0.000319075 +conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_IR=b63cc54c1780d2d7bcd1875984f0ce103d56907789712aab662b1d4fb906a3a6_Device=CPU_Shape=static_Config=(),0.000315871 
+conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_IR=MaxPool-8_951_Device=CPU_Shape=static_Config=(),0.000314098 +conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_IR=MaxPool-8_760_Device=CPU_Shape=static_Config=(),0.000314098 +conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=i64_IR=7eebdc50ae1f58bd7113358784e1c937bfb6951f43050153012de385d9ef8ae0_Device=CPU_Shape=static_Config=(),0.000313039 +conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.4_Type=f32_IR=565b7f5e6c5ac64a57fdc6e254f7ebf5d88973818c9d03be29fcfe9fba36abcd_Device=CPU_Shape=static_Config=(),0.000312896 +conformance_Squeeze/ReadIRTest.ImportExport/Op=Squeeze.1_Type=i64_IR=b9a8f78fb3ecc1624fea0392e4d98d87822ca8436f996b263783cbc7049e3e59_Device=CPU_Shape=dynamic_Config=(),0.000304314 +conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=i64_IR=e3f2f931b0a4f62359e785f6677164eef6091e939d84fd7d488fcfa188528fb4_Device=CPU_Shape=dynamic_Config=(),0.000304314 +conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=i64_IR=b8d8474b1297fec242565afa9967e0842f9d47892f725da2bc455cb3727b49ec_Device=CPU_Shape=static_Config=(),0.000304314 +conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_IR=9b7a1d1050acbadeb479db70fd51cdb0ccc5ba34aa0d1f7d77e814eca0893a22_Device=CPU_Shape=dynamic_Config=(),0.000304314 +conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=i64_IR=cdefaa0b4af627cd31d029ee6896781fb0d6a44a034924fe8dcde0d53d3efc9e_Device=CPU_Shape=static_Config=(),0.000302083 +conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=i64_IR=76c9b3d6cdf875ef90e14f24184aa387215cadf3bc82d3fd800f2a6d2675d251_Device=CPU_Shape=static_Config=(),0.000302083 +conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=f32_IR=a367e2320bb7edac76c3428a478a7b3b3b07f4facb7eaeefd3083e9ca229a8a2_Device=CPU_Shape=static_Config=(),0.000302083 +conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_IR=c8418694929dce79ca0283f9dee6398a83117dad3a0acf9211d85c939d8536a5_Device=CPU_Shape=static_Config=(),0.000302083 +conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_IR=85abdf2f5fbdf1ac7af2487a5e93e4856a1656bfa217e00a342fe34d31ad40ca_Device=CPU_Shape=static_Config=(),0.000300595 +conformance_PriorBoxClustered/ReadIRTest.Inference/Op=PriorBoxClustered.1_Type=f32_IR=0937667ef842992f89bd87954bd5a95ca1fd223d9c9c4bbb18e0d9133a148e91_Device=CPU_Shape=static_Config=(),0.000298908 +conformance_PriorBoxClustered/ReadIRTest.ImportExport/Op=PriorBoxClustered.1_Type=f32_IR=0937667ef842992f89bd87954bd5a95ca1fd223d9c9c4bbb18e0d9133a148e91_Device=CPU_Shape=static_Config=(),0.000298908 +conformance_Minimum/ReadIRTest.ImportExport/Op=Minimum.1_Type=f32_IR=75a3f0c6c0208d78f1c5bcb8828b148e4e801880d2ec2021dfeaa02d08176fae_Device=CPU_Shape=static_Config=(),0.000297105 +conformance_Maximum/ReadIRTest.ImportExport/Op=Maximum.1_Type=f32_IR=ab315d896d8ec3193a1db909975a3d8adf1c0bdf8c7900f540323544b897d3ed_Device=CPU_Shape=static_Config=(),0.000297105 +conformance_HSigmoid/ReadIRTest.ImportExport/Op=HSigmoid.5_Type=f32_IR=d43b433b5bf8f5d8c23343277b4c42ef4ac604cd7671cc0ae097b52a896e9439_Device=CPU_Shape=static_Config=(),0.000295732 +conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_IR=f544f4fb4101da0b04e07b04281b77dc25bf1b98a7de9dd34983f13ede85e4a8_Device=CPU_Shape=static_Config=(),0.000294245 +conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_IR=MaxPool-8_662_Device=CPU_Shape=static_Config=(),0.000289096 
+conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_IR=GroupConvolution-1_458_Device=CPU_Shape=static_Config=(),0.00028778 +conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_IR=fc363e7829f71b44f99c2686af079bc3e28173b0f2aaa14237daf5de1fa5944e_Device=CPU_Shape=static_Config=(),0.00028715 +conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_IR=AvgPool-1_501_Device=CPU_Shape=static_Config=(),0.000282945 +conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_IR=1cd3726a34717b86df72b6553b724af8b526677a5adfd3d4ac3ec4dbd83d4e71_Device=CPU_Shape=dynamic_Config=(),0.000277224 +conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_IR=f8d2c8614467d60c83295e3cc81ac84ad751ba066c2251af7a4bedd08fa1fb0d_Device=CPU_Shape=static_Config=(),0.000266382 +conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=f32_IR=7d1e3b5d9cd07ee2aa5968add906e8f3ce187e7ac5ec3cbd8e3e6eb53069b5be_Device=CPU_Shape=static_Config=(),0.000266153 +conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=i64_IR=4e336499274d6d73904a51ae124460aa4ac4b76a80aaed1fbe2c224c3c83fada_Device=CPU_Shape=static_Config=(),0.000261405 +conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_IR=MaxPool-8_700_Device=CPU_Shape=static_Config=(),0.000258315 +conformance_Interpolate/ReadIRTest.Inference/Op=Interpolate.11_Type=f32_IR=Interpolate-11_999_Device=CPU_Shape=dynamic_Config=(),0.000255626 +conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.11_Type=f32_IR=Interpolate-11_999_Device=CPU_Shape=dynamic_Config=(),0.000255626 +conformance_HSigmoid/ReadIRTest.ImportExport/Op=HSigmoid.5_Type=f32_IR=5b60ab2deaed89dd893d7bf679de386221ccf31caa48d222a42ece14da21f4fc_Device=CPU_Shape=static_Config=(),0.000255226 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=22f9497b5b657c45977748d1ef69f29edb86e53c7018bd8fa3eed29908737aaf_Device=CPU_Shape=static_Config=(),0.000255226 +conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_IR=MaxPool-8_530_Device=CPU_Shape=static_Config=(),0.000253824 +conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_IR=MaxPool-8_419_Device=CPU_Shape=static_Config=(),0.000249304 +conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_IR=a46c2aceffd7408b204aef16b3f0d48cf20acb96f1ffd77ac0b99927b2d092b4_Device=CPU_Shape=static_Config=(),0.000249304 +conformance_Broadcast/ReadIRTest.ImportExport/Op=Broadcast.3_Type=i32_IR=faa706deb6c5c900e174d8d6d20ad66730ec5c4a43f545006988166df438609a_Device=CPU_Shape=static_Config=(),0.000247731 +conformance_Pad/ReadIRTest.Inference/Op=Pad.12_Type=f32_IR=702023aaf5c1730c643432167b54c07077107363935c2dd88dd08b73599f840b_Device=CPU_Shape=static_Config=(),0.000247502 +conformance_Pad/ReadIRTest.ImportExport/Op=Pad.12_Type=f32_IR=702023aaf5c1730c643432167b54c07077107363935c2dd88dd08b73599f840b_Device=CPU_Shape=static_Config=(),0.000247502 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=d3f30426638d16816674cb04b2a4ee1b0863092ab8ca8a6abb0c6b91ddb52bb2_Device=CPU_Shape=static_Config=(),0.00024733 +conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=i32_IR=7dc07057973536b87885655e2b2eab3e79f4988c59f344acaf8101601d8245d4_Device=CPU_Shape=static_Config=(),0.000245614 
+conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=i64_IR=50dce9c881bbb8d134f10c4da03a5d23cceee7b9e43484a0c0fac415a2033b77_Device=CPU_Shape=static_Config=(),0.000242696 +conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_IR=MaxPool-8_249_Device=CPU_Shape=static_Config=(),0.00023174 +conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_IR=MaxPool-8_391_Device=CPU_Shape=static_Config=(),0.000231397 +conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_IR=AvgPool-1_836_Device=CPU_Shape=static_Config=(),0.000229566 +conformance_PriorBoxClustered/ReadIRTest.Inference/Op=PriorBoxClustered.1_Type=f32_IR=846d4ef316a40a73209fa88972fc433460f3e17a4233d2e4466dce9599cf08bb_Device=CPU_Shape=static_Config=(),0.000229137 +conformance_PriorBoxClustered/ReadIRTest.ImportExport/Op=PriorBoxClustered.1_Type=f32_IR=846d4ef316a40a73209fa88972fc433460f3e17a4233d2e4466dce9599cf08bb_Device=CPU_Shape=static_Config=(),0.000229137 +conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=i32_IR=b9842b102a4983c1e2d355a1616f7176dfb4444c5ac933958fc820413287cf88_Device=CPU_Shape=static_Config=(),0.000228965 +conformance_Split/ReadIRTest.ImportExport/Op=Split.1_Type=f32_IR=e2bca0617c1689a669d557fe2fb1f73ec18800c8de37eed99a19bcc3876d5f29_Device=CPU_Shape=static_Config=(),0.000228965 +conformance_Split/ReadIRTest.ImportExport/Op=Split.1_Type=f32_IR=86f8d8715306c949c6ddd8fc809c57c038918b1899bb7a10ffa6b08e69a46a41_Device=CPU_Shape=static_Config=(),0.000228965 +conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=i32_IR=8406b5a3375089a0acb2738f25d275925223880c3ed6928ef4e0a90292d52f59_Device=CPU_Shape=static_Config=(),0.000228965 +conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=i32_IR=70f236c00b8808d74b6678e1a0c422422b95032e5d303db351672ad447525120_Device=CPU_Shape=static_Config=(),0.000228965 +conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_IR=59179c6f5b845d52cae7521a3c78ed818e2886e65d97b0c4afe106fab2702c0c_Device=CPU_Shape=static_Config=(),0.000228965 +conformance_PRelu/ReadIRTest.ImportExport/Op=PRelu.1_Type=f32_IR=419fd77682b743e20ab28d52825c449c60ac0824f22d3d2ee5d0f4ad7c642631_Device=CPU_Shape=dynamic_Config=(),0.000226991 +conformance_Divide/ReadIRTest.ImportExport/Op=Divide.1_Type=f32_IR=36100d3e45f626a87a878b0d17d9ef5a99199cbc9a7d7fe66f705d3059d52f0b_Device=CPU_Shape=dynamic_Config=(),0.000226991 +conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=i32_IR=72530c1d00803ed543897ef701fc23226bf46517e4f70dee7c545401b19bcc9c_Device=CPU_Shape=static_Config=(),0.000221871 +conformance_Pad/ReadIRTest.ImportExport/Op=Pad.12_Type=i32_IR=480d223ae41adc7e7f8f8b2484967b2a348a8fd2b7a18906b97cc833575bf07a_Device=CPU_Shape=static_Config=(),0.000212946 +conformance_Pad/ReadIRTest.ImportExport/Op=Pad.12_Type=f32_IR=09ad8bc515988b284b5014a173777ba0add1775beb3ff379736aa3eaf3046241_Device=CPU_Shape=static_Config=(),0.000212946 +conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_IR=AvgPool-1_705_Device=CPU_Shape=static_Config=(),0.000211201 +conformance_Squeeze/ReadIRTest.ImportExport/Op=Squeeze.1_Type=i64_IR=adf95a634bea6bed7cf0849d5478b0bc8c2f7349095e0060802fc68cb61f879e_Device=CPU_Shape=static_Config=(),0.000208769 +conformance_Loop/ReadIRTest.ImportExport/Op=Loop.5_Type=i32_IR=3a543267b75bd430bda785c118815d05666a5f9442eeba1a5fb17dce89aa8df3_Device=CPU_Shape=static_Config=(),0.000207682 
+conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_IR=AvgPool-1_904_Device=CPU_Shape=static_Config=(),0.000207339 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=9b632a610884f12865e8ee66cda807eeb8980c068a993e2c29334fe71f8dd077_Device=CPU_Shape=static_Config=(),0.000206738 +conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_IR=6e304064dcb3070f767e95e3597b3b9ae8995ea58cfaf938cf05651b642822fb_Device=CPU_Shape=static_Config=(),0.000206194 +conformance_Softmax/ReadIRTest.ImportExport/Op=Softmax.1_Type=f32_IR=4cbdaa51c99064416c87014a410a5666ca1a1c450843f2bb5bfe345fbfe92055_Device=CPU_Shape=dynamic_Config=(),0.000202819 +conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_IR=35b63bcb1e04e51af2de99763ee5f1f860f0814af8c772c0df79fab9b98ea5ff_Device=CPU_Shape=static_Config=(),0.000198042 +conformance_Softmax/ReadIRTest.ImportExport/Op=Softmax.8_Type=f32_IR=b0cff01ec5f73ad9b40b2007aa686a9975af0969ecebf0b06a9f305c12e9ddf9_Device=CPU_Shape=static_Config=(),0.000197298 +conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_IR=f28cc3ca7c4553ca9ceeeee53a2223fd12fb266857fa8815bb208185e6f08124_Device=CPU_Shape=static_Config=(),0.000197298 +conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_IR=84b63ab1d5305ac6aee357e766507a9ddb1c47ebc7614c221adfa29456f7c040_Device=CPU_Shape=static_Config=(),0.000196125 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=4b684b1009e0b14d8fdd57ad69edfa15df355da015011d5dd0d2e9b8f8f3650e_Device=CPU_Shape=static_Config=(),0.00019581 +conformance_If/ReadIRTest.ImportExport/Op=If.8_Type=f32_IR=If-8_925_Device=CPU_Shape=static_Config=(),0.000194123 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=1f791d2e5218c42fbea8d9f4cf588e00570f1fab8a95d594576f6c921ca26831_Device=CPU_Shape=static_Config=(),0.000192921 +conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_IR=MaxPool-8_90_Device=CPU_Shape=static_Config=(),0.00019232 +conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=i32_IR=723ff8a2e43baf5c59a29c1633d49898edaad3f33c4979ed4dba2cfa35438066_Device=CPU_Shape=static_Config=(),0.000191119 +conformance_Subtract/ReadIRTest.ImportExport/Op=Subtract.1_Type=f32_IR=d20222ed247c6872adab05df0d282b527b905f7b705e4881019517dba18565e4_Device=CPU_Shape=dynamic_Config=(),0.000188659 +conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_IR=d57d585418a6958afc58679a3cee0a0fc8eb4377f6696b9cb293767cee8c0dcf_Device=CPU_Shape=static_Config=(),0.000187343 +conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=f32_IR=5a014f92c4553e6b7501f33b6794ff5adc19d16bee82e544e31a2947ca1ee274_Device=CPU_Shape=static_Config=(),0.00018351 +conformance_PriorBoxClustered/ReadIRTest.Inference/Op=PriorBoxClustered.1_Type=f32_IR=PriorBoxClustered-1_922_Device=CPU_Shape=static_Config=(),0.000183481 +conformance_PriorBoxClustered/ReadIRTest.Inference/Op=PriorBoxClustered.1_Type=f32_IR=PriorBoxClustered-1_767_Device=CPU_Shape=static_Config=(),0.000183481 +conformance_PriorBoxClustered/ReadIRTest.Inference/Op=PriorBoxClustered.1_Type=f32_IR=PriorBoxClustered-1_755_Device=CPU_Shape=static_Config=(),0.000183481 +conformance_PriorBoxClustered/ReadIRTest.Inference/Op=PriorBoxClustered.1_Type=f32_IR=PriorBoxClustered-1_704_Device=CPU_Shape=static_Config=(),0.000183481 
+conformance_PriorBoxClustered/ReadIRTest.ImportExport/Op=PriorBoxClustered.1_Type=f32_IR=PriorBoxClustered-1_922_Device=CPU_Shape=static_Config=(),0.000183481 +conformance_PriorBoxClustered/ReadIRTest.ImportExport/Op=PriorBoxClustered.1_Type=f32_IR=PriorBoxClustered-1_767_Device=CPU_Shape=static_Config=(),0.000183481 +conformance_PriorBoxClustered/ReadIRTest.ImportExport/Op=PriorBoxClustered.1_Type=f32_IR=PriorBoxClustered-1_755_Device=CPU_Shape=static_Config=(),0.000183481 +conformance_PriorBoxClustered/ReadIRTest.ImportExport/Op=PriorBoxClustered.1_Type=f32_IR=PriorBoxClustered-1_704_Device=CPU_Shape=static_Config=(),0.000183481 +conformance_DetectionOutput/ReadIRTest.ImportExport/Op=DetectionOutput.8_Type=f32_IR=4a26e507a9635e0c082219f6f071fbb49fb8974106c6716e850c0701b1883064_Device=CPU_Shape=static_Config=(),0.000183481 +conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=f32_IR=530687639a78bb0ba4e0d745aaf192d2eafcf4eac7f4d0802c581ca391cff83e_Device=CPU_Shape=dynamic_Config=(),0.000183338 +conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_IR=AvgPool-1_542_Device=CPU_Shape=static_Config=(),0.000183023 +conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=f32_IR=0218c0d5eef6211729b6aa8a20f0b2b74078df276a4a041c9696b3c45744379e_Device=CPU_Shape=dynamic_Config=(),0.000181936 +conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_IR=e94a9110eb4b47e0c518911d052b17e79bbd1f54dec565a951a6c262df626e83_Device=CPU_Shape=dynamic_Config=(),0.000181936 +conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_IR=6d699e8b6d4ab6ca840918713cbe280ef6b986e12876e3500966f96d4ac74c62_Device=CPU_Shape=dynamic_Config=(),0.000181936 +conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_IR=e94a9110eb4b47e0c518911d052b17e79bbd1f54dec565a951a6c262df626e83_Device=CPU_Shape=dynamic_Config=(),0.000181936 +conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_IR=6d699e8b6d4ab6ca840918713cbe280ef6b986e12876e3500966f96d4ac74c62_Device=CPU_Shape=dynamic_Config=(),0.000181936 +conformance_LogSoftmax/ReadIRTest.ImportExport/Op=LogSoftmax.5_Type=f32_IR=e52360303d9834c1bfb89eaf5352d1e57bb4121d511dc2250dab94fdcebd5c0b_Device=CPU_Shape=dynamic_Config=(),0.000181936 +conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_IR=f8edd0961f6aec0341b2e0e6c3a9767aeaa5c5f1095df765323980c48b8396d9_Device=CPU_Shape=dynamic_Config=(),0.000181936 +conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_IR=0579c28b042af9f42fab14ceecf839a561e21d1010322d840fab653af686e052_Device=CPU_Shape=dynamic_Config=(),0.000181936 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=cd365f99fda3004be7f2721dce07b7fc3782981258fc5139876c46d00a3c186b_Device=CPU_Shape=static_Config=(),0.000176358 +conformance_VariadicSplit/ReadIRTest.ImportExport/Op=VariadicSplit.1_Type=f32_IR=a2af3aeb61b317bb3f050aefb7e8859206fd127310754e4d78f860e2c8347264_Device=CPU_Shape=static_Config=(),0.000175157 +conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=f32_IR=2c8fb040af57cf305364104a07b34dfe5fcfbd74fdf3b507a110922abf92c708_Device=CPU_Shape=static_Config=(),0.000174213 +conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_IR=3d8b637c6494c98dbe8ff059d55e288a7b196712c23d54923d796b6e53ddb228_Device=CPU_Shape=static_Config=(),0.000173583 +conformance_Clamp/ReadIRTest.ImportExport/Op=Clamp.1_Type=f32_IR=Clamp-1_385_Device=CPU_Shape=static_Config=(),0.000172553 
+conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_IR=1bf303d4cec837d3fd1dadb4d7d668a3363916c9b5524a4dc42f644afc7a5fdb_Device=CPU_Shape=static_Config=(),0.00017118 +conformance_Subtract/ReadIRTest.ImportExport/Op=Subtract.1_Type=f32_IR=a7a0dfc3dfff3af78129e5fb23f4e0783a679d532864a7173d47ac05c9f6d03a_Device=CPU_Shape=static_Config=(),0.000170208 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=b2a4034be357dc1e864604e0a1a1339f0ea8ac1f97c63c7cf6e2ed6ca74e6e45_Device=CPU_Shape=static_Config=(),0.00016975 +conformance_Slice/ReadIRTest.ImportExport/Op=Slice.8_Type=f32_IR=5265b728f97a97c1aea4c96b31e962ed24f57b78c24a6683076282b76fc22878_Device=CPU_Shape=dynamic_Config=(),0.000168806 +conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_IR=91c6158284310c5355406d04d46c09179f8c237ce6060fe3809a61a9e05dd865_Device=CPU_Shape=static_Config=(),0.000168806 +conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_IR=38760fbb498f53bc58cee8ab38e787cac30d68b14a6d2bb3ce7d84f0b826f9b9_Device=CPU_Shape=dynamic_Config=(),0.000168806 +conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_IR=4e5e03c7aeeaa25146a7808601d5872e6491cc9fe02e66182b3ea24187ed392d_Device=CPU_Shape=dynamic_Config=(),0.000168806 +conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_IR=60738b9dff196a757e34d3ca26b51a6980c57b4fe9fc30a9666e5a7f5c33fe37_Device=CPU_Shape=dynamic_Config=(),0.000168806 +conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_IR=a419e9bbeeebaaeca469be69343018f44bd6a494843d0781effc622d8c3b9fc4_Device=CPU_Shape=dynamic_Config=(),0.000168806 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=a419e9bbeeebaaeca469be69343018f44bd6a494843d0781effc622d8c3b9fc4_Device=CPU_Shape=dynamic_Config=(),0.000168806 +conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_IR=7d8befa2fd0a8c0e118a59e732eba3a0179054518efee4dfa64ddec67e6fc14e_Device=CPU_Shape=dynamic_Config=(),0.000168806 +conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_IR=52e9696e18db2b6894c4cf87d5eae0c50fca131cf140e840c06589e9c0f1e089_Device=CPU_Shape=dynamic_Config=(),0.000168034 +conformance_LogSoftmax/ReadIRTest.ImportExport/Op=LogSoftmax.5_Type=f32_IR=fc33843c4222ba24142cf6c9274be0f35829ac2cbf111cda9ee48e5d7a3804fe_Device=CPU_Shape=static_Config=(),0.000167404 +conformance_Tile/ReadIRTest.ImportExport/Op=Tile.1_Type=i32_IR=24c49353906243a7ce15993a9ee31dc174f71ee17dec38bc8fedd019d68f3401_Device=CPU_Shape=static_Config=(),0.000163685 +conformance_Squeeze/ReadIRTest.ImportExport/Op=Squeeze.1_Type=i32_IR=dcc54304d45d5394649dbbd7a57e689e4cc29d5c22bae3786a61236dbae75f35_Device=CPU_Shape=static_Config=(),0.000163685 +conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=i32_IR=e3089e9cc3fde201e178f67a3e0c498c81b7441e52fe110dc05057e19d7f4e2b_Device=CPU_Shape=static_Config=(),0.000163685 +conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_IR=9929ac7c06e32cacd2c1dc65614bcbc8304cd9dca25532d364782effd838c6b7_Device=CPU_Shape=static_Config=(),0.000163685 +conformance_Ceiling/ReadIRTest.ImportExport/Op=Ceiling.1_Type=f32_IR=1c16a437f93aaad29c0b0f3d1c18d5ac51713bff37faad1a6c1f183f7b6bb67c_Device=CPU_Shape=static_Config=(),0.000163685 +conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_IR=MaxPool-8_551_Device=CPU_Shape=static_Config=(),0.000161654 
+conformance_Subtract/ReadIRTest.ImportExport/Op=Subtract.1_Type=f32_IR=b61812605a2c528cf203dd9de7efba6cc0b8b7358d994356200a70b110dd6d20_Device=CPU_Shape=static_Config=(),0.000158594 +conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_IR=f434ceb3c2bf2f53bfb1afc5aef5329a0b96212116132482ac7249e791d4ccbe_Device=CPU_Shape=static_Config=(),0.000156391 +conformance_Divide/ReadIRTest.ImportExport/Op=Divide.1_Type=f32_IR=dcd366c15188794221b8c93f9eac5f5c0ebb1f5aaf99e6286ea845ab2f902dd1_Device=CPU_Shape=static_Config=(),0.000155847 +conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_IR=d7348f44f0bc1a8e9903f4a0edc136de768e75c8b34cba7a7cc4c4013346c665_Device=CPU_Shape=static_Config=(),0.000155132 +conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=i64_IR=a380dab4018caaf6c9c97dd05bd043131024a8a124aed741063dd61449fbd2c7_Device=CPU_Shape=dynamic_Config=(),0.00015456 +conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_IR=6391d4c4fed9adcc1a08e3134564859d582c2f0dfeda47ea75147c2dc0f686ad_Device=CPU_Shape=static_Config=(),0.000153788 +conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_IR=6391d4c4fed9adcc1a08e3134564859d582c2f0dfeda47ea75147c2dc0f686ad_Device=CPU_Shape=static_Config=(),0.000153788 +conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_IR=f875aa55e00c0b36cfb97f074f5efc77dfacced6f86eb726269c3d888bc4db71_Device=CPU_Shape=dynamic_Config=(),0.000151757 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=4dfd41288074d59a6af000a128d3b0979682820859f96aea9d72ff27a2016b2d_Device=CPU_Shape=static_Config=(),0.000151385 +conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=i64_IR=a48a048bc7a80c4310891ba16455c2ed9e2f859d2baf21802f21439427c5b291_Device=CPU_Shape=dynamic_Config=(),0.000149325 +conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=i64_IR=45baf2f85ad76928d21607499598ddbe6596736c1d710e22d6e138316688d714_Device=CPU_Shape=dynamic_Config=(),0.000149325 +conformance_Subtract/ReadIRTest.ImportExport/Op=Subtract.1_Type=f32_IR=723818bbbd4cc22ae843abc9976f01e556124b329b11e0ca81b1bb834954b24b_Device=CPU_Shape=dynamic_Config=(),0.000149325 +conformance_Slice/ReadIRTest.ImportExport/Op=Slice.8_Type=i64_IR=629b1683acd381c63608645fb7ad6279c592518373dff9ffa823b65b6e64d117_Device=CPU_Shape=dynamic_Config=(),0.000149325 +conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_IR=854e2b2ef312ceca5b678cabafead3970536851ef1fe49a4ad5818dcc972b389_Device=CPU_Shape=dynamic_Config=(),0.000149325 +conformance_Equal/ReadIRTest.ImportExport/Op=Equal.1_Type=boolean_IR=6b56dbe75ec5972be2948f1ce6e5e9339eab2de0efb4a60e9e7d3554d0d1ba64_Device=CPU_Shape=static_Config=(),0.000149068 +conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_IR=1853d26c521ed847c0a00226712ffddda8c68fd8e1a35efb0b8d7de49ff30e92_Device=CPU_Shape=dynamic_Config=(),0.000148982 +conformance_GroupConvolutionBackpropData/ReadIRTest.ImportExport/Op=GroupConvolutionBackpropData.1_Type=f32_IR=f5fa4d6112e9a88edcd2041cbed0f0f4140fc4edc648a74642fedb73bc237704_Device=CPU_Shape=static_Config=(),0.000148982 +conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_IR=a812fd814397a432fa536787935f47d104f658789d87ed4797980fcea21c6d8f_Device=CPU_Shape=static_Config=(),0.000148581 +conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_IR=6ec39fbc1356f7ef2adb9e45d1a28077357b58681a1a5c35e9f023ae1b4fc53e_Device=CPU_Shape=static_Config=(),0.000148467 
+conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_IR=MaxPool-8_721_Device=CPU_Shape=static_Config=(),0.000145806 +conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=f32_IR=03f2151a574ebdc260a55dc84d85bcd1f7661509bf3c3ac1e10fe9297bd72263_Device=CPU_Shape=static_Config=(),0.00014429 +conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=i64_IR=c9237bae31e47b657c974d49a1328c0d522acc006105885aa48cb6f08a32c637_Device=CPU_Shape=dynamic_Config=(),0.000134907 +conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_IR=1cc8a3b004f29e32af14295defbbcf6a9b195e99169a42d30d7a8da51a9c7392_Device=CPU_Shape=static_Config=(),0.000134307 +conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_IR=17f4ea041c836582962a5add42394aef53ca535cdcbb7552a2402ffde3246ea8_Device=CPU_Shape=static_Config=(),0.000133935 +conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_IR=c66b39d44133f9e1804ca1613331e5d7537b2db68b61710d9cc9c73fc6df8848_Device=CPU_Shape=static_Config=(),0.000133878 +conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_IR=16307d02716975df59936f493a135cc523d02ed0e87b5bef39f73a79532485df_Device=CPU_Shape=static_Config=(),0.000133878 +conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_IR=AvgPool-1_713_Device=CPU_Shape=static_Config=(),0.000132676 +conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_IR=6e46e15b4cedec678f2ce4c25059d462f317798c7588c6fa8577895d3f777678_Device=CPU_Shape=static_Config=(),0.000132562 +conformance_Softmax/ReadIRTest.ImportExport/Op=Softmax.1_Type=f32_IR=a5c035319dc8e359642f7c8f233b2e0cea3e6c701c7e8544a519da87924ee79c_Device=CPU_Shape=static_Config=(),0.000131818 +conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_IR=AvgPool-1_422_Device=CPU_Shape=static_Config=(),0.000131475 +conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_IR=c3994924d501b198695e8b7e5e98fcbe3252f69585a511f6b3f7d844737afd93_Device=CPU_Shape=static_Config=(),0.000131446 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=ea288ac5da2ab0f30b4212aaee4eacf151a1eb8ad7ed18f14115be8617dcc8eb_Device=CPU_Shape=static_Config=(),0.00013096 +conformance_ShapeOf/ReadIRTest.ImportExport/Op=ShapeOf.3_Type=i32_IR=a8f131ea0e1b5d7c704654c67c9752e6d1e11f26489d52e74682c14f8a5dacf6_Device=CPU_Shape=static_Config=(),0.000128271 +conformance_Tile/ReadIRTest.ImportExport/Op=Tile.1_Type=f32_IR=9e43c8b338fa223b65d9ddf09ce047a0455ac3985575a953be180eebbd1be5bd_Device=CPU_Shape=static_Config=(),0.000128214 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=a42d1c259763b5a0fe698d9ce07a79709d558599e5f9814db2e2b89cddff9fc7_Device=CPU_Shape=static_Config=(),0.000127613 +conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_IR=659ef1bb1eda2fefc70d10b4fd68ed0eac042c2cdf1d53dfc8903d199f9cbba4_Device=CPU_Shape=static_Config=(),0.000127527 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=71dbb0ee89142ae893b27bc3947cc478eca94a8db535eed2ce8ec28ba0e0dd6f_Device=CPU_Shape=static_Config=(),0.000127527 +conformance_Squeeze/ReadIRTest.ImportExport/Op=Squeeze.1_Type=f32_IR=3ab7d2071d9a85af091125d48056b5a2cc3e0019a122348ab175e29260ceab1e_Device=CPU_Shape=static_Config=(),0.000125725 +conformance_Proposal/ReadIRTest.ImportExport/Op=Proposal.4_Type=f32_IR=Proposal-4_790_Device=CPU_Shape=static_Config=(),0.000124952 
+conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=i32_IR=bcf8916aca87d3ced6cb58209d686ce9d6caad3f48f9360feaab69c271832d9d_Device=CPU_Shape=static_Config=(),0.000124209 +conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_IR=f975799db23fefa661731167f4a04e5ea0881bb456b36972d393ce1ddbf2b39b_Device=CPU_Shape=static_Config=(),0.000123494 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=d0dcd3bdd6186a36197ea711d8829ab3e789cf43a9c578107d2eb0160f3d1c69_Device=CPU_Shape=static_Config=(),0.000122893 +conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_IR=260c754588aca529758574c050991c8376ab56bb43e22656c60b9bfb004ea0d1_Device=CPU_Shape=static_Config=(),0.000120747 +conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_IR=059a5a92f33f1648d703e9a606988d1bec8ca2c6d91df6b7c61f6c49fa9e1d7f_Device=CPU_Shape=static_Config=(),0.000118945 +conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=f32_IR=c321ca28fbb97736d33efc10213ef0893ec5283865b2eb586c23a70d4d4d16ee_Device=CPU_Shape=static_Config=(),0.00011843 +conformance_Broadcast/ReadIRTest.ImportExport/Op=Broadcast.3_Type=f32_IR=87db2b780622e3ba76ced852882b11efc00c9977d166df2107a211f3b37a3cb4_Device=CPU_Shape=static_Config=(),0.00011843 +conformance_VariadicSplit/ReadIRTest.ImportExport/Op=VariadicSplit.1_Type=f32_IR=342120101ac5ad6b9a44924d31e8b26f1b0cfc49f408112bccd00b6d0950dcca_Device=CPU_Shape=static_Config=(),0.000116714 +conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_IR=0325139df7d4b1aadbfc7a0b42f4d3fc5224dd5b854722097a9f938f29508ee8_Device=CPU_Shape=static_Config=(),0.000116714 +conformance_Divide/ReadIRTest.ImportExport/Op=Divide.1_Type=f32_IR=ee75ed9210bb34af068356ef3a3eb68cfe0f9d2d1da5af790df3a3d38a472995_Device=CPU_Shape=static_Config=(),0.000116714 +conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_IR=f54d93bd13c8962a740b0dfe0aa28de0bf37a36b97f3cac2b7f5d8ed3797aca9_Device=CPU_Shape=static_Config=(),0.000116113 +conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_IR=7fe0a5e83b2f96629312041fbfb18ba8e17408acd3ca49b25aee8d86185b1b3b_Device=CPU_Shape=static_Config=(),0.000116113 +conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_IR=c65e0e4d58a65b295bfb604ce1f5171cefefdf8d3802680bc30634e4ba19a176_Device=CPU_Shape=static_Config=(),0.000116113 +conformance_VariadicSplit/ReadIRTest.ImportExport/Op=VariadicSplit.1_Type=f32_IR=ae4e792701d41de755ea11a9ee59ae3954d1b93e8e14ae8420e332fa8d0b63c8_Device=CPU_Shape=static_Config=(),0.000115598 +conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=f32_IR=bc627f7c0be6f9805fd857239385693f3fcc7a786ac135993779928ac843427b_Device=CPU_Shape=static_Config=(),0.000115598 +conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_IR=MaxPool-8_440_Device=CPU_Shape=static_Config=(),0.000115598 +conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=f32_IR=9a12f233957e037d873d3de875f90ed4851e5342009ce4fe7587fb8150f4faf4_Device=CPU_Shape=static_Config=(),0.000115598 +conformance_DetectionOutput/ReadIRTest.ImportExport/Op=DetectionOutput.8_Type=f32_IR=5f5db9ad61f83dfa79423ecdf256f0f60daa670063852121223fee424510a3ea_Device=CPU_Shape=static_Config=(),0.000115598 +conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_IR=d919fe33bbc33b73734376de5445291671cc70255744d814da068bad28bb292f_Device=CPU_Shape=static_Config=(),0.000115598 
+conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_IR=0e62e566bf4e94d5206fa34aa8fecfc43f311e69927a6c500ac6c336d09ca30a_Device=CPU_Shape=static_Config=(),0.000115598 +conformance_Broadcast/ReadIRTest.ImportExport/Op=Broadcast.3_Type=f32_IR=dfac84e1b66bec5d3baee0478209d26bf9a20fbce48fb0fee640ddf2f3b4756f_Device=CPU_Shape=static_Config=(),0.000115598 +conformance_Squeeze/ReadIRTest.ImportExport/Op=Squeeze.1_Type=f32_IR=8c3cfcb112ea6fd9896444cd4ace1c150f68a265b2aaa0ff58d7f3ba5215778c_Device=CPU_Shape=static_Config=(),0.000114397 +conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.4_Type=f32_IR=8a42d5d623d6b054889107a4f31bfeff76619f14b022ad3b79b9b71c951ef7b9_Device=CPU_Shape=static_Config=(),0.000114397 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=439457c2df590b3feec9bf8b733fce374ab7ba65c9c5023d84f8abc95f10c230_Device=CPU_Shape=static_Config=(),0.00011371 +conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=f32_IR=00a5dde2172a6e345c2a08fe9a6983596f97b0578f7b19d85841998ba279aacb_Device=CPU_Shape=dynamic_Config=(),0.000113338 +conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_IR=MaxPool-8_950_Device=CPU_Shape=dynamic_Config=(),0.000113281 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=c301804445f273eef62f41f02204711d9d6e571da28c76ab447d7d90983b0032_Device=CPU_Shape=dynamic_Config=(),0.000113281 +conformance_Sigmoid/ReadIRTest.ImportExport/Op=Sigmoid.1_Type=f32_IR=03d3f700f96f4ac1ec43711231fba5be4f384db1858d079f922519b2083e9105_Device=CPU_Shape=static_Config=(),0.000112509 +conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_IR=8906dce902238384668e8a6b7f3c8f185a6f45ba98a60aeb075382530d1dd1e9_Device=CPU_Shape=static_Config=(),0.000111393 +conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_IR=8906dce902238384668e8a6b7f3c8f185a6f45ba98a60aeb075382530d1dd1e9_Device=CPU_Shape=static_Config=(),0.000111393 +conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=i64_IR=bfc418259ffe1e22328a851dc11db92d7f774cee1f9c49e80f972eabdfef9ab5_Device=CPU_Shape=dynamic_Config=(),0.000110964 +conformance_VariadicSplit/ReadIRTest.ImportExport/Op=VariadicSplit.1_Type=f32_IR=3714d1cd3adf627869eb8a22a15911bd5bc8f5605382fcf356197fea1623e210_Device=CPU_Shape=static_Config=(),0.000109677 +conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_IR=00b03e54257a6cb2ebb8ff6e0ef635e2f771aecaa3cb00256493c567962df045_Device=CPU_Shape=static_Config=(),0.000109333 +conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_IR=ef80dd919af54694b22e60202f903ae7da4f2288301c8ea6014c9588fa13659c_Device=CPU_Shape=static_Config=(),0.000108876 +conformance_Clamp/ReadIRTest.ImportExport/Op=Clamp.1_Type=f32_IR=8039b636a4ef3f6f893521dd950c2fe6ac23436028471f222ace55d3eeb4e60b_Device=CPU_Shape=static_Config=(),0.000106416 +conformance_Subtract/ReadIRTest.ImportExport/Op=Subtract.1_Type=f32_IR=cc57d84669b66a060b8fb631cae164141b6dad76e8d47ed337abfb495db9a742_Device=CPU_Shape=static_Config=(),0.000105329 +conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_IR=787b699a4f6595168dfdc841948cc4a42cea2b1b63ebf30447b51277796174d2_Device=CPU_Shape=static_Config=(),0.000102983 +conformance_ReduceMean/ReadIRTest.ImportExport/Op=ReduceMean.1_Type=f32_IR=b3a051e64ab99173e611b36b1d7008aab97a19a7c265d9c771b430754a67d15a_Device=CPU_Shape=static_Config=(),0.000101581 
+conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_IR=AvgPool-1_377_Device=CPU_Shape=static_Config=(),0.000101352 +conformance_TopK/ReadIRTest.ImportExport/Op=TopK.3_Type=f32_IR=6a9a38ca1bddbbc101beff8f1115c6d4927ad60c35b8355b4f5c23cbb29018f7_Device=CPU_Shape=dynamic_Config=(),0.000101324 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=4e96a8ffc8b3590a005ce6100805a339d48219df783da561b7a84e7db63d440d_Device=CPU_Shape=static_Config=(),0.000101266 +conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_IR=601b2056aed7972deabb49e784edc58d76c059878d7beda5567e39b99180f34b_Device=CPU_Shape=static_Config=(),9.63747e-05 +conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=i32_IR=37b38ae41a2f207792960577812af0b976116507ef69a4f0656773010af6fb50_Device=CPU_Shape=static_Config=(),9.62889e-05 +conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_IR=AvgPool-1_840_Device=CPU_Shape=static_Config=(),9.56024e-05 +conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_IR=AvgPool-1_673_Device=CPU_Shape=static_Config=(),9.56024e-05 +conformance_DetectionOutput/ReadIRTest.ImportExport/Op=DetectionOutput.8_Type=f32_IR=e82c6a9d0ae85d730d89c5e97915de13b2cf54a03a0c41cf2db6f10dee14a367_Device=CPU_Shape=static_Config=(),9.53735e-05 +conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_IR=a2ba0bffd25fb4db194b8ce32877bb44e1632c867fe229911a137c4e8aea62f2_Device=CPU_Shape=static_Config=(),9.32566e-05 +conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_IR=e80718ea398a62bb4159a648dd745a1d822506fab2e032617cb4ed8b7bb421ea_Device=CPU_Shape=static_Config=(),9.03102e-05 +conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=i64_IR=ab86f34fc9f2dc496de061a30cab5a769dc44893dde53e33cbd3cd80df86ff26_Device=CPU_Shape=dynamic_Config=(),8.84508e-05 +conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_IR=654ee1fc8f9adaee5eaf1891b7a7c0d1a9554b5e0f1b78390e000f7786cb20c3_Device=CPU_Shape=static_Config=(),8.83077e-05 +conformance_Negative/ReadIRTest.ImportExport/Op=Negative.1_Type=f32_IR=5c4b572c47a554a0b388af78c1fc25eb90a2c3c12858b693f115eacf6aed6791_Device=CPU_Shape=static_Config=(),8.75354e-05 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=a5ba11df050b8a2c69c4d2c2667a1a1b680b0395135347bf72d130b75e60afd0_Device=CPU_Shape=static_Config=(),8.74496e-05 +conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_IR=AvgPool-1_980_Device=CPU_Shape=static_Config=(),8.74496e-05 +conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_IR=944508bde4733269b11515d6b736058f91c99a4e191fcca8affc542b33fc19ed_Device=CPU_Shape=static_Config=(),8.72493e-05 +conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=i64_IR=9f357137acd56688ac778bd8ebb4ee40f15ddda280e478abdaf20e192ea8c5e9_Device=CPU_Shape=dynamic_Config=(),8.70205e-05 +conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_IR=Concat-1_869_Device=CPU_Shape=static_Config=(),8.70205e-05 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=894070e5e6a934f09dbf647d6ac0f8655768b50bb197dce03326c2d8eb7694a9_Device=CPU_Shape=static_Config=(),8.65055e-05 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=591fe2116e088316c4e07134cc21cb463924fb70a528aa1948e7abcfe3f50c41_Device=CPU_Shape=static_Config=(),8.50466e-05 
+conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_IR=58e7d89904f07a8f20d8564841d8510fd989a9da499c8d34eea4f37a64f13039_Device=CPU_Shape=static_Config=(),8.50466e-05 +conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_IR=Concat-1_491_Device=CPU_Shape=static_Config=(),8.48178e-05 +conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_IR=04671633de06488b0296e3cf1b73a2a5597230d67b1800e229331c36e4136870_Device=CPU_Shape=static_Config=(),8.35877e-05 +conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_IR=AvgPool-1_969_Device=CPU_Shape=static_Config=(),8.3273e-05 +conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_IR=AvgPool-1_979_Device=CPU_Shape=static_Config=(),8.19571e-05 +conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=f32_IR=dbc0e23a8b7d2bb56192026632d00c8e931f98c0f4c16fbbf15cfa0e90967af9_Device=CPU_Shape=static_Config=(),8.17569e-05 +conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_IR=e4dea2e1c3c8049c0f1133898c5920aac0526930e3102a28dced544027c1b246_Device=CPU_Shape=static_Config=(),8.17569e-05 +conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=i32_IR=7d2e1566473cf91c6cc6470703a6090b564ad115592fabe76e878df4501ddcf6_Device=CPU_Shape=static_Config=(),8.17569e-05 +conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=i32_IR=28b38fdc7d514d1cb11b546feb64682d2306b90c888e955e7744088eb4678126_Device=CPU_Shape=static_Config=(),8.17569e-05 +conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_IR=6a32b9be72d06514d385a446533a16820298848fe641cabbdfcdad7b49a2e744_Device=CPU_Shape=static_Config=(),8.17569e-05 +conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_IR=AvgPool-1_766_Device=CPU_Shape=static_Config=(),8.16425e-05 +conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.11_Type=f32_IR=49b68de3dce617801a3a6ff5802fe56e7553fb883cc8c2e6b46a541b99926cf9_Device=CPU_Shape=static_Config=(),7.89249e-05 +conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=f32_IR=66ec8b40ad4b365f3404bfbed95363b751c285fd9f559de2b20060d134c096c9_Device=CPU_Shape=static_Config=(),7.61215e-05 +conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_IR=5d589f436a268a8f1ce52fec9efc407b4367c419b071277e64d651d37ddeab60_Device=CPU_Shape=static_Config=(),7.55207e-05 +conformance_TopK/ReadIRTest.ImportExport/Op=TopK.3_Type=f32_IR=207bc8d50442d7eb86e0a0d5c9643e06e766c47091d7029bacf706e8b0a0de23_Device=CPU_Shape=static_Config=(),7.40618e-05 +conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_IR=MaxPool-8_612_Device=CPU_Shape=static_Config=(),7.29748e-05 +conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_IR=MaxPool-8_611_Device=CPU_Shape=static_Config=(),7.29748e-05 +conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_IR=MaxPool-8_608_Device=CPU_Shape=static_Config=(),7.29748e-05 +conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_IR=Concat-1_412_Device=CPU_Shape=static_Config=(),7.27745e-05 +conformance_DetectionOutput/ReadIRTest.ImportExport/Op=DetectionOutput.8_Type=f32_IR=f48c18bfc0980bb6f45ac932c4f572e212dbd843d845f23b2b429eb259a45686_Device=CPU_Shape=static_Config=(),7.22024e-05 +conformance_Clamp/ReadIRTest.ImportExport/Op=Clamp.1_Type=f32_IR=Clamp-1_514_Device=CPU_Shape=static_Config=(),7.18019e-05 +conformance_Split/ReadIRTest.ImportExport/Op=Split.1_Type=f32_IR=f0e68c6319c36993236e12b3eb3f6a4a771239f07d2927128113ef68c96656d2_Device=CPU_Shape=dynamic_Config=(),7.16589e-05 
+conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_IR=MaxPool-8_264_Device=CPU_Shape=dynamic_Config=(),7.09723e-05 +conformance_Loop/ReadIRTest.ImportExport/Op=Loop.5_Type=i32_IR=Loop-5_769_Device=CPU_Shape=static_Config=(),7.0629e-05 +conformance_Loop/ReadIRTest.ImportExport/Op=Loop.5_Type=i32_IR=Loop-5_732_Device=CPU_Shape=static_Config=(),7.0629e-05 +conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_IR=31671f324507d264c28daccd39fd56de740e3c7ecdeed31fcbfc51573bf7cc62_Device=CPU_Shape=static_Config=(),7.05432e-05 +conformance_ConvolutionBackpropData/ReadIRTest.ImportExport/Op=ConvolutionBackpropData.1_Type=f32_IR=24b4ad9c39243a51c1deb064e5744fe9cfe264f9339b478bb9fbf27ea619c6cf_Device=CPU_Shape=static_Config=(),6.91701e-05 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=cd1ed2eccd473c7c736e236bf36b52d47c6d83cca0321a8e5acfeb63e2074d44_Device=CPU_Shape=static_Config=(),6.91701e-05 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=bb719a1f1649dd0b306250af0048fa40975de77303e4d632cbaccc80d287087e_Device=CPU_Shape=static_Config=(),6.91701e-05 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=acda58b57ee88605661372a4d2d9c70908e84e5fea407e2331fe120bb5867608_Device=CPU_Shape=static_Config=(),6.91701e-05 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=9c5dd7d2ba5667872200158addb672ef73bcaa37640db89ac73ece995999a0ae_Device=CPU_Shape=static_Config=(),6.91701e-05 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=5123f7978940ee25fe5e1a34ba4a6ec5dd70395dbf13a9c5ad7b2d86c90b5df5_Device=CPU_Shape=static_Config=(),6.91701e-05 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=1f4964078feaf71a560dfeb1b71c9f7c8a78fdbd0b7d77409d6f5035a7995f3e_Device=CPU_Shape=static_Config=(),6.91701e-05 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=1e100e688548e6f34fda8af2be3833b53052d0af2bf338dac946f0cdfc3b0705_Device=CPU_Shape=static_Config=(),6.91701e-05 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=02f542434c1f0c3ba50a8deb6bd9ab661951aef25e8f971a78a0f9a86d455ea8_Device=CPU_Shape=static_Config=(),6.91701e-05 +conformance_Transpose/ReadIRTest.ImportExport/Op=Transpose.1_Type=i64_IR=320cf046816dafcd6c7c4b657df754672556751b820f6c6cc9299b08fc84d397_Device=CPU_Shape=dynamic_Config=(),6.73679e-05 +conformance_ShapeOf/ReadIRTest.ImportExport/Op=ShapeOf.3_Type=i64_IR=609788552b7a59bf3cbea272486cfc8985da67b3724f24fdfde5c0ecd971102c_Device=CPU_Shape=dynamic_Config=(),6.73679e-05 +conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=i64_IR=4ed8440de89ec0ad0a35222fcc165ad65109321364ed16c71d356d379e37fcd5_Device=CPU_Shape=static_Config=(),6.73679e-05 +conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=i64_IR=164e1c5560fc4c7f7cd8f540e8ed9ba8a01cec1542b66172f1345170c21657ee_Device=CPU_Shape=dynamic_Config=(),6.73679e-05 +conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_IR=4662a5279f0a48bfab43c34ec2132f53f2e421105af00e7eb355497d22e1ef4b_Device=CPU_Shape=static_Config=(),6.73679e-05 +conformance_NonZero/ReadIRTest.ImportExport/Op=NonZero.3_Type=i64_IR=9d7b1a713d5949d31c15df3f480c4663ee2d80a042c068935c2c72e7cbfd8ee4_Device=CPU_Shape=dynamic_Config=(),6.73679e-05 +conformance_Greater/ReadIRTest.ImportExport/Op=Greater.1_Type=boolean_IR=6db037b38ec859e1f063f91f4446b794f9ea7ffa287e7683ba40a98c88e54532_Device=CPU_Shape=static_Config=(),6.73679e-05 
+conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=f5c45f7cdda216154d1a3b9ec22e1c9ef64d670ee2b8913b89b45ff50ca293e6_Device=CPU_Shape=static_Config=(),6.73679e-05 +conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_IR=3a46cd88dab5883ce7d993e1f276c3b3610992f15e9dac2915959b713f4766cd_Device=CPU_Shape=static_Config=(),6.73679e-05 +conformance_Broadcast/ReadIRTest.ImportExport/Op=Broadcast.3_Type=f32_IR=9b1899b82a479f535932c28b113fc5af2c45df80e3fb60e611db034d49feb0be_Device=CPU_Shape=static_Config=(),6.73679e-05 +conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_IR=674226af6635d4be35c8b2351f0a76d96819e8efc44104731f28f7959d6e1594_Device=CPU_Shape=static_Config=(),6.72821e-05 +conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_IR=674226af6635d4be35c8b2351f0a76d96819e8efc44104731f28f7959d6e1594_Device=CPU_Shape=static_Config=(),6.72821e-05 +conformance_TopK/ReadIRTest.ImportExport/Op=TopK.3_Type=f32_IR=TopK-3_780_Device=CPU_Shape=static_Config=(),6.58232e-05 +conformance_FakeQuantize/ReadIRTest.ImportExport/Op=FakeQuantize.1_Type=f32_IR=5952466230ef3c0116bdb538c06a6d1d93c777c8d73d49d98701b28997d28450_Device=CPU_Shape=static_Config=(),6.48792e-05 +conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=f32_IR=322c35d43311e6d9eceab74864fd5dbec2aca5144b62c16321d9c0b77b0d0314_Device=CPU_Shape=static_Config=(),6.4021e-05 +conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_IR=a8fa871ffec2171a15fd2d4b0f2295c4c7e08378caed66d7a399fa2b17153721_Device=CPU_Shape=static_Config=(),6.36777e-05 +conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_IR=96fa427f01452126f02777dd5c55062bcffaff9779d8b936bc74a2320770be87_Device=CPU_Shape=static_Config=(),6.36777e-05 +conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_IR=AvgPool-1_891_Device=CPU_Shape=static_Config=(),6.36777e-05 +conformance_ShapeOf/ReadIRTest.ImportExport/Op=ShapeOf.1_Type=i64_IR=b8c78be79292df1ec63982d1b7a69b0adaccbc7e069cea00c36a2e2cbe711f41_Device=CPU_Shape=dynamic_Config=(),6.0531e-05 +conformance_Interpolate/ReadIRTest.Inference/Op=Interpolate.11_Type=f32_IR=d39c830b6c252804458126e23fe9503705107c21a1a49fda4c6b648276652bb9_Device=CPU_Shape=static_Config=(),5.91579e-05 +conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.11_Type=f32_IR=d39c830b6c252804458126e23fe9503705107c21a1a49fda4c6b648276652bb9_Device=CPU_Shape=static_Config=(),5.91579e-05 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=d39c131d1dc9b0753deac96e70ede2a09310536078b5ec043a19994366efb571_Device=CPU_Shape=static_Config=(),5.90435e-05 +conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_IR=823927a1cd84922da020280e5bcd82a8c5241193f17875629fada01f67b52dde_Device=CPU_Shape=static_Config=(),5.85286e-05 +conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_IR=b2450cffac6de937e816a32481656eec5b8c968c40d5b840df3df622670ac37c_Device=CPU_Shape=static_Config=(),5.80136e-05 +conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.11_Type=f32_IR=6b45e0a0e572fdd4d93f1c93dac18fd3758e0664c93cdaa3ab568e7a37ada195_Device=CPU_Shape=static_Config=(),5.66405e-05 +conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_IR=Concat-1_666_Device=CPU_Shape=static_Config=(),5.62973e-05 
+conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=i64_IR=d031592368b1488fac2025b6ec230ebaca971e3c78734a36817e52bec2c8ac1b_Device=CPU_Shape=static_Config=(),5.61256e-05 +conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=i64_IR=557a55917c413eda7b1e28d76cca75a4e42e8052a45d1d58d5e2487cdcb75632_Device=CPU_Shape=static_Config=(),5.61256e-05 +conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_IR=b8056dd237267a291859585289b2b0f5b16c4d9750a34f6f72045e8100f56c96_Device=CPU_Shape=static_Config=(),5.50958e-05 +conformance_RegionYolo/ReadIRTest.ImportExport/Op=RegionYolo.1_Type=f32_IR=RegionYolo-1_541_Device=CPU_Shape=static_Config=(),5.14056e-05 +conformance_RegionYolo/ReadIRTest.ImportExport/Op=RegionYolo.1_Type=f32_IR=RegionYolo-1_223_Device=CPU_Shape=static_Config=(),5.14056e-05 +conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.11_Type=f32_IR=bd3e18081ce008e8f6e942ff531536d0dbb4f44c8b8d1767fe301c0596cbb434_Device=CPU_Shape=static_Config=(),5.09765e-05 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=d82de8640009f3025b2722cbfaa270f06bc34f0fc54c85a4fd7474be252cc44d_Device=CPU_Shape=static_Config=(),5.09765e-05 +conformance_Subtract/ReadIRTest.ImportExport/Op=Subtract.1_Type=f32_IR=1e77537e44691d894e800acbbb410bbb11b55462e67b43ade22aa6975e5715d7_Device=CPU_Shape=static_Config=(),5.07476e-05 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=88e7722362388c1ed19219da89ea252bdeb00f05531d2f024134270595c580a5_Device=CPU_Shape=static_Config=(),4.89168e-05 +conformance_Power/ReadIRTest.ImportExport/Op=Power.1_Type=f32_IR=e1249b057841a6a8ecdd605a4d73887dc09f31271815bb90c7c1e91b41c4dfe1_Device=CPU_Shape=static_Config=(),4.81445e-05 +conformance_MaxPool/ReadIRTest.Inference/Op=MaxPool.8_Type=f32_IR=MaxPool-8_441_Device=CPU_Shape=dynamic_Config=(),4.7887e-05 +conformance_MaxPool/ReadIRTest.Inference/Op=MaxPool.8_Type=f32_IR=22ae8d3d0f4ba99130074a080593d4bfce691ea1fecc6069063b40aca63cf7b1_Device=CPU_Shape=dynamic_Config=(),0 +conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_IR=MaxPool-8_441_Device=CPU_Shape=dynamic_Config=(),4.7887e-05 +conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_IR=26df036b689f6aaf1436fc55f432e39ed413b933c19cde378539947360acab0a_Device=CPU_Shape=dynamic_Config=(),4.77154e-05 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=26df036b689f6aaf1436fc55f432e39ed413b933c19cde378539947360acab0a_Device=CPU_Shape=dynamic_Config=(),4.77154e-05 +conformance_Minimum/ReadIRTest.ImportExport/Op=Minimum.1_Type=f32_IR=3d5957ca87af757d6050ea3e8f81cbdcabb0ad84688a7fe65d5ebc99cf68f66a_Device=CPU_Shape=static_Config=(),4.74293e-05 +conformance_PriorBoxClustered/ReadIRTest.Inference/Op=PriorBoxClustered.1_Type=f32_IR=PriorBoxClustered-1_954_Device=CPU_Shape=static_Config=(),4.69144e-05 +conformance_PriorBoxClustered/ReadIRTest.Inference/Op=PriorBoxClustered.1_Type=f32_IR=PriorBoxClustered-1_944_Device=CPU_Shape=static_Config=(),4.69144e-05 +conformance_PriorBoxClustered/ReadIRTest.Inference/Op=PriorBoxClustered.1_Type=f32_IR=PriorBoxClustered-1_676_Device=CPU_Shape=static_Config=(),4.69144e-05 +conformance_PriorBoxClustered/ReadIRTest.Inference/Op=PriorBoxClustered.1_Type=f32_IR=PriorBoxClustered-1_433_Device=CPU_Shape=static_Config=(),4.69144e-05 +conformance_PriorBoxClustered/ReadIRTest.Inference/Op=PriorBoxClustered.1_Type=f32_IR=PriorBoxClustered-1_342_Device=CPU_Shape=static_Config=(),4.69144e-05 
+conformance_PriorBoxClustered/ReadIRTest.ImportExport/Op=PriorBoxClustered.1_Type=f32_IR=PriorBoxClustered-1_954_Device=CPU_Shape=static_Config=(),4.69144e-05 +conformance_PriorBoxClustered/ReadIRTest.ImportExport/Op=PriorBoxClustered.1_Type=f32_IR=PriorBoxClustered-1_944_Device=CPU_Shape=static_Config=(),4.69144e-05 +conformance_PriorBoxClustered/ReadIRTest.ImportExport/Op=PriorBoxClustered.1_Type=f32_IR=PriorBoxClustered-1_676_Device=CPU_Shape=static_Config=(),4.69144e-05 +conformance_PriorBoxClustered/ReadIRTest.ImportExport/Op=PriorBoxClustered.1_Type=f32_IR=PriorBoxClustered-1_433_Device=CPU_Shape=static_Config=(),4.69144e-05 +conformance_PriorBoxClustered/ReadIRTest.ImportExport/Op=PriorBoxClustered.1_Type=f32_IR=PriorBoxClustered-1_342_Device=CPU_Shape=static_Config=(),4.69144e-05 +conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_IR=MaxPool-8_214_Device=CPU_Shape=static_Config=(),4.6142e-05 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=8cdce4d75682fd4e98132e71c5f680dec43464dba6884cb071d9e32f5cbb8341_Device=CPU_Shape=static_Config=(),4.6142e-05 +conformance_PriorBoxClustered/ReadIRTest.Inference/Op=PriorBoxClustered.1_Type=f32_IR=PriorBoxClustered-1_845_Device=CPU_Shape=static_Config=(),4.58273e-05 +conformance_PriorBoxClustered/ReadIRTest.Inference/Op=PriorBoxClustered.1_Type=f32_IR=PriorBoxClustered-1_795_Device=CPU_Shape=static_Config=(),4.58273e-05 +conformance_PriorBoxClustered/ReadIRTest.Inference/Op=PriorBoxClustered.1_Type=f32_IR=PriorBoxClustered-1_564_Device=CPU_Shape=static_Config=(),4.58273e-05 +conformance_PriorBoxClustered/ReadIRTest.Inference/Op=PriorBoxClustered.1_Type=f32_IR=PriorBoxClustered-1_502_Device=CPU_Shape=static_Config=(),4.58273e-05 +conformance_PriorBoxClustered/ReadIRTest.ImportExport/Op=PriorBoxClustered.1_Type=f32_IR=PriorBoxClustered-1_845_Device=CPU_Shape=static_Config=(),4.58273e-05 +conformance_PriorBoxClustered/ReadIRTest.ImportExport/Op=PriorBoxClustered.1_Type=f32_IR=PriorBoxClustered-1_795_Device=CPU_Shape=static_Config=(),4.58273e-05 +conformance_PriorBoxClustered/ReadIRTest.ImportExport/Op=PriorBoxClustered.1_Type=f32_IR=PriorBoxClustered-1_564_Device=CPU_Shape=static_Config=(),4.58273e-05 +conformance_PriorBoxClustered/ReadIRTest.ImportExport/Op=PriorBoxClustered.1_Type=f32_IR=PriorBoxClustered-1_502_Device=CPU_Shape=static_Config=(),4.58273e-05 +conformance_FakeQuantize/ReadIRTest.ImportExport/Op=FakeQuantize.1_Type=f32_IR=80fab1a0645bdb57459e3d4abb0da3e23c0cb2c5e6c403cc985811685dbebb51_Device=CPU_Shape=static_Config=(),4.53124e-05 +conformance_TopK/ReadIRTest.Inference/Op=TopK.11_Type=f32_IR=191167d5b9c2fc58d278fe912cee18d02feb3eee41966c6ea78e48136855a211_Device=CPU_Shape=static_Config=(),4.48547e-05 +conformance_TopK/ReadIRTest.ImportExport/Op=TopK.11_Type=f32_IR=191167d5b9c2fc58d278fe912cee18d02feb3eee41966c6ea78e48136855a211_Device=CPU_Shape=static_Config=(),4.48547e-05 +conformance_ScatterElementsUpdate/ReadIRTest.Inference/Op=ScatterElementsUpdate.12_Type=f32_IR=b65ba26b5f26a196ca080ddb89c75ab3f76f2dd3e9b84be73a656cb28f839b56_Device=CPU_Shape=static_Config=(),4.48547e-05 +conformance_ScatterElementsUpdate/ReadIRTest.ImportExport/Op=ScatterElementsUpdate.12_Type=f32_IR=b65ba26b5f26a196ca080ddb89c75ab3f76f2dd3e9b84be73a656cb28f839b56_Device=CPU_Shape=static_Config=(),4.48547e-05 +conformance_RegionYolo/ReadIRTest.ImportExport/Op=RegionYolo.1_Type=f32_IR=RegionYolo-1_947_Device=CPU_Shape=static_Config=(),4.4111e-05 
+conformance_RegionYolo/ReadIRTest.ImportExport/Op=RegionYolo.1_Type=f32_IR=RegionYolo-1_877_Device=CPU_Shape=static_Config=(),4.4111e-05 +conformance_RegionYolo/ReadIRTest.ImportExport/Op=RegionYolo.1_Type=f32_IR=RegionYolo-1_815_Device=CPU_Shape=static_Config=(),4.4111e-05 +conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_IR=AvgPool-1_993_Device=CPU_Shape=static_Config=(),4.39393e-05 +conformance_VariadicSplit/ReadIRTest.ImportExport/Op=VariadicSplit.1_Type=f32_IR=3b212b5b25e823c4ed564202524f55ef1dcb95ec75226679aabb68d7780d5b52_Device=CPU_Shape=static_Config=(),4.35388e-05 +conformance_Transpose/ReadIRTest.ImportExport/Op=Transpose.1_Type=f32_IR=5854c5cde01f584fa60a6e4bed4153e9e3ec358bf9a7cc1af15d1e52c198aacc_Device=CPU_Shape=static_Config=(),4.35388e-05 +conformance_Softmax/ReadIRTest.ImportExport/Op=Softmax.8_Type=f32_IR=Softmax-8_879_Device=CPU_Shape=static_Config=(),4.35388e-05 +conformance_GatherND/ReadIRTest.ImportExport/Op=GatherND.8_Type=f32_IR=cf26cf5748bbd3a2f8c9393be832df1a214cf87e5ff4d3b1d5d8b5c1f90e5394_Device=CPU_Shape=static_Config=(),4.35388e-05 +conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=f32_IR=e851b62890c8f207a73f9b81b88cffec635f12e830225eed0c2929e46f2ffe73_Device=CPU_Shape=dynamic_Config=(),4.34244e-05 +conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=i64_IR=a9538213f2f4d221fdaa07b7b193a2936b01f23cff866ee94716df79b6d5ddba_Device=CPU_Shape=dynamic_Config=(),4.34244e-05 +conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=f32_IR=aa81c2f1e41cac01c5d0f09739f1e5ecf537ec67b1972688b4d179ab682d4cfd_Device=CPU_Shape=static_Config=(),4.34244e-05 +conformance_ShapeOf/ReadIRTest.ImportExport/Op=ShapeOf.3_Type=i64_IR=89814392656dabf90f43fa5f7b06164c611e5ac4db815c7639215614a3abb387_Device=CPU_Shape=dynamic_Config=(),4.34244e-05 +conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_IR=8eab4ee3e0c015853a6e31a00b3903e83a3975d4c115d59aa91d04190b394e68_Device=CPU_Shape=static_Config=(),4.34244e-05 +conformance_ReduceMin/ReadIRTest.ImportExport/Op=ReduceMin.1_Type=i32_IR=564549da290bbddca482dbee4b678d7bb6eefbb295f63f41c695c84b6f7be2eb_Device=CPU_Shape=static_Config=(),4.34244e-05 +conformance_NonMaxSuppression/ReadIRTest.ImportExport/Op=NonMaxSuppression.9_Type=i64_IR=e6b6f6c92406a0558158b2a1175f779c073ec4eedc0bfc5907583305ea9b34b5_Device=CPU_Shape=dynamic_Config=(),4.34244e-05 +conformance_Gather/ReadIRTest.Inference/Op=Gather.8_Type=f32_IR=d3a7f7f03964207ea8b253f361f11be37c2905f3f7c8ff731c29ef40d55b237f_Device=CPU_Shape=dynamic_Config=(),4.34244e-05 +conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=f32_IR=d3a7f7f03964207ea8b253f361f11be37c2905f3f7c8ff731c29ef40d55b237f_Device=CPU_Shape=dynamic_Config=(),4.34244e-05 +conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=f32_IR=a0dfba1f746d9d0ad8b53b22a62af48052b010a9fd1bd70339b1cf02a6be818c_Device=CPU_Shape=dynamic_Config=(),4.34244e-05 +conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=f32_IR=68a6ff0135ae81327e5b3b0ab287f6cbaa647d678a14397858892023f6d7b043_Device=CPU_Shape=dynamic_Config=(),4.34244e-05 +conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=u64_IR=59d61fe3ad99f1f9c01aa355306a4b207386f4689b13b6c6208d970aaf54281b_Device=CPU_Shape=dynamic_Config=(),4.34244e-05 +conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=i64_IR=017893ca7e42922082bbaf2a75a788529cc40e74bca9654be374547fd019b49d_Device=CPU_Shape=static_Config=(),4.34244e-05 
+conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=i64_IR=d7d40ea6b1b971b170cbe1762bf30caf122b054658fffa34e069566c6be8d26b_Device=CPU_Shape=dynamic_Config=(),4.34244e-05 +conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_IR=MaxPool-8_231_Device=CPU_Shape=static_Config=(),4.24232e-05 +conformance_GRUSequence/ReadIRTest.ImportExport/Op=GRUSequence.5_Type=f32_IR=f5ebeb377ad81fb33a4832b1693727d7a59b7d4378bfa4a701d8aad819812f64_Device=CPU_Shape=static_Config=(),4.24232e-05 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=d6a3851cea23fa42a953d697cc26c02f5a18f8a20b93d8e771ffa1ac70528a89_Device=CPU_Shape=static_Config=(),4.24232e-05 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=b759a61e9c6e0dcf58218fd832b063613c40de6671214ddda32c5739d7150ec7_Device=CPU_Shape=static_Config=(),4.24232e-05 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=54223c1ee835541f5f857e40867f7f82d9f8835252f05f1933f47166d6479439_Device=CPU_Shape=static_Config=(),4.24232e-05 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=40f3f19b0836cfadc8bbbda05dee0acbff7e5476b2e2b5989dbeb31729ecd1b0_Device=CPU_Shape=static_Config=(),4.24232e-05 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=014a9c2e7e595ae947ccc439204fac2863924553578c7552cac09c546a42dde9_Device=CPU_Shape=static_Config=(),4.24232e-05 +conformance_Split/ReadIRTest.ImportExport/Op=Split.1_Type=f32_IR=eeff9a413124b377afabe49671874975ba13276353a0b3d722702962ebbe51e0_Device=CPU_Shape=static_Config=(),4.02491e-05 +conformance_Minimum/ReadIRTest.ImportExport/Op=Minimum.1_Type=f32_IR=3766e089cf04fcee6bd548c428c436105f86230a9f2991dd184f2505e275ea6d_Device=CPU_Shape=static_Config=(),3.96484e-05 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=2d7f7b1c9321267857c30413110d6410c3e5c0402b0ec5ae0efe7ffd53052c14_Device=CPU_Shape=static_Config=(),3.91335e-05 +conformance_NormalizeL2/ReadIRTest.ImportExport/Op=NormalizeL2.1_Type=f32_IR=cf2ecc84915c9424ae3e4b93f0be8ac370a5d4003d7d7673f9a0f7008068efd5_Device=CPU_Shape=static_Config=(),3.78748e-05 +conformance_DetectionOutput/ReadIRTest.ImportExport/Op=DetectionOutput.8_Type=f32_IR=e0b77b88d0d21ed244dae0e43fd4b1764a20f142cfdf161a8e9e484c8e87f72a_Device=CPU_Shape=static_Config=(),3.78748e-05 +conformance_ReduceSum/ReadIRTest.ImportExport/Op=ReduceSum.1_Type=f32_IR=31ee4ddebf2a2e33e19ef55935029cec0c3bb0ef2351fdc321c558819e29418e_Device=CPU_Shape=static_Config=(),3.75029e-05 +conformance_Maximum/ReadIRTest.ImportExport/Op=Maximum.1_Type=f32_IR=fca6a730d61dd7c4cc645bf091a950289b509ff10f30aecfae394014e8bf9b28_Device=CPU_Shape=static_Config=(),3.75029e-05 +conformance_HSwish/ReadIRTest.ImportExport/Op=HSwish.4_Type=f32_IR=227ba60a7156653619608c942d8b570d7704b17e8d616f4b071f1402d7f12383_Device=CPU_Shape=static_Config=(),3.68163e-05 +conformance_GRUSequence/ReadIRTest.ImportExport/Op=GRUSequence.5_Type=f32_IR=b7937dd6044ae051c5341db0b1772a270da3c6a8643eb14f727b89e02587a2ed_Device=CPU_Shape=static_Config=(),3.46709e-05 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=e01b754d2bb3780bc2e1c5f23cb49ab847d9835c530b4720facd66dde8c3d96b_Device=CPU_Shape=static_Config=(),3.46709e-05 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=dec9f8261a4738e527e92223e7aa09e2f905766844904273c63b00244df0c555_Device=CPU_Shape=static_Config=(),3.46709e-05 
+conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=d44a8a6422066261a025b4be4192706b1579163cef12944c453797070e3a2ee3_Device=CPU_Shape=static_Config=(),3.46709e-05 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=baeb82c17d72e93406521482104b1c5b85fcbbdac22a501acab9bd04a54bc764_Device=CPU_Shape=static_Config=(),3.46709e-05 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=7d6e53d9e576276b2f2949e5f94599d6bb56e516492e395ec86894ea2f8df688_Device=CPU_Shape=static_Config=(),3.46709e-05 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=59368a22dcaff273e44b809fba7f13b5966d978f25db9d39cfa3726b71bb2b04_Device=CPU_Shape=static_Config=(),3.46709e-05 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=513d7cbc0e61575ebdb8fcc7e0b92d0f6085e369c5d7aded8ddd0c37ee693b10_Device=CPU_Shape=static_Config=(),3.46709e-05 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=38d7c53748ad83554735c93023f872dd621fd423404072d5e14cbf6b0079425a_Device=CPU_Shape=static_Config=(),3.46709e-05 +conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_IR=d0b855aeac2caf3ddabe3d38dda46634c742def705766af84bd7b288737e39d0_Device=CPU_Shape=static_Config=(),3.46709e-05 +conformance_Loop/ReadIRTest.ImportExport/Op=Loop.5_Type=i32_IR=Loop-5_653_Device=CPU_Shape=static_Config=(),3.3212e-05 +conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.11_Type=f32_IR=Interpolate-11_753_Device=CPU_Shape=static_Config=(),3.31261e-05 +conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=i64_IR=9a11a2f1bf2d0f652af69fcd5b858f1fdb50af9786d9b4a9e8358d42e52d7863_Device=CPU_Shape=static_Config=(),3.2697e-05 +conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_IR=AvgPool-1_690_Device=CPU_Shape=static_Config=(),3.2697e-05 +conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_IR=AvgPool-1_191_Device=CPU_Shape=static_Config=(),3.2697e-05 +conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_IR=AvgPool-1_170_Device=CPU_Shape=static_Config=(),3.2697e-05 +conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_IR=d54bae455ec3075ebbf2f9ae9cdef66087759a7d46820da8ca200cf757355e81_Device=CPU_Shape=static_Config=(),3.23538e-05 +conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=f32_IR=035d21b2057f60921ccefb2ac3b3f3c1c8bed9da8a461115696005190d2a5fa5_Device=CPU_Shape=static_Config=(),3.19819e-05 +conformance_RegionYolo/ReadIRTest.ImportExport/Op=RegionYolo.1_Type=f32_IR=RegionYolo-1_998_Device=CPU_Shape=static_Config=(),3.19247e-05 +conformance_RegionYolo/ReadIRTest.ImportExport/Op=RegionYolo.1_Type=f32_IR=RegionYolo-1_660_Device=CPU_Shape=static_Config=(),3.19247e-05 +conformance_RegionYolo/ReadIRTest.ImportExport/Op=RegionYolo.1_Type=f32_IR=RegionYolo-1_142_Device=CPU_Shape=static_Config=(),3.19247e-05 +conformance_Sqrt/ReadIRTest.ImportExport/Op=Sqrt.1_Type=f32_IR=84deded0000080d9336c2c282711105a7acb45b7beb670d423c52f339ee055c9_Device=CPU_Shape=static_Config=(),3.1753e-05 +conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_IR=7118ee68001e03a010a8dfd307f7483d9500f01070aee67205457ba5328b9a12_Device=CPU_Shape=static_Config=(),3.1753e-05 +conformance_ReduceSum/ReadIRTest.ImportExport/Op=ReduceSum.1_Type=f32_IR=b8ef53e65f3dd18ba0b3a39c01d34dd85bf374c1d7d70733b655cf583ac336fd_Device=CPU_Shape=static_Config=(),3.1753e-05 
+conformance_Divide/ReadIRTest.ImportExport/Op=Divide.1_Type=f32_IR=62b5862e146727b216449226279800cb77657ca23847e8daeca88d3deaba63b6_Device=CPU_Shape=static_Config=(),3.1753e-05 +conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_IR=AvgPool-1_632_Device=CPU_Shape=static_Config=(),3.1753e-05 +conformance_ReduceSum/ReadIRTest.ImportExport/Op=ReduceSum.1_Type=f32_IR=ff80757f15c30b488c3f763ebf4fc03cf05bf4fcd2018ccce49a5ccc247f4d8c_Device=CPU_Shape=static_Config=(),3.14956e-05 +conformance_IDFT/ReadIRTest.ImportExport/Op=IDFT.7_Type=f32_IR=28dce20800dd5a8409c0d50230baa40b875a5a0ddae9961376328d46efbc09d4_Device=CPU_Shape=static_Config=(),3.14956e-05 +conformance_LRN/ReadIRTest.ImportExport/Op=LRN.1_Type=f32_IR=75fe51bfdd72003afac1631ead7a731d8de15f5c586f39bedecbac920138ed06_Device=CPU_Shape=static_Config=(),3.07804e-05 +conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.4_Type=f32_IR=8dec756b3ed3541ac01747029daaca96225bc88b3d9e221b9f3bab958437ef78_Device=CPU_Shape=static_Config=(),3.04944e-05 +conformance_DetectionOutput/ReadIRTest.ImportExport/Op=DetectionOutput.8_Type=f32_IR=7f598191570ff810bf54a1cf08684687e1d70d4dc9233f29e74830dbe6e64d9d_Device=CPU_Shape=static_Config=(),3.04657e-05 +conformance_Gather/ReadIRTest.ImportExport/Op=Gather.1_Type=i64_IR=3c5d6e1848f0939e80d97d86ec65e95d71e86a8ea2e8d4bcd0b2f37b2c59e36c_Device=CPU_Shape=static_Config=(),2.9865e-05 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=11974897f572809d1d6f004d8b15d393927752fa88e37afdaab87999bf598f74_Device=CPU_Shape=static_Config=(),2.91785e-05 +conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_IR=b171d43d2869e339e27f9513677a72220b3956f64f264b75871f3541ded37a7e_Device=CPU_Shape=static_Config=(),2.91785e-05 +conformance_LSTMSequence/ReadIRTest.ImportExport/Op=LSTMSequence.5_Type=f32_IR=974270d9310d70ea02593f3ce98769467e1e3295ed999065008e0ce5c88dc63c_Device=CPU_Shape=static_Config=(),2.8921e-05 +conformance_LSTMSequence/ReadIRTest.ImportExport/Op=LSTMSequence.5_Type=f32_IR=143af1d4c87d64c07e394e533782d66ad33330b576f39f7bac20441ac460f947_Device=CPU_Shape=static_Config=(),2.8921e-05 +conformance_TopK/ReadIRTest.Inference/Op=TopK.11_Type=f32_IR=5f34dd2786a2968539b3fc0e6d51fbbee52d5d9b3a59f93807f49e9216e77b5c_Device=CPU_Shape=dynamic_Config=(),2.88924e-05 +conformance_TopK/ReadIRTest.ImportExport/Op=TopK.11_Type=f32_IR=5f34dd2786a2968539b3fc0e6d51fbbee52d5d9b3a59f93807f49e9216e77b5c_Device=CPU_Shape=dynamic_Config=(),2.88924e-05 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=4e084cb457b8ecf58c405605c47c7391972dc2e7cd693bb21418911beea59092_Device=CPU_Shape=static_Config=(),2.88924e-05 +conformance_LSTMSequence/ReadIRTest.ImportExport/Op=LSTMSequence.5_Type=f32_IR=32b3ff73c08de6c5997cf36275ca4013f1d5e7652614be47ae5281e35a7c50c4_Device=CPU_Shape=static_Config=(),2.83203e-05 +conformance_Squeeze/ReadIRTest.ImportExport/Op=Squeeze.1_Type=f32_IR=caa9c1f224cb27f267eac25a84df532b292251bd0e237a8de8c78d0f5085f10a_Device=CPU_Shape=static_Config=(),2.75479e-05 +conformance_ReduceMean/ReadIRTest.ImportExport/Op=ReduceMean.1_Type=f32_IR=505f545bb55297b6d0b0caf8c998ac57336ed44925611f68a58b5d0e4356d6fb_Device=CPU_Shape=static_Config=(),2.75479e-05 +conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_IR=MaxPool-8_835_Device=CPU_Shape=static_Config=(),2.75479e-05 +conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_IR=MaxPool-8_672_Device=CPU_Shape=static_Config=(),2.75479e-05 
+conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_IR=AvgPool-1_984_Device=CPU_Shape=static_Config=(),2.75479e-05 +conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_IR=PriorBox-1_974_Device=CPU_Shape=static_Config=(),2.56313e-05 +conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_IR=PriorBox-1_923_Device=CPU_Shape=static_Config=(),2.56313e-05 +conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_IR=PriorBox-1_886_Device=CPU_Shape=static_Config=(),2.56313e-05 +conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_IR=PriorBox-1_619_Device=CPU_Shape=static_Config=(),2.56313e-05 +conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_IR=PriorBox-1_465_Device=CPU_Shape=static_Config=(),2.56313e-05 +conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_IR=PriorBox-1_974_Device=CPU_Shape=static_Config=(),2.56313e-05 +conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_IR=PriorBox-1_923_Device=CPU_Shape=static_Config=(),2.56313e-05 +conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_IR=PriorBox-1_886_Device=CPU_Shape=static_Config=(),2.56313e-05 +conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_IR=PriorBox-1_619_Device=CPU_Shape=static_Config=(),2.56313e-05 +conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_IR=PriorBox-1_465_Device=CPU_Shape=static_Config=(),2.56313e-05 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=d9386e93f7c1daa16a5517997437eef8feb3961c67d48fa154aaa9718ca78838_Device=CPU_Shape=static_Config=(),2.54024e-05 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=cf252ca8487e87ca565efda29b7dab1bc6f86522b019861abb0ac6323d23b84b_Device=CPU_Shape=static_Config=(),2.54024e-05 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=b5fdb53b420e560f3c9291881ae2cd2132a8dfab8fbf44c733727e9d0929bc00_Device=CPU_Shape=static_Config=(),2.54024e-05 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=b3b3a7b1b91d898552d59510f8f1c994921f62deabce9106dba53ad755667666_Device=CPU_Shape=static_Config=(),2.54024e-05 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=a1aac4cc8ebaa4f4de4478f5d53d23d0b4588611db83ded9e044040da807c238_Device=CPU_Shape=static_Config=(),2.54024e-05 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=9b97bbbeff4b4872682d10966f3915819aa796aed9e8388d062366691893d64e_Device=CPU_Shape=static_Config=(),2.54024e-05 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=7c1c9fb877c7c3959c1610b3e3887550ed4e4873e98da735605120ada3c55770_Device=CPU_Shape=static_Config=(),2.54024e-05 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=659efb374a45b7850eaf40c570f1727c7797ed97673abc09a3dcb9d8555d1597_Device=CPU_Shape=static_Config=(),2.54024e-05 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=1ef155e899cdb23357700e778492a5fae77c8e98df668316c8aaf3cb58ccc420_Device=CPU_Shape=static_Config=(),2.54024e-05 +conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_IR=4d8525b599684f84bd4c900541b34cd2855974a2739f509ff965896bc03c7bdd_Device=CPU_Shape=static_Config=(),2.54024e-05 +conformance_Squeeze/ReadIRTest.ImportExport/Op=Squeeze.1_Type=i64_IR=a746b77e38ec37a9fb67103e9577c73687e8694f981e6083c0f8b9f49d7a7614_Device=CPU_Shape=static_Config=(),2.50591e-05 
+conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_IR=AvgPool-1_959_Device=CPU_Shape=static_Config=(),2.50591e-05 +conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=i32_IR=f0e0c32ba5b1abfb0936d7876684d204c898bc8560e28bda6b58d9a7c02b03b6_Device=CPU_Shape=static_Config=(),2.48875e-05 +conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_IR=MaxPool-8_949_Device=CPU_Shape=static_Config=(),2.48875e-05 +conformance_Transpose/ReadIRTest.ImportExport/Op=Transpose.1_Type=f32_IR=b7ff1fa55f284c0df98364b614dd89a99c4aabd2a75ea885cb0b8a4471b0bc61_Device=CPU_Shape=dynamic_Config=(),2.38577e-05 +conformance_Power/ReadIRTest.ImportExport/Op=Power.1_Type=f32_IR=932ae958cfcc2c49de70c15de97a38301ca0cd60070e8b31f52f1495e23a78a7_Device=CPU_Shape=dynamic_Config=(),2.38577e-05 +conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_IR=9a2736ae3d787054bcc509653a533a614a63f66af0bccb6cd38d1d0af5c54723_Device=CPU_Shape=dynamic_Config=(),2.38577e-05 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=cbffaa2c9b5d9fe202f3f5bfaa43689b38a22adc631604a2c600205907a50655_Device=CPU_Shape=dynamic_Config=(),2.38577e-05 +conformance_LSTMSequence/ReadIRTest.ImportExport/Op=LSTMSequence.5_Type=f32_IR=ba65774de91ef836922146f0bfd7b13f50d3c61abc191890db73d9e07a6b3bba_Device=CPU_Shape=static_Config=(),2.30853e-05 +conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_IR=c428f5bef879bf8ef11fb84d776e7f21a1e98980f2779184a23cec69464d101e_Device=CPU_Shape=static_Config=(),2.30853e-05 +conformance_Pad/ReadIRTest.ImportExport/Op=Pad.1_Type=f32_IR=3d45d67a7e386c3532753e23e86b7599525a3e0a02e5a69c213e640b3644c731_Device=CPU_Shape=static_Config=(),2.28851e-05 +conformance_TopK/ReadIRTest.ImportExport/Op=TopK.11_Type=f32_IR=6b0b0b4ae8fa7eb4882cb36012eb7b4ef60630041f9138a2969abc947be3dd18_Device=CPU_Shape=static_Config=(),2.17408e-05 +conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_IR=MaxPool-8_640_Device=CPU_Shape=static_Config=(),2.11401e-05 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=de3e4d9bce672d2f9d1b636bfbdaae03ee9a1d01125f692487a4e00a73561b45_Device=CPU_Shape=static_Config=(),2.11401e-05 +conformance_Pad/ReadIRTest.ImportExport/Op=Pad.12_Type=f32_IR=092beb3237c090ef1b8693e75c06dd08d99add4365ce4b8637ac565b5805e831_Device=CPU_Shape=static_Config=(),2.09398e-05 +conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_IR=MaxPool-8_526_Device=CPU_Shape=static_Config=(),2.09398e-05 +conformance_Gather/ReadIRTest.ImportExport/Op=Gather.1_Type=i64_IR=21f175001cc6de836e25a43d601d7b79ba82e25d7602c277616d7ab9c7e50d9b_Device=CPU_Shape=static_Config=(),2.09398e-05 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=ab3e751461c96ad0b7c06dd5ae4df600180048b3a8045fd9c5dca6923c777115_Device=CPU_Shape=static_Config=(),1.94809e-05 +conformance_RegionYolo/ReadIRTest.ImportExport/Op=RegionYolo.1_Type=f32_IR=16dc31428002b2d3f3ed685ce295dc5377f1a8fe3b87500325b2c6e81b450fc4_Device=CPU_Shape=static_Config=(),1.91662e-05 +conformance_ShapeOf/ReadIRTest.ImportExport/Op=ShapeOf.1_Type=i64_IR=768ef8dfca086085830f4c2b7918968f99267ef176768a4ca1434de3bd7f93e0_Device=CPU_Shape=static_Config=(),1.77359e-05 +conformance_PriorBoxClustered/ReadIRTest.Inference/Op=PriorBoxClustered.1_Type=f32_IR=PriorBoxClustered-1_875_Device=CPU_Shape=static_Config=(),1.74213e-05 
+conformance_PriorBoxClustered/ReadIRTest.ImportExport/Op=PriorBoxClustered.1_Type=f32_IR=PriorBoxClustered-1_875_Device=CPU_Shape=static_Config=(),1.74213e-05 +conformance_Pad/ReadIRTest.ImportExport/Op=Pad.1_Type=f32_IR=f7720759ec4302d50c8202343aa480abccc973ed5b54f65d388f0e706f998ef5_Device=CPU_Shape=dynamic_Config=(),1.57907e-05 +conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.11_Type=f32_IR=4b8c349017646c48fa04047f30f0ad700bd67f3edeba1d66fcd110154c5016f8_Device=CPU_Shape=dynamic_Config=(),1.57907e-05 +conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_IR=AvgPool-1_1033_Device=CPU_Shape=static_Config=(),1.29587e-05 +conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=i32_IR=57e8a1874c5b3f500c9d28adfc3c407d7a0c1bd061d46dc0d0ed2b27e263dc92_Device=CPU_Shape=static_Config=(),1.23579e-05 +conformance_Tile/ReadIRTest.ImportExport/Op=Tile.1_Type=f32_IR=339fb1130cd4308085ccdf47c50f16cba63456e42e1f563e6ef7da466256a0a0_Device=CPU_Shape=static_Config=(),1.23579e-05 +conformance_Squeeze/ReadIRTest.ImportExport/Op=Squeeze.1_Type=f32_IR=3f7ce2b6d7977f47f72a692c54d7b8ceba8612d64468116a9189bf23423a0507_Device=CPU_Shape=static_Config=(),1.23579e-05 +conformance_SpaceToDepth/ReadIRTest.ImportExport/Op=SpaceToDepth.1_Type=f32_IR=53c74fcce6c2e4608c874b7334a35ffe89bbbaf436ad7ee2527a2a4361e3ef62_Device=CPU_Shape=static_Config=(),1.23579e-05 +conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_IR=MaxPool-8_448_Device=CPU_Shape=static_Config=(),1.23579e-05 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=80f8d42b9528ab42df0f933a1b2f513e1873f78c763ab8ea6c2857af20e945ad_Device=CPU_Shape=static_Config=(),1.23579e-05 +conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_IR=010e40cb83a0516ff7ac30246b841a178fada738ab81a7c6938bce5f842bd957_Device=CPU_Shape=static_Config=(),1.23579e-05 +conformance_CTCGreedyDecoderSeqLen/ReadIRTest.ImportExport/Op=CTCGreedyDecoderSeqLen.6_Type=i64_IR=a2df483ee8bb9b66998376e2180988db6dc30f0081803b56fa38e63006d12acd_Device=CPU_Shape=static_Config=(),1.23579e-05 +conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=f32_IR=1edb2383f309489c62d50e904d86df71ccff3bfda927a90caff1cf77500a55e4_Device=CPU_Shape=dynamic_Config=(),1.1843e-05 +conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=f32_IR=169084543f933dbb1ef2e0ed10011a29532eb63d0dc7a7c68a3ed4c80b3fc734_Device=CPU_Shape=dynamic_Config=(),1.1843e-05 +conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=f32_IR=0152e03f7a03f2917dc4ae13ebc15763e8639b7211fc819d56b55f343ed099b6_Device=CPU_Shape=dynamic_Config=(),1.1843e-05 +conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_IR=8c67a77b3545adcfaad18010ac5479423027555b6ffaf8564ab7802906ec18ac_Device=CPU_Shape=dynamic_Config=(),1.1843e-05 +conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_IR=797e676efcac3575336b5997f0b5176257276e8f4a37ca82a88505227004b06b_Device=CPU_Shape=dynamic_Config=(),1.1843e-05 +conformance_ReduceSum/ReadIRTest.Inference/Op=ReduceSum.1_Type=f32_IR=ade571535fa87f3ef6a5bea4eb3583e27213c8afb6b650584e42322bf18be841_Device=CPU_Shape=dynamic_Config=(),1.1843e-05 +conformance_ReduceSum/ReadIRTest.ImportExport/Op=ReduceSum.1_Type=f32_IR=e7f43fac89040f0f616aee19205f4f8d3fba86c6bf1c32af15961529a07a3cb3_Device=CPU_Shape=dynamic_Config=(),1.1843e-05 
+conformance_ReduceSum/ReadIRTest.ImportExport/Op=ReduceSum.1_Type=f32_IR=ade571535fa87f3ef6a5bea4eb3583e27213c8afb6b650584e42322bf18be841_Device=CPU_Shape=dynamic_Config=(),1.1843e-05 +conformance_ReduceSum/ReadIRTest.ImportExport/Op=ReduceSum.1_Type=f32_IR=71ec8716d557b4a2009449568c2412c9af5084c12ba009e00dba164c96496a66_Device=CPU_Shape=dynamic_Config=(),1.1843e-05 +conformance_ReduceSum/ReadIRTest.ImportExport/Op=ReduceSum.1_Type=f32_IR=2c3dbad159b2762a5223bdba5272b59850ee3703ed5f38c0762f5dd76767624a_Device=CPU_Shape=dynamic_Config=(),1.1843e-05 +conformance_ReduceSum/ReadIRTest.ImportExport/Op=ReduceSum.1_Type=f32_IR=0ebabff73dd81b30539621df40f8faa148bc3382a048919e5a787c2d13e842fe_Device=CPU_Shape=dynamic_Config=(),1.1843e-05 +conformance_Power/ReadIRTest.ImportExport/Op=Power.1_Type=f32_IR=fdcab01e2bfd1d42e82e02abbc9485a70bbca7f6699c43dbd32e7cfcd32f10e6_Device=CPU_Shape=dynamic_Config=(),1.1843e-05 +conformance_Power/ReadIRTest.ImportExport/Op=Power.1_Type=f32_IR=5beacd2bb9191defe3872fe637eb2fcb831437af69e9164251f4546e5cb1156f_Device=CPU_Shape=dynamic_Config=(),1.1843e-05 +conformance_Multiply/ReadIRTest.Inference/Op=Multiply.1_Type=f32_IR=6f0d2c60afa3c010ecabbc0f54f50ab607b60c28ec71b00612c9fa1dea17224f_Device=CPU_Shape=dynamic_Config=(),1.1843e-05 +conformance_Multiply/ReadIRTest.Inference/Op=Multiply.1_Type=f32_IR=f875aa55e00c0b36cfb97f074f5efc77dfacced6f86eb726269c3d888bc4db71_Device=CPU_Shape=dynamic_Config=(),0 +conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_IR=6f0d2c60afa3c010ecabbc0f54f50ab607b60c28ec71b00612c9fa1dea17224f_Device=CPU_Shape=dynamic_Config=(),1.1843e-05 +conformance_Maximum/ReadIRTest.ImportExport/Op=Maximum.1_Type=f32_IR=9fabcc0b789906e2b65e7195462e0c915ad56a65b049e4d68f762c28437efddc_Device=CPU_Shape=dynamic_Config=(),1.1843e-05 +conformance_Maximum/ReadIRTest.ImportExport/Op=Maximum.1_Type=f32_IR=51dcd5f3e64d8126b3816a76d8ec0bca37f4b662d2b90be115af2be4e2d72c79_Device=CPU_Shape=dynamic_Config=(),1.1843e-05 +conformance_Divide/ReadIRTest.ImportExport/Op=Divide.1_Type=f32_IR=b887e471773f821d63d2e9cee6d46a451a771ae23e5ab37a199db58e1a8865c8_Device=CPU_Shape=dynamic_Config=(),1.1843e-05 +conformance_Add/ReadIRTest.Inference/Op=Add.1_Type=f32_IR=cad5dd8f018be8f8d628cdd3dc1043c97ab0ee4ae39cd321785e3b624fb96f6d_Device=CPU_Shape=dynamic_Config=(),1.1843e-05 +conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_IR=cad5dd8f018be8f8d628cdd3dc1043c97ab0ee4ae39cd321785e3b624fb96f6d_Device=CPU_Shape=dynamic_Config=(),1.1843e-05 +conformance_LSTMSequence/ReadIRTest.ImportExport/Op=LSTMSequence.5_Type=f32_IR=6f175bde26572d6d5844c630d03173dffd7c70efb78d45336b3aecc5399fa8ca_Device=CPU_Shape=static_Config=(),1.17286e-05 +conformance_Softmax/ReadIRTest.ImportExport/Op=Softmax.1_Type=f32_IR=0d4482e1f7ce2b8c899d93e76f0c8c8377dc35953ff1d6b0e69d9f00bb752183_Device=CPU_Shape=static_Config=(),1.14711e-05 +conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=f32_IR=d124732486e130ab64410fc881d7eeca2417803af4affe42a16df10c990a6a99_Device=CPU_Shape=static_Config=(),1.01266e-05 +conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=f32_IR=97375897a0f3c0095da86f90270be3d40a692e97dc0dcba3a8c833ecbb5248ac_Device=CPU_Shape=static_Config=(),1.01266e-05 +conformance_Softmax/ReadIRTest.ImportExport/Op=Softmax.8_Type=f32_IR=Softmax-8_893_Device=CPU_Shape=static_Config=(),1.01266e-05 
+conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_IR=423b4baae3be39bf92c625a121c947c162273fc186a91c0ae9b102d573b8ea8b_Device=CPU_Shape=static_Config=(),1.01266e-05 +conformance_ReduceSum/ReadIRTest.ImportExport/Op=ReduceSum.1_Type=f32_IR=de31ce5d6727d121be352741e99f06c4c7f5b30c4a92e1ccd2742817f5c7c014_Device=CPU_Shape=static_Config=(),1.01266e-05 +conformance_ReduceSum/ReadIRTest.ImportExport/Op=ReduceSum.1_Type=f32_IR=b052b287437bea9d3b10bd946b0302626eef0bbfa5f8b134beae65f91e2e33fd_Device=CPU_Shape=static_Config=(),1.01266e-05 +conformance_NormalizeL2/ReadIRTest.ImportExport/Op=NormalizeL2.1_Type=f32_IR=bb5b435512cb922a87fb89af942ea55a0f1ba9e158ac9a6a55f7daf98ac93883_Device=CPU_Shape=static_Config=(),1.01266e-05 +conformance_NormalizeL2/ReadIRTest.ImportExport/Op=NormalizeL2.1_Type=f32_IR=7a346953eb65f172bdf3614c4851f338b3dbb0517d5a6168d5033a78474b2e9b_Device=CPU_Shape=static_Config=(),1.01266e-05 +conformance_Maximum/ReadIRTest.ImportExport/Op=Maximum.1_Type=f32_IR=b46839718ab32e78bdebd09281e08d06506bdbe17a790032d5a8cd2df3c2719e_Device=CPU_Shape=static_Config=(),1.01266e-05 +conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_IR=PriorBox-1_994_Device=CPU_Shape=static_Config=(),1.00408e-05 +conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_IR=PriorBox-1_844_Device=CPU_Shape=static_Config=(),1.00408e-05 +conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_IR=PriorBox-1_842_Device=CPU_Shape=static_Config=(),1.00408e-05 +conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_IR=PriorBox-1_761_Device=CPU_Shape=static_Config=(),1.00408e-05 +conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_IR=PriorBox-1_350_Device=CPU_Shape=static_Config=(),1.00408e-05 +conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_IR=PriorBox-1_224_Device=CPU_Shape=static_Config=(),1.00408e-05 +conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_IR=PriorBox-1_994_Device=CPU_Shape=static_Config=(),1.00408e-05 +conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_IR=PriorBox-1_844_Device=CPU_Shape=static_Config=(),1.00408e-05 +conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_IR=PriorBox-1_842_Device=CPU_Shape=static_Config=(),1.00408e-05 +conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_IR=PriorBox-1_761_Device=CPU_Shape=static_Config=(),1.00408e-05 +conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_IR=PriorBox-1_350_Device=CPU_Shape=static_Config=(),1.00408e-05 +conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_IR=PriorBox-1_224_Device=CPU_Shape=static_Config=(),1.00408e-05 +conformance_LSTMSequence/ReadIRTest.ImportExport/Op=LSTMSequence.5_Type=f32_IR=1270640cd6b52779c1f6da011ec6ecedb141f03134110fcd8ec4a3ab8c27f9b4_Device=CPU_Shape=static_Config=(),9.78337e-06 +conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_IR=PriorBox-1_914_Device=CPU_Shape=static_Config=(),9.61173e-06 +conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_IR=PriorBox-1_880_Device=CPU_Shape=static_Config=(),9.61173e-06 +conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_IR=PriorBox-1_680_Device=CPU_Shape=static_Config=(),9.61173e-06 +conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_IR=PriorBox-1_531_Device=CPU_Shape=static_Config=(),9.61173e-06 +conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_IR=PriorBox-1_470_Device=CPU_Shape=static_Config=(),9.61173e-06 
+conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_IR=PriorBox-1_405_Device=CPU_Shape=static_Config=(),9.61173e-06 +conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_IR=PriorBox-1_914_Device=CPU_Shape=static_Config=(),9.61173e-06 +conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_IR=PriorBox-1_880_Device=CPU_Shape=static_Config=(),9.61173e-06 +conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_IR=PriorBox-1_680_Device=CPU_Shape=static_Config=(),9.61173e-06 +conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_IR=PriorBox-1_531_Device=CPU_Shape=static_Config=(),9.61173e-06 +conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_IR=PriorBox-1_470_Device=CPU_Shape=static_Config=(),9.61173e-06 +conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_IR=PriorBox-1_405_Device=CPU_Shape=static_Config=(),9.61173e-06 +conformance_RegionYolo/ReadIRTest.ImportExport/Op=RegionYolo.1_Type=f32_IR=RegionYolo-1_772_Device=CPU_Shape=static_Config=(),8.83936e-06 +conformance_RegionYolo/ReadIRTest.ImportExport/Op=RegionYolo.1_Type=f32_IR=RegionYolo-1_765_Device=CPU_Shape=static_Config=(),8.83936e-06 +conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_IR=PriorBox-1_977_Device=CPU_Shape=static_Config=(),8.52469e-06 +conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_IR=PriorBox-1_620_Device=CPU_Shape=static_Config=(),8.52469e-06 +conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_IR=PriorBox-1_595_Device=CPU_Shape=static_Config=(),8.52469e-06 +conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_IR=PriorBox-1_365_Device=CPU_Shape=static_Config=(),8.52469e-06 +conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_IR=PriorBox-1_175_Device=CPU_Shape=static_Config=(),8.52469e-06 +conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_IR=PriorBox-1_977_Device=CPU_Shape=static_Config=(),8.52469e-06 +conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_IR=PriorBox-1_620_Device=CPU_Shape=static_Config=(),8.52469e-06 +conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_IR=PriorBox-1_595_Device=CPU_Shape=static_Config=(),8.52469e-06 +conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_IR=PriorBox-1_365_Device=CPU_Shape=static_Config=(),8.52469e-06 +conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_IR=PriorBox-1_175_Device=CPU_Shape=static_Config=(),8.52469e-06 +conformance_PRelu/ReadIRTest.ImportExport/Op=PRelu.1_Type=f32_IR=20e7e74f55eb5fb78014cce7e0665d6925bbefd708dd9ccff12dbfbea2a330dd_Device=CPU_Shape=static_Config=(),5.69266e-06 +conformance_RegionYolo/ReadIRTest.ImportExport/Op=RegionYolo.1_Type=f32_IR=RegionYolo-1_750_Device=CPU_Shape=static_Config=(),5.06332e-06 +conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=i32_IR=28f23780d4ca0d40671caf79d5cd9223ad8f6dc2fa5ade2521f3d99586eeeb7f_Device=CPU_Shape=static_Config=(),9.72615e-07 From 46dc704e3f78a90a882c61560aaed7edf553897f Mon Sep 17 00:00:00 2001 From: Irina Efode Date: Thu, 5 Oct 2023 15:22:25 +0400 Subject: [PATCH 077/257] Fix SubgraphsDumper compilation Win (#20256) --- .../conformance/subgraphs_dumper/src/utils/model.cpp | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/src/utils/model.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/src/utils/model.cpp index 09dce548b91c02..25d7f4e520a683 100644 --- 
a/src/tests/functional/plugin/conformance/subgraphs_dumper/src/utils/model.cpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/src/utils/model.cpp @@ -52,7 +52,8 @@ find_models(const std::vector &dirs, const std::string& regexp) { } else { continue; } - } catch (std::exception& e) { + } catch (...) { + // } catch (std::exception& e) { not_read_model.emplace_back(model_file); // std::cout << "[ ERROR ] Impossible to read model: " << model_file << std::endl << "Exception: " << e.what(); } @@ -100,11 +101,13 @@ std::map> cache_models( cache_status[ModelCacheStatus::LARGE_MODELS_INCLUDED].push_back(model); } cache->update_cache(function, model, extract_body, from_cache); - } catch (std::exception &e) { + } catch (...) { + // } catch (std::exception &e) { // std::cout << "[ ERROR ] Model processing failed with exception:" << std::endl << e.what() << std::endl; model_status = ModelCacheStatus::NOT_FULLY_CACHED; } - } catch (std::exception &e) { + } catch (...) { + // } catch (std::exception &e) { model_status = ModelCacheStatus::NOT_READ; // std::cout << "[ ERROR ] Model reading failed with exception:" << std::endl << e.what() << std::endl; } From a79c07b3a0d6e42777215d131a102ab30e52fd1b Mon Sep 17 00:00:00 2001 From: Mikhail Ryzhov Date: Thu, 5 Oct 2023 14:49:39 +0200 Subject: [PATCH 078/257] [GA] Enable win workflow (#19646) * Enable win workflow and cpu parallel tests * removed test code * update cache naming * extended logs collections * Revert "extended logs collections" This reverts commit 0dd6620832b815e731e0df9e994001d143f18a31. * revert lost code during the merge * missed dependencies * enabled push trigger * changed the cache key name * skipped failed test * fixed github action condition and added comments * Update src/core/tests/check.cpp * cache generation fix * Apply suggestions from code review * fixed python test configuration * Revert "cache generation fix" This reverts commit 0feab650feb6c0861b8326e36c45cee09912f89f. * debug parallel tests * Revert "Revert "cache generation fix"" This reverts commit e385b04410cf16a2cfaf2866c5704ffa8a72892a. * Revert "debug parallel tests" This reverts commit e4459472a7242424600e48dc38189ac19e5cd9b8. * fixed steps conditions * concurrency updated * fixed test skip condition on win * review changes * collect debug logs * overwrite test list * debug commit * Revert "debug commit" This reverts commit 8720b87c8fcfa224a7f1c8d5fa97447e1d43fac0. 
--- .github/workflows/windows.yml | 100 +++++++++++------- .../compile_flags/os_flags.cmake | 9 +- .../tests/test_graph/test_node_factory.py | 1 + .../python/tests/test_runtime/test_core.py | 1 + src/core/tests/check.cpp | 4 + .../layer_tests_summary/run_parallel.py | 2 +- .../mo/unit_tests/mo/utils/cli_parser_test.py | 4 +- 7 files changed, 78 insertions(+), 43 deletions(-) diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 680f2686ac63cf..d4e05a91943006 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -1,27 +1,28 @@ name: Tests on Windows (VS 2022, Python 3.11) on: workflow_dispatch: -# pull_request: -# paths-ignore: -# - '**/docs/**' -# - 'docs/**' -# - '**/**.md' -# - '**.md' -# - '**/layer_tests_summary/**' -# - '**/conformance/**' -# push: -# paths-ignore: -# - '**/docs/**' -# - 'docs/**' -# - '**/**.md' -# - '**.md' -# - '**/layer_tests_summary/**' -# - '**/conformance/**' -# branches: -# - master + pull_request: + paths-ignore: + - '**/docs/**' + - 'docs/**' + - '**/**.md' + - '**.md' + - '**/layer_tests_summary/**' + - '**/conformance/**' + push: + paths-ignore: + - '**/docs/**' + - 'docs/**' + - '**/**.md' + - '**.md' + - '**/layer_tests_summary/**' + - '**/conformance/**' + branches: + - master concurrency: - group: ${{ github.head_ref || github.run_id }}-windows + # github.ref is not unique in post-commit + group: ${{ github.event_name == 'push' && github.run_id || github.ref }}-windows cancel-in-progress: true env: @@ -37,7 +38,7 @@ env: LAYER_TESTS_INSTALL_DIR: "${{ github.workspace }}\\install\\tests\\layer_tests" BUILD_DIR: "${{ github.workspace }}\\build" OV_TEMP: "${{ github.workspace }}\\openvino_temp" - PYTHON_STATIC_ARGS: -m "not dynamic_library and not template_plugin" + PYTHON_STATIC_ARGS: -m "not dynamic_library" VCVARSPATH: "C:\\Program Files\\Microsoft Visual Studio\\2022\\Enterprise\\VC\\Auxiliary\\Build\\vcvarsall.bat" jobs: @@ -124,9 +125,10 @@ jobs: # Should save cache only if run in the master branch of the base repo # github.ref_name is 'ref/PR_#' in case of the PR, and 'branch_name' when executed on push save: ${{ github.ref_name == 'master' && 'true' || 'false' }} - key: ${{ github.job }}-windows + append-timestamp: true + key: ${{ github.job }}-${{ runner.os }}-common restore-keys: | - ${{ github.job }}-windows + ${{ github.job }}-${{ runner.os }}-common - name: CMake configure run: | @@ -222,7 +224,6 @@ jobs: call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_onnx_frontend_tests --gtest_print_time=1 --gtest_filter=-*IE_GPU* --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ONNXFrontend.xml - name: List installed files - if: ${{ always() }} run: | Get-ChildItem -Recurse -Directory ${{ env.INSTALL_DIR }} @@ -316,6 +317,8 @@ jobs: # For running Paddle frontend unit tests python3 -m pip install -r ${{ env.OPENVINO_REPO }}/src/frontends/paddle/tests/requirements.txt + python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt + - name: Install MO dependencies run: | python3 -m pip install -r ${{ env.OPENVINO_REPO }}/tools/mo/requirements_mxnet.txt ` @@ -377,7 +380,6 @@ jobs: # TEST_DEVICE: CPU - name: TensorFlow 1 Layer Tests - TF FE - if: ${{ always() }} shell: cmd run: | python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt @@ -389,7 +391,6 @@ jobs: TEST_DEVICE: CPU - name: TensorFlow 2 Layer Tests - TF FE - if: ${{ always() }} shell: cmd run: | python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt 
@@ -401,7 +402,6 @@ jobs: TEST_DEVICE: CPU - name: TensorFlow 1 Layer Tests - Legacy FE - if: ${{ always() }} shell: cmd run: | python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt @@ -411,7 +411,6 @@ jobs: call "${{ env.INSTALL_DIR }}\\setupvars.bat" && python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/tensorflow_tests/test_tf_Roll.py --ir_version=10 --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-tf_Roll.xml - name: TensorFlow 2 Layer Tests - Legacy FE - if: ${{ always() }} shell: cmd run: | python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt @@ -423,7 +422,6 @@ jobs: TEST_DEVICE: CPU - name: TensorFlow Lite Layer Tests - TFL FE - if: ${{ always() }} shell: cmd run: | python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt @@ -442,7 +440,6 @@ jobs: TEST_DEVICE: CPU - name: MO Python API Tests - if: ${{ always() }} shell: cmd run: | python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt @@ -454,7 +451,6 @@ jobs: TEST_DEVICE: CPU - name: Python Frontend tests - if: ${{ always() }} shell: cmd run: | python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt @@ -465,7 +461,7 @@ jobs: - name: Upload Test Results uses: actions/upload-artifact@v3 - if: ${{ always() }} + if: ${{ !cancelled() }} with: name: test-results-python path: ${{ env.INSTALL_TEST_DIR }}/TEST*.xml @@ -636,7 +632,7 @@ jobs: - name: Upload Test Results uses: actions/upload-artifact@v3 - if: ${{ always() }} + if: ${{ !cancelled() }} with: name: test-results-cpp path: ${{ env.INSTALL_TEST_DIR }}/TEST*.xml @@ -647,10 +643,12 @@ jobs: defaults: run: shell: pwsh - runs-on: windows-latest + runs-on: windows-latest-8-cores env: INSTALL_DIR: "${{ github.workspace }}\\install" INSTALL_TEST_DIR: "${{ github.workspace }}\\install\\tests" + PARALLEL_TEST_SCRIPT: "${{ github.workspace }}\\install\\tests\\functional_test_utils\\run_parallel.py" + PARALLEL_TEST_CACHE: "${{ github.workspace }}\\install\\tests\\test_cache.lst" steps: - name: Create Directories @@ -685,15 +683,43 @@ jobs: ls "${{ env.INSTALL_DIR }}" ls "${{ env.INSTALL_TEST_DIR }}" - - name: Intel CPU plugin func tests + - name: Install python dependencies + shell: cmd + run: | + python3 -m pip install --upgrade pip + python3 -m pip install -r ${{ github.workspace }}\install\tests\functional_test_utils\requirements.txt + + - name: Restore tests execution time + uses: actions/cache/restore@v3 + with: + path: ${{ env.PARALLEL_TEST_CACHE }} + key: ${{ runner.os }}-tests-functional-cpu-stamp-${{ github.sha }} + restore-keys: | + ${{ runner.os }}-tests-functional-cpu-stamp + + - name: Intel CPU plugin func tests (parallel) shell: cmd run: | - call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_cpu_func_tests --gtest_print_time=1 --gtest_filter=*smoke* --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-CPUFuncTests.xml + call "${{ env.INSTALL_DIR }}\\setupvars.bat" && python3 ${{ env.PARALLEL_TEST_SCRIPT }} -e ${{ env.INSTALL_TEST_DIR }}\ov_cpu_func_tests.exe -c ${{ env.PARALLEL_TEST_CACHE }} -w ${{ env.INSTALL_TEST_DIR }} -s suite -- --gtest_filter=*smoke*" + timeout-minutes: 45 + + - name: Save tests execution time + uses: actions/cache/save@v3 + if: github.ref_name == 'master' + with: + path: ${{ env.PARALLEL_TEST_CACHE }} + key: ${{ runner.os }}-tests-functional-cpu-stamp-${{ github.sha }} - name: Upload Test Results uses: actions/upload-artifact@v3 - if: ${{ always() }} + if: ${{ !cancelled() }} with: name: test-results-functional-cpu - 
path: ${{ env.INSTALL_TEST_DIR }}/TEST*.xml + path: | + ${{ env.INSTALL_TEST_DIR }}/temp/*.log + ${{ env.INSTALL_TEST_DIR }}/logs/failed/*.log + ${{ env.INSTALL_TEST_DIR }}/logs/crashed/*.log + ${{ env.INSTALL_TEST_DIR }}/logs/hanged/*.log + ${{ env.INSTALL_TEST_DIR }}/logs/interapted/*.log + ${{ env.INSTALL_TEST_DIR }}/logs/*.log if-no-files-found: 'error' diff --git a/cmake/developer_package/compile_flags/os_flags.cmake b/cmake/developer_package/compile_flags/os_flags.cmake index 3202def116dbe9..0ffdd903dcb46f 100644 --- a/cmake/developer_package/compile_flags/os_flags.cmake +++ b/cmake/developer_package/compile_flags/os_flags.cmake @@ -388,9 +388,12 @@ if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") ov_add_compiler_flags(/wd4275) # Enable __FILE__ trim, use path with forward and backward slash as directory separator - add_compile_options( - "$<$:/d1trimfile:${OV_NATIVE_PROJECT_ROOT_DIR}\\>" - "$<$:/d1trimfile:${OpenVINO_SOURCE_DIR}/>") + # github actions use sccache which doesn't support /d1trimfile compile option + if(NOT DEFINED ENV{GITHUB_ACTIONS}) + add_compile_options( + "$<$:/d1trimfile:${OV_NATIVE_PROJECT_ROOT_DIR}\\>" + "$<$:/d1trimfile:${OpenVINO_SOURCE_DIR}/>") + endif() # # Debug information flags, by default CMake adds /Zi option diff --git a/src/bindings/python/tests/test_graph/test_node_factory.py b/src/bindings/python/tests/test_graph/test_node_factory.py index 7be5c977bcd0b9..05c07335ea01ea 100644 --- a/src/bindings/python/tests/test_graph/test_node_factory.py +++ b/src/bindings/python/tests/test_graph/test_node_factory.py @@ -98,6 +98,7 @@ def test_node_factory_validate_missing_arguments(): @pytest.mark.template_extension() +@pytest.mark.dynamic_library() def test_extension_added_from_library(): if platform == "win32": library_path = "openvino_template_extension.dll" diff --git a/src/bindings/python/tests/test_runtime/test_core.py b/src/bindings/python/tests/test_runtime/test_core.py index f0583ca26aa03d..a864f73228106b 100644 --- a/src/bindings/python/tests/test_runtime/test_core.py +++ b/src/bindings/python/tests/test_runtime/test_core.py @@ -344,6 +344,7 @@ def test_unload_plugin(device): @pytest.mark.template_extension() +@pytest.mark.dynamic_library() def test_add_extension_template_extension(device): core, model = get_model_with_template_extension() assert isinstance(model, Model) diff --git a/src/core/tests/check.cpp b/src/core/tests/check.cpp index 6583a2c8c72da6..69a0f1342fff67 100644 --- a/src/core/tests/check.cpp +++ b/src/core/tests/check.cpp @@ -49,6 +49,10 @@ TEST(check, check_with_explanation) { } TEST(check, ov_throw_exception_check_relative_path_to_source) { + // github actions use sccache which doesn't support /d1trimfile compile option + if (std::getenv("GITHUB_ACTIONS")) { + GTEST_SKIP(); + } using namespace testing; const auto path = ov::util::path_join({"src", "core", "tests", "check.cpp"}); const auto exp_native_slash = "Exception from " + path + ":"; diff --git a/src/tests/test_utils/functional_test_utils/layer_tests_summary/run_parallel.py b/src/tests/test_utils/functional_test_utils/layer_tests_summary/run_parallel.py index 9ca56067b3b851..c50e0b5f180af9 100644 --- a/src/tests/test_utils/functional_test_utils/layer_tests_summary/run_parallel.py +++ b/src/tests/test_utils/functional_test_utils/layer_tests_summary/run_parallel.py @@ -305,7 +305,7 @@ def __get_test_list_by_runtime(self, test_unit = constants.TEST_UNIT_NAME): os.remove(test_list_file_name) except Exception as err: logger.warning(f"Imposible to remove {test_list_file_name}. 
Error: {err}") - command_to_get_test_list = self._command + f' --gtest_list_tests >> {test_list_file_name}' + command_to_get_test_list = self._command + f' --gtest_list_tests > {test_list_file_name}' logger.info(f"Get test list using command: {command_to_get_test_list}") run_res = run(command_to_get_test_list, check=True, shell=True) if run_res.stderr != "" and run_res.stderr != None: diff --git a/tools/mo/unit_tests/mo/utils/cli_parser_test.py b/tools/mo/unit_tests/mo/utils/cli_parser_test.py index b3c7c4c125d540..6a96d08729337c 100644 --- a/tools/mo/unit_tests/mo/utils/cli_parser_test.py +++ b/tools/mo/unit_tests/mo/utils/cli_parser_test.py @@ -1224,13 +1224,13 @@ def test_single_writable_dir(self): self.assertEqual(__class__.WRITABLE_DIR, writable_dir(__class__.WRITABLE_DIR)) @unittest.skipIf(sys.platform.startswith("win"), "chmod() on Windows do nor support not writable dir") - @unittest.skipIf(os.geteuid() == 0, "root user does not support not writable dir") + @unittest.skipIf(sys.platform.startswith("lin") and os.geteuid() == 0, "root user does not support not writable dir") def test_single_non_writable_dir(self): with self.assertRaises(Error) as cm: writable_dir(__class__.NOT_WRITABLE_DIR) @unittest.skipIf(sys.platform.startswith("win"), "chmod() on Windows do nor support not writable dir") - @unittest.skipIf(os.geteuid() == 0, "root user does not support not writable dir") + @unittest.skipIf(sys.platform.startswith("lin") and os.geteuid() == 0, "root user does not support not writable dir") def test_single_non_writable_sub_dir(self): with self.assertRaises(Error) as cm: writable_dir(__class__.NOT_WRITABLE_SUB_DIR) From a999e870ae91cf6473abb66b60984fd5088ba32c Mon Sep 17 00:00:00 2001 From: Maciej Smyk Date: Thu, 5 Oct 2023 15:33:36 +0200 Subject: [PATCH 079/257] Optimize Inference Restructure (#20231) --- .../dldt_deployment_optimization_common.md | 0 .../dldt_deployment_optimization_internals.md | 0 .../dldt_deployment_optimization_latency.md | 0 .../Model_caching_overview.md | 0 .../dldt_deployment_optimization_tput.md | 0 .../dldt_deployment_optimization_tput_advanced.md | 0 .../memory_optimization_guide.md | 0 .../dldt_deployment_optimization_guide}/performance_hints.md | 0 .../dldt_deployment_optimization_guide}/precision_control.md | 0 .../dldt_deployment_optimization_guide}/preprocessing_overview.md | 0 .../preprocessing_overview}/layout_overview.md | 0 .../preprocessing_overview}/preprocessing_details.md | 0 .../preprocessing_overview}/preprocessing_usecase_save.md | 0 13 files changed, 0 insertions(+), 0 deletions(-) rename docs/{optimization_guide => articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide}/dldt_deployment_optimization_common.md (100%) rename docs/{optimization_guide => articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide}/dldt_deployment_optimization_internals.md (100%) rename docs/{optimization_guide => articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide}/dldt_deployment_optimization_latency.md (100%) rename docs/{OV_Runtime_UG => articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide/dldt_deployment_optimization_latency}/Model_caching_overview.md (100%) rename docs/{optimization_guide => articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide}/dldt_deployment_optimization_tput.md (100%) rename docs/{optimization_guide => 
articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide}/dldt_deployment_optimization_tput_advanced.md (100%) rename docs/{optimization_guide => articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide}/memory_optimization_guide.md (100%) rename docs/{OV_Runtime_UG => articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide}/performance_hints.md (100%) rename docs/{OV_Runtime_UG => articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide}/precision_control.md (100%) rename docs/{OV_Runtime_UG => articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide}/preprocessing_overview.md (100%) rename docs/{OV_Runtime_UG => articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide/preprocessing_overview}/layout_overview.md (100%) rename docs/{OV_Runtime_UG => articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide/preprocessing_overview}/preprocessing_details.md (100%) rename docs/{OV_Runtime_UG => articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide/preprocessing_overview}/preprocessing_usecase_save.md (100%) diff --git a/docs/optimization_guide/dldt_deployment_optimization_common.md b/docs/articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide/dldt_deployment_optimization_common.md similarity index 100% rename from docs/optimization_guide/dldt_deployment_optimization_common.md rename to docs/articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide/dldt_deployment_optimization_common.md diff --git a/docs/optimization_guide/dldt_deployment_optimization_internals.md b/docs/articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide/dldt_deployment_optimization_internals.md similarity index 100% rename from docs/optimization_guide/dldt_deployment_optimization_internals.md rename to docs/articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide/dldt_deployment_optimization_internals.md diff --git a/docs/optimization_guide/dldt_deployment_optimization_latency.md b/docs/articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide/dldt_deployment_optimization_latency.md similarity index 100% rename from docs/optimization_guide/dldt_deployment_optimization_latency.md rename to docs/articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide/dldt_deployment_optimization_latency.md diff --git a/docs/OV_Runtime_UG/Model_caching_overview.md b/docs/articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide/dldt_deployment_optimization_latency/Model_caching_overview.md similarity index 100% rename from docs/OV_Runtime_UG/Model_caching_overview.md rename to docs/articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide/dldt_deployment_optimization_latency/Model_caching_overview.md diff --git a/docs/optimization_guide/dldt_deployment_optimization_tput.md b/docs/articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide/dldt_deployment_optimization_tput.md similarity index 100% rename from docs/optimization_guide/dldt_deployment_optimization_tput.md rename to docs/articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide/dldt_deployment_optimization_tput.md diff --git a/docs/optimization_guide/dldt_deployment_optimization_tput_advanced.md 
b/docs/articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide/dldt_deployment_optimization_tput_advanced.md similarity index 100% rename from docs/optimization_guide/dldt_deployment_optimization_tput_advanced.md rename to docs/articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide/dldt_deployment_optimization_tput_advanced.md diff --git a/docs/optimization_guide/memory_optimization_guide.md b/docs/articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide/memory_optimization_guide.md similarity index 100% rename from docs/optimization_guide/memory_optimization_guide.md rename to docs/articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide/memory_optimization_guide.md diff --git a/docs/OV_Runtime_UG/performance_hints.md b/docs/articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide/performance_hints.md similarity index 100% rename from docs/OV_Runtime_UG/performance_hints.md rename to docs/articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide/performance_hints.md diff --git a/docs/OV_Runtime_UG/precision_control.md b/docs/articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide/precision_control.md similarity index 100% rename from docs/OV_Runtime_UG/precision_control.md rename to docs/articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide/precision_control.md diff --git a/docs/OV_Runtime_UG/preprocessing_overview.md b/docs/articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide/preprocessing_overview.md similarity index 100% rename from docs/OV_Runtime_UG/preprocessing_overview.md rename to docs/articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide/preprocessing_overview.md diff --git a/docs/OV_Runtime_UG/layout_overview.md b/docs/articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide/preprocessing_overview/layout_overview.md similarity index 100% rename from docs/OV_Runtime_UG/layout_overview.md rename to docs/articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide/preprocessing_overview/layout_overview.md diff --git a/docs/OV_Runtime_UG/preprocessing_details.md b/docs/articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide/preprocessing_overview/preprocessing_details.md similarity index 100% rename from docs/OV_Runtime_UG/preprocessing_details.md rename to docs/articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide/preprocessing_overview/preprocessing_details.md diff --git a/docs/OV_Runtime_UG/preprocessing_usecase_save.md b/docs/articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide/preprocessing_overview/preprocessing_usecase_save.md similarity index 100% rename from docs/OV_Runtime_UG/preprocessing_usecase_save.md rename to docs/articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide/preprocessing_overview/preprocessing_usecase_save.md From 6664164ce4ca405f8edae339575ff9716b07ba4e Mon Sep 17 00:00:00 2001 From: Maxim Vafin Date: Thu, 5 Oct 2023 16:31:06 +0200 Subject: [PATCH 080/257] Fix results validation (#20246) * Fix results validation * Remove pt quantization * Apply suggestions from code review Co-authored-by: Roman Kazantsev * Refactor requirements * Revert "Refactor requirements" This reverts commit f78fe3d774c8aa7f5e4d5de0a520f9125991dcfa. 
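For reference, the essence of the validation fix: compare_results previously overwrote the per-output flag on every iteration, so only the last output of a model actually decided the verdict. The accumulation pattern now used is sketched below (illustrative only; compare_two_tensors here is a stand-in for the real helper from models_hub_common, and 5e-2 mirrors the fw_eps tolerance used by the test):

    import numpy as np

    def compare_two_tensors(ov_res, fw_res, eps):
        # stand-in for the real helper: element-wise closeness check
        return bool(np.allclose(ov_res, fw_res, rtol=eps, atol=eps))

    def compare_results(fw_outputs, ov_outputs, eps=5e-2):
        assert len(fw_outputs) == len(ov_outputs), \
            "Different number of outputs between framework and OpenVINO"
        is_ok = True
        for fw_res, ov_res in zip(fw_outputs, ov_outputs):
            # accumulate instead of overwrite: one failing output fails the run
            is_ok = is_ok and compare_two_tensors(ov_res, fw_res, eps)
        return is_ok

The dict-by-name branch in the real test follows the same accumulation rule; this sketch keeps only the positional case for brevity.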
--------- Co-authored-by: Roman Kazantsev Co-authored-by: Ilya Lavrenov --- .../model_hub_tests/models_hub_common/test_convert_model.py | 6 +++--- tests/model_hub_tests/torch_tests/requirements.txt | 1 - 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/tests/model_hub_tests/models_hub_common/test_convert_model.py b/tests/model_hub_tests/models_hub_common/test_convert_model.py index 56a402bced99fc..68e58a0658defc 100644 --- a/tests/model_hub_tests/models_hub_common/test_convert_model.py +++ b/tests/model_hub_tests/models_hub_common/test_convert_model.py @@ -61,7 +61,7 @@ def infer_ov_model(self, ov_model, inputs, ie_device): def compare_results(self, fw_outputs, ov_outputs): assert len(fw_outputs) == len(ov_outputs), \ - "Different number of outputs between TensorFlow and OpenVINO:" \ + "Different number of outputs between framework and OpenVINO:" \ " {} vs. {}".format(len(fw_outputs), len(ov_outputs)) fw_eps = 5e-2 @@ -73,13 +73,13 @@ def compare_results(self, fw_outputs, ov_outputs): "OpenVINO outputs does not contain tensor with name {}".format(out_name) cur_ov_res = ov_outputs[out_name] print(f"fw_re: {cur_fw_res};\n ov_res: {cur_ov_res}") - is_ok = compare_two_tensors(cur_ov_res, cur_fw_res, fw_eps) + is_ok = is_ok and compare_two_tensors(cur_ov_res, cur_fw_res, fw_eps) else: for i in range(len(ov_outputs)): cur_fw_res = fw_outputs[i] cur_ov_res = ov_outputs[i] print(f"fw_res: {cur_fw_res};\n ov_res: {cur_ov_res}") - is_ok = compare_two_tensors(cur_ov_res, cur_fw_res, fw_eps) + is_ok = is_ok and compare_two_tensors(cur_ov_res, cur_fw_res, fw_eps) assert is_ok, "Accuracy validation failed" def teardown_method(self): diff --git a/tests/model_hub_tests/torch_tests/requirements.txt b/tests/model_hub_tests/torch_tests/requirements.txt index f0a013faadbcc3..406607b69d2d89 100644 --- a/tests/model_hub_tests/torch_tests/requirements.txt +++ b/tests/model_hub_tests/torch_tests/requirements.txt @@ -11,7 +11,6 @@ sacremoses sentencepiece datasets pyctcdecode -pytorch-quantization<=2.1.3 --extra-index-url https://pypi.ngc.nvidia.com protobuf soundfile pandas From ba166b9fe436faf3840a6dae37049199215e2b4a Mon Sep 17 00:00:00 2001 From: Sebastian Golebiewski Date: Thu, 5 Oct 2023 18:05:15 +0200 Subject: [PATCH 081/257] [DOCS] Direct Github link to a specific notebook (#20248) --- docs/nbdoc/consts.py | 10 ++++++---- docs/nbdoc/nbdoc.py | 2 ++ 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/docs/nbdoc/consts.py b/docs/nbdoc/consts.py index 7e2ea87907f937..32478632b2c65a 100644 --- a/docs/nbdoc/consts.py +++ b/docs/nbdoc/consts.py @@ -6,6 +6,8 @@ repo_name = "openvino_notebooks" +repo_branch = "tree/main" + artifacts_link = "http://repository.toolbox.iotg.sclab.intel.com/projects/ov-notebook/0.1.0-latest/20230815220807/dist/rst_files/" blacklisted_extensions = ['.xml', '.bin'] @@ -29,7 +31,7 @@ .. |github_link| raw:: html - Github + Github \n """ @@ -50,7 +52,7 @@ .. |github_link| raw:: html - Github + Github \n """ @@ -75,7 +77,7 @@ .. |github_link| raw:: html - Github + Github \n """ @@ -91,7 +93,7 @@ .. 
|github_link| raw:: html - Github + Github \n """ diff --git a/docs/nbdoc/nbdoc.py b/docs/nbdoc/nbdoc.py index 2f7b0ac9aa3bad..bf6c6040b809ab 100644 --- a/docs/nbdoc/nbdoc.py +++ b/docs/nbdoc/nbdoc.py @@ -16,6 +16,7 @@ no_binder_template, repo_directory, repo_name, + repo_branch, repo_owner, ) from notebook import Notebook @@ -79,6 +80,7 @@ def __init__(self, nb_path: str = notebooks_path): "owner": repo_owner, "repo": repo_name, "folder": repo_directory, + "branch": repo_branch, } self.colab_data = { "owner": repo_owner, From 55ced6bf91525078e1a2e67bd3203a59113599ba Mon Sep 17 00:00:00 2001 From: Karol Blaszczak Date: Thu, 5 Oct 2023 18:08:43 +0200 Subject: [PATCH 082/257] [DOCS] prerelease notes update (#20274) --- .../about_openvino/prerelease_information.md | 71 ++++++++++++++++++- 1 file changed, 70 insertions(+), 1 deletion(-) diff --git a/docs/articles_en/about_openvino/prerelease_information.md b/docs/articles_en/about_openvino/prerelease_information.md index 42f85cb6e6df62..698d682e14c5b6 100644 --- a/docs/articles_en/about_openvino/prerelease_information.md +++ b/docs/articles_en/about_openvino/prerelease_information.md @@ -32,11 +32,80 @@ Please file a github Issue on these with the label “pre-release” so we can g -.. dropdown:: OpenVINO Toolkit 2023.1.0.dev20230811 + + +.. dropdown:: OpenVINO Toolkit 2023.2 Dev 22.09.2023 :animate: fade-in-slide-down :color: primary :open: + **What's Changed:** + + * CPU runtime: + + * Optimized Yolov8n and YoloV8s models on BF16/FP32. + * Optimized Falcon model on 4th Generation Intel® Xeon® Scalable Processors. + + * GPU runtime: + + * int8 weight compression further improves LLM performance. PR #19548 + * Optimization for gemm & fc in iGPU. PR #19780 + + * TensorFlow FE: + + * Added support for Selu operation. PR #19528 + * Added support for XlaConvV2 operation. PR #19466 + * Added support for TensorListLength and TensorListResize operations. PR #19390 + + * PyTorch FE: + + * New operations supported + + * aten::minimum aten::maximum. PR #19996 + * aten::broadcast_tensors. PR #19994 + * added support aten::logical_and, aten::logical_or, aten::logical_not, aten::logical_xor. PR #19981 + * aten::scatter_reduce and extend aten::scatter. PR #19980 + * prim::TupleIndex operation. PR #19978 + * mixed precision in aten::min/max. PR #19936 + * aten::tile op PR #19645 + * aten::one_hot PR #19779 + * PReLU. PR #19515 + * aten::swapaxes. PR #19483 + * non-boolean inputs for __or__ and __and__ operations. PR #19268 + + * Torchvision NMS can accept negative scores. 
PR #19826 + * New openvino_notebooks: + + * Visual Question Answering and Image Captioning using BLIP + + **Fixed GitHub issues** + + * Fixed #19784 “[Bug]: Cannot install libprotobuf-dev along with libopenvino-2023.0.2 on Ubuntu 22.04” with PR #19788 + * Fixed #19617 “Add a clear error message when creating an empty Constant” with PR #19674 + * Fixed #19616 “Align openvino.compile_model and openvino.Core.compile_model functions” with PR #19778 + * Fixed #19469 “[Feature Request]: Add SeLu activation in the OpenVino IR (TensorFlow Conversion)” with PR #19528 + * Fixed #19019 “[Bug]: Low performance of the TF quantized model.” With PR #19735 + * Fixed #19018 “[Feature Request]: Support aarch64 python wheel for Linux” with PR #19594 + * Fixed #18831 “Question: openvino support for Nvidia Jetson Xavier ?” with PR #19594 + * Fixed #18786 “OpenVINO Wheel does not install Debug libraries when CMAKE_BUILD_TYPE is Debug #18786” with PR #19197 + * Fixed #18731 “[Bug] Wrong output shapes of MaxPool” with PR #18965 + * Fixed #18091 “[Bug] 2023.0 Version crashes on Jetson Nano - L4T - Ubuntu 18.04” with PR #19717 + * Fixed #7194 “Conan for simplifying dependency management” with PR #17580 + + + **Acknowledgements:** + + Thanks for contributions from the OpenVINO developer community: + + * @siddhant-0707, + * @PRATHAM-SPS, + * @okhovan + + +.. dropdown:: OpenVINO Toolkit 2023.1.0.dev20230728 + :animate: fade-in-slide-down + :color: secondary + `Check on GitHub `__ **New features:** From 471295420f47859905664a9afab27c8382813c63 Mon Sep 17 00:00:00 2001 From: Irina Efode Date: Fri, 6 Oct 2023 00:28:47 +0400 Subject: [PATCH 083/257] [CONFORMANCE][SUBGRAPHS DUMPER] Fix warning threated as error (#20279) * [CONFORMANCE][SUBGRAPHS DUMPER] Fix warning threated as error * apply comments --- .../src/matchers/subgraph/manager.cpp | 19 +++++++++++-------- .../subgraphs_dumper/src/utils/model.cpp | 9 +++------ 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/src/matchers/subgraph/manager.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/src/matchers/subgraph/manager.cpp index 18575c7ec40f52..5c0c28cbcdcb0a 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/src/matchers/subgraph/manager.cpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/src/matchers/subgraph/manager.cpp @@ -11,11 +11,12 @@ using namespace ov::tools::subgraph_dumper; bool ExtractorsManager::match(const std::shared_ptr &model, const std::shared_ptr &ref) { - for (const auto &it : m_extractors) { - if (it.second->match(model, ref)) { + // `match` is not virtual method in base `SubgraphExtractor` class + // we can use function from any `extractor` to avoid of cycle + if (!m_extractors.empty()) { + if (m_extractors.begin()->second->match(model, ref)) { return true; } - return false; } return false; } @@ -25,8 +26,10 @@ ExtractorsManager::is_subgraph(const std::shared_ptr &model, const std::shared_ptr &ref_model, const std::map &in_info, const std::map &in_info_ref) { - for (const auto &it : m_extractors) { - auto extractor_res = it.second->is_subgraph(model, ref_model); + if (!m_extractors.empty()) { + // `is_subgraph` is not virtual method in base `SubgraphExtractor` class + // we can use function from any `extractor` to avoid of cycle + auto extractor_res = m_extractors.begin()->second->is_subgraph(model, ref_model); if (std::get<0>(extractor_res)) { std::map graph_in_info, subgraph_in_info; if (std::get<1>(extractor_res) == model && 
std::get<2>(extractor_res) == ref_model) { @@ -40,13 +43,13 @@ ExtractorsManager::is_subgraph(const std::shared_ptr &model, } try { subgraph_in_info = align_input_info(std::get<2>(extractor_res), std::get<1>(extractor_res), subgraph_in_info, graph_in_info); - } catch(...) { + } catch(std::exception) { return { false, nullptr, nullptr, {}, {} }; } return { true, std::get<1>(extractor_res), std::get<2>(extractor_res), graph_in_info, subgraph_in_info }; } - return { false, nullptr, nullptr, {}, {} }; } + return { false, nullptr, nullptr, {}, {} }; } bool ExtractorsManager::match(const std::shared_ptr &model, @@ -57,7 +60,7 @@ bool ExtractorsManager::match(const std::shared_ptr &model, try { in_info = align_input_info(model, ref, in_info, in_info_ref); return true; - } catch (...) { + } catch (std::exception) { return false; } } diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/src/utils/model.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/src/utils/model.cpp index 25d7f4e520a683..6717961ea1b837 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/src/utils/model.cpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/src/utils/model.cpp @@ -52,8 +52,7 @@ find_models(const std::vector &dirs, const std::string& regexp) { } else { continue; } - } catch (...) { - // } catch (std::exception& e) { + } catch (std::exception) { not_read_model.emplace_back(model_file); // std::cout << "[ ERROR ] Impossible to read model: " << model_file << std::endl << "Exception: " << e.what(); } @@ -101,13 +100,11 @@ std::map> cache_models( cache_status[ModelCacheStatus::LARGE_MODELS_INCLUDED].push_back(model); } cache->update_cache(function, model, extract_body, from_cache); - } catch (...) { - // } catch (std::exception &e) { + } catch (std::exception) { // std::cout << "[ ERROR ] Model processing failed with exception:" << std::endl << e.what() << std::endl; model_status = ModelCacheStatus::NOT_FULLY_CACHED; } - } catch (...) 
{ - // } catch (std::exception &e) { + } catch (std::exception) { model_status = ModelCacheStatus::NOT_READ; // std::cout << "[ ERROR ] Model reading failed with exception:" << std::endl << e.what() << std::endl; } From d6c2a10b383dc63563896486906e127f60b5a194 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Fri, 6 Oct 2023 00:30:11 +0400 Subject: [PATCH 084/257] Merge Linux CC + static build + clang compiler (#20243) * Merge Linux CC + static build + clang compiler * Improvements * Fixes --- .github/workflows/fedora.yml | 3 - .github/workflows/linux.yml | 33 +-- .../linux_conditional_compilation.yml | 265 ++++++++++++++++-- .github/workflows/linux_cuda.yml | 2 +- CMakeLists.txt | 1 - .../IEDevScriptsConfig.cmake | 6 +- cmake/developer_package/add_ie_target.cmake | 29 +- cmake/features.cmake | 5 + docs/requirements.txt | 2 +- .../tests/CMakeLists.txt | 2 +- .../tests/CMakeLists.txt | 2 +- src/common/snippets/tests/CMakeLists.txt | 4 +- .../transformations/tests/CMakeLists.txt | 2 +- src/core/tests/CMakeLists.txt | 4 +- src/frontends/ir/tests/CMakeLists.txt | 3 +- src/frontends/onnx/tests/CMakeLists.txt | 4 +- src/frontends/paddle/tests/CMakeLists.txt | 5 +- src/frontends/tensorflow/tests/CMakeLists.txt | 3 +- .../tensorflow_common/tests/CMakeLists.txt | 3 +- .../tensorflow_lite/tests/CMakeLists.txt | 3 +- src/inference/tests/functional/CMakeLists.txt | 2 +- src/inference/tests/unit/CMakeLists.txt | 2 +- src/plugins/auto/tests/unit/CMakeLists.txt | 4 +- .../tests/functional/CMakeLists.txt | 4 +- .../auto_batch/tests/unit/CMakeLists.txt | 6 +- .../hetero/tests/functional/CMakeLists.txt | 4 +- src/plugins/hetero/tests/unit/CMakeLists.txt | 2 +- .../intel_cpu/tests/functional/CMakeLists.txt | 4 +- .../skip_tests_config.cpp | 2 + .../tests/functional/specific_tests.cmake | 5 +- .../tests/functional/target_per_test.cmake | 10 +- .../intel_cpu/tests/unit/CMakeLists.txt | 4 +- .../intel_gna/legacy/tests/CMakeLists.txt | 2 +- .../tests/deprecated/unit/CMakeLists.txt | 2 +- .../intel_gna/tests/functional/CMakeLists.txt | 4 +- .../intel_gna/tests/unit/CMakeLists.txt | 4 +- .../intel_gpu/tests/functional/CMakeLists.txt | 4 +- src/plugins/proxy/tests/CMakeLists.txt | 2 +- .../template/tests/functional/CMakeLists.txt | 2 +- .../subgraphs_dumper/CMakeLists.txt | 5 +- .../subgraphs_dumper/tests/CMakeLists.txt | 10 +- .../api_conformance_runner/CMakeLists.txt | 4 +- .../conformance_infra/CMakeLists.txt | 2 +- .../op_conformance_runner/CMakeLists.txt | 4 +- .../functional/plugin/shared/CMakeLists.txt | 2 +- .../shared_test_classes/CMakeLists.txt | 2 +- .../ov_helpers/ov_lpt_models/CMakeLists.txt | 2 +- src/tests/ov_helpers/ov_models/CMakeLists.txt | 2 +- .../ov_snippets_models/CMakeLists.txt | 3 +- .../common_test_utils/CMakeLists.txt | 2 +- .../common_test_utils/tests/CMakeLists.txt | 16 +- .../functional_test_utils/CMakeLists.txt | 2 +- .../test_utils/unit_test_utils/CMakeLists.txt | 2 +- 53 files changed, 360 insertions(+), 148 deletions(-) diff --git a/.github/workflows/fedora.yml b/.github/workflows/fedora.yml index 9602180ab66c4e..a554dfa98b462b 100644 --- a/.github/workflows/fedora.yml +++ b/.github/workflows/fedora.yml @@ -178,9 +178,6 @@ jobs: RPM_PACKAGES_DIR: /__w/openvino/packages/ steps: - - name: Create Directories - run: mkdir -p ${RPM_PACKAGES_DIR} - - name: Download OpenVINO RPM packages uses: actions/download-artifact@v3 with: diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 48d2f75d150d7f..dd10243d8d2a35 100644 --- a/.github/workflows/linux.yml +++ 
b/.github/workflows/linux.yml @@ -90,6 +90,7 @@ jobs: - uses: actions/setup-python@v4 with: python-version: ${{ env.PYTHON_VERSION }} + - name: Setup pip cache dir run: | PIP_VER=$(python3 -c "import pip; print(pip.__version__)") @@ -232,9 +233,6 @@ jobs: DEBIAN_PACKAGES_DIR: /__w/openvino/packages/ steps: - - name: Create Directories - run: mkdir -p ${DEBIAN_PACKAGES_DIR} - - name: Download OpenVINO debian packages uses: actions/download-artifact@v3 with: @@ -290,13 +288,6 @@ jobs: BUILD_DIR: /__w/openvino/openvino/build steps: - - name: Create Directories - run: mkdir -p ${INSTALL_DIR} ${INSTALL_TEST_DIR} - - # - # Initialize OpenVINO - # - - name: Download OpenVINO package uses: actions/download-artifact@v3 with: @@ -324,6 +315,7 @@ jobs: - uses: actions/setup-python@v4 with: python-version: ${{ env.PYTHON_VERSION }} + - name: Setup pip cache dir run: | PIP_VER=$(python3 -c "import pip; print(pip.__version__)") @@ -615,13 +607,6 @@ jobs: INSTALL_TEST_DIR: /__w/openvino/openvino/install/tests steps: - - name: Create Directories - run: mkdir -p ${INSTALL_DIR} ${INSTALL_TEST_DIR} - - # - # Initialize OpenVINO - # - - name: Download OpenVINO package uses: actions/download-artifact@v3 with: @@ -812,9 +797,6 @@ jobs: LAYER_TESTS_INSTALL_DIR: /__w/openvino/openvino/install/tests/layer_tests steps: - - name: Create Directories - run: mkdir -p ${INSTALL_DIR} ${INSTALL_TEST_DIR} - - name: Install git run: | apt update @@ -832,6 +814,7 @@ jobs: - uses: actions/setup-python@v4 with: python-version: ${{ env.PYTHON_VERSION }} + - name: Setup pip cache dir run: | PIP_VER=$(python3 -c "import pip; print(pip.__version__)") @@ -1033,9 +1016,6 @@ jobs: PARALLEL_TEST_CACHE: /__w/openvino/openvino/install/tests/test_cache.lst steps: - - name: Create Directories - run: mkdir -p ${INSTALL_DIR} ${INSTALL_TEST_DIR} - - name: Download OpenVINO package uses: actions/download-artifact@v3 with: @@ -1121,15 +1101,13 @@ jobs: MODEL_HUB_TESTS_INSTALL_DIR: ${{ github.workspace }}/install/tests/model_hub_tests steps: - - name: Create Directories - run: mkdir -p ${INSTALL_DIR} ${INSTALL_TEST_DIR} - - name: Install 'actions/setup-python@v4' dependencies run: apt-get update && apt-get install -y libssl1.1 ca-certificates - uses: actions/setup-python@v4 with: python-version: ${{ env.PYTHON_VERSION }} + - name: Setup pip cache dir run: | PIP_VER=$(python3 -c "import pip; print(pip.__version__)") @@ -1209,9 +1187,6 @@ jobs: # install 'g++' to build 'detectron2' and 'natten' wheels sudo apt-get install --assume-yes --no-install-recommends g++ git ca-certificates - - name: Create Directories - run: mkdir -p ${INSTALL_DIR} ${INSTALL_TEST_DIR} - - name: Download OpenVINO package uses: actions/download-artifact@v3 with: diff --git a/.github/workflows/linux_conditional_compilation.yml b/.github/workflows/linux_conditional_compilation.yml index f0c8a4f7a17960..dc63843d5ce452 100644 --- a/.github/workflows/linux_conditional_compilation.yml +++ b/.github/workflows/linux_conditional_compilation.yml @@ -1,4 +1,4 @@ -name: Linux Conditional Compilation (Ubuntu 22.04, Python 3.11) +name: Linux Static CC (Ubuntu 22.04, Python 3.11, Clang) on: workflow_dispatch: pull_request: @@ -25,6 +25,10 @@ concurrency: group: ${{ github.event_name == 'push' && github.run_id || github.ref }}-linux-cc cancel-in-progress: true +env: + PIP_CACHE_PATH: /mount/caches/pip/linux + PYTHON_VERSION: '3.11' + jobs: Build: defaults: @@ -43,13 +47,15 @@ jobs: CMAKE_C_COMPILER_LAUNCHER: ccache GITHUB_WORKSPACE: '/__w/openvino/openvino' OPENVINO_REPO: 
/__w/openvino/openvino/openvino + INSTALL_DIR: /__w/openvino/openvino/openvino_install BUILD_DIR: /__w/openvino/openvino/openvino_build + SELECTIVE_BUILD_STAT_DIR: /__w/openvino/openvino/selective_build_stat MODELS_PATH: /__w/openvino/openvino/testdata - CCACHE_DIR: /mount/caches/ccache/ubuntu22_x86_64_cc_Release + CCACHE_DIR: /mount/caches/ccache/ubuntu22_x86_64_itt_clang_Release CCACHE_TEMPDIR: /__w/openvino/openvino/ccache_temp - CCACHE_MAXSIZE: 50G - steps: + CCACHE_MAXSIZE: 20G + steps: - name: Install git run: | apt-get update @@ -76,44 +82,265 @@ jobs: run: | bash ${OPENVINO_REPO}/install_build_dependencies.sh + # use clang as a default compiler + apt --assume-yes install clang + update-alternatives --install /usr/bin/cc cc /usr/bin/clang 100 + update-alternatives --install /usr/bin/c++ c++ /usr/bin/clang++ 100 + + - uses: actions/setup-python@v4 + with: + python-version: ${{ env.PYTHON_VERSION }} + + - name: Setup pip cache dir + run: | + PIP_VER=$(python3 -c "import pip; print(pip.__version__)") + echo "Using pip version: ${PIP_VER}" + echo "PIP_CACHE_DIR=${PIP_CACHE_PATH}/${PIP_VER}" >> $GITHUB_ENV + + - name: Install python dependencies + run: | + # For running ONNX frontend unit tests + python3 -m pip install --force-reinstall -r ${OPENVINO_REPO}/src/frontends/onnx/tests/requirements.txt + + # For running TensorFlow frontend unit tests + python3 -m pip install -r ${OPENVINO_REPO}/src/frontends/tensorflow/tests/requirements.txt + + # For running TensorFlow Lite frontend unit tests + python3 -m pip install -r ${OPENVINO_REPO}/src/frontends/tensorflow_lite/tests/requirements.txt + + # For running Paddle frontend unit tests + python3 -m pip install -r ${OPENVINO_REPO}/src/frontends/paddle/tests/requirements.txt + # # Build # - - name: CMake configure CC COLLECT + - name: CMake configure - CC COLLECT run: | cmake \ -G "${{ env.CMAKE_GENERATOR }}" \ + -DBUILD_SHARED_LIBS=OFF \ + -DENABLE_TESTS=ON \ -DENABLE_CPPLINT=OFF \ - -DENABLE_GAPI_PREPROCESSING=OFF \ - -DCMAKE_VERBOSE_MAKEFILE=ON \ + -DENABLE_NCC_STYLE=OFF \ + -DENABLE_INTEL_GNA=OFF \ -DCMAKE_COMPILE_WARNING_AS_ERROR=OFF \ - -DENABLE_FASTER_BUILD=ON \ -DENABLE_PROFILING_ITT=ON \ -DSELECTIVE_BUILD=COLLECT \ + -DCMAKE_C_COMPILER_LAUNCHER=${{ env.CMAKE_C_COMPILER_LAUNCHER }} \ + -DCMAKE_CXX_COMPILER_LAUNCHER=${{ env.CMAKE_CXX_COMPILER_LAUNCHER }} \ -S ${OPENVINO_REPO} \ -B ${BUILD_DIR} - - name: Build CC COLLECT - run: cmake --build ${BUILD_DIR} --parallel --config ${{ env.CMAKE_BUILD_TYPE }} --target openvino_intel_cpu_plugin openvino_ir_frontend benchmark_app sea_itt_lib + - name: Cmake build - CC COLLECT + run: | + cmake --build ${BUILD_DIR} --parallel 8 --config ${{ env.CMAKE_BUILD_TYPE }} + cmake --build ${BUILD_DIR} --parallel --config ${{ env.CMAKE_BUILD_TYPE }} --target sea_itt_lib - name: Show ccache stats run: ccache --show-stats - - name: Code usage analysis + - name: Cmake install - OpenVINO + run: cmake -DCMAKE_INSTALL_PREFIX=${INSTALL_DIR} -P ${BUILD_DIR}/cmake_install.cmake + + - name: Build C++ samples - OpenVINO build tree + run: | + cmake -G "${{ env.CMAKE_GENERATOR }}" -DOpenVINO_DIR=${BUILD_DIR} -S ${INSTALL_DIR}/samples/cpp -B ${BUILD_DIR}/cpp_samples + cmake --build ${BUILD_DIR}/cpp_samples --parallel --config ${{ env.CMAKE_BUILD_TYPE }} --target hello_query_device + + - name: Build C samples - OpenVINO install tree + run: ${INSTALL_DIR}/samples/c/build_samples.sh -i ${INSTALL_DIR} -b ${BUILD_DIR}/c_samples + + - name: Ctest - OpenVINO unit tests + run: ctest -C ${{ env.CMAKE_BUILD_TYPE }} --test-dir 
${BUILD_DIR} -V -L UNIT + + - name: Perform code tracing via ITT collector run: | python3 ${OPENVINO_REPO}/thirdparty/itt_collector/runtool/sea_runtool.py \ - --bindir ${OPENVINO_REPO}/bin/intel64/Release -o ${BUILD_DIR}/itt_stat ! \ + --bindir ${OPENVINO_REPO}/bin/intel64/Release -o ${SELECTIVE_BUILD_STAT_DIR}/itt_stat ! \ ${OPENVINO_REPO}/bin/intel64/Release/benchmark_app -niter 1 -nireq 1 \ -m ${MODELS_PATH}/models/test_model/test_model_fp32.xml -d CPU - - name: CMake configure with CC ON - run: cmake -DSELECTIVE_BUILD=ON -DSELECTIVE_BUILD_STAT=${BUILD_DIR}/*.csv -S ${OPENVINO_REPO} -B ${BUILD_DIR} + - name: Pack Artifacts + run: | + pushd ${SELECTIVE_BUILD_STAT_DIR} + tar -czvf ${BUILD_DIR}/openvino_selective_build_stat.tar.gz * + popd + + pushd ${OPENVINO_REPO} + tar -czvf ${BUILD_DIR}/openvino_tests.tar.gz \ + bin/intel64/Release/ov_cpu_func_tests \ + src/tests/test_utils/functional_test_utils/layer_tests_summary/* \ + scripts/install_dependencies/* + popd + + - name: Upload selective build statistics package + if: ${{ always() }} + uses: actions/upload-artifact@v3 + with: + name: openvino_selective_build_stat + path: ${{ env.BUILD_DIR }}/openvino_selective_build_stat.tar.gz + if-no-files-found: 'error' + + - name: Upload OpenVINO tests package + if: ${{ always() }} + uses: actions/upload-artifact@v3 + with: + name: openvino_tests + path: ${{ env.BUILD_DIR }}/openvino_tests.tar.gz + if-no-files-found: 'error' + + CC_Build: + name: Conditional Compilation + needs: Build + defaults: + run: + shell: bash + runs-on: aks-linux-16-cores + container: + image: openvinogithubactions.azurecr.io/dockerhub/ubuntu:22.04 + volumes: + - /mount/caches:/mount/caches + env: + DEBIAN_FRONTEND: noninteractive # to prevent apt-get from waiting user input + CMAKE_CXX_COMPILER_LAUNCHER: ccache + CMAKE_C_COMPILER_LAUNCHER: ccache + GITHUB_WORKSPACE: '/__w/openvino/openvino' + OPENVINO_REPO: /__w/openvino/openvino/openvino + BUILD_DIR: /__w/openvino/openvino/openvino_build + SELECTIVE_BUILD_STAT_DIR: /__w/openvino/openvino/selective_build_stat + MODELS_PATH: /__w/openvino/openvino/testdata + CCACHE_DIR: /mount/caches/ccache/ubuntu22_x86_64_cc_Release + CCACHE_TEMPDIR: /__w/openvino/openvino/ccache_temp + CCACHE_MAXSIZE: 20G + + steps: + - name: Install git + run: apt-get update && apt-get install --assume-yes --no-install-recommends git ca-certificates git-lfs + + - name: Clone OpenVINO + uses: actions/checkout@v4 + with: + path: ${{ env.OPENVINO_REPO }} + submodules: 'true' + + - name: Clone test models + uses: actions/checkout@v4 + with: + repository: 'openvinotoolkit/testdata' + path: ${{ env.MODELS_PATH }} + lfs: 'true' + + - name: Download selective build statistics package + uses: actions/download-artifact@v3 + with: + name: openvino_selective_build_stat + path: ${{ env.SELECTIVE_BUILD_STAT_DIR }} + + - name: Extract selective build statistics package + run: tar -xvzf ${SELECTIVE_BUILD_STAT_DIR}/openvino_selective_build_stat.tar.gz -C ${SELECTIVE_BUILD_STAT_DIR} - - name: Build with CC ON - run: cmake --build ${BUILD_DIR} --parallel --config ${{ env.CMAKE_BUILD_TYPE }} --target openvino_intel_cpu_plugin openvino_ir_frontend + # + # Dependencies + # - - name: Use OpenVINO after CC + - name: Install build dependencies + run: bash ${OPENVINO_REPO}/install_build_dependencies.sh + + # + # Build + # + + - name: CMake configure - CC ON run: | - ${OPENVINO_REPO}/bin/intel64/Release/benchmark_app -niter 1 -nireq 1 \ - -m ${MODELS_PATH}/models/test_model/test_model_fp32.xml -d CPU + cmake \ + 
-DBUILD_SHARED_LIBS=OFF \ + -DENABLE_CPPLINT=OFF \ + -DSELECTIVE_BUILD=ON \ + -DENABLE_LTO=OFF \ + -DENABLE_TEMPLATE=OFF \ + -DENABLE_INTEL_GPU=OFF \ + -DENABLE_INTEL_GNA=OFF \ + -DENABLE_OV_TF_FRONTEND=OFF \ + -DENABLE_OV_TF_LITE_FRONTEND=OFF \ + -DENABLE_OV_PADDLE_FRONTEND=OFF \ + -DENABLE_OV_PYTORCH_FRONTEND=OFF \ + -DENABLE_OV_ONNX_FRONTEND=OFF \ + -DSELECTIVE_BUILD_STAT=${SELECTIVE_BUILD_STAT_DIR}/*.csv \ + -DCMAKE_C_COMPILER_LAUNCHER=${{ env.CMAKE_C_COMPILER_LAUNCHER }} \ + -DCMAKE_CXX_COMPILER_LAUNCHER=${{ env.CMAKE_CXX_COMPILER_LAUNCHER }} \ + -S ${OPENVINO_REPO} \ + -B ${BUILD_DIR} + + - name: Cmake build - CC ON + run: cmake --build ${BUILD_DIR} --parallel 8 --target benchmark_app + + - name: Show ccache stats + run: ccache --show-stats + + - name: Run with CC-ed runtime + run: ${OPENVINO_REPO}/bin/intel64/Release/benchmark_app -niter 1 -nireq 1 -m ${MODELS_PATH}/models/test_model/test_model_fp32.xml -d CPU + + CPU_Functional_Tests: + name: CPU functional tests + needs: Build + defaults: + run: + shell: bash + runs-on: aks-linux-8-cores + container: + image: openvinogithubactions.azurecr.io/dockerhub/ubuntu:22.04 + env: + DEBIAN_FRONTEND: noninteractive # to prevent apt-get from waiting user input + INSTALL_TEST_DIR: /__w/openvino/openvino/install/tests + PARALLEL_TEST_SCRIPT: /__w/openvino/openvino/install/tests/src/tests/test_utils/functional_test_utils/layer_tests_summary/run_parallel.py + PARALLEL_TEST_CACHE: /__w/openvino/openvino/install/tests/test_cache.lst + + steps: + - name: Download OpenVINO tests package + uses: actions/download-artifact@v3 + with: + name: openvino_tests + path: ${{ env.INSTALL_TEST_DIR }} + + - name: Extract OpenVINO packages + run: tar -xvzf ${INSTALL_TEST_DIR}/openvino_tests.tar.gz -C ${INSTALL_TEST_DIR} + + - name: Install OpenVINO dependencies + run: bash ${INSTALL_TEST_DIR}/scripts/install_dependencies/install_openvino_dependencies.sh -c=core -c=gpu -y + + - name: Install 'actions/setup-python@v4' dependencies + run: apt-get install -y libssl3 + + - uses: actions/setup-python@v4 + with: + python-version: ${{ env.PYTHON_VERSION }} + + - name: Install python dependencies for run_parallel.py + run: python3 -m pip install -r ${INSTALL_TEST_DIR}/src/tests/test_utils/functional_test_utils/layer_tests_summary/requirements.txt + + - name: Restore tests execution time + uses: actions/cache/restore@v3 + with: + path: ${{ env.PARALLEL_TEST_CACHE }} + key: ${{ runner.os }}-tests-functional-cpu-stamp-${{ github.sha }} + restore-keys: | + ${{ runner.os }}-tests-functional-cpu-stamp + + - name: Intel CPU plugin func tests (parallel) + run: python3 ${PARALLEL_TEST_SCRIPT} -e ${INSTALL_TEST_DIR}/bin/intel64/Release/ov_cpu_func_tests -c ${PARALLEL_TEST_CACHE} -w ${INSTALL_TEST_DIR} -s suite -rf 0 -- --gtest_print_time=1 --gtest_filter=*smoke* + timeout-minutes: 40 + + - name: Upload Test Results + uses: actions/upload-artifact@v3 + if: ${{ !cancelled() }} + with: + name: test-results-functional-cpu + path: | + ${{ env.INSTALL_TEST_DIR }}/TEST*.xml + ${{ env.INSTALL_TEST_DIR }}/logs/failed/*.log + ${{ env.INSTALL_TEST_DIR }}/logs/crashed/*.log + ${{ env.INSTALL_TEST_DIR }}/logs/hanged/*.log + ${{ env.INSTALL_TEST_DIR }}/logs/interapted/*.log + ${{ env.INSTALL_TEST_DIR }}/logs/disabled_tests.log + if-no-files-found: 'error' diff --git a/.github/workflows/linux_cuda.yml b/.github/workflows/linux_cuda.yml index 517b34fb573f5b..7fd90dac00dcf4 100644 --- a/.github/workflows/linux_cuda.yml +++ b/.github/workflows/linux_cuda.yml @@ -115,7 +115,7 @@ jobs: 
-DENABLE_INTEL_GPU=OFF \ -DENABLE_INTEL_GNA=OFF \ -DENABLE_OV_TF_FRONTEND=OFF \ - -DENABLE_OV_TF_LITE=OFF \ + -DENABLE_OV_TF_LITE_FRONTEND=OFF \ -DENABLE_OV_PADDLE_FRONTEND=OFF \ -DENABLE_OV_PYTORCH_FRONTEND=OFF \ -DENABLE_OV_ONNX_FRONTEND=OFF \ diff --git a/CMakeLists.txt b/CMakeLists.txt index e3630486535dda..b0aceaa39db057 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -35,7 +35,6 @@ find_package(IEDevScripts REQUIRED NO_CMAKE_FIND_ROOT_PATH NO_DEFAULT_PATH) -include(CTest) include(cmake/features.cmake) # These options are shared with 3rdparty plugins by means of developer package diff --git a/cmake/developer_package/IEDevScriptsConfig.cmake b/cmake/developer_package/IEDevScriptsConfig.cmake index 997f85b3a407c2..95e3c9eea3629f 100644 --- a/cmake/developer_package/IEDevScriptsConfig.cmake +++ b/cmake/developer_package/IEDevScriptsConfig.cmake @@ -118,7 +118,11 @@ if(CMAKE_GENERATOR STREQUAL "Ninja Multi-Config") # https://cmake.org/cmake/help/latest/variable/CMAKE_DEFAULT_BUILD_TYPE.html set(CMAKE_DEFAULT_BUILD_TYPE "Release" CACHE STRING "CMake default build type") elseif(NOT OV_GENERATOR_MULTI_CONFIG) - set(CMAKE_BUILD_TYPE "Release" CACHE STRING "CMake build type") + if(NOT CMAKE_BUILD_TYPE) + # default value + set(CMAKE_BUILD_TYPE "Release") + endif() + set(CMAKE_BUILD_TYPE "${CMAKE_BUILD_TYPE}" CACHE STRING "CMake build type") set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS "Release;Debug;RelWithDebInfo;MinSizeRel") endif() diff --git a/cmake/developer_package/add_ie_target.cmake b/cmake/developer_package/add_ie_target.cmake index 7d62b1f604cd9a..2452312d82026a 100644 --- a/cmake/developer_package/add_ie_target.cmake +++ b/cmake/developer_package/add_ie_target.cmake @@ -5,9 +5,10 @@ #[[ function to create CMake target and setup its options in a declarative style. Example: -addIeTarget( +ov_add_target( NAME core_lib ADD_CPPLINT + ADD_CLANG_FORMAT DEVELOPER_PACKAGE TYPE ROOT ${CMAKE_CURRENT_SOURCE_DIR} @@ -25,9 +26,15 @@ addIeTarget( ie::important_plugin OBJECT_FILES object libraries + DEFINES + DEF1 DEF2 + LINK_LIBRARIES_WHOLE_ARCHIVE + lib1 lib2 + LINK_FLAGS + flag1 flag2 ) #]] -function(addIeTarget) +function(ov_add_target) set(options ADD_CPPLINT # Enables code style checks for the target ADD_CLANG_FORMAT # Enables code style checks for the target @@ -54,7 +61,7 @@ function(addIeTarget) cmake_parse_arguments(ARG "${options}" "${oneValueRequiredArgs};${oneValueOptionalArgs}" "${multiValueArgs}" ${ARGN} ) # sanity checks - foreach(argName ${oneValueRequiredArgs}) + foreach(argName IN LISTS oneValueRequiredArgs) if (NOT ARG_${argName}) message(SEND_ERROR "Argument '${argName}' is required.") endif() @@ -133,18 +140,19 @@ function(addIeTarget) endif() endfunction() -function(ov_add_target) - addIeTarget(${ARGV}) +function(addIeTarget) + message(WARNING "'addIeTarget' is deprecated, please, use 'ov_add_target' instead") + ov_add_target(${ARGV}) endfunction() #[[ Wrapper function over addIeTarget, that also adds a test with the same name. You could use -addIeTargetTest( ... LABELS labelOne labelTwo ) +ov_add_test_target( ... LABELS labelOne labelTwo ) also to provide labels for that test. Important: you MUST pass LABELS as last argument, otherwise it will consume any parameters that come after. 
#]] -function(addIeTargetTest) +function(ov_add_test_target) set(options ) set(oneValueRequiredArgs @@ -161,7 +169,7 @@ function(addIeTargetTest) set(ARG_COMPONENT tests) endif() - addIeTarget(TYPE EXECUTABLE NAME ${ARG_NAME} ${ARG_UNPARSED_ARGUMENTS}) + ov_add_target(TYPE EXECUTABLE NAME ${ARG_NAME} ${ARG_UNPARSED_ARGUMENTS}) if(EMSCRIPTEN) set(JS_BIN_NAME "${ARG_NAME}.js") @@ -187,6 +195,7 @@ function(addIeTargetTest) EXCLUDE_FROM_ALL) endfunction() -function(ov_add_test_target) - addIeTargetTest(${ARGV}) +function(addIeTargetTest) + message(WARNING "'addIeTargetTest' is deprecated, please, use 'ov_add_test_target' instead") + ov_add_test_target(${ARGV}) endfunction() diff --git a/cmake/features.cmake b/cmake/features.cmake index 455db56d28ab32..7b11a8a968f1a7 100644 --- a/cmake/features.cmake +++ b/cmake/features.cmake @@ -21,6 +21,11 @@ ie_dependent_option (ENABLE_ARM_COMPUTE_CMAKE "Enable ARM Compute build via cmak ie_option (ENABLE_TESTS "unit, behavior and functional tests" OFF) +if(ENABLE_TESTS) + include(CTest) + enable_testing() +endif() + if(X86_64) set(ENABLE_INTEL_GPU_DEFAULT ON) else() diff --git a/docs/requirements.txt b/docs/requirements.txt index ddeb91b02f16a2..69433a40eb64ff 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -44,6 +44,6 @@ sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==1.0.3 sphinxcontrib-serializinghtml==1.1.5 toml==0.10.2 -urllib3==1.26.5 +urllib3==1.26.17 zipp==3.4.1 docs/openvino_custom_sphinx_sitemap diff --git a/src/common/conditional_compilation/tests/CMakeLists.txt b/src/common/conditional_compilation/tests/CMakeLists.txt index a68ac41d41b384..a57ef923bad7ec 100644 --- a/src/common/conditional_compilation/tests/CMakeLists.txt +++ b/src/common/conditional_compilation/tests/CMakeLists.txt @@ -17,5 +17,5 @@ ov_add_test_target( "${CMAKE_CURRENT_SOURCE_DIR}/../include" ADD_CLANG_FORMAT LABELS - OV + OV UNIT ) diff --git a/src/common/low_precision_transformations/tests/CMakeLists.txt b/src/common/low_precision_transformations/tests/CMakeLists.txt index aef7ed1391d87a..85d4e5ce472808 100644 --- a/src/common/low_precision_transformations/tests/CMakeLists.txt +++ b/src/common/low_precision_transformations/tests/CMakeLists.txt @@ -17,5 +17,5 @@ ov_add_test_target( gmock INCLUDES ${CMAKE_CURRENT_SOURCE_DIR} LABELS - LP_TRANSFORMATIONS + OV UNIT LP_TRANSFORMATIONS ) diff --git a/src/common/snippets/tests/CMakeLists.txt b/src/common/snippets/tests/CMakeLists.txt index 350b87e583f3e0..274d653518a69a 100644 --- a/src/common/snippets/tests/CMakeLists.txt +++ b/src/common/snippets/tests/CMakeLists.txt @@ -4,7 +4,7 @@ set(TARGET_NAME ov_snippets_func_tests) -addIeTargetTest( +ov_add_test_target( NAME ${TARGET_NAME} ROOT ${CMAKE_CURRENT_SOURCE_DIR} INCLUDES @@ -16,7 +16,7 @@ addIeTargetTest( ov_snippets_models ADD_CPPLINT LABELS - IE OV SNIPPETS + OV UNIT SNIPPETS ) # LTO diff --git a/src/common/transformations/tests/CMakeLists.txt b/src/common/transformations/tests/CMakeLists.txt index 296a198a666f1a..84a4fb6e900edf 100644 --- a/src/common/transformations/tests/CMakeLists.txt +++ b/src/common/transformations/tests/CMakeLists.txt @@ -22,7 +22,7 @@ ov_add_test_target( INCLUDES $/src LABELS - TRANSFORMATIONS + OV UNIT TRANSFORMATIONS ) if(WIN32) diff --git a/src/core/tests/CMakeLists.txt b/src/core/tests/CMakeLists.txt index f8fb34acb349ae..433395d8a4bc0f 100644 --- a/src/core/tests/CMakeLists.txt +++ b/src/core/tests/CMakeLists.txt @@ -47,9 +47,7 @@ ov_add_test_target( openvino::runtime::dev ADD_CLANG_FORMAT LABELS - OV - IE - CORE + OV UNIT CORE ) 
get_target_property(OV_CORE_SRC_DIR ngraph_obj SOURCE_DIR) diff --git a/src/frontends/ir/tests/CMakeLists.txt b/src/frontends/ir/tests/CMakeLists.txt index 3962d4c96c0546..da042bec160d61 100644 --- a/src/frontends/ir/tests/CMakeLists.txt +++ b/src/frontends/ir/tests/CMakeLists.txt @@ -18,6 +18,5 @@ ov_add_test_target( "${CMAKE_CURRENT_SOURCE_DIR}/../include" ADD_CLANG_FORMAT LABELS - OV - IR_FE + OV UNIT IR_FE ) diff --git a/src/frontends/onnx/tests/CMakeLists.txt b/src/frontends/onnx/tests/CMakeLists.txt index 67462466b9131c..22bd85d5b92ee3 100644 --- a/src/frontends/onnx/tests/CMakeLists.txt +++ b/src/frontends/onnx/tests/CMakeLists.txt @@ -123,8 +123,8 @@ foreach(BACKEND_NAME IN LISTS ACTIVE_BACKEND_LIST) endforeach() add_executable(ov_onnx_frontend_tests ${SRC}) -add_test(NAME ov_onnx_frontend_tests COMMAND ov_onnx_frontend_tests) -set_property(TEST ov_onnx_frontend_tests PROPERTY LABELS OV ONNX_FE) +add_test(NAME ov_onnx_frontend_tests COMMAND ov_onnx_frontend_tests --gtest_filter=-*IE_GPU*) +set_property(TEST ov_onnx_frontend_tests PROPERTY LABELS OV UNIT ONNX_FE) add_dependencies(ov_onnx_frontend_tests template_extension) diff --git a/src/frontends/paddle/tests/CMakeLists.txt b/src/frontends/paddle/tests/CMakeLists.txt index 236919d7dfda8f..82a68ce62bc206 100644 --- a/src/frontends/paddle/tests/CMakeLists.txt +++ b/src/frontends/paddle/tests/CMakeLists.txt @@ -19,9 +19,8 @@ ov_add_test_target( gtest_main_manifest func_test_utils ADD_CLANG_FORMAT - LABELS - OV - PADDLE_FE + # LABELS + # OV UNIT PADDLE_FE ) # Test model generating diff --git a/src/frontends/tensorflow/tests/CMakeLists.txt b/src/frontends/tensorflow/tests/CMakeLists.txt index ccffb195159b35..45e64536b60f23 100644 --- a/src/frontends/tensorflow/tests/CMakeLists.txt +++ b/src/frontends/tensorflow/tests/CMakeLists.txt @@ -22,8 +22,7 @@ ov_add_test_target( openvino_tensorflow_common ADD_CLANG_FORMAT LABELS - OV - TF_FE + OV UNIT TF_FE ) # Test model generating diff --git a/src/frontends/tensorflow_common/tests/CMakeLists.txt b/src/frontends/tensorflow_common/tests/CMakeLists.txt index 76e3f4522d5097..1eb4755d808ce0 100644 --- a/src/frontends/tensorflow_common/tests/CMakeLists.txt +++ b/src/frontends/tensorflow_common/tests/CMakeLists.txt @@ -13,6 +13,5 @@ ov_add_test_target( openvino_tensorflow_common ADD_CLANG_FORMAT LABELS - OV - TF_COMMON + OV UNIT TF_COMMON ) diff --git a/src/frontends/tensorflow_lite/tests/CMakeLists.txt b/src/frontends/tensorflow_lite/tests/CMakeLists.txt index 5e0b544db2620c..ddb2fba4d9c429 100644 --- a/src/frontends/tensorflow_lite/tests/CMakeLists.txt +++ b/src/frontends/tensorflow_lite/tests/CMakeLists.txt @@ -16,8 +16,7 @@ ov_add_test_target( openvino_tensorflow_lite_frontend ADD_CLANG_FORMAT LABELS - OV - TFL_FE + OV UNIT TFL_FE ) # Test model generating diff --git a/src/inference/tests/functional/CMakeLists.txt b/src/inference/tests/functional/CMakeLists.txt index 66f819df8e84d7..16bcc08d4b95c8 100644 --- a/src/inference/tests/functional/CMakeLists.txt +++ b/src/inference/tests/functional/CMakeLists.txt @@ -49,7 +49,7 @@ ov_add_test_target( ${CMAKE_CURRENT_SOURCE_DIR} ADD_CLANG_FORMAT LABELS - OV + OV UNIT RUNTIME ) add_compile_definitions(${TARGET_NAME} ${COMPILE_DEFINITIONS}) diff --git a/src/inference/tests/unit/CMakeLists.txt b/src/inference/tests/unit/CMakeLists.txt index 9aff0e1adbe3b0..ef8e346aaf486b 100644 --- a/src/inference/tests/unit/CMakeLists.txt +++ b/src/inference/tests/unit/CMakeLists.txt @@ -17,5 +17,5 @@ ov_add_test_target( unit_test_utils ADD_CLANG_FORMAT LABELS - OV + OV 
UNIT RUNTIME ) diff --git a/src/plugins/auto/tests/unit/CMakeLists.txt b/src/plugins/auto/tests/unit/CMakeLists.txt index ca149e48b33349..ab9f4731fd86bf 100644 --- a/src/plugins/auto/tests/unit/CMakeLists.txt +++ b/src/plugins/auto/tests/unit/CMakeLists.txt @@ -26,9 +26,9 @@ ov_add_test_target( mock_engine ov_models LABELS - Multi - Auto + OV UNIT MULTI AUTO ) + ov_add_version_defines(${OpenVINO_SOURCE_DIR}/src/plugins/auto/src/plugin.cpp ${TARGET_NAME}) ov_set_threading_interface_for(${TARGET_NAME}) diff --git a/src/plugins/auto_batch/tests/functional/CMakeLists.txt b/src/plugins/auto_batch/tests/functional/CMakeLists.txt index 7c5137f9b565f8..f0b81893015a63 100644 --- a/src/plugins/auto_batch/tests/functional/CMakeLists.txt +++ b/src/plugins/auto_batch/tests/functional/CMakeLists.txt @@ -5,7 +5,7 @@ set(TARGET_NAME ov_auto_batch_func_tests) set(SHARED_HEADERS_DIR "${OpenVINO_SOURCE_DIR}/src/tests/functional/plugin/shared/include") -addIeTargetTest( +ov_add_test_target( NAME ${TARGET_NAME} ROOT @@ -19,7 +19,7 @@ addIeTargetTest( openvino_auto_batch_plugin ADD_CPPLINT LABELS - Auto_Batch + OV UNIT AUTO_BATCH ) ov_set_threading_interface_for(${TARGET_NAME}) diff --git a/src/plugins/auto_batch/tests/unit/CMakeLists.txt b/src/plugins/auto_batch/tests/unit/CMakeLists.txt index 14b2181060eeb8..3b1c1285c84177 100644 --- a/src/plugins/auto_batch/tests/unit/CMakeLists.txt +++ b/src/plugins/auto_batch/tests/unit/CMakeLists.txt @@ -8,7 +8,8 @@ set(CI_BUILD_NUMBER "unittest") add_definitions(-DAUTOBATCH_UNITTEST) set(SHARED_HEADERS_DIR "${OpenVINO_SOURCE_DIR}/src/tests/ie_test_util") -addIeTargetTest( + +ov_add_test_target( NAME ${TARGET_NAME} ROOT @@ -27,8 +28,9 @@ addIeTargetTest( ov_models ADD_CPPLINT LABELS - Auto_Batch + OV UNIT AUTO_BATCH ) + ov_add_version_defines(${OpenVINO_SOURCE_DIR}/src/plugins/auto_batch/src/plugin.cpp ${TARGET_NAME}) ov_set_threading_interface_for(${TARGET_NAME}) diff --git a/src/plugins/hetero/tests/functional/CMakeLists.txt b/src/plugins/hetero/tests/functional/CMakeLists.txt index eaa56e072b7746..a1726e31c5a413 100644 --- a/src/plugins/hetero/tests/functional/CMakeLists.txt +++ b/src/plugins/hetero/tests/functional/CMakeLists.txt @@ -18,7 +18,7 @@ ov_add_test_target( common_test_utils ADD_CLANG_FORMAT LABELS - HETERO + OV UNIT HETERO ) target_compile_definitions(${TARGET_NAME} PRIVATE CI_BUILD_NUMBER=\"mock_version\") @@ -26,4 +26,4 @@ target_compile_definitions(${TARGET_NAME} PRIVATE CI_BUILD_NUMBER=\"mock_version if(ENABLE_OV_IR_FRONTEND) add_dependencies(${TARGET_NAME} openvino_ir_frontend) target_compile_definitions(${TARGET_NAME} PRIVATE IR_FRONTEND_ENABLED) -endif() \ No newline at end of file +endif() diff --git a/src/plugins/hetero/tests/unit/CMakeLists.txt b/src/plugins/hetero/tests/unit/CMakeLists.txt index 654f491220c6bb..939229e4ee9ffb 100644 --- a/src/plugins/hetero/tests/unit/CMakeLists.txt +++ b/src/plugins/hetero/tests/unit/CMakeLists.txt @@ -27,5 +27,5 @@ ov_add_test_target( ov_models ADD_CLANG_FORMAT LABELS - HETERO + OV UNIT HETERO ) diff --git a/src/plugins/intel_cpu/tests/functional/CMakeLists.txt b/src/plugins/intel_cpu/tests/functional/CMakeLists.txt index d012589f5b0c1e..06d6b1b6b3583b 100644 --- a/src/plugins/intel_cpu/tests/functional/CMakeLists.txt +++ b/src/plugins/intel_cpu/tests/functional/CMakeLists.txt @@ -44,7 +44,7 @@ if(NOT X86_64) ${CMAKE_CURRENT_SOURCE_DIR}/subgraph_tests/src/x64) endif() -addIeTargetTest( +ov_add_test_target( NAME ${TARGET_NAME} ROOT ${CMAKE_CURRENT_SOURCE_DIR} INCLUDES ${INCLUDES} @@ -54,7 +54,7 @@ 
addIeTargetTest( DEPENDENCIES ${DEPENDENCIES} LINK_LIBRARIES ${LINK_LIBRARIES} ADD_CPPLINT - LABELS CPU + LABELS OV CPU ) ov_set_threading_interface_for(${TARGET_NAME}) diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp index b3e7855c05086c..b9caace0239ab2 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp @@ -181,6 +181,8 @@ std::vector disabledTestPatterns() { R"(.*smoke_Proposal_(Static|Dynamic)_Test_Case1/ProposalLayerCPUTest.*)", // Issue: 111418 R"(.*smoke_Snippets_ConvertStub/ConvertStub\.CompareWithRefImpl/IS.*_OT=\(bf16\)_#N=2_#S=2_targetDevice=CPU.*)", + R"(.*smoke_Snippets_Convert/Convert\.CompareWithRefImpl/IS.*_IT=\(f32\)_OT=\(u8\)_#N=1_#S=1_targetDevice=CPU.*)", + R"(.*smoke_Snippets_ConvertManyOnInputs/ConvertManyOnInputs\.CompareWithRefImpl/IS.*_IT=\(f32\.u8\)_OT=\(\)_#N=1_#S=1_targetDevice=CPU.*)", // Issue: 106939 R"(.*ScatterNDUpdateLayerCPUTest.*-1.-1.-1.-2.-2.-2.*)", // New plugin API doesn't support changes of pre-processing diff --git a/src/plugins/intel_cpu/tests/functional/specific_tests.cmake b/src/plugins/intel_cpu/tests/functional/specific_tests.cmake index 1a3025966a4437..6ec6d019bea143 100644 --- a/src/plugins/intel_cpu/tests/functional/specific_tests.cmake +++ b/src/plugins/intel_cpu/tests/functional/specific_tests.cmake @@ -40,7 +40,7 @@ if(DEFINED ENABLE_CPU_SUBSET_TESTS_PATH) ${CMAKE_CURRENT_SOURCE_DIR}/test_utils/fusing_test_utils.cpp ${CPU_SUBSET_TEST_ABS_PATH}) - addIeTargetTest( + ov_add_test_target( NAME ${SUBSET_TARGET_NAME} ROOT ${CMAKE_CURRENT_SOURCE_DIR} INCLUDES ${INCLUDES} @@ -49,8 +49,7 @@ if(DEFINED ENABLE_CPU_SUBSET_TESTS_PATH) DEFINES ${DEFINES} DEPENDENCIES ${DEPENDENCIES} LINK_LIBRARIES ${LINK_LIBRARIES} - LABELS - CPU + LABELS OV CPU ) ov_set_threading_interface_for(${SUBSET_TARGET_NAME}) diff --git a/src/plugins/intel_cpu/tests/functional/target_per_test.cmake b/src/plugins/intel_cpu/tests/functional/target_per_test.cmake index 4d783a270f8943..c876474426b486 100644 --- a/src/plugins/intel_cpu/tests/functional/target_per_test.cmake +++ b/src/plugins/intel_cpu/tests/functional/target_per_test.cmake @@ -25,7 +25,7 @@ function(create_target_per_test_for_directory TEST_DIR TARGET_PREFIX) set(TEST_TARGET_NAME ${TARGET_PREFIX}_${TEST_FILE_WE}) # create target - addIeTargetTest( + ov_add_test_target( NAME ${TEST_TARGET_NAME} ROOT ${TEST_DIR} INCLUDES ${INCLUDES} @@ -35,8 +35,7 @@ function(create_target_per_test_for_directory TEST_DIR TARGET_PREFIX) DEPENDENCIES ${DEPENDENCIES} LINK_LIBRARIES ${LINK_LIBRARIES} ADD_CPPLINT - LABELS - CPU + LABELS OV CPU ) ov_set_threading_interface_for(${TEST_TARGET_NAME}) @@ -65,7 +64,7 @@ function(create_target_per_test_for_directory TEST_DIR TARGET_PREFIX) set(TEST_TARGET_NAME ${TARGET_PREFIX}_${TEST_CLASS}) # create target - addIeTargetTest( + ov_add_test_target( NAME ${TEST_TARGET_NAME} ROOT ${TEST_DIR} INCLUDES ${INCLUDES} @@ -74,8 +73,7 @@ function(create_target_per_test_for_directory TEST_DIR TARGET_PREFIX) DEFINES ${DEFINES} DEPENDENCIES ${DEPENDENCIES} LINK_LIBRARIES ${LINK_LIBRARIES} - LABELS - CPU + LABELS OV CPU ) ov_set_threading_interface_for(${TEST_TARGET_NAME}) diff --git a/src/plugins/intel_cpu/tests/unit/CMakeLists.txt b/src/plugins/intel_cpu/tests/unit/CMakeLists.txt index a85f63c0f38df0..5bb3205c7a85f9 100644 --- 
a/src/plugins/intel_cpu/tests/unit/CMakeLists.txt +++ b/src/plugins/intel_cpu/tests/unit/CMakeLists.txt @@ -32,7 +32,7 @@ else() set(MLAS_LIBRARY "mlas") endif() -addIeTargetTest( +ov_add_test_target( NAME ${TARGET_NAME} ROOT ${CMAKE_CURRENT_SOURCE_DIR} INCLUDES @@ -64,7 +64,7 @@ addIeTargetTest( ${MLAS_LIBRARY} ADD_CPPLINT LABELS - CPU + OV UNIT CPU ) # LTO diff --git a/src/plugins/intel_gna/legacy/tests/CMakeLists.txt b/src/plugins/intel_gna/legacy/tests/CMakeLists.txt index be69212ba81b82..55771645c79686 100644 --- a/src/plugins/intel_gna/legacy/tests/CMakeLists.txt +++ b/src/plugins/intel_gna/legacy/tests/CMakeLists.txt @@ -23,7 +23,7 @@ ov_add_test_target( INCLUDES $/src LABELS - TRANSFORMATIONS + OV UNIT TRANSFORMATIONS ) if(WIN32) diff --git a/src/plugins/intel_gna/tests/deprecated/unit/CMakeLists.txt b/src/plugins/intel_gna/tests/deprecated/unit/CMakeLists.txt index daa3b45d35baa7..fd85b52a313767 100644 --- a/src/plugins/intel_gna/tests/deprecated/unit/CMakeLists.txt +++ b/src/plugins/intel_gna/tests/deprecated/unit/CMakeLists.txt @@ -91,7 +91,7 @@ target_link_libraries(${TARGET_NAME} PRIVATE ov_try_use_gold_linker() add_test(NAME ${TARGET_NAME} COMMAND ${TARGET_NAME}) -set_property(TEST ${TARGET_NAME} PROPERTY LABELS IE) +set_property(TEST ${TARGET_NAME} PROPERTY LABELS OV UNIT) install(TARGETS ${TARGET_NAME} RUNTIME DESTINATION tests diff --git a/src/plugins/intel_gna/tests/functional/CMakeLists.txt b/src/plugins/intel_gna/tests/functional/CMakeLists.txt index 92a86566b869d4..c1d1c74c46d7ba 100644 --- a/src/plugins/intel_gna/tests/functional/CMakeLists.txt +++ b/src/plugins/intel_gna/tests/functional/CMakeLists.txt @@ -8,7 +8,7 @@ endif() set(TARGET_NAME ov_gna_func_tests) -addIeTargetTest( +ov_add_test_target( NAME ${TARGET_NAME} ROOT ${CMAKE_CURRENT_SOURCE_DIR} INCLUDES @@ -19,7 +19,7 @@ addIeTargetTest( funcSharedTests ADD_CLANG_FORMAT LABELS - GNA + OV GNA ) target_compile_definitions(${TARGET_NAME} diff --git a/src/plugins/intel_gna/tests/unit/CMakeLists.txt b/src/plugins/intel_gna/tests/unit/CMakeLists.txt index c4d49f7b4b0c55..87f4223a643f79 100644 --- a/src/plugins/intel_gna/tests/unit/CMakeLists.txt +++ b/src/plugins/intel_gna/tests/unit/CMakeLists.txt @@ -17,7 +17,7 @@ if(NOT BUILD_SHARED_LIBS) set(exclude_path EXCLUDED_SOURCE_PATHS "${CMAKE_CURRENT_SOURCE_DIR}/(gna_api_stub|gna_wait_test|gna_export_import_test|gna_infer_request_test|gna_plugin_load_network_test|gna_mock_api_initializer|gna_extra_pwl_segments_tests).cpp") endif() -addIeTargetTest( +ov_add_test_target( NAME ${TARGET_NAME} ROOT ${CMAKE_CURRENT_SOURCE_DIR} ${exclude_path} @@ -30,7 +30,7 @@ addIeTargetTest( inference_engine_legacy_s ADD_CLANG_FORMAT LABELS - GNA + OV GNA ) if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") diff --git a/src/plugins/intel_gpu/tests/functional/CMakeLists.txt b/src/plugins/intel_gpu/tests/functional/CMakeLists.txt index e40814d43eb7ff..480717eaacb912 100644 --- a/src/plugins/intel_gpu/tests/functional/CMakeLists.txt +++ b/src/plugins/intel_gpu/tests/functional/CMakeLists.txt @@ -14,7 +14,7 @@ endif() list(APPEND DEFINES TEST_CUSTOM_OP_CONFIG_PATH="${CMAKE_CURRENT_SOURCE_DIR}/custom_op/custom_op.xml") -addIeTargetTest( +ov_add_test_target( NAME ${TARGET_NAME} ROOT @@ -35,7 +35,7 @@ addIeTargetTest( OpenCL::OpenCL ADD_CPPLINT LABELS - GPU + OV GPU ) if(ENABLE_PROXY) diff --git a/src/plugins/proxy/tests/CMakeLists.txt b/src/plugins/proxy/tests/CMakeLists.txt index 04ff0fea9ffb31..73ebec7b96f999 100644 --- a/src/plugins/proxy/tests/CMakeLists.txt +++ b/src/plugins/proxy/tests/CMakeLists.txt 
@@ -39,7 +39,7 @@ ov_add_test_target( func_test_utils ADD_CLANG_FORMAT LABELS - PROXY_PLUGIN + OV UNIT PROXY ) target_compile_definitions(${TARGET_NAME} PRIVATE ${COMPILE_DEFINITIONS}) diff --git a/src/plugins/template/tests/functional/CMakeLists.txt b/src/plugins/template/tests/functional/CMakeLists.txt index aef4a4ecfa71a5..eb634d4f91f955 100644 --- a/src/plugins/template/tests/functional/CMakeLists.txt +++ b/src/plugins/template/tests/functional/CMakeLists.txt @@ -22,7 +22,7 @@ ov_add_test_target( "${CMAKE_CURRENT_SOURCE_DIR}/op_reference" ADD_CLANG_FORMAT LABELS - TEMPLATE + OV UNIT TEMPLATE ) find_package(OpenCV QUIET COMPONENTS core imgproc) diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/CMakeLists.txt b/src/tests/functional/plugin/conformance/subgraphs_dumper/CMakeLists.txt index 3ab17f76aefbcf..6f981d0702c96d 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/CMakeLists.txt +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/CMakeLists.txt @@ -12,8 +12,9 @@ list(APPEND LIBRARIES ) # add subgraphs_dumpers tool -addIeTargetTest( +ov_add_target( NAME ${TARGET_NAME} + TYPE EXECUTABLE ROOT ${CMAKE_CURRENT_SOURCE_DIR}/src INCLUDES PRIVATE @@ -27,7 +28,7 @@ addIeTargetTest( ) # add subgraphs_dumpers lib to get API -addIeTarget( +ov_add_target( NAME "${TARGET_NAME}Util" TYPE STATIC ROOT "${CMAKE_CURRENT_SOURCE_DIR}/src" diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/CMakeLists.txt b/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/CMakeLists.txt index c9eced4c632b72..5d0f9c45c91883 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/CMakeLists.txt +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/CMakeLists.txt @@ -4,19 +4,17 @@ set(TARGET_NAME subgraphsDumperTests) -list(APPEND DEPENDENCIES subgraphsDumperUtil) - -addIeTargetTest( +ov_add_test_target( NAME ${TARGET_NAME} ROOT ${CMAKE_CURRENT_SOURCE_DIR} INCLUDES ${CMAKE_CURRENT_SOURCE_DIR}/ LINK_LIBRARIES PRIVATE - ${DEPENDENCIES} - DEPENDENCIES - ${DEPENDENCIES} + subgraphsDumperUtil ADD_CPPLINT + LABELS + OV UNIT ) ov_build_target_faster(${TARGET_NAME} UNITY) diff --git a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/CMakeLists.txt b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/CMakeLists.txt index e61718b5f6641a..f007e0ebfe5382 100644 --- a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/CMakeLists.txt +++ b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/CMakeLists.txt @@ -4,7 +4,7 @@ set(TARGET_NAME apiConformanceTests) -addIeTargetTest( +ov_add_test_target( NAME ${TARGET_NAME} ROOT "${CMAKE_CURRENT_SOURCE_DIR}/include" ADDITIONAL_SOURCE_DIRS @@ -17,7 +17,7 @@ addIeTargetTest( PUBLIC conformanceShared LABELS - API_CONFORMANCE + OV API_CONFORMANCE ) ov_build_target_faster(${TARGET_NAME} UNITY) diff --git a/src/tests/functional/plugin/conformance/test_runner/conformance_infra/CMakeLists.txt b/src/tests/functional/plugin/conformance/test_runner/conformance_infra/CMakeLists.txt index 54330c5d4271a1..0f25d3218e1b92 100644 --- a/src/tests/functional/plugin/conformance/test_runner/conformance_infra/CMakeLists.txt +++ b/src/tests/functional/plugin/conformance/test_runner/conformance_infra/CMakeLists.txt @@ -4,7 +4,7 @@ set(TARGET_NAME conformanceShared) -addIeTarget( +ov_add_target( NAME ${TARGET_NAME} TYPE STATIC ROOT "${CMAKE_CURRENT_SOURCE_DIR}/include" diff --git 
a/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/CMakeLists.txt b/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/CMakeLists.txt index a4ee71c9ac02a3..a9eb6cec8f3006 100644 --- a/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/CMakeLists.txt +++ b/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/CMakeLists.txt @@ -4,7 +4,7 @@ set(TARGET_NAME conformanceTests) -addIeTargetTest( +ov_add_test_target( NAME ${TARGET_NAME} ROOT "${CMAKE_CURRENT_SOURCE_DIR}/include" ADDITIONAL_SOURCE_DIRS @@ -18,7 +18,7 @@ addIeTargetTest( PUBLIC conformanceShared LABELS - OP_CONFORMANCE + OV OP_CONFORMANCE ) ov_build_target_faster(${TARGET_NAME} UNITY) diff --git a/src/tests/functional/plugin/shared/CMakeLists.txt b/src/tests/functional/plugin/shared/CMakeLists.txt index 23f279a8dca3c6..c75d2938d6badf 100644 --- a/src/tests/functional/plugin/shared/CMakeLists.txt +++ b/src/tests/functional/plugin/shared/CMakeLists.txt @@ -45,7 +45,7 @@ else() list(APPEND EXCLUDED_SOURCE_PATHS ${CMAKE_CURRENT_SOURCE_DIR}/src/snippets) endif() -addIeTarget( +ov_add_target( NAME ${TARGET_NAME} TYPE STATIC ROOT ${PUBLIC_HEADERS_DIR} diff --git a/src/tests/functional/shared_test_classes/CMakeLists.txt b/src/tests/functional/shared_test_classes/CMakeLists.txt index f53f00ca135317..35d09840c09770 100644 --- a/src/tests/functional/shared_test_classes/CMakeLists.txt +++ b/src/tests/functional/shared_test_classes/CMakeLists.txt @@ -4,7 +4,7 @@ set(TARGET_NAME sharedTestClasses) -addIeTarget( +ov_add_target( NAME ${TARGET_NAME} TYPE STATIC ROOT "${CMAKE_CURRENT_SOURCE_DIR}/include" diff --git a/src/tests/ov_helpers/ov_lpt_models/CMakeLists.txt b/src/tests/ov_helpers/ov_lpt_models/CMakeLists.txt index 27dc9effbdf74d..f2b4514c5b0d32 100644 --- a/src/tests/ov_helpers/ov_lpt_models/CMakeLists.txt +++ b/src/tests/ov_helpers/ov_lpt_models/CMakeLists.txt @@ -6,7 +6,7 @@ set(TARGET_NAME ov_lpt_models) set(PUBLIC_HEADERS_DIR "${CMAKE_CURRENT_SOURCE_DIR}/include") -addIeTarget( +ov_add_target( NAME ${TARGET_NAME} TYPE STATIC ROOT ${PUBLIC_HEADERS_DIR} diff --git a/src/tests/ov_helpers/ov_models/CMakeLists.txt b/src/tests/ov_helpers/ov_models/CMakeLists.txt index 0c7c1f48cd275b..6d2989f94af734 100644 --- a/src/tests/ov_helpers/ov_models/CMakeLists.txt +++ b/src/tests/ov_helpers/ov_models/CMakeLists.txt @@ -6,7 +6,7 @@ set(TARGET_NAME ov_models) set(PUBLIC_HEADERS_DIR "${CMAKE_CURRENT_SOURCE_DIR}/include") -addIeTarget( +ov_add_target( NAME ${TARGET_NAME} TYPE STATIC ROOT ${PUBLIC_HEADERS_DIR} diff --git a/src/tests/ov_helpers/ov_snippets_models/CMakeLists.txt b/src/tests/ov_helpers/ov_snippets_models/CMakeLists.txt index 872a928e2e0509..69cd602bb5eab5 100644 --- a/src/tests/ov_helpers/ov_snippets_models/CMakeLists.txt +++ b/src/tests/ov_helpers/ov_snippets_models/CMakeLists.txt @@ -7,7 +7,8 @@ set(TARGET_NAME ov_snippets_models) set(PUBLIC_HEADERS_DIR "${CMAKE_CURRENT_SOURCE_DIR}/include") set(SNIPPETS_INCLUDES "$/include") set(COMMON_TEST_UTILS_INCLUDES "$") -addIeTarget( + +ov_add_target( NAME ${TARGET_NAME} TYPE STATIC ROOT ${PUBLIC_HEADERS_DIR} diff --git a/src/tests/test_utils/common_test_utils/CMakeLists.txt b/src/tests/test_utils/common_test_utils/CMakeLists.txt index d2364915b7a962..3d63059962c4f1 100644 --- a/src/tests/test_utils/common_test_utils/CMakeLists.txt +++ b/src/tests/test_utils/common_test_utils/CMakeLists.txt @@ -16,7 +16,7 @@ function(add_common_utils ADD_TARGET_NAME) endif() # create target - addIeTarget( + ov_add_target( 
NAME ${ADD_TARGET_NAME} TYPE STATIC ROOT ${CMAKE_CURRENT_SOURCE_DIR} diff --git a/src/tests/test_utils/common_test_utils/tests/CMakeLists.txt b/src/tests/test_utils/common_test_utils/tests/CMakeLists.txt index 8ef78985db9271..9712c46de4a240 100644 --- a/src/tests/test_utils/common_test_utils/tests/CMakeLists.txt +++ b/src/tests/test_utils/common_test_utils/tests/CMakeLists.txt @@ -4,11 +4,13 @@ set(TARGET_NAME ov_util_tests) -addIeTargetTest( - NAME ${TARGET_NAME} - ROOT ${CMAKE_CURRENT_SOURCE_DIR} - DEPENDENCIES - LINK_LIBRARIES - common_test_utils - ADD_CPPLINT +ov_add_test_target( + NAME ${TARGET_NAME} + ROOT ${CMAKE_CURRENT_SOURCE_DIR} + DEPENDENCIES + LINK_LIBRARIES + common_test_utils + ADD_CPPLINT + LABELS + OV UNIT ) diff --git a/src/tests/test_utils/functional_test_utils/CMakeLists.txt b/src/tests/test_utils/functional_test_utils/CMakeLists.txt index ba3f83db88a185..c990febcd6a0b2 100644 --- a/src/tests/test_utils/functional_test_utils/CMakeLists.txt +++ b/src/tests/test_utils/functional_test_utils/CMakeLists.txt @@ -4,7 +4,7 @@ set(TARGET_NAME func_test_utils) -addIeTarget( +ov_add_target( NAME ${TARGET_NAME} TYPE STATIC ROOT ${CMAKE_CURRENT_SOURCE_DIR} diff --git a/src/tests/test_utils/unit_test_utils/CMakeLists.txt b/src/tests/test_utils/unit_test_utils/CMakeLists.txt index 4bdae74e806dc1..7b8607d6bae28c 100644 --- a/src/tests/test_utils/unit_test_utils/CMakeLists.txt +++ b/src/tests/test_utils/unit_test_utils/CMakeLists.txt @@ -10,7 +10,7 @@ set(TARGET_NAME unit_test_utils) add_subdirectory(mocks/mock_engine) -addIeTarget( +ov_add_target( NAME ${TARGET_NAME} TYPE STATIC ROOT ${CMAKE_CURRENT_SOURCE_DIR} From fb400814759da0476a4af24859a3462e7264f0c8 Mon Sep 17 00:00:00 2001 From: Ilya Churaev Date: Fri, 6 Oct 2023 12:20:26 +0400 Subject: [PATCH 085/257] Moved apply auto padding to dev API (#20257) --- src/core/dev_api/validation_util.hpp | 33 +++++++++++++++++++- src/core/src/validation_util.cpp | 46 +++++++++++++++++++--------- 2 files changed, 63 insertions(+), 16 deletions(-) diff --git a/src/core/dev_api/validation_util.hpp b/src/core/dev_api/validation_util.hpp index fe607828c80148..c214b404798a9c 100644 --- a/src/core/dev_api/validation_util.hpp +++ b/src/core/dev_api/validation_util.hpp @@ -4,10 +4,17 @@ #pragma once -#include "openvino/core/node.hpp" +#include "openvino/core/coordinate_diff.hpp" +#include "openvino/core/core_visibility.hpp" +#include "openvino/core/partial_shape.hpp" +#include "openvino/core/shape.hpp" +#include "openvino/core/strides.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/util/attr_types.hpp" namespace ov { namespace util { + /// \brief Normalize value to the max if value is negative. /// /// \param value Input value to normalize. @@ -47,5 +54,29 @@ OPENVINO_API std::shared_ptr constantfold_subgraph(const Outpu * @return Shared pointer to constant data or nullptr. */ OPENVINO_API std::shared_ptr get_constant_from_source(const Output& source); + +/// \brief Apply auto padding to padding_above and padding_below inputs +/// if all needed informations are known. +/// +/// \param image_shape The shape of input image. +/// \param filter_shape The shape of filter input. +/// \param filter_strides The strides of applied padding. +/// \param filter_dilations The dilations of applied padding. +/// \param pad_type The type of padding. Auto padding is applied only +/// for SAME_UPPER and SAME_LOWER mode. +/// \param padding_above The beginning of padding shape. +/// \param end The beginning of padding shape. 
+/// +/// \return true if auto padding was applied successfully (all needed informations such as +/// spatial dims are known), false otherwise. +OPENVINO_API +bool try_apply_auto_padding(const PartialShape& image_shape, + const Shape& filter_shape, + const Strides& filter_strides, + const Strides& filter_dilations, + const op::PadType pad_type, + CoordinateDiff& padding_above, + CoordinateDiff& padding_below); + } // namespace util } // namespace ov diff --git a/src/core/src/validation_util.cpp b/src/core/src/validation_util.cpp index b1f03d198f1152..3d2f72b8533825 100644 --- a/src/core/src/validation_util.cpp +++ b/src/core/src/validation_util.cpp @@ -658,23 +658,23 @@ void ov::infer_auto_padding(const Shape& image_shape, CoordinateDiff& padding_below) { const auto image_dims = std::vector(std::begin(image_shape), std::end(image_shape)); // because image_shape is fully known result of try_apply_infer_auto_padding is ignored - ngraph::try_apply_auto_padding(image_dims, - filter_shape, - filter_strides, - filter_dilations, - pad_type, - padding_above, - padding_below); + ov::util::try_apply_auto_padding(image_dims, + filter_shape, + filter_strides, + filter_dilations, + pad_type, + padding_above, + padding_below); } -bool ngraph::try_apply_auto_padding(const PartialShape& image_shape, - const Shape& filter_shape, - const Strides& filter_strides, - const Strides& filter_dilations, - const op::PadType pad_type, - CoordinateDiff& padding_above, - CoordinateDiff& padding_below) { - NGRAPH_CHECK(pad_type == op::PadType::SAME_UPPER || pad_type == op::PadType::SAME_LOWER); +bool ov::util::try_apply_auto_padding(const PartialShape& image_shape, + const Shape& filter_shape, + const Strides& filter_strides, + const Strides& filter_dilations, + const op::PadType pad_type, + CoordinateDiff& padding_above, + CoordinateDiff& padding_below) { + OPENVINO_ASSERT(pad_type == op::PadType::SAME_UPPER || pad_type == op::PadType::SAME_LOWER); if (image_shape.rank().is_dynamic()) { return false; @@ -700,6 +700,22 @@ bool ngraph::try_apply_auto_padding(const PartialShape& image_shape, return true; } +bool ngraph::try_apply_auto_padding(const PartialShape& image_shape, + const Shape& filter_shape, + const Strides& filter_strides, + const Strides& filter_dilations, + const op::PadType pad_type, + CoordinateDiff& padding_above, + CoordinateDiff& padding_below) { + return ov::util::try_apply_auto_padding(image_shape, + filter_shape, + filter_strides, + filter_dilations, + pad_type, + padding_above, + padding_below); +} + ngraph::PartialShape ngraph::infer_slice_shape(const Node* node, const PartialShape& input_shape, const std::vector& begin, From bb2c2fab6c7452b8ebc6076bac7ef6a5b36623ec Mon Sep 17 00:00:00 2001 From: Ekaterina Aidova Date: Fri, 6 Oct 2023 12:26:12 +0400 Subject: [PATCH 086/257] [PT FE]: support aten::log1p, fixes for where and linalg_norm (#20167) * [PT FE]: support aten::log1p, fixes for where and linalg_norm * clarify norm behaviour --- src/frontends/pytorch/src/op/log.cpp | 12 +++++ src/frontends/pytorch/src/op/norm.cpp | 2 +- src/frontends/pytorch/src/op/where.cpp | 1 + src/frontends/pytorch/src/op_table.cpp | 3 ++ tests/layer_tests/pytorch_tests/test_log.py | 9 +++- tests/layer_tests/pytorch_tests/test_norm.py | 17 ++++--- tests/layer_tests/pytorch_tests/test_where.py | 51 +++++++++++++++---- 7 files changed, 75 insertions(+), 20 deletions(-) diff --git a/src/frontends/pytorch/src/op/log.cpp b/src/frontends/pytorch/src/op/log.cpp index c047f9e7853835..20232e31dec5ce 100644 --- 
a/src/frontends/pytorch/src/op/log.cpp +++ b/src/frontends/pytorch/src/op/log.cpp @@ -5,6 +5,7 @@ #include "openvino/op/log.hpp" #include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/op/add.hpp" #include "openvino/op/constant.hpp" #include "openvino/op/convert.hpp" #include "openvino/op/divide.hpp" @@ -55,6 +56,17 @@ OutputVector translate_logsumexp(const NodeContext& context) { return {log}; }; +OutputVector translate_log1p(const NodeContext& context) { + // torch.log1p returns a tensor with the natural logarithm of the elements of input + 1. + num_inputs_check(context, 1, 1); + auto x = context.get_input(0); + x = context.mark_node(std::make_shared(x, element::f32)); + auto one = context.mark_node(v0::Constant::create(element::f32, Shape{}, {1})); + auto x_plus_one = context.mark_node(std::make_shared(x, one)); + auto log = context.mark_node(std::make_shared(x_plus_one)); + return {log}; +}; + } // namespace op } // namespace pytorch } // namespace frontend diff --git a/src/frontends/pytorch/src/op/norm.cpp b/src/frontends/pytorch/src/op/norm.cpp index 6cf30a323e4dba..d3136b7e76ad48 100644 --- a/src/frontends/pytorch/src/op/norm.cpp +++ b/src/frontends/pytorch/src/op/norm.cpp @@ -259,7 +259,7 @@ OutputVector translate_linalg_norm(const NodeContext& context) { auto input_rank = x.get_partial_shape().rank(); if (input_rank.is_static() && input_rank.get_length() == 2) { result = frobenius_norm(context, x, dim, keep_dim); - } else if (input_rank.is_static() && input_rank.get_length() == 1) { + } else if (input_rank.is_dynamic() || input_rank.get_length() == 1) { result = norm_vector(context, x, dim, 2, keep_dim); } else { FRONT_END_OP_CONVERSION_CHECK(false, diff --git a/src/frontends/pytorch/src/op/where.cpp b/src/frontends/pytorch/src/op/where.cpp index 4a9de9f69edab8..3d03706970bc67 100644 --- a/src/frontends/pytorch/src/op/where.cpp +++ b/src/frontends/pytorch/src/op/where.cpp @@ -21,6 +21,7 @@ OutputVector translate_where(const NodeContext& context) { auto bool_cond = context.mark_node(std::make_shared(cond, element::boolean)); auto x = context.get_input(1); auto y = context.get_input(2); + align_eltwise_input_types(context, x, y, true); return {context.mark_node(std::make_shared(bool_cond, x, y))}; }; diff --git a/src/frontends/pytorch/src/op_table.cpp b/src/frontends/pytorch/src/op_table.cpp index bbad312b74ed4e..41a790d1ef2079 100644 --- a/src/frontends/pytorch/src/op_table.cpp +++ b/src/frontends/pytorch/src/op_table.cpp @@ -89,6 +89,7 @@ OP_CONVERTER(translate_linspace); OP_CONVERTER(translate_list_construct); OP_CONVERTER(translate_list_unpack); OP_CONVERTER(translate_log); +OP_CONVERTER(translate_log1p); OP_CONVERTER(translate_log_softmax); OP_CONVERTER(translate_log2); OP_CONVERTER(translate_logsumexp); @@ -353,6 +354,8 @@ const std::map get_supported_ops_ts() { {"aten::logical_not", op::translate_not}, {"aten::logical_xor", op::translate_xor}, {"aten::log_softmax", op::translate_log_softmax}, + {"aten::log1p", op::translate_log1p}, + {"aten::log1p_", op::inplace_op}, {"aten::log2", op::translate_log2}, {"aten::log2_", op::inplace_op}, {"aten::lt", op::translate_1to1_match_2_inputs_align_types}, diff --git a/tests/layer_tests/pytorch_tests/test_log.py b/tests/layer_tests/pytorch_tests/test_log.py index 2d8a87fd22d203..1e4de2dd4f19af 100644 --- a/tests/layer_tests/pytorch_tests/test_log.py +++ b/tests/layer_tests/pytorch_tests/test_log.py @@ -17,7 +17,9 @@ def create_model(self, op): "log": torch.log, "log_": torch.log_, "log2": torch.log2, - "log2_": 
torch.log2_ + "log2_": torch.log2_, + "log1p": torch.log1p, + "log1p_": torch.log1p_ } op_fn = ops[op] @@ -42,7 +44,10 @@ def forward(self, x): ["log_", "float32"], ["log2", "float32"], ["log2", "int32"], - ["log2_", "float32"]]) + ["log2_", "float32"], + ["log1p", "float32"], + ["log1p", "int32"], + ["log1p_", "float32"]]) def test_log(self, op, input_dtype, ie_device, precision, ir_version): self._test(*self.create_model(op), ie_device, precision, ir_version, kwargs_to_prepare_input={"dtype": input_dtype}) \ No newline at end of file diff --git a/tests/layer_tests/pytorch_tests/test_norm.py b/tests/layer_tests/pytorch_tests/test_norm.py index fa2da2f082adc9..aef0a074059950 100644 --- a/tests/layer_tests/pytorch_tests/test_norm.py +++ b/tests/layer_tests/pytorch_tests/test_norm.py @@ -253,11 +253,11 @@ def test_linalg_matrix_norm(self, p, dim, keepdim, dtype, out, prim_dtype, ie_de class TestLinalgNorm(PytorchLayerTest): - def _prepare_input(self, out=False, out_dtype=None): + def _prepare_input(self, out=False, out_dtype=None, input_shape=(3, 3)): if not out: - return (np.random.randn(3, 3).astype(np.float32),) - x = np.random.randn(3, 3).astype(np.float32) - y = np.random.randn(3, 3).astype( + return (np.random.randn(*input_shape).astype(np.float32),) + x = np.random.randn(*input_shape).astype(np.float32) + y = np.random.randn(*input_shape).astype( out_dtype if out_dtype is not None else np.float32) return (x, y) @@ -318,7 +318,12 @@ def forward_out(self, x, y): @pytest.mark.parametrize("dtype", ["float32", "float64", None]) @pytest.mark.parametrize("out", [True, False]) @pytest.mark.parametrize("prim_dtype", [True, False]) - def test_linalg_norm(self, p, dim, keepdim, dtype, out, prim_dtype, ie_device, precision, ir_version): + @pytest.mark.parametrize("input_shape", [[1, 3], [3, 3], [1, 3, 3]]) + def test_linalg_norm(self, p, dim, keepdim, dtype, out, prim_dtype, input_shape, ie_device, precision, ir_version): self._test(*self.create_model(p, dim, keepdim, dtype, out, prim_dtype), ie_device, precision, ir_version, - kwargs_to_prepare_input={"out": out or prim_dtype, "out_dtype": dtype if prim_dtype else None}) + kwargs_to_prepare_input={ + "out": out or prim_dtype, + "out_dtype": dtype if prim_dtype else None, + "input_shape": input_shape + }) diff --git a/tests/layer_tests/pytorch_tests/test_where.py b/tests/layer_tests/pytorch_tests/test_where.py index 20d9fa1d19b53e..b87f3794f76398 100644 --- a/tests/layer_tests/pytorch_tests/test_where.py +++ b/tests/layer_tests/pytorch_tests/test_where.py @@ -8,7 +8,7 @@ class Testwhere(PytorchLayerTest): - def _prepare_input(self, mask_fill='ones', mask_dtype=bool, return_x_y=False): + def _prepare_input(self, mask_fill='ones', mask_dtype=bool, return_x_y=False, x_dtype="float32", y_dtype=None): input_shape = [2, 10] mask = np.zeros(input_shape).astype(mask_dtype) if mask_fill == 'ones': @@ -16,16 +16,31 @@ def _prepare_input(self, mask_fill='ones', mask_dtype=bool, return_x_y=False): if mask_fill == 'random': idx = np.random.choice(10, 5) mask[:, idx] = 1 - x = np.random.randn(*input_shape) - y = np.random.randn(*input_shape) + x = np.random.randn(*input_shape).astype(x_dtype) + y = np.random.randn(*input_shape).astype(y_dtype or x_dtype) return (mask,) if not return_x_y else (mask, x, y) - def create_model(self, as_non_zero): + def create_model(self, as_non_zero, dtypes=None): import torch + dtype_map = { + "float32": torch.float32, + "int32": torch.int32 + } + + torch_dtypes = None + if dtypes: + torch_dtypes = (dtype_map[dtypes[0]], 
dtype_map[dtypes[1]]) + class aten_where(torch.nn.Module): + def __init__(self, dtypes) -> None: + super().__init__() + self.x_dtype = dtypes[0] + self.y_dtype = dtypes[1] + + def forward(self, cond, x, y): - return torch.where(cond, x, y) + return torch.where(cond, x.to(self.x_dtype), y.to(self.y_dtype)) class aten_where_as_nonzero(torch.nn.Module): def forward(self, cond): @@ -35,25 +50,39 @@ def forward(self, cond): if as_non_zero: return aten_where_as_nonzero(), ref_net, "aten::where" - return aten_where(), ref_net, "aten::where" + return aten_where(torch_dtypes), ref_net, "aten::where" @pytest.mark.parametrize( "mask_fill", ['zeros', 'ones', 'random']) @pytest.mark.parametrize("mask_dtype", [np.uint8, bool]) # np.float32 incorrectly casted to bool + @pytest.mark.parametrize("x_dtype", ["float32", "int32"]) + @pytest.mark.parametrize("y_dtype", ["float32", "int32"]) @pytest.mark.nightly @pytest.mark.precommit - def test_where(self, mask_fill, mask_dtype, ie_device, precision, ir_version): - self._test(*self.create_model(False), + def test_where(self, mask_fill, mask_dtype, x_dtype, y_dtype, ie_device, precision, ir_version): + self._test(*self.create_model(False, dtypes=(x_dtype, y_dtype)), ie_device, precision, ir_version, - kwargs_to_prepare_input={'mask_fill': mask_fill, 'mask_dtype': mask_dtype, 'return_x_y': True}) + kwargs_to_prepare_input={ + 'mask_fill': mask_fill, + 'mask_dtype': mask_dtype, + 'return_x_y': True, + "x_dtype": x_dtype, + "y_dtype": y_dtype + }) @pytest.mark.parametrize( "mask_fill", ['zeros', 'ones', 'random']) @pytest.mark.parametrize("mask_dtype", [np.uint8, bool]) # np.float32 incorrectly casted to bool + @pytest.mark.parametrize("x_dtype", ["float32", "int32"]) @pytest.mark.nightly @pytest.mark.precommit - def test_where_as_nonzero(self, mask_fill, mask_dtype, ie_device, precision, ir_version): + def test_where_as_nonzero(self, mask_fill, mask_dtype, x_dtype, ie_device, precision, ir_version): self._test(*self.create_model(True), ie_device, precision, ir_version, - kwargs_to_prepare_input={'mask_fill': mask_fill, 'mask_dtype': mask_dtype, 'return_x_y': False}, + kwargs_to_prepare_input={ + 'mask_fill': mask_fill, + 'mask_dtype': mask_dtype, + 'return_x_y': False, + "x_dtype": x_dtype, + }, trace_model=True) From 9fe6be21ea53cf2931bbd5ae9fb3473907a17953 Mon Sep 17 00:00:00 2001 From: Przemyslaw Wysocki Date: Fri, 6 Oct 2023 10:28:27 +0200 Subject: [PATCH 087/257] [PyOV] Drop Python 3.7 in OpenVINO Runtime (#19534) * Drop 37 * Fix linter * Minor change * Minor changes * update add_lib_path * Minor changes * Update pypi pages * Update pypi pages * Fix linter * Minor changes --------- Co-authored-by: Anastasia Kuporosova Co-authored-by: Jan Iwaszkiewicz Co-authored-by: Ilya Lavrenov --- .github/github_org_control/configs.py | 4 ++-- .github/workflows/py_checks.yml | 2 +- .../ncc_naming_style/requirements_dev.txt | 1 - scripts/setupvars/setupvars.bat | 2 +- scripts/setupvars/setupvars.sh | 2 +- .../src/compatibility/ngraph/utils/types.py | 6 +++--- .../python/src/openvino/runtime/utils/types.py | 6 +++--- src/bindings/python/src/openvino/utils.py | 5 +---- src/bindings/python/tests/test_graph/test_any.py | 4 ++-- src/bindings/python/tests/test_graph/test_if.py | 2 +- .../python/tests/test_graph/test_loop.py | 2 +- .../tests/test_graph/test_tensor_iterator.py | 2 +- .../python/tests/test_runtime/test_properties.py | 16 ++++++++-------- tools/deployment_manager/deployment_manager.py | 4 ++-- 14 files changed, 27 insertions(+), 31 deletions(-) diff --git 
a/.github/github_org_control/configs.py b/.github/github_org_control/configs.py index 92ba0a96c2d9bc..872638bb657fdf 100644 --- a/.github/github_org_control/configs.py +++ b/.github/github_org_control/configs.py @@ -14,8 +14,8 @@ from pathlib import Path -if sys.version_info[:2] < (3, 7): - raise Exception("Python version must be >= 3.7") +if sys.version_info[:2] < (3, 8): + raise Exception("Python version must be >= 3.8") class ConfigException(Exception): diff --git a/.github/workflows/py_checks.yml b/.github/workflows/py_checks.yml index 2a2a5741754895..c97d5167e2b035 100644 --- a/.github/workflows/py_checks.yml +++ b/.github/workflows/py_checks.yml @@ -30,7 +30,7 @@ jobs: - name: Setup Python uses: actions/setup-python@v4 with: - python-version: '3.7' + python-version: '3.8' - name: Install dependencies run: python -m pip install -r src/bindings/python/requirements_test.txt diff --git a/cmake/developer_package/ncc_naming_style/requirements_dev.txt b/cmake/developer_package/ncc_naming_style/requirements_dev.txt index d7e1d3f831009d..21e3e089b54dfe 100644 --- a/cmake/developer_package/ncc_naming_style/requirements_dev.txt +++ b/cmake/developer_package/ncc_naming_style/requirements_dev.txt @@ -1,4 +1,3 @@ -clang==11.1.0; python_version == '3.7' clang==12.0.1; python_version == '3.8' clang==12.0.1; python_version == '3.9' clang==14.0; python_version == '3.10' diff --git a/scripts/setupvars/setupvars.bat b/scripts/setupvars/setupvars.bat index 9ef50fc88fa263..bc5b94e537319f 100644 --- a/scripts/setupvars/setupvars.bat +++ b/scripts/setupvars/setupvars.bat @@ -65,7 +65,7 @@ set "PATH=%OPENVINO_LIB_PATHS%;%PATH%" :: Check if Python is installed set PYTHON_VERSION_MAJOR=3 -set MIN_REQUIRED_PYTHON_VERSION_MINOR=7 +set MIN_REQUIRED_PYTHON_VERSION_MINOR=8 set MAX_SUPPORTED_PYTHON_VERSION_MINOR=11 python --version 2>NUL diff --git a/scripts/setupvars/setupvars.sh b/scripts/setupvars/setupvars.sh index 591b25e7bfb7f5..bc5d257d68b835 100755 --- a/scripts/setupvars/setupvars.sh +++ b/scripts/setupvars/setupvars.sh @@ -99,7 +99,7 @@ if command -v lsb_release >/dev/null 2>&1; then fi PYTHON_VERSION_MAJOR="3" -MIN_REQUIRED_PYTHON_VERSION_MINOR="7" +MIN_REQUIRED_PYTHON_VERSION_MINOR="8" MAX_SUPPORTED_PYTHON_VERSION_MINOR="11" check_python_version () { diff --git a/src/bindings/python/src/compatibility/ngraph/utils/types.py b/src/bindings/python/src/compatibility/ngraph/utils/types.py index 76d70894a285ed..9556fe2ccf04f2 100644 --- a/src/bindings/python/src/compatibility/ngraph/utils/types.py +++ b/src/bindings/python/src/compatibility/ngraph/utils/types.py @@ -105,16 +105,16 @@ def get_dtype(ngraph_type: NgraphType) -> np.dtype: def get_ndarray(data: NumericData) -> np.ndarray: """Wrap data into a numpy ndarray.""" - if type(data) == np.ndarray: + if isinstance(data, np.ndarray): return data return np.array(data) def get_shape(data: NumericData) -> TensorShape: """Return a shape of NumericData.""" - if type(data) == np.ndarray: + if isinstance(data, np.ndarray): return data.shape # type: ignore - elif type(data) == list: + if isinstance(data, list): return [len(data)] # type: ignore return [] diff --git a/src/bindings/python/src/openvino/runtime/utils/types.py b/src/bindings/python/src/openvino/runtime/utils/types.py index a127cb2e17bdef..5eeeb021a7c724 100644 --- a/src/bindings/python/src/openvino/runtime/utils/types.py +++ b/src/bindings/python/src/openvino/runtime/utils/types.py @@ -121,16 +121,16 @@ def get_numpy_ctype(openvino_type: Type) -> type: def get_ndarray(data: NumericData) -> np.ndarray: 
"""Wrap data into a numpy ndarray.""" - if type(data) == np.ndarray: + if isinstance(data, np.ndarray): return data # type: ignore return np.array(data) def get_shape(data: NumericData) -> TensorShape: """Return a shape of NumericData.""" - if type(data) == np.ndarray: + if isinstance(data, np.ndarray): return data.shape # type: ignore - elif type(data) == list: + if isinstance(data, list): return [len(data)] # type: ignore return [] diff --git a/src/bindings/python/src/openvino/utils.py b/src/bindings/python/src/openvino/utils.py index a62418d951fc24..d2c646ef986a9f 100644 --- a/src/bindings/python/src/openvino/utils.py +++ b/src/bindings/python/src/openvino/utils.py @@ -36,10 +36,7 @@ def _add_openvino_libs_to_search_path() -> None: lib_path = os.path.join(os.path.dirname(__file__), lib) if os.path.isdir(lib_path): # On Windows, with Python >= 3.8, DLLs are no longer imported from the PATH. - if (3, 8) <= sys.version_info: - os.add_dll_directory(os.path.abspath(lib_path)) - else: - os.environ["PATH"] = os.path.abspath(lib_path) + ";" + os.environ["PATH"] + os.add_dll_directory(os.path.abspath(lib_path)) def add_openvino_libs_to_path() -> None: diff --git a/src/bindings/python/tests/test_graph/test_any.py b/src/bindings/python/tests/test_graph/test_any.py index 439b4d5df98a47..a6ea4fc2f420c1 100644 --- a/src/bindings/python/tests/test_graph/test_any.py +++ b/src/bindings/python/tests/test_graph/test_any.py @@ -47,8 +47,8 @@ def test_any_dict(value_dict, value_type, data_type): assert isinstance(ovany.value, dict) assert ovany[key] == list(value_dict.values())[0] assert len(ovany.value) == 1 - assert type(ovany.value[key]) == value_type - assert type(list(value_dict.values())[0]) == data_type + assert isinstance(ovany.value[key], value_type) + assert isinstance(list(value_dict.values())[0], data_type) assert ovany.get() == value_dict diff --git a/src/bindings/python/tests/test_graph/test_if.py b/src/bindings/python/tests/test_graph/test_if.py index 7e165342a2ce02..39068fa29c4d6b 100644 --- a/src/bindings/python/tests/test_graph/test_if.py +++ b/src/bindings/python/tests/test_graph/test_if.py @@ -195,7 +195,7 @@ def test_simple_if_basic(): if_node.set_function(0, then_body) subgraph_func = if_node.get_function(0) - assert type(subgraph_func) == type(then_body) + assert isinstance(subgraph_func, type(then_body)) assert compare_models(subgraph_func, then_body) assert subgraph_func._get_raw_address() == then_body._get_raw_address() diff --git a/src/bindings/python/tests/test_graph/test_loop.py b/src/bindings/python/tests/test_graph/test_loop.py index 9a2fb6fcaf62d9..235ea917ba5e28 100644 --- a/src/bindings/python/tests/test_graph/test_loop.py +++ b/src/bindings/python/tests/test_graph/test_loop.py @@ -142,7 +142,7 @@ def test_loop_basic(): subgraph_func = loop.get_function() - assert type(subgraph_func) == type(graph_body) + assert isinstance(subgraph_func, type(graph_body)) assert subgraph_func._get_raw_address() == graph_body._get_raw_address() assert compare_models(subgraph_func, graph_body) assert loop.get_special_body_ports() == body_ports diff --git a/src/bindings/python/tests/test_graph/test_tensor_iterator.py b/src/bindings/python/tests/test_graph/test_tensor_iterator.py index dd58b3da3f403a..7dbeecc4c47005 100644 --- a/src/bindings/python/tests/test_graph/test_tensor_iterator.py +++ b/src/bindings/python/tests/test_graph/test_tensor_iterator.py @@ -121,7 +121,7 @@ def test_tensor_iterator_basic(): subgraph_func = ti.get_function() - assert type(subgraph_func) == type(graph_body) + 
assert isinstance(subgraph_func, type(graph_body)) assert compare_models(subgraph_func, graph_body) assert subgraph_func._get_raw_address() == graph_body._get_raw_address() assert ti.get_num_iterations() == 16 diff --git a/src/bindings/python/tests/test_runtime/test_properties.py b/src/bindings/python/tests/test_runtime/test_properties.py index cb70887fb19055..6a76ccb57cb6ab 100644 --- a/src/bindings/python/tests/test_runtime/test_properties.py +++ b/src/bindings/python/tests/test_runtime/test_properties.py @@ -472,7 +472,7 @@ def test_single_property_setting(device): core.set_property(device, streams.num(streams.Num.AUTO)) assert props.streams.Num.AUTO.to_integer() == -1 - assert type(core.get_property(device, streams.num())) == int + assert isinstance(core.get_property(device, streams.num()), int) @pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU", reason=f"Cannot run test on device {os.environ.get('TEST_DEVICE')}, Plugin specific test") @@ -539,10 +539,10 @@ def test_core_cpu_properties(properties_to_set): assert core.get_property("CPU", streams.num) == 5 # RO properties - assert type(core.get_property("CPU", props.supported_properties)) == dict - assert type(core.get_property("CPU", props.available_devices)) == list - assert type(core.get_property("CPU", props.optimal_number_of_infer_requests)) == int - assert type(core.get_property("CPU", props.range_for_streams)) == tuple - assert type(core.get_property("CPU", props.range_for_async_infer_requests)) == tuple - assert type(core.get_property("CPU", device.full_name)) == str - assert type(core.get_property("CPU", device.capabilities)) == list + assert isinstance(core.get_property("CPU", props.supported_properties), dict) + assert isinstance(core.get_property("CPU", props.available_devices), list) + assert isinstance(core.get_property("CPU", props.optimal_number_of_infer_requests), int) + assert isinstance(core.get_property("CPU", props.range_for_streams), tuple) + assert isinstance(core.get_property("CPU", props.range_for_async_infer_requests), tuple) + assert isinstance(core.get_property("CPU", device.full_name), str) + assert isinstance(core.get_property("CPU", device.capabilities), list) diff --git a/tools/deployment_manager/deployment_manager.py b/tools/deployment_manager/deployment_manager.py index 9cf07b3286be3c..7548ceb18f688e 100755 --- a/tools/deployment_manager/deployment_manager.py +++ b/tools/deployment_manager/deployment_manager.py @@ -17,8 +17,8 @@ import sys -if sys.version_info[0] < 3 or (sys.version_info[0] == 3 and sys.version_info[1] < 7): - exit("Python* 3.7 or higher is required to run the Deployment Manager.") +if sys.version_info[0] < 3 or (sys.version_info[0] == 3 and sys.version_info[1] < 8): + exit("Python* 3.8 or higher is required to run the Deployment Manager.") if __name__ == '__main__': from deployman.main import main From 7c847cecba3789fa6b84719aca34cbd7f8b3a71d Mon Sep 17 00:00:00 2001 From: Ilya Churaev Date: Fri, 6 Oct 2023 12:54:11 +0400 Subject: [PATCH 088/257] Remove ngraph namespace from operations without evaluate (#20252) * Remove ngraph namespace from operations without namespace * Try to fix build * Additional fixes * More fixes * More fixs * Fix reverse op * Fixed tests * Throw an exception if somebody tries to reallocate tensor * Revert "Throw an exception if somebody tries to reallocate tensor" This reverts commit 8e06d6d5768a9d851b1135b5ee5aaa475e211bec. * Remove python test * Revert "Remove python test" This reverts commit 37b12148d345eb3e94118f6f8d00ad155a4f9329. 
* Changed evaluate model behavior --- .../python/tests/test_runtime/test_model.py | 2 +- src/core/include/openvino/op/constant.hpp | 4 +- src/core/include/openvino/op/reshape.hpp | 6 +- src/core/include/openvino/op/result.hpp | 4 +- src/core/include/openvino/op/tile.hpp | 6 - src/core/src/itt.hpp | 6 +- src/core/src/model.cpp | 13 +- src/core/src/node.cpp | 3 +- src/core/src/op/binary_convolution.cpp | 2 +- src/core/src/op/broadcast.cpp | 82 ++++----- src/core/src/op/constant.cpp | 105 ++++++------ src/core/src/op/convert.cpp | 34 ++-- src/core/src/op/detection_output.cpp | 2 +- src/core/src/op/divide.cpp | 14 +- src/core/src/op/elu.cpp | 18 +- src/core/src/op/embedding_segments_sum.cpp | 2 - src/core/src/op/equal.cpp | 30 ++-- src/core/src/op/erf.cpp | 12 +- src/core/src/op/exp.cpp | 12 +- src/core/src/op/eye.cpp | 16 +- src/core/src/op/fake_quantize.cpp | 12 +- src/core/src/op/floor.cpp | 20 +-- src/core/src/op/floor_mod.cpp | 18 +- src/core/src/op/gelu.cpp | 4 +- src/core/src/op/greater.cpp | 14 +- src/core/src/op/greater_eq.cpp | 14 +- src/core/src/op/grid_sample.cpp | 2 +- src/core/src/op/hard_sigmoid.cpp | 4 +- src/core/src/op/hsigmoid.cpp | 6 +- src/core/src/op/hswish.cpp | 6 +- src/core/src/op/less.cpp | 14 +- src/core/src/op/less_eq.cpp | 14 +- src/core/src/op/log.cpp | 12 +- src/core/src/op/logical_and.cpp | 2 +- src/core/src/op/logical_not.cpp | 14 +- src/core/src/op/logical_or.cpp | 2 +- src/core/src/op/loop.cpp | 56 +++--- src/core/src/op/matmul.cpp | 12 +- src/core/src/op/max_pool.cpp | 40 ++--- src/core/src/op/maximum.cpp | 12 +- src/core/src/op/minimum.cpp | 16 +- src/core/src/op/mish.cpp | 4 +- src/core/src/op/multiply.cpp | 20 +-- src/core/src/op/mvn.cpp | 59 +++---- src/core/src/op/negative.cpp | 10 +- src/core/src/op/non_max_suppression.cpp | 4 +- src/core/src/op/non_zero.cpp | 26 +-- src/core/src/op/normalize_l2.cpp | 23 +-- src/core/src/op/not_equal.cpp | 14 +- src/core/src/op/one_hot.cpp | 4 +- src/core/src/op/power.cpp | 14 +- src/core/src/op/prelu.cpp | 8 +- src/core/src/op/prior_box.cpp | 32 ++-- src/core/src/op/prior_box_clustered.cpp | 16 +- src/core/src/op/random_uniform.cpp | 2 +- src/core/src/op/range.cpp | 24 +-- src/core/src/op/relu.cpp | 12 +- src/core/src/op/reshape.cpp | 75 ++++---- src/core/src/op/result.cpp | 51 +++--- src/core/src/op/reverse.cpp | 25 ++- src/core/src/op/roi_align.cpp | 78 ++++----- src/core/src/op/scatter_elements_update.cpp | 160 +++++++++--------- src/core/src/op/scatter_nd_update.cpp | 14 +- src/core/src/op/select.cpp | 26 +-- src/core/src/op/shape_of.cpp | 16 +- src/core/src/op/sigmoid.cpp | 12 +- src/core/src/op/sign.cpp | 12 +- src/core/src/op/softmax.cpp | 8 +- src/core/src/op/softplus.cpp | 6 +- src/core/src/op/softsign.cpp | 8 +- src/core/src/op/sqrt.cpp | 14 +- src/core/src/op/tile.cpp | 60 ++----- src/core/src/op/topk.cpp | 12 +- src/core/src/op/unique.cpp | 4 +- src/core/src/op/util/gather_base.cpp | 18 +- src/core/tests/eval.cpp | 13 ++ 76 files changed, 748 insertions(+), 803 deletions(-) diff --git a/src/bindings/python/tests/test_runtime/test_model.py b/src/bindings/python/tests/test_runtime/test_model.py index b8823b732943de..53d0a4d27398f8 100644 --- a/src/bindings/python/tests/test_runtime/test_model.py +++ b/src/bindings/python/tests/test_runtime/test_model.py @@ -289,7 +289,7 @@ def test_evaluate_invalid_input_shape(): [Tensor("float32", Shape([2, 1]))], [Tensor("float32", Shape([3, 1])), Tensor("float32", Shape([3, 1]))], ) - assert "must be compatible with the partial shape: [2,1]" in str(e.value) + assert 
"Cannot evaluate model!" in str(e.value) def test_get_batch(): diff --git a/src/core/include/openvino/op/constant.hpp b/src/core/include/openvino/op/constant.hpp index bc36330217d876..14ee7b3313490e 100644 --- a/src/core/include/openvino/op/constant.hpp +++ b/src/core/include/openvino/op/constant.hpp @@ -198,9 +198,7 @@ class OPENVINO_API Constant : public Op { bool visit_attributes(AttributeVisitor& visitor) override; - OPENVINO_SUPPRESS_DEPRECATED_START - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - OPENVINO_SUPPRESS_DEPRECATED_END + bool evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const override; bool has_evaluate() const override; bool evaluate_lower(TensorVector& outputs) const override; bool evaluate_upper(TensorVector& outputs) const override; diff --git a/src/core/include/openvino/op/reshape.hpp b/src/core/include/openvino/op/reshape.hpp index 9d4ecc18da1cc0..274b276e7f2ce4 100644 --- a/src/core/include/openvino/op/reshape.hpp +++ b/src/core/include/openvino/op/reshape.hpp @@ -46,9 +46,7 @@ class OPENVINO_API Reshape : public Op { void set_special_zero(bool special_zero) { m_special_zero = special_zero; } - OPENVINO_SUPPRESS_DEPRECATED_START - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - OPENVINO_SUPPRESS_DEPRECATED_END + bool evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const override; bool has_evaluate() const override; bool evaluate_upper(TensorVector& outputs) const override; bool evaluate_lower(TensorVector& outputs) const override; @@ -57,7 +55,7 @@ class OPENVINO_API Reshape : public Op { protected: bool m_special_zero; - bool evaluate_reshape(const HostTensorVector& outputs, const HostTensorVector& inputs) const; + bool evaluate_reshape(ov::TensorVector& outputs, const ov::TensorVector& inputs) const; private: void calculate_output_shape(std::vector& reshape_pattern, diff --git a/src/core/include/openvino/op/result.hpp b/src/core/include/openvino/op/result.hpp index 43fd236880324f..7bcc0faaf60c03 100644 --- a/src/core/include/openvino/op/result.hpp +++ b/src/core/include/openvino/op/result.hpp @@ -29,9 +29,7 @@ class OPENVINO_API Result : public Op { std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - OPENVINO_SUPPRESS_DEPRECATED_START - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - OPENVINO_SUPPRESS_DEPRECATED_END + bool evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const override; bool has_evaluate() const override; bool constant_fold(OutputVector& output_values, const OutputVector& inputs_values) override; diff --git a/src/core/include/openvino/op/tile.hpp b/src/core/include/openvino/op/tile.hpp index 313c34833e0f57..d7459477dac75a 100644 --- a/src/core/include/openvino/op/tile.hpp +++ b/src/core/include/openvino/op/tile.hpp @@ -28,17 +28,11 @@ class OPENVINO_API Tile : public Op { std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - OPENVINO_SUPPRESS_DEPRECATED_START - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - OPENVINO_SUPPRESS_DEPRECATED_END bool evaluate_lower(TensorVector& outputs) const override; bool evaluate_upper(TensorVector& outputs) const override; bool has_evaluate() const override; bool evaluate(ov::TensorVector& output_values, const ov::TensorVector& input_values) const override; bool evaluate_label(TensorLabelVector& 
output_labels) const override; - -private: - bool evaluate_tile(const HostTensorVector& outputs, const HostTensorVector& inputs) const; }; } // namespace v0 } // namespace op diff --git a/src/core/src/itt.hpp b/src/core/src/itt.hpp index e8702f0410931d..e50af7da7f4d31 100644 --- a/src/core/src/itt.hpp +++ b/src/core/src/itt.hpp @@ -50,21 +50,21 @@ OV_CC_DOMAINS(ov_opset); # define INSERT_OP(opset_name, op_name, op_namespace) opset.insert() #endif -#define NGRAPH_TYPE_CASE(region, a, ...) \ +#define OPENVINO_TYPE_CASE(region, a, ...) \ case ov::element::Type_t::a: { \ OV_SCOPE(ov_op, OV_PP_CAT3(region, _, a)) { \ rc = evaluate(__VA_ARGS__); \ } \ } break -#define NGRAPH_2_TYPES_CASE(region, a, b, ...) \ +#define OPENVINO_2_TYPES_CASE(region, a, b, ...) \ case element::Type_t::a: { \ OV_SCOPE(ov_op, OV_PP_CAT4(region, _, a, b)) { \ rc = evaluate(__VA_ARGS__); \ } \ } break -#define NGRAPH_COPY_TENSOR(region, a, ...) \ +#define OPENVINO_COPY_TENSOR(region, a, ...) \ case ov::element::Type_t::a: { \ OV_SCOPE(ov_op, OV_PP_CAT3(region, _, a)) { \ rc = copy_tensor(__VA_ARGS__); \ diff --git a/src/core/src/model.cpp b/src/core/src/model.cpp index 1872660227fc16..b06bd5ece52933 100644 --- a/src/core/src/model.cpp +++ b/src/core/src/model.cpp @@ -516,8 +516,19 @@ bool ov::Model::evaluate(ov::TensorVector& output_tensors, ov::EvaluationContext& evaluation_context) const { evaluation_context.emplace("VariableContext", ov::op::util::VariableContext()); std::map value_map; + OPENVINO_ASSERT(input_tensors.size() == m_parameters.size(), + "Cannot evaluate model! Number of tensors (", + input_tensors.size(), + ") is not equal to number of parameters (", + m_parameters.size(), + ")."); for (size_t i = 0; i < m_parameters.size(); ++i) { value_map[m_parameters.at(i)->output(0)] = input_tensors.at(i); + OPENVINO_ASSERT(m_parameters.at(i)->get_partial_shape().is_dynamic() || + m_parameters.at(i)->get_partial_shape().to_shape() == input_tensors[i].get_shape(), + "Cannot evaluate model! Tensor input shape and Parameter op with index ", + i, + " are mismatches."); } OutputVector outputs; std::map output_tensor_map; @@ -554,7 +565,7 @@ bool ov::Model::evaluate(ov::TensorVector& output_tensors, } return output_tensors; } else { - OPENVINO_ASSERT(false, "Evaluation failed on ", node); + OPENVINO_THROW("Evaluation failed on ", node); } }); for (const auto& value : outputs) { diff --git a/src/core/src/node.cpp b/src/core/src/node.cpp index 1610d7b2fda4da..ee2c454bb6a235 100644 --- a/src/core/src/node.cpp +++ b/src/core/src/node.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/node.hpp" +#include "openvino/core/node.hpp" #include #include @@ -12,7 +12,6 @@ #include "atomic_guard.hpp" #include "bound_evaluate.hpp" #include "itt.hpp" -#include "ngraph/graph_util.hpp" #include "openvino/core/descriptor/input.hpp" #include "openvino/core/rt_info.hpp" #include "openvino/core/shape_util.hpp" diff --git a/src/core/src/op/binary_convolution.cpp b/src/core/src/op/binary_convolution.cpp index d045818aa48c01..7e462ac091ae85 100644 --- a/src/core/src/op/binary_convolution.cpp +++ b/src/core/src/op/binary_convolution.cpp @@ -50,7 +50,7 @@ void ov::op::v1::BinaryConvolution::validate_and_infer_types() { "Data batch element type must be numeric. 
Got: ", data_batch_et); - // TODO: Add NodeValidationCheck to filters et once u1 is supported in nGraph Python API + // TODO: Add NodeValidationCheck to filters et once u1 is supported in OpenVINO Python API // (#52715) OPENVINO_SUPPRESS_DEPRECATED_START const auto input_shapes = get_node_input_partial_shapes(*this); diff --git a/src/core/src/op/broadcast.cpp b/src/core/src/op/broadcast.cpp index a793164570ad9c..26957103499b4a 100644 --- a/src/core/src/op/broadcast.cpp +++ b/src/core/src/op/broadcast.cpp @@ -2,41 +2,33 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/broadcast.hpp" +#include "openvino/op/broadcast.hpp" #include -#include #include #include "itt.hpp" -#include "ngraph/attribute_visitor.hpp" -#include "ngraph/op/constant.hpp" -#include "ngraph/partial_shape.hpp" -#include "ngraph/runtime/host_tensor.hpp" #include "openvino/reference/broadcast.hpp" -using namespace std; -using namespace ngraph; - -op::v3::Broadcast::Broadcast(const Output& arg, - const Output& target_shape, - const Output& axes_mapping, - const BroadcastModeSpec& broadcast_spec) +ov::op::v3::Broadcast::Broadcast(const Output& arg, + const Output& target_shape, + const Output& axes_mapping, + const BroadcastModeSpec& broadcast_spec) : util::BroadcastBase{arg, target_shape, axes_mapping, broadcast_spec} { constructor_validate_and_infer_types(); } -op::v3::Broadcast::Broadcast(const Output& arg, - const Output& target_shape, - const BroadcastModeSpec& broadcast_spec) +ov::op::v3::Broadcast::Broadcast(const Output& arg, + const Output& target_shape, + const BroadcastModeSpec& broadcast_spec) : util::BroadcastBase{arg, target_shape, broadcast_spec} { constructor_validate_and_infer_types(); } -OPENVINO_SUPPRESS_DEPRECATED_START namespace { -std::pair get_broadcast_axes_bidirectional(const ov::Shape& arg_shape, const ov::Shape& result_shape) { - AxisSet broadcast_axes; +std::pair get_broadcast_axes_bidirectional(const ov::Shape& arg_shape, + const ov::Shape& result_shape) { + ov::AxisSet broadcast_axes; bool axes_known = false; const auto start_axis = static_cast(result_shape.size()) - static_cast(arg_shape.size()); OPENVINO_ASSERT(start_axis >= 0); @@ -50,7 +42,7 @@ std::pair get_broadcast_axes_bidirectional(const ov::Shape& arg_s } } // namespace -std::pair op::v3::Broadcast::get_broadcast_axes() const { +std::pair ov::op::v3::Broadcast::get_broadcast_axes() const { if (m_mode.m_type == BroadcastType::BIDIRECTIONAL) { AxisSet broadcast_axes; bool axes_known = false; @@ -67,7 +59,7 @@ std::pair op::v3::Broadcast::get_broadcast_axes() const { } namespace { -ov::PartialShape get_result_shape_bidirectional(const Node* this_ptr, +ov::PartialShape get_result_shape_bidirectional(const ov::Node* this_ptr, ov::PartialShape arg_shape, ov::PartialShape target_shape) { if (arg_shape.rank().is_dynamic() || target_shape.rank().is_dynamic()) { @@ -94,8 +86,8 @@ ov::PartialShape get_result_shape_bidirectional(const Node* this_ptr, } else if (arg_dim == 1 || (target_dim.is_static() && target_dim != 1)) { result_shape[i] = target_dim; } else { - result_shape[i] = Dimension(std::min(arg_dim.get_min_length(), target_dim.get_min_length()), - std::max(arg_dim.get_max_length(), target_dim.get_max_length())); + result_shape[i] = ov::Dimension(std::min(arg_dim.get_min_length(), target_dim.get_min_length()), + std::max(arg_dim.get_max_length(), target_dim.get_max_length())); } continue; } @@ -114,7 +106,7 @@ ov::PartialShape get_result_shape_bidirectional(const Node* this_ptr, } } // namespace -bool 
op::v3::Broadcast::broadcast_evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const { +bool ov::op::v3::Broadcast::broadcast_evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const { if (get_broadcast_spec().m_type == op::BroadcastType::BIDIRECTIONAL) { auto arg_shape = inputs[0].get_shape(); ov::Shape target_shape = op::util::BroadcastBase::get_target_shape(inputs[1]); @@ -129,7 +121,7 @@ bool op::v3::Broadcast::broadcast_evaluate(ov::TensorVector& outputs, const ov:: return op::util::BroadcastBase::evaluate(outputs, inputs); } -void op::v3::Broadcast::validate_and_infer_types() { +void ov::op::v3::Broadcast::validate_and_infer_types() { OV_OP_SCOPE(v3_Broadcast_validate_and_infer_types); if (m_mode.m_type == BroadcastType::NONE) { NODE_VALIDATION_CHECK(this, @@ -175,37 +167,37 @@ void op::v3::Broadcast::validate_and_infer_types() { set_output_type(0, get_input_element_type(0), output_shapes[0]); } -shared_ptr op::v3::Broadcast::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr ov::op::v3::Broadcast::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v3_Broadcast_clone_with_new_inputs); check_new_args_count(this, new_args); if (new_args.size() == 2) { - return make_shared(new_args.at(0), new_args.at(1), m_mode); + return std::make_shared(new_args.at(0), new_args.at(1), m_mode); } else if (new_args.size() == 3) { - return make_shared(new_args.at(0), new_args.at(1), new_args.at(2), m_mode); + return std::make_shared(new_args.at(0), new_args.at(1), new_args.at(2), m_mode); } else { OPENVINO_THROW("Not supported number of Broadcast:v3 args"); } } -bool op::v3::Broadcast::visit_attributes(AttributeVisitor& visitor) { +bool ov::op::v3::Broadcast::visit_attributes(ov::AttributeVisitor& visitor) { OV_OP_SCOPE(v3_Broadcast_visit_attributes); visitor.on_attribute("mode", m_mode); return true; } -bool op::v3::Broadcast::evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const { +bool ov::op::v3::Broadcast::evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const { OV_OP_SCOPE(v3_Broadcast_evaluate); return broadcast_evaluate(outputs, inputs); } -bool op::v3::Broadcast::has_evaluate() const { +bool ov::op::v3::Broadcast::has_evaluate() const { OV_OP_SCOPE(v3_Broadcast_has_evaluate); return m_mode.m_type == BroadcastType::NONE || m_mode.m_type == BroadcastType::PDPD || m_mode.m_type == BroadcastType::NUMPY || m_mode.m_type == BroadcastType::BIDIRECTIONAL; } namespace { -using namespace op; +using namespace ov::op; BroadcastModeSpec to_broadcast_mode(const AutoBroadcastSpec& bs) { BroadcastModeSpec broadcast_mode; broadcast_mode.m_axis = bs.m_axis; @@ -224,18 +216,18 @@ BroadcastModeSpec to_broadcast_mode(const AutoBroadcastSpec& bs) { } } // namespace -op::v1::Broadcast::Broadcast(const Output& arg, - const Output& target_shape, - const Output& axes_mapping, - const AutoBroadcastSpec& broadcast_spec) +ov::op::v1::Broadcast::Broadcast(const Output& arg, + const Output& target_shape, + const Output& axes_mapping, + const AutoBroadcastSpec& broadcast_spec) : util::BroadcastBase{arg, target_shape, axes_mapping, to_broadcast_mode(broadcast_spec)}, m_broadcast_spec{broadcast_spec} { constructor_validate_and_infer_types(); } -op::v1::Broadcast::Broadcast(const Output& arg, - const Output& target_shape, - const AutoBroadcastSpec& broadcast_spec) +ov::op::v1::Broadcast::Broadcast(const Output& arg, + const Output& target_shape, + const AutoBroadcastSpec& broadcast_spec) : 
util::BroadcastBase{arg, target_shape, op::v0::Constant::create(element::u8, ov::Shape{}, {0})->output(0), @@ -244,7 +236,7 @@ op::v1::Broadcast::Broadcast(const Output& arg, constructor_validate_and_infer_types(); } -void op::v1::Broadcast::validate_and_infer_types() { +void ov::op::v1::Broadcast::validate_and_infer_types() { OV_OP_SCOPE(v1_Broadcast_validate_and_infer_types); // m_type is deduced and not always explicitly stated, for cases where broadcast // has 2 inputs its always NUMPY mode @@ -291,24 +283,24 @@ void op::v1::Broadcast::validate_and_infer_types() { set_output_type(0, get_input_element_type(0), output_shapes[0]); } -shared_ptr op::v1::Broadcast::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr ov::op::v1::Broadcast::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v1_Broadcast_clone_with_new_inputs); check_new_args_count(this, new_args); - return make_shared(new_args.at(0), new_args.at(1), new_args.at(2), m_broadcast_spec); + return std::make_shared(new_args.at(0), new_args.at(1), new_args.at(2), m_broadcast_spec); } -bool op::v1::Broadcast::visit_attributes(AttributeVisitor& visitor) { +bool ov::op::v1::Broadcast::visit_attributes(AttributeVisitor& visitor) { OV_OP_SCOPE(v1_Broadcast_visit_attributes); visitor.on_attribute("mode", m_broadcast_spec); return true; } -bool op::v1::Broadcast::evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const { +bool ov::op::v1::Broadcast::evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const { OV_OP_SCOPE(v1_Broadcast_evaluate); return op::util::BroadcastBase::evaluate(outputs, inputs); } -bool op::v1::Broadcast::has_evaluate() const { +bool ov::op::v1::Broadcast::has_evaluate() const { OV_OP_SCOPE(v1_Broadcast_has_evaluate); return m_mode.m_type == BroadcastType::NONE || m_mode.m_type == BroadcastType::PDPD || m_mode.m_type == BroadcastType::NUMPY; diff --git a/src/core/src/op/constant.cpp b/src/core/src/op/constant.cpp index 34ed30d636cf64..27d9e000b64dec 100644 --- a/src/core/src/op/constant.cpp +++ b/src/core/src/op/constant.cpp @@ -2,44 +2,40 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/constant.hpp" +#include "openvino/op/constant.hpp" #include #include #include -#include #include #include "itt.hpp" -#include "ngraph/log.hpp" -#include "ngraph/op/util/attr_types.hpp" -#include "ngraph/util.hpp" - -using namespace std; -OPENVINO_SUPPRESS_DEPRECATED_START +#include "ngraph/runtime/host_tensor.hpp" +#include "ngraph/runtime/tensor.hpp" template -static inline string to_cpp_string(T value) { - string rc; +static inline std::string to_cpp_string(T value) { + std::string rc; if (std::isnan(value)) { rc = "NAN"; } else if (std::isinf(value)) { rc = (value > 0 ? 
"INFINITY" : "-INFINITY"); } else { - stringstream ss; + std::stringstream ss; ss << value; rc = ss.str(); } return rc; } -ov::op::v0::Constant::Constant(const shared_ptr& tensor) { +OPENVINO_SUPPRESS_DEPRECATED_START +ov::op::v0::Constant::Constant(const std::shared_ptr& tensor) { m_element_type = tensor->get_element_type(); m_shape = tensor->get_shape(); // Share data from HostTensor if we work with it // And copy data in other cas if (auto hostTensor = std::dynamic_pointer_cast(tensor)) { - m_data = make_shared>>( + m_data = std::make_shared>>( static_cast(hostTensor->get_data_ptr()), tensor->get_size_in_bytes(), tensor); @@ -50,14 +46,17 @@ ov::op::v0::Constant::Constant(const shared_ptr& tensor } constructor_validate_and_infer_types(); } +OPENVINO_SUPPRESS_DEPRECATED_END ov::op::v0::Constant::Constant(const ov::Tensor& tensor) { m_element_type = tensor.get_element_type(); m_shape = tensor.get_shape(); + OPENVINO_SUPPRESS_DEPRECATED_START // Share data from ov::Tensor - m_data = make_shared>(static_cast(tensor.data()), - tensor.get_byte_size(), - tensor); + m_data = std::make_shared>(static_cast(tensor.data()), + tensor.get_byte_size(), + tensor); + OPENVINO_SUPPRESS_DEPRECATED_END constructor_validate_and_infer_types(); } @@ -212,10 +211,12 @@ ov::op::v0::Constant::Constant(bool memset_allocation, const element::Type& type } void ov::op::v0::Constant::allocate_buffer(bool memset_allocation) { - m_data = make_shared(mem_size(), host_alignment()); + OPENVINO_SUPPRESS_DEPRECATED_START + m_data = std::make_shared(mem_size(), host_alignment()); if (memset_allocation) { std::memset(m_data->get_ptr(), 0, m_data->size()); } + OPENVINO_SUPPRESS_DEPRECATED_END } ov::op::v0::Constant::Constant(const element::Type& type, const ov::Shape& shape, const void* data) @@ -245,8 +246,8 @@ ov::op::v0::Constant::Constant(const Constant& other, const ov::Shape& new_shape ov::op::v0::Constant::~Constant() = default; -string ov::op::v0::Constant::convert_value_to_string(size_t index) const { - string rc; +std::string ov::op::v0::Constant::convert_value_to_string(size_t index) const { + std::string rc; #if defined(__GNUC__) && !(__GNUC__ == 4 && __GNUC_MINOR__ == 8) # pragma GCC diagnostic push # pragma GCC diagnostic error "-Wswitch" @@ -255,7 +256,7 @@ string ov::op::v0::Constant::convert_value_to_string(size_t index) const { using Type_t = element::Type_t; switch (get_element_type()) { case Type_t::boolean: - rc = to_string(get_element_value(index)); + rc = std::to_string(get_element_value(index)); break; case Type_t::bf16: rc = to_cpp_string(static_cast(get_element_value(index))); @@ -270,40 +271,40 @@ string ov::op::v0::Constant::convert_value_to_string(size_t index) const { rc = to_cpp_string(get_element_value(index)); break; case Type_t::i4: - rc = to_string(get_element_value(index)); + rc = std::to_string(get_element_value(index)); break; case Type_t::i8: - rc = to_string(get_element_value(index)); + rc = std::to_string(get_element_value(index)); break; case Type_t::i16: - rc = to_string(get_element_value(index)); + rc = std::to_string(get_element_value(index)); break; case Type_t::i32: - rc = to_string(get_element_value(index)); + rc = std::to_string(get_element_value(index)); break; case Type_t::i64: - rc = to_string(get_element_value(index)); + rc = std::to_string(get_element_value(index)); break; case Type_t::u1: - rc = to_string(get_element_value(index)); + rc = std::to_string(get_element_value(index)); break; case Type_t::u4: - rc = to_string(get_element_value(index)); + rc = 
std::to_string(get_element_value(index)); break; case Type_t::u8: - rc = to_string(get_element_value(index)); + rc = std::to_string(get_element_value(index)); break; case Type_t::u16: - rc = to_string(get_element_value(index)); + rc = std::to_string(get_element_value(index)); break; case Type_t::u32: - rc = to_string(get_element_value(index)); + rc = std::to_string(get_element_value(index)); break; case Type_t::u64: - rc = to_string(get_element_value(index)); + rc = std::to_string(get_element_value(index)); break; case Type_t::nf4: - rc = to_string(get_element_value(index)); + rc = std::to_string(get_element_value(index)); break; case Type_t::undefined: case Type_t::dynamic: @@ -315,8 +316,8 @@ string ov::op::v0::Constant::convert_value_to_string(size_t index) const { return rc; } -vector ov::op::v0::Constant::get_value_strings() const { - vector rc; +std::vector ov::op::v0::Constant::get_value_strings() const { + std::vector rc; #if defined(__GNUC__) && !(__GNUC__ == 4 && __GNUC_MINOR__ == 8) # pragma GCC diagnostic push @@ -326,7 +327,7 @@ vector ov::op::v0::Constant::get_value_strings() const { switch (get_element_type()) { case element::Type_t::boolean: for (int value : get_vector()) { - rc.push_back(to_string(value)); + rc.push_back(std::to_string(value)); } break; case element::Type_t::bf16: @@ -351,54 +352,54 @@ vector ov::op::v0::Constant::get_value_strings() const { break; case element::Type_t::i4: for (auto value : cast_vector()) { - rc.push_back(to_string(value)); + rc.push_back(std::to_string(value)); } break; case element::Type_t::i8: for (int value : get_vector()) { - rc.push_back(to_string(value)); + rc.push_back(std::to_string(value)); } break; case element::Type_t::i16: for (int value : get_vector()) { - rc.push_back(to_string(value)); + rc.push_back(std::to_string(value)); } break; case element::Type_t::i32: for (int32_t value : get_vector()) { - rc.push_back(to_string(value)); + rc.push_back(std::to_string(value)); } break; case element::Type_t::i64: for (int64_t value : get_vector()) { - rc.push_back(to_string(value)); + rc.push_back(std::to_string(value)); } break; case element::Type_t::u1: case element::Type_t::u4: case element::Type_t::nf4: for (auto value : cast_vector()) { - rc.push_back(to_string(value)); + rc.push_back(std::to_string(value)); } break; case element::Type_t::u8: for (uint32_t value : get_vector()) { - rc.push_back(to_string(value)); + rc.push_back(std::to_string(value)); } break; case element::Type_t::u16: for (uint32_t value : get_vector()) { - rc.push_back(to_string(value)); + rc.push_back(std::to_string(value)); } break; case element::Type_t::u32: for (uint32_t value : get_vector()) { - rc.push_back(to_string(value)); + rc.push_back(std::to_string(value)); } break; case element::Type_t::u64: for (uint64_t value : get_vector()) { - rc.push_back(to_string(value)); + rc.push_back(std::to_string(value)); } break; case element::Type_t::undefined: @@ -475,10 +476,10 @@ ov::AxisSet ov::op::v0::Constant::get_axis_set_val() const { return output_axis_set; } -shared_ptr ov::op::v0::Constant::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr ov::op::v0::Constant::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v0_Constant_clone_with_new_inputs); check_new_args_count(this, new_args); - return make_shared(*this); + return std::make_shared(*this); } template @@ -566,11 +567,13 @@ bool ov::op::v0::Constant::visit_attributes(AttributeVisitor& visitor) { return true; } -bool ov::op::v0::Constant::evaluate(const 
HostTensorVector& outputs, const HostTensorVector& inputs) const { +bool ov::op::v0::Constant::evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const { OV_OP_SCOPE(v0_Constant_evaluate); - auto output = outputs[0]; - output->set_shape(m_shape); - output->write(get_data_ptr(), output->get_size_in_bytes()); + if (outputs.empty()) + outputs.emplace_back(ov::Tensor(m_element_type, m_shape)); + else + outputs[0].set_shape(m_shape); + std::memcpy(outputs[0].data(), get_data_ptr(), outputs[0].get_byte_size()); return true; } diff --git a/src/core/src/op/convert.cpp b/src/core/src/op/convert.cpp index 2d602161fb4e5a..34c3f59f525b5c 100644 --- a/src/core/src/op/convert.cpp +++ b/src/core/src/op/convert.cpp @@ -103,23 +103,23 @@ bool evaluate(const HostTensorPtr& arg, const HostTensorPtr& out) { bool evaluate_convert(const HostTensorPtr& arg, const HostTensorPtr& out) { bool rc = true; switch (arg->get_element_type()) { - NGRAPH_TYPE_CASE(evaluate_convert, u1, arg, out); - NGRAPH_TYPE_CASE(evaluate_convert, u4, arg, out); - NGRAPH_TYPE_CASE(evaluate_convert, u8, arg, out); - NGRAPH_TYPE_CASE(evaluate_convert, u16, arg, out); - NGRAPH_TYPE_CASE(evaluate_convert, u32, arg, out); - NGRAPH_TYPE_CASE(evaluate_convert, u64, arg, out); - NGRAPH_TYPE_CASE(evaluate_convert, i4, arg, out); - NGRAPH_TYPE_CASE(evaluate_convert, i8, arg, out); - NGRAPH_TYPE_CASE(evaluate_convert, i16, arg, out); - NGRAPH_TYPE_CASE(evaluate_convert, i32, arg, out); - NGRAPH_TYPE_CASE(evaluate_convert, i64, arg, out); - NGRAPH_TYPE_CASE(evaluate_convert, bf16, arg, out); - NGRAPH_TYPE_CASE(evaluate_convert, f16, arg, out); - NGRAPH_TYPE_CASE(evaluate_convert, f32, arg, out); - NGRAPH_TYPE_CASE(evaluate_convert, f64, arg, out); - NGRAPH_TYPE_CASE(evaluate_convert, boolean, arg, out); - NGRAPH_TYPE_CASE(evaluate_convert, nf4, arg, out); + OPENVINO_TYPE_CASE(evaluate_convert, u1, arg, out); + OPENVINO_TYPE_CASE(evaluate_convert, u4, arg, out); + OPENVINO_TYPE_CASE(evaluate_convert, u8, arg, out); + OPENVINO_TYPE_CASE(evaluate_convert, u16, arg, out); + OPENVINO_TYPE_CASE(evaluate_convert, u32, arg, out); + OPENVINO_TYPE_CASE(evaluate_convert, u64, arg, out); + OPENVINO_TYPE_CASE(evaluate_convert, i4, arg, out); + OPENVINO_TYPE_CASE(evaluate_convert, i8, arg, out); + OPENVINO_TYPE_CASE(evaluate_convert, i16, arg, out); + OPENVINO_TYPE_CASE(evaluate_convert, i32, arg, out); + OPENVINO_TYPE_CASE(evaluate_convert, i64, arg, out); + OPENVINO_TYPE_CASE(evaluate_convert, bf16, arg, out); + OPENVINO_TYPE_CASE(evaluate_convert, f16, arg, out); + OPENVINO_TYPE_CASE(evaluate_convert, f32, arg, out); + OPENVINO_TYPE_CASE(evaluate_convert, f64, arg, out); + OPENVINO_TYPE_CASE(evaluate_convert, boolean, arg, out); + OPENVINO_TYPE_CASE(evaluate_convert, nf4, arg, out); default: rc = false; break; diff --git a/src/core/src/op/detection_output.cpp b/src/core/src/op/detection_output.cpp index 6f13e090c06b2f..84a2b9190052de 100644 --- a/src/core/src/op/detection_output.cpp +++ b/src/core/src/op/detection_output.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/detection_output.hpp" +#include "openvino/op/detection_output.hpp" #include "detection_output_shape_inference.hpp" #include "itt.hpp" diff --git a/src/core/src/op/divide.cpp b/src/core/src/op/divide.cpp index c2da6a9c05d00a..03fa88dfbc8a31 100644 --- a/src/core/src/op/divide.cpp +++ b/src/core/src/op/divide.cpp @@ -48,13 +48,13 @@ bool evaluate_divide(const HostTensorPtr& arg0, bool rc = true; out->set_broadcast(broadcast_spec, arg0, arg1); 
switch (arg0->get_element_type()) { - NGRAPH_TYPE_CASE(evaluate_divide, i32, arg0, arg1, out, broadcast_spec, pythondiv); - NGRAPH_TYPE_CASE(evaluate_divide, i64, arg0, arg1, out, broadcast_spec, pythondiv); - NGRAPH_TYPE_CASE(evaluate_divide, u32, arg0, arg1, out, broadcast_spec, pythondiv); - NGRAPH_TYPE_CASE(evaluate_divide, u64, arg0, arg1, out, broadcast_spec, pythondiv); - NGRAPH_TYPE_CASE(evaluate_divide, f16, arg0, arg1, out, broadcast_spec, pythondiv); - NGRAPH_TYPE_CASE(evaluate_divide, f32, arg0, arg1, out, broadcast_spec, pythondiv); - NGRAPH_TYPE_CASE(evaluate_divide, bf16, arg0, arg1, out, broadcast_spec, pythondiv); + OPENVINO_TYPE_CASE(evaluate_divide, i32, arg0, arg1, out, broadcast_spec, pythondiv); + OPENVINO_TYPE_CASE(evaluate_divide, i64, arg0, arg1, out, broadcast_spec, pythondiv); + OPENVINO_TYPE_CASE(evaluate_divide, u32, arg0, arg1, out, broadcast_spec, pythondiv); + OPENVINO_TYPE_CASE(evaluate_divide, u64, arg0, arg1, out, broadcast_spec, pythondiv); + OPENVINO_TYPE_CASE(evaluate_divide, f16, arg0, arg1, out, broadcast_spec, pythondiv); + OPENVINO_TYPE_CASE(evaluate_divide, f32, arg0, arg1, out, broadcast_spec, pythondiv); + OPENVINO_TYPE_CASE(evaluate_divide, bf16, arg0, arg1, out, broadcast_spec, pythondiv); default: rc = false; break; diff --git a/src/core/src/op/elu.cpp b/src/core/src/op/elu.cpp index b3f052731c530f..10a3bf26a2137c 100644 --- a/src/core/src/op/elu.cpp +++ b/src/core/src/op/elu.cpp @@ -2,32 +2,30 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/elu.hpp" +#include "openvino/op/elu.hpp" #include "itt.hpp" -#include "ngraph/attribute_visitor.hpp" -using namespace std; -using namespace ngraph; - -op::Elu::Elu(const Output& data, const double alpha) : util::UnaryElementwiseArithmetic(data), m_alpha{alpha} { +ov::op::v0::Elu::Elu(const Output& data, const double alpha) + : util::UnaryElementwiseArithmetic(data), + m_alpha{alpha} { constructor_validate_and_infer_types(); } -bool ngraph::op::v0::Elu::visit_attributes(AttributeVisitor& visitor) { +bool ov::op::v0::Elu::visit_attributes(AttributeVisitor& visitor) { OV_OP_SCOPE(v0_Elu_visit_attributes); visitor.on_attribute("alpha", m_alpha); return true; } -void op::v0::Elu::validate_and_infer_types() { +void ov::op::v0::Elu::validate_and_infer_types() { OV_OP_SCOPE(v0_Elu_validate_and_infer_types); set_output_size(1); set_output_type(0, get_input_element_type(0), get_input_partial_shape(0)); } -shared_ptr op::Elu::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr ov::op::v0::Elu::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v0_Elu_clone_with_new_inputs); check_new_args_count(this, new_args); - return make_shared(new_args.at(0), m_alpha); + return std::make_shared(new_args.at(0), m_alpha); } diff --git a/src/core/src/op/embedding_segments_sum.cpp b/src/core/src/op/embedding_segments_sum.cpp index c368e6f748fc27..e31ca23dd1f690 100644 --- a/src/core/src/op/embedding_segments_sum.cpp +++ b/src/core/src/op/embedding_segments_sum.cpp @@ -4,8 +4,6 @@ #include "openvino/op/embedding_segments_sum.hpp" -#include - #include "embedding_segments_sum_shape_inference.hpp" #include "itt.hpp" diff --git a/src/core/src/op/equal.cpp b/src/core/src/op/equal.cpp index 035a9e24227896..e4adf5d0e4ce53 100644 --- a/src/core/src/op/equal.cpp +++ b/src/core/src/op/equal.cpp @@ -39,21 +39,21 @@ bool evaluate_equal(const HostTensorPtr& arg0, bool rc = true; out->set_broadcast(broadcast_spec, arg0, arg1, element::boolean); switch (arg0->get_element_type()) { - 
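// --- Illustrative sketch, not part of the patch: one way the reworked
// ov::op::v0::Constant::evaluate() shown earlier in this patch can be called
// through the ov::Tensor-based API. The helper name and values below are
// hypothetical; only the Constant/Tensor calls come from the OpenVINO headers.
#include <memory>
#include <vector>
#include "openvino/op/constant.hpp"
#include "openvino/runtime/tensor.hpp"

static ov::Tensor copy_constant_via_evaluate() {
    const std::vector<float> values{1.0f, 2.0f, 3.0f, 4.0f};
    auto constant = std::make_shared<ov::op::v0::Constant>(ov::element::f32, ov::Shape{2, 2}, values);

    // The new signature takes ov::TensorVector. An empty outputs vector is
    // allowed: the op allocates a tensor of its own element type and shape,
    // then copies the constant data into it.
    ov::TensorVector outputs;
    constant->evaluate(outputs, ov::TensorVector{});
    return outputs.at(0);
}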
NGRAPH_TYPE_CASE(evaluate_equal, boolean, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_equal, i4, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_equal, i8, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_equal, i16, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_equal, i32, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_equal, i64, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_equal, u4, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_equal, u8, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_equal, u16, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_equal, u32, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_equal, u64, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_equal, bf16, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_equal, f16, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_equal, f32, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_equal, f64, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_equal, boolean, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_equal, i4, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_equal, i8, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_equal, i16, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_equal, i32, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_equal, i64, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_equal, u4, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_equal, u8, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_equal, u16, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_equal, u32, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_equal, u64, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_equal, bf16, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_equal, f16, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_equal, f32, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_equal, f64, arg0, arg1, out, broadcast_spec); default: rc = false; break; diff --git a/src/core/src/op/erf.cpp b/src/core/src/op/erf.cpp index 36acd027a273a3..e471065a94279b 100644 --- a/src/core/src/op/erf.cpp +++ b/src/core/src/op/erf.cpp @@ -43,12 +43,12 @@ bool evaluate_erf(const HostTensorPtr& arg0, const HostTensorPtr& out, const siz out->set_unary(arg0); switch (arg0->get_element_type()) { - NGRAPH_TYPE_CASE(evaluate_erf, i32, arg0, out, count); - NGRAPH_TYPE_CASE(evaluate_erf, i64, arg0, out, count); - NGRAPH_TYPE_CASE(evaluate_erf, u32, arg0, out, count); - NGRAPH_TYPE_CASE(evaluate_erf, u64, arg0, out, count); - NGRAPH_TYPE_CASE(evaluate_erf, f16, arg0, out, count); - NGRAPH_TYPE_CASE(evaluate_erf, f32, arg0, out, count); + OPENVINO_TYPE_CASE(evaluate_erf, i32, arg0, out, count); + OPENVINO_TYPE_CASE(evaluate_erf, i64, arg0, out, count); + OPENVINO_TYPE_CASE(evaluate_erf, u32, arg0, out, count); + OPENVINO_TYPE_CASE(evaluate_erf, u64, arg0, out, count); + OPENVINO_TYPE_CASE(evaluate_erf, f16, arg0, out, count); + OPENVINO_TYPE_CASE(evaluate_erf, f32, arg0, out, count); default: rc = false; break; diff --git a/src/core/src/op/exp.cpp b/src/core/src/op/exp.cpp index 8a8a5fa88d9cd2..14131f07c75b3a 100644 --- a/src/core/src/op/exp.cpp +++ b/src/core/src/op/exp.cpp @@ -44,12 +44,12 @@ bool evaluate_exp(const HostTensorPtr& 
arg0, const HostTensorPtr& out) { out->set_unary(arg0); switch (arg0->get_element_type()) { - NGRAPH_TYPE_CASE(evaluate_exp, i32, arg0, out, count); - NGRAPH_TYPE_CASE(evaluate_exp, i64, arg0, out, count); - NGRAPH_TYPE_CASE(evaluate_exp, u32, arg0, out, count); - NGRAPH_TYPE_CASE(evaluate_exp, u64, arg0, out, count); - NGRAPH_TYPE_CASE(evaluate_exp, f16, arg0, out, count); - NGRAPH_TYPE_CASE(evaluate_exp, f32, arg0, out, count); + OPENVINO_TYPE_CASE(evaluate_exp, i32, arg0, out, count); + OPENVINO_TYPE_CASE(evaluate_exp, i64, arg0, out, count); + OPENVINO_TYPE_CASE(evaluate_exp, u32, arg0, out, count); + OPENVINO_TYPE_CASE(evaluate_exp, u64, arg0, out, count); + OPENVINO_TYPE_CASE(evaluate_exp, f16, arg0, out, count); + OPENVINO_TYPE_CASE(evaluate_exp, f32, arg0, out, count); default: rc = false; break; diff --git a/src/core/src/op/eye.cpp b/src/core/src/op/eye.cpp index 551fd6b1be13cf..77e4082792e2f6 100644 --- a/src/core/src/op/eye.cpp +++ b/src/core/src/op/eye.cpp @@ -23,14 +23,14 @@ bool evaluate(const ngraph::HostTensorPtr& out, const int64_t diagonal_index) { bool evaluate_eye(const ngraph::HostTensorPtr& out, const int64_t diagonal_index) { bool rc = true; switch (out->get_element_type()) { - NGRAPH_TYPE_CASE(evaluate, i8, out, diagonal_index); - NGRAPH_TYPE_CASE(evaluate, u8, out, diagonal_index); - NGRAPH_TYPE_CASE(evaluate, f16, out, diagonal_index); - NGRAPH_TYPE_CASE(evaluate, bf16, out, diagonal_index); - NGRAPH_TYPE_CASE(evaluate, i32, out, diagonal_index); - NGRAPH_TYPE_CASE(evaluate, f32, out, diagonal_index); - NGRAPH_TYPE_CASE(evaluate, f64, out, diagonal_index); - NGRAPH_TYPE_CASE(evaluate, i64, out, diagonal_index); + OPENVINO_TYPE_CASE(evaluate, i8, out, diagonal_index); + OPENVINO_TYPE_CASE(evaluate, u8, out, diagonal_index); + OPENVINO_TYPE_CASE(evaluate, f16, out, diagonal_index); + OPENVINO_TYPE_CASE(evaluate, bf16, out, diagonal_index); + OPENVINO_TYPE_CASE(evaluate, i32, out, diagonal_index); + OPENVINO_TYPE_CASE(evaluate, f32, out, diagonal_index); + OPENVINO_TYPE_CASE(evaluate, f64, out, diagonal_index); + OPENVINO_TYPE_CASE(evaluate, i64, out, diagonal_index); default: rc = false; break; diff --git a/src/core/src/op/fake_quantize.cpp b/src/core/src/op/fake_quantize.cpp index 625d0c8952f432..f558d090723e32 100644 --- a/src/core/src/op/fake_quantize.cpp +++ b/src/core/src/op/fake_quantize.cpp @@ -114,12 +114,12 @@ bool evaluate_fakequantize(const HostTensorPtr& arg0, const ngraph::op::FakeQuantize* parent) { bool rc = true; switch (arg0->get_element_type()) { - NGRAPH_TYPE_CASE(evaluate_fakequantize, i32, arg0, arg1, arg2, arg3, arg4, out, parent); - NGRAPH_TYPE_CASE(evaluate_fakequantize, i64, arg0, arg1, arg2, arg3, arg4, out, parent); - NGRAPH_TYPE_CASE(evaluate_fakequantize, u32, arg0, arg1, arg2, arg3, arg4, out, parent); - NGRAPH_TYPE_CASE(evaluate_fakequantize, u64, arg0, arg1, arg2, arg3, arg4, out, parent); - NGRAPH_TYPE_CASE(evaluate_fakequantize, f16, arg0, arg1, arg2, arg3, arg4, out, parent); - NGRAPH_TYPE_CASE(evaluate_fakequantize, f32, arg0, arg1, arg2, arg3, arg4, out, parent); + OPENVINO_TYPE_CASE(evaluate_fakequantize, i32, arg0, arg1, arg2, arg3, arg4, out, parent); + OPENVINO_TYPE_CASE(evaluate_fakequantize, i64, arg0, arg1, arg2, arg3, arg4, out, parent); + OPENVINO_TYPE_CASE(evaluate_fakequantize, u32, arg0, arg1, arg2, arg3, arg4, out, parent); + OPENVINO_TYPE_CASE(evaluate_fakequantize, u64, arg0, arg1, arg2, arg3, arg4, out, parent); + OPENVINO_TYPE_CASE(evaluate_fakequantize, f16, arg0, arg1, arg2, arg3, arg4, out, parent); + 
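// --- Illustrative sketch, an assumption about the pattern rather than the
// macro's literal expansion: the NGRAPH_TYPE_CASE / OPENVINO_TYPE_CASE lines
// in these switches perform per-element-type dispatch to a templated kernel.
// Hand-written, the shape of that dispatch is roughly the following toy
// "negate" example over ov::Tensor (all names here are invented for the sketch).
#include "openvino/core/type/element_type_traits.hpp"
#include "openvino/runtime/tensor.hpp"

template <ov::element::Type_t ET>
bool negate_typed(const ov::Tensor& in, ov::Tensor& out) {
    using T = typename ov::element_type_traits<ET>::value_type;
    const T* src = in.data<T>();
    T* dst = out.data<T>();
    for (size_t i = 0; i < in.get_size(); ++i)
        dst[i] = static_cast<T>(-src[i]);  // the real ops call ov::reference kernels here
    return true;
}

bool negate_dispatch(const ov::Tensor& in, ov::Tensor& out) {
    switch (in.get_element_type()) {
    case ov::element::Type_t::f32:
        return negate_typed<ov::element::Type_t::f32>(in, out);
    case ov::element::Type_t::i32:
        return negate_typed<ov::element::Type_t::i32>(in, out);
    default:
        return false;  // unsupported element type, mirroring the `rc = false` default above
    }
}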
OPENVINO_TYPE_CASE(evaluate_fakequantize, f32, arg0, arg1, arg2, arg3, arg4, out, parent); default: rc = false; break; diff --git a/src/core/src/op/floor.cpp b/src/core/src/op/floor.cpp index ffc9f20e1b19c3..864b7dd8188448 100644 --- a/src/core/src/op/floor.cpp +++ b/src/core/src/op/floor.cpp @@ -51,16 +51,16 @@ bool evaluate_floor(const HostTensorPtr& arg0, const HostTensorPtr& out, const s out->set_unary(arg0); switch (arg0->get_element_type()) { - NGRAPH_COPY_TENSOR(evaluate_floor, i8, arg0, out, count); - NGRAPH_COPY_TENSOR(evaluate_floor, i16, arg0, out, count); - NGRAPH_COPY_TENSOR(evaluate_floor, i32, arg0, out, count); - NGRAPH_COPY_TENSOR(evaluate_floor, i64, arg0, out, count); - NGRAPH_COPY_TENSOR(evaluate_floor, u8, arg0, out, count); - NGRAPH_COPY_TENSOR(evaluate_floor, u16, arg0, out, count); - NGRAPH_COPY_TENSOR(evaluate_floor, u32, arg0, out, count); - NGRAPH_COPY_TENSOR(evaluate_floor, u64, arg0, out, count); - NGRAPH_TYPE_CASE(evaluate_floor, f16, arg0, out, count); - NGRAPH_TYPE_CASE(evaluate_floor, f32, arg0, out, count); + OPENVINO_COPY_TENSOR(evaluate_floor, i8, arg0, out, count); + OPENVINO_COPY_TENSOR(evaluate_floor, i16, arg0, out, count); + OPENVINO_COPY_TENSOR(evaluate_floor, i32, arg0, out, count); + OPENVINO_COPY_TENSOR(evaluate_floor, i64, arg0, out, count); + OPENVINO_COPY_TENSOR(evaluate_floor, u8, arg0, out, count); + OPENVINO_COPY_TENSOR(evaluate_floor, u16, arg0, out, count); + OPENVINO_COPY_TENSOR(evaluate_floor, u32, arg0, out, count); + OPENVINO_COPY_TENSOR(evaluate_floor, u64, arg0, out, count); + OPENVINO_TYPE_CASE(evaluate_floor, f16, arg0, out, count); + OPENVINO_TYPE_CASE(evaluate_floor, f32, arg0, out, count); default: rc = false; break; diff --git a/src/core/src/op/floor_mod.cpp b/src/core/src/op/floor_mod.cpp index 16bc282d5b0003..012d55a6f4c1da 100644 --- a/src/core/src/op/floor_mod.cpp +++ b/src/core/src/op/floor_mod.cpp @@ -46,15 +46,15 @@ bool evaluate_floor_mod(const HostTensorPtr& arg0, bool rc = true; out->set_broadcast(broadcast_spec, arg0, arg1); switch (arg0->get_element_type()) { - NGRAPH_TYPE_CASE(evaluate_floor_mod, i8, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_floor_mod, i32, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_floor_mod, i64, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_floor_mod, u8, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_floor_mod, u32, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_floor_mod, u64, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_floor_mod, bf16, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_floor_mod, f16, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_floor_mod, f32, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_floor_mod, i8, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_floor_mod, i32, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_floor_mod, i64, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_floor_mod, u8, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_floor_mod, u32, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_floor_mod, u64, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_floor_mod, bf16, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_floor_mod, f16, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_floor_mod, f32, arg0, arg1, out, broadcast_spec); default: rc = false; break; diff --git 
a/src/core/src/op/gelu.cpp b/src/core/src/op/gelu.cpp index 752b83fe5cd5b3..f7c974af77c7e3 100644 --- a/src/core/src/op/gelu.cpp +++ b/src/core/src/op/gelu.cpp @@ -120,8 +120,8 @@ bool evaluate_gelu(const HostTensorPtr& arg0, const HostTensorPtr& out, op::Gelu out->set_unary(arg0); switch (arg0->get_element_type()) { - NGRAPH_TYPE_CASE(evaluate_gelu, f16, arg0, out, mode, count); - NGRAPH_TYPE_CASE(evaluate_gelu, f32, arg0, out, mode, count); + OPENVINO_TYPE_CASE(evaluate_gelu, f16, arg0, out, mode, count); + OPENVINO_TYPE_CASE(evaluate_gelu, f32, arg0, out, mode, count); default: rc = false; break; diff --git a/src/core/src/op/greater.cpp b/src/core/src/op/greater.cpp index 25323faf0dce42..dfc838c2f9c795 100644 --- a/src/core/src/op/greater.cpp +++ b/src/core/src/op/greater.cpp @@ -35,13 +35,13 @@ bool evaluate_greater(const HostTensorPtr& arg0, bool rc = true; out->set_broadcast(broadcast_spec, arg0, arg1, element::boolean); switch (arg0->get_element_type()) { - NGRAPH_TYPE_CASE(evaluate_greater, boolean, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_greater, i32, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_greater, i64, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_greater, u32, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_greater, u64, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_greater, f16, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_greater, f32, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_greater, boolean, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_greater, i32, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_greater, i64, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_greater, u32, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_greater, u64, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_greater, f16, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_greater, f32, arg0, arg1, out, broadcast_spec); default: rc = false; break; diff --git a/src/core/src/op/greater_eq.cpp b/src/core/src/op/greater_eq.cpp index 64b761233dc0cc..0eb36149de2083 100644 --- a/src/core/src/op/greater_eq.cpp +++ b/src/core/src/op/greater_eq.cpp @@ -36,13 +36,13 @@ bool evaluate_greater_equal(const HostTensorPtr& arg0, bool rc = true; out->set_broadcast(broadcast_spec, arg0, arg1, element::boolean); switch (arg0->get_element_type()) { - NGRAPH_TYPE_CASE(evaluate_greater_equal, boolean, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_greater_equal, i32, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_greater_equal, i64, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_greater_equal, u32, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_greater_equal, u64, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_greater_equal, f16, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_greater_equal, f32, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_greater_equal, boolean, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_greater_equal, i32, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_greater_equal, i64, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_greater_equal, u32, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_greater_equal, u64, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_greater_equal, f16, arg0, arg1, out, 
broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_greater_equal, f32, arg0, arg1, out, broadcast_spec); default: rc = false; break; diff --git a/src/core/src/op/grid_sample.cpp b/src/core/src/op/grid_sample.cpp index 0df903f8b54f7e..000b38cfbdc363 100644 --- a/src/core/src/op/grid_sample.cpp +++ b/src/core/src/op/grid_sample.cpp @@ -119,7 +119,7 @@ bool evaluate_grid_sample(const ngraph::HostTensorPtr& output, const op::v9::GridSample::Attributes& attributes) { auto rc = true; switch (output->get_element_type()) { - NGRAPH_TYPE_CASE(evaluate_grid_sample, f32, output, data, grid, attributes); + OPENVINO_TYPE_CASE(evaluate_grid_sample, f32, output, data, grid, attributes); default: rc = false; break; diff --git a/src/core/src/op/hard_sigmoid.cpp b/src/core/src/op/hard_sigmoid.cpp index 02c85eec1762bc..e61f2df6aa33e3 100644 --- a/src/core/src/op/hard_sigmoid.cpp +++ b/src/core/src/op/hard_sigmoid.cpp @@ -24,7 +24,7 @@ void op::v0::HardSigmoid::validate_and_infer_types() { if (alpha_pshape.is_static()) { const auto alpha_shape = alpha_pshape.to_shape(); NODE_VALIDATION_CHECK(this, - ngraph::is_scalar(alpha_shape), + ov::is_scalar(alpha_shape), "A scalar is expected for the 'alpha' input. Got: ", alpha_shape); } @@ -32,7 +32,7 @@ void op::v0::HardSigmoid::validate_and_infer_types() { if (beta_pshape.is_static()) { const auto beta_shape = beta_pshape.to_shape(); NODE_VALIDATION_CHECK(this, - ngraph::is_scalar(beta_shape), + ov::is_scalar(beta_shape), "A scalar is expected for the 'beta' input. Got: ", beta_shape); } diff --git a/src/core/src/op/hsigmoid.cpp b/src/core/src/op/hsigmoid.cpp index 94316792b20732..19fb55de3c5fc8 100644 --- a/src/core/src/op/hsigmoid.cpp +++ b/src/core/src/op/hsigmoid.cpp @@ -45,9 +45,9 @@ bool evaluate_hsigmoid(const HostTensorPtr& arg, const HostTensorPtr& out) { out->set_unary(arg); switch (arg->get_element_type()) { - NGRAPH_TYPE_CASE(evaluate_hsigmoid, bf16, arg, out, count); - NGRAPH_TYPE_CASE(evaluate_hsigmoid, f16, arg, out, count); - NGRAPH_TYPE_CASE(evaluate_hsigmoid, f32, arg, out, count); + OPENVINO_TYPE_CASE(evaluate_hsigmoid, bf16, arg, out, count); + OPENVINO_TYPE_CASE(evaluate_hsigmoid, f16, arg, out, count); + OPENVINO_TYPE_CASE(evaluate_hsigmoid, f32, arg, out, count); default: rc = false; break; diff --git a/src/core/src/op/hswish.cpp b/src/core/src/op/hswish.cpp index c5ab6c4a5562a5..b509ecb95aabd1 100644 --- a/src/core/src/op/hswish.cpp +++ b/src/core/src/op/hswish.cpp @@ -45,9 +45,9 @@ bool evaluate_hswish(const HostTensorPtr& arg, const HostTensorPtr& out) { out->set_unary(arg); switch (arg->get_element_type()) { - NGRAPH_TYPE_CASE(evaluate_hswish, bf16, arg, out, count); - NGRAPH_TYPE_CASE(evaluate_hswish, f16, arg, out, count); - NGRAPH_TYPE_CASE(evaluate_hswish, f32, arg, out, count); + OPENVINO_TYPE_CASE(evaluate_hswish, bf16, arg, out, count); + OPENVINO_TYPE_CASE(evaluate_hswish, f16, arg, out, count); + OPENVINO_TYPE_CASE(evaluate_hswish, f32, arg, out, count); default: rc = false; break; diff --git a/src/core/src/op/less.cpp b/src/core/src/op/less.cpp index 9f4b15bf204b45..aada1ff872d481 100644 --- a/src/core/src/op/less.cpp +++ b/src/core/src/op/less.cpp @@ -35,13 +35,13 @@ bool evaluate_less(const HostTensorPtr& arg0, bool rc = true; out->set_broadcast(broadcast_spec, arg0, arg1, element::boolean); switch (arg0->get_element_type()) { - NGRAPH_TYPE_CASE(evaluate_less, boolean, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_less, i32, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_less, i64, arg0, arg1, out, 
broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_less, u32, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_less, u64, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_less, f16, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_less, f32, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_less, boolean, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_less, i32, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_less, i64, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_less, u32, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_less, u64, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_less, f16, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_less, f32, arg0, arg1, out, broadcast_spec); default: rc = false; break; diff --git a/src/core/src/op/less_eq.cpp b/src/core/src/op/less_eq.cpp index 61e90cc3c9a051..ff15661fd88d6d 100644 --- a/src/core/src/op/less_eq.cpp +++ b/src/core/src/op/less_eq.cpp @@ -50,13 +50,13 @@ bool evaluate_less_equal(const HostTensorPtr& arg0, bool rc = true; out->set_broadcast(broadcast_spec, arg0, arg1, element::boolean); switch (arg0->get_element_type()) { - NGRAPH_TYPE_CASE(evaluate_less_equal, boolean, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_less_equal, i32, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_less_equal, i64, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_less_equal, u32, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_less_equal, u64, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_less_equal, f16, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_less_equal, f32, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_less_equal, boolean, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_less_equal, i32, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_less_equal, i64, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_less_equal, u32, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_less_equal, u64, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_less_equal, f16, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_less_equal, f32, arg0, arg1, out, broadcast_spec); default: rc = false; break; diff --git a/src/core/src/op/log.cpp b/src/core/src/op/log.cpp index ec592957b8351a..a854ceb06f5b50 100644 --- a/src/core/src/op/log.cpp +++ b/src/core/src/op/log.cpp @@ -42,12 +42,12 @@ bool evaluate_log(const HostTensorPtr& arg0, const HostTensorPtr& out, const siz out->set_unary(arg0); switch (arg0->get_element_type()) { - NGRAPH_TYPE_CASE(evaluate_log, i32, arg0, out, count); - NGRAPH_TYPE_CASE(evaluate_log, i64, arg0, out, count); - NGRAPH_TYPE_CASE(evaluate_log, u32, arg0, out, count); - NGRAPH_TYPE_CASE(evaluate_log, u64, arg0, out, count); - NGRAPH_TYPE_CASE(evaluate_log, f16, arg0, out, count); - NGRAPH_TYPE_CASE(evaluate_log, f32, arg0, out, count); + OPENVINO_TYPE_CASE(evaluate_log, i32, arg0, out, count); + OPENVINO_TYPE_CASE(evaluate_log, i64, arg0, out, count); + OPENVINO_TYPE_CASE(evaluate_log, u32, arg0, out, count); + OPENVINO_TYPE_CASE(evaluate_log, u64, arg0, out, count); + OPENVINO_TYPE_CASE(evaluate_log, f16, arg0, out, count); + OPENVINO_TYPE_CASE(evaluate_log, f32, arg0, out, count); default: rc = false; break; diff --git a/src/core/src/op/logical_and.cpp b/src/core/src/op/logical_and.cpp index 
ce935e4a188265..d6f451715a564d 100644 --- a/src/core/src/op/logical_and.cpp +++ b/src/core/src/op/logical_and.cpp @@ -54,7 +54,7 @@ bool evaluate_logand(const HostTensorPtr& arg0, bool rc = true; out->set_broadcast(broadcast_spec, arg0, arg1); switch (arg0->get_element_type()) { - NGRAPH_TYPE_CASE(evaluate_logand, boolean, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_logand, boolean, arg0, arg1, out, broadcast_spec); default: rc = false; break; diff --git a/src/core/src/op/logical_not.cpp b/src/core/src/op/logical_not.cpp index 6870c07921fc0e..7ed4971861766a 100644 --- a/src/core/src/op/logical_not.cpp +++ b/src/core/src/op/logical_not.cpp @@ -46,13 +46,13 @@ bool evaluate_not(const HostTensorPtr& arg0, const HostTensorPtr& out, const siz out->set_unary(arg0); switch (arg0->get_element_type()) { - NGRAPH_TYPE_CASE(evaluate_not, boolean, arg0, out, count); - NGRAPH_TYPE_CASE(evaluate_not, i32, arg0, out, count); - NGRAPH_TYPE_CASE(evaluate_not, i64, arg0, out, count); - NGRAPH_TYPE_CASE(evaluate_not, u32, arg0, out, count); - NGRAPH_TYPE_CASE(evaluate_not, u64, arg0, out, count); - NGRAPH_TYPE_CASE(evaluate_not, f16, arg0, out, count); - NGRAPH_TYPE_CASE(evaluate_not, f32, arg0, out, count); + OPENVINO_TYPE_CASE(evaluate_not, boolean, arg0, out, count); + OPENVINO_TYPE_CASE(evaluate_not, i32, arg0, out, count); + OPENVINO_TYPE_CASE(evaluate_not, i64, arg0, out, count); + OPENVINO_TYPE_CASE(evaluate_not, u32, arg0, out, count); + OPENVINO_TYPE_CASE(evaluate_not, u64, arg0, out, count); + OPENVINO_TYPE_CASE(evaluate_not, f16, arg0, out, count); + OPENVINO_TYPE_CASE(evaluate_not, f32, arg0, out, count); default: rc = false; break; diff --git a/src/core/src/op/logical_or.cpp b/src/core/src/op/logical_or.cpp index c5cbc20c83b13d..c473e6c12e385f 100644 --- a/src/core/src/op/logical_or.cpp +++ b/src/core/src/op/logical_or.cpp @@ -48,7 +48,7 @@ bool evaluate_logor(const HostTensorPtr& arg0, bool rc = true; out->set_broadcast(broadcast_spec, arg0, arg1); switch (arg0->get_element_type()) { - NGRAPH_TYPE_CASE(evaluate_logor, boolean, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_logor, boolean, arg0, arg1, out, broadcast_spec); default: rc = false; break; diff --git a/src/core/src/op/loop.cpp b/src/core/src/op/loop.cpp index 8bf7710861319f..0d7433629e8b9f 100644 --- a/src/core/src/op/loop.cpp +++ b/src/core/src/op/loop.cpp @@ -2,27 +2,22 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/loop.hpp" +#include "openvino/op/loop.hpp" #include -#include #include "itt.hpp" -#include "ngraph/factory.hpp" -#include "ngraph/graph_util.hpp" -#include "ngraph/opsets/opset5.hpp" +#include "openvino/core/validation_util.hpp" +#include "openvino/op/tensor_iterator.hpp" #include "openvino/reference/loop.hpp" #include "openvino/runtime/tensor.hpp" -using namespace std; -using namespace ngraph; - -op::v5::Loop::Loop(const Output& trip_count, const Output& execution_condition) : SubGraphOp() { +ov::op::v5::Loop::Loop(const Output& trip_count, const Output& execution_condition) : SubGraphOp() { set_argument(0, trip_count); set_argument(1, execution_condition); } -bool op::v5::Loop::visit_attributes(AttributeVisitor& visitor) { +bool ov::op::v5::Loop::visit_attributes(AttributeVisitor& visitor) { OV_OP_SCOPE(v5_Loop_visit_attributes); visitor.on_attribute("body", m_bodies[0]); visitor.on_attribute("input_descriptions", m_input_descriptions[0]); @@ -32,7 +27,7 @@ bool op::v5::Loop::visit_attributes(AttributeVisitor& visitor) { return true; } -void 
op::v5::Loop::validate_and_infer_types() { +void ov::op::v5::Loop::validate_and_infer_types() { OV_OP_SCOPE(v5_Loop_validate_and_infer_types); NODE_VALIDATION_CHECK(this, m_bodies.size() == 1, "Number of bodies for loop is greater than 1"); @@ -63,7 +58,7 @@ void op::v5::Loop::validate_and_infer_types() { "Rank of ExecutionCondition input must be equal to 0 or 1"); } OPENVINO_SUPPRESS_DEPRECATED_START - if (const auto& cond_value = get_constant_from_source(loop_execution_condition)) { + if (const auto& cond_value = ov::get_constant_from_source(loop_execution_condition)) { OPENVINO_SUPPRESS_DEPRECATED_END auto val = cond_value->cast_vector(); NODE_VALIDATION_CHECK(this, @@ -101,7 +96,7 @@ void op::v5::Loop::validate_and_infer_types() { } else { m_num_iterations = 1; // condition_always_false, do_while mode } - } else if (const auto& cond_param = std::dynamic_pointer_cast( + } else if (const auto& cond_param = std::dynamic_pointer_cast( body_execution_condition.get_node_shared_ptr())) { // Const(true or false) -> Loop (body: Parameter -> execution_condition output) for (const auto& desc : get_input_descriptions()) { @@ -178,8 +173,7 @@ void op::v5::Loop::validate_and_infer_types() { } else { auto out_shape = input_partial_shape; OPENVINO_SUPPRESS_DEPRECATED_START - const auto axis = - ngraph::normalize_axis(this, slice_input_description->m_axis, input_partial_shape.rank()); + const auto axis = ov::normalize_axis(this, slice_input_description->m_axis, input_partial_shape.rank()); OPENVINO_SUPPRESS_DEPRECATED_END out_shape[axis] = slice_input_description->m_part_size; body_parameter->set_partial_shape(out_shape); @@ -196,7 +190,7 @@ void op::v5::Loop::validate_and_infer_types() { body_parameter->set_element_type(input_type); back_edges[merged_input_description->m_body_value_index] = merged_input_description->m_body_parameter_index; } else if (auto invariant_input_description = - ov::as_type_ptr(input_description)) { + ov::as_type_ptr(input_description)) { auto body_parameter = m_bodies[0]->get_parameters().at(invariant_input_description->m_body_parameter_index); auto input_partial_shape = input(index).get_partial_shape(); @@ -290,7 +284,7 @@ void op::v5::Loop::validate_and_infer_types() { out_shape = ov::PartialShape{0}; } else if (out_shape.rank().is_static()) { OPENVINO_SUPPRESS_DEPRECATED_START - const auto axis = ngraph::normalize_axis(this, concat_output_description->m_axis, out_shape.rank()); + const auto axis = ov::normalize_axis(this, concat_output_description->m_axis, out_shape.rank()); OPENVINO_SUPPRESS_DEPRECATED_END const auto rank = out_shape.rank().get_length(); if (rank == 0) { @@ -326,10 +320,10 @@ void op::v5::Loop::validate_and_infer_types() { "Number of outputs must be the same as number of output descriptions"); } -std::shared_ptr op::v5::Loop::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr ov::op::v5::Loop::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v5_Loop_clone_with_new_inputs); check_new_args_count(this, new_args); - auto op = make_shared(); + auto op = std::make_shared(); OPENVINO_ASSERT(op.get(), op != nullptr, "Cannot clone ", @@ -340,12 +334,12 @@ std::shared_ptr op::v5::Loop::clone_with_new_inputs(const OutputVector& ne return op; } -Output op::v5::Loop::get_concatenated_slices(const Output& value, - int64_t start, - int64_t stride, - int64_t part_size, - int64_t end, - int64_t axis) { +ov::Output ov::op::v5::Loop::get_concatenated_slices(const Output& value, + int64_t start, + int64_t stride, + int64_t 
part_size, + int64_t end, + int64_t axis) { OPENVINO_ASSERT(start == 0 && stride == 1 && part_size == 1 && end == -1, "Invalid start, stride, part_size, or end attribute values in Loop op. " "Supported values for start {0}, for stride and part_size {1}, for end " @@ -353,7 +347,7 @@ Output op::v5::Loop::get_concatenated_slices(const Output& value, return SubGraphOp::get_concatenated_slices(value, start, stride, part_size, end, axis); } -bool op::v5::Loop::evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const { +bool ov::op::v5::Loop::evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const { OV_OP_SCOPE(v5_Loop_evaluate); ov::reference::loop(m_bodies[0], m_output_descriptions[0], @@ -364,11 +358,11 @@ bool op::v5::Loop::evaluate(ov::TensorVector& outputs, const ov::TensorVector& i return true; } -bool op::v5::Loop::has_evaluate() const { +bool ov::op::v5::Loop::has_evaluate() const { OV_OP_SCOPE(v5_Loop_has_evaluate); switch (get_input_element_type(0)) { - case ngraph::element::i32: - case ngraph::element::i64: + case ov::element::i32: + case ov::element::i64: return true; default: break; @@ -376,7 +370,7 @@ bool op::v5::Loop::has_evaluate() const { return false; } -void op::v5::Loop::clone_to(op::v5::Loop& dst, const OutputVector& new_args) const { +void ov::op::v5::Loop::clone_to(op::v5::Loop& dst, const OutputVector& new_args) const { dst.set_arguments(new_args); dst.set_output_size(m_output_descriptions.size()); @@ -394,6 +388,6 @@ void op::v5::Loop::clone_to(op::v5::Loop& dst, const OutputVector& new_args) con dst.validate_and_infer_types(); } -op::v5::Loop::Loop(const op::v5::Loop& other) : SubGraphOp() { +ov::op::v5::Loop::Loop(const op::v5::Loop& other) : SubGraphOp() { other.clone_to(*this, other.input_values()); } diff --git a/src/core/src/op/matmul.cpp b/src/core/src/op/matmul.cpp index d3fd1a30e8ef5a..d43b787e032232 100644 --- a/src/core/src/op/matmul.cpp +++ b/src/core/src/op/matmul.cpp @@ -69,12 +69,12 @@ bool evaluate_matmul(const op::MatMul* op, bool rc = true; switch (arg0->get_element_type()) { - NGRAPH_TYPE_CASE(evaluate_matmul, i32, op, arg0, arg1, output); - NGRAPH_TYPE_CASE(evaluate_matmul, i64, op, arg0, arg1, output); - NGRAPH_TYPE_CASE(evaluate_matmul, u32, op, arg0, arg1, output); - NGRAPH_TYPE_CASE(evaluate_matmul, u64, op, arg0, arg1, output); - NGRAPH_TYPE_CASE(evaluate_matmul, f16, op, arg0, arg1, output); - NGRAPH_TYPE_CASE(evaluate_matmul, f32, op, arg0, arg1, output); + OPENVINO_TYPE_CASE(evaluate_matmul, i32, op, arg0, arg1, output); + OPENVINO_TYPE_CASE(evaluate_matmul, i64, op, arg0, arg1, output); + OPENVINO_TYPE_CASE(evaluate_matmul, u32, op, arg0, arg1, output); + OPENVINO_TYPE_CASE(evaluate_matmul, u64, op, arg0, arg1, output); + OPENVINO_TYPE_CASE(evaluate_matmul, f16, op, arg0, arg1, output); + OPENVINO_TYPE_CASE(evaluate_matmul, f32, op, arg0, arg1, output); default: rc = false; break; diff --git a/src/core/src/op/max_pool.cpp b/src/core/src/op/max_pool.cpp index 6a43995896f569..d40c13644cd3cd 100644 --- a/src/core/src/op/max_pool.cpp +++ b/src/core/src/op/max_pool.cpp @@ -94,12 +94,12 @@ bool evaluate_maxpool(const HostTensorPtr& arg, auto arg_shape = arg->get_shape(); switch (out->get_element_type()) { - NGRAPH_TYPE_CASE(evaluate_maxpool, i32, arg, out, out_shape, kernel, strides, pad_begin, pad_end); - NGRAPH_TYPE_CASE(evaluate_maxpool, i64, arg, out, out_shape, kernel, strides, pad_begin, pad_end); - NGRAPH_TYPE_CASE(evaluate_maxpool, u32, arg, out, out_shape, kernel, strides, pad_begin, pad_end); - 
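// --- Illustrative sketch (assumption): the ov::normalize_axis calls used in
// the Loop changes above resolve a possibly negative axis into [0, rank).
// A minimal hand-rolled equivalent, for intuition only; the library helper
// additionally validates that the axis is in range.
inline int64_t normalize_axis_example(int64_t axis, int64_t rank) {
    // e.g. axis == -1 with rank == 4 resolves to 3
    return axis < 0 ? axis + rank : axis;
}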
NGRAPH_TYPE_CASE(evaluate_maxpool, u64, arg, out, out_shape, kernel, strides, pad_begin, pad_end); - NGRAPH_TYPE_CASE(evaluate_maxpool, f16, arg, out, out_shape, kernel, strides, pad_begin, pad_end); - NGRAPH_TYPE_CASE(evaluate_maxpool, f32, arg, out, out_shape, kernel, strides, pad_begin, pad_end); + OPENVINO_TYPE_CASE(evaluate_maxpool, i32, arg, out, out_shape, kernel, strides, pad_begin, pad_end); + OPENVINO_TYPE_CASE(evaluate_maxpool, i64, arg, out, out_shape, kernel, strides, pad_begin, pad_end); + OPENVINO_TYPE_CASE(evaluate_maxpool, u32, arg, out, out_shape, kernel, strides, pad_begin, pad_end); + OPENVINO_TYPE_CASE(evaluate_maxpool, u64, arg, out, out_shape, kernel, strides, pad_begin, pad_end); + OPENVINO_TYPE_CASE(evaluate_maxpool, f16, arg, out, out_shape, kernel, strides, pad_begin, pad_end); + OPENVINO_TYPE_CASE(evaluate_maxpool, f32, arg, out, out_shape, kernel, strides, pad_begin, pad_end); default: rc = false; break; @@ -185,20 +185,20 @@ bool evaluate_maxpool(const HostTensorPtr& data, const ov::Shape& pads_begin, const ov::Shape& pads_end, const int64_t axis) { -#define EVAL_MAX_POOL_8(data_et, index_et) \ - NGRAPH_2_TYPES_CASE(maxpool_v8::evaluate_maxpool, \ - data_et, \ - index_et, \ - data, \ - values, \ - indices, \ - out_shape, \ - kernel, \ - strides, \ - dilations, \ - pads_begin, \ - pads_end, \ - axis) +#define EVAL_MAX_POOL_8(data_et, index_et) \ + OPENVINO_2_TYPES_CASE(maxpool_v8::evaluate_maxpool, \ + data_et, \ + index_et, \ + data, \ + values, \ + indices, \ + out_shape, \ + kernel, \ + strides, \ + dilations, \ + pads_begin, \ + pads_end, \ + axis) bool rc = true; switch (indices->get_element_type()) { diff --git a/src/core/src/op/maximum.cpp b/src/core/src/op/maximum.cpp index 55f6b83e26575e..8a9a2a6569b336 100644 --- a/src/core/src/op/maximum.cpp +++ b/src/core/src/op/maximum.cpp @@ -43,12 +43,12 @@ bool evaluate_maximum(const HostTensorPtr& arg0, bool rc = true; out->set_broadcast(broadcast_spec, arg0, arg1); switch (arg0->get_element_type()) { - NGRAPH_TYPE_CASE(evaluate_maximum, i32, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_maximum, i64, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_maximum, u32, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_maximum, u64, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_maximum, f16, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_maximum, f32, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_maximum, i32, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_maximum, i64, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_maximum, u32, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_maximum, u64, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_maximum, f16, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_maximum, f32, arg0, arg1, out, broadcast_spec); default: rc = false; break; diff --git a/src/core/src/op/minimum.cpp b/src/core/src/op/minimum.cpp index 26725b8a059294..83252519beeeac 100644 --- a/src/core/src/op/minimum.cpp +++ b/src/core/src/op/minimum.cpp @@ -41,14 +41,14 @@ bool evaluate_minimum(const HostTensorPtr& arg0, bool rc = true; out->set_broadcast(broadcast_spec, arg0, arg1); switch (arg0->get_element_type()) { - NGRAPH_TYPE_CASE(evaluate_minimum, i32, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_minimum, i64, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_minimum, u8, arg0, arg1, out, broadcast_spec); - 
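// --- Illustrative sketch (an assumption about the pattern, not the macro body):
// EVAL_MAX_POOL_8 above uses OPENVINO_2_TYPES_CASE to dispatch on two element
// types at once (data values and returned indices). A hand-rolled toy version
// of that double dispatch, with the kernel arguments elided:
#include "openvino/core/type/element_type.hpp"

template <ov::element::Type_t DATA_ET, ov::element::Type_t INDEX_ET>
bool maxpool_kernel_stub() {
    return true;  // a real kernel would compute pooled values and their indices here
}

bool dispatch_maxpool_stub(ov::element::Type data_et, ov::element::Type index_et) {
    switch (index_et) {
    case ov::element::Type_t::i32:
        switch (data_et) {
        case ov::element::Type_t::f32:
            return maxpool_kernel_stub<ov::element::Type_t::f32, ov::element::Type_t::i32>();
        default:
            return false;
        }
    case ov::element::Type_t::i64:
        switch (data_et) {
        case ov::element::Type_t::f32:
            return maxpool_kernel_stub<ov::element::Type_t::f32, ov::element::Type_t::i64>();
        default:
            return false;
        }
    default:
        return false;  // unsupported index type
    }
}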
NGRAPH_TYPE_CASE(evaluate_minimum, u16, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_minimum, u32, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_minimum, u64, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_minimum, f16, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_minimum, f32, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_minimum, i32, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_minimum, i64, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_minimum, u8, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_minimum, u16, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_minimum, u32, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_minimum, u64, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_minimum, f16, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_minimum, f32, arg0, arg1, out, broadcast_spec); default: rc = false; break; diff --git a/src/core/src/op/mish.cpp b/src/core/src/op/mish.cpp index 1974e683029a6b..0fc374d3f8a8f5 100644 --- a/src/core/src/op/mish.cpp +++ b/src/core/src/op/mish.cpp @@ -59,8 +59,8 @@ bool evaluate_mish(const HostTensorPtr& arg0, const HostTensorPtr& out) { out->set_unary(arg0); switch (arg0->get_element_type()) { - NGRAPH_TYPE_CASE(evaluate_mish, f16, arg0, out, count); - NGRAPH_TYPE_CASE(evaluate_mish, f32, arg0, out, count); + OPENVINO_TYPE_CASE(evaluate_mish, f16, arg0, out, count); + OPENVINO_TYPE_CASE(evaluate_mish, f32, arg0, out, count); default: rc = false; break; diff --git a/src/core/src/op/multiply.cpp b/src/core/src/op/multiply.cpp index 23f21549c72011..04ccc8d05e349d 100644 --- a/src/core/src/op/multiply.cpp +++ b/src/core/src/op/multiply.cpp @@ -35,16 +35,16 @@ bool evaluate_multiply(const HostTensorPtr& arg0, bool rc = true; out->set_broadcast(broadcast_spec, arg0, arg1); switch (arg0->get_element_type()) { - NGRAPH_TYPE_CASE(evaluate_multiply, i32, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_multiply, i64, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_multiply, u32, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_multiply, u64, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_multiply, f16, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_multiply, f32, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_multiply, bf16, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_multiply, u8, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_multiply, i16, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_multiply, u16, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_multiply, i32, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_multiply, i64, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_multiply, u32, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_multiply, u64, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_multiply, f16, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_multiply, f32, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_multiply, bf16, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_multiply, u8, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_multiply, i16, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_multiply, u16, arg0, arg1, out, broadcast_spec); default: rc = false; 
break; diff --git a/src/core/src/op/mvn.cpp b/src/core/src/op/mvn.cpp index bf7a0afc71c03f..b02c6eef550de9 100644 --- a/src/core/src/op/mvn.cpp +++ b/src/core/src/op/mvn.cpp @@ -2,18 +2,13 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/mvn.hpp" - -#include +#include "openvino/op/mvn.hpp" #include "itt.hpp" #include "openvino/reference/mvn.hpp" -using namespace std; -using namespace ngraph; - // ------------------------------ V0 ------------------------------ -op::v0::MVN::MVN(const Output& data, bool across_channels, bool normalize_variance, double eps) +ov::op::v0::MVN::MVN(const Output& data, bool across_channels, bool normalize_variance, double eps) : Op({data}), m_eps{eps}, m_across_channels{across_channels}, @@ -21,7 +16,7 @@ op::v0::MVN::MVN(const Output& data, bool across_channels, bool normalize_ constructor_validate_and_infer_types(); } -op::v0::MVN::MVN(const Output& data, AxisSet reduction_axes, bool normalize_variance, double eps) +ov::op::v0::MVN::MVN(const Output& data, AxisSet reduction_axes, bool normalize_variance, double eps) : Op({data}), m_eps{eps}, m_across_channels{false}, @@ -32,7 +27,7 @@ op::v0::MVN::MVN(const Output& data, AxisSet reduction_axes, bool normaliz m_across_channels = (m_reduction_axes.count(chanelAxis) > 0); } -void op::v0::MVN::validate_and_infer_types() { +void ov::op::v0::MVN::validate_and_infer_types() { OV_OP_SCOPE(v0_MVN_validate_and_infer_types); // if m_across_channels is true we should calculate mean and variance per batch // else we calculate these per channel @@ -48,7 +43,7 @@ void op::v0::MVN::validate_and_infer_types() { set_output_type(0, get_input_element_type(0), get_input_partial_shape(0)); } -shared_ptr op::v0::MVN::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr ov::op::v0::MVN::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v0_MVN_clone_with_new_inputs); NODE_VALIDATION_CHECK(this, new_args.size() == 1, @@ -57,7 +52,7 @@ shared_ptr op::v0::MVN::clone_with_new_inputs(const OutputVector& new_args return std::make_shared(new_args.at(0), m_reduction_axes, m_normalize_variance, m_eps); } -bool op::v0::MVN::visit_attributes(AttributeVisitor& visitor) { +bool ov::op::v0::MVN::visit_attributes(AttributeVisitor& visitor) { OV_OP_SCOPE(v0_MVN_visit_attributes); visitor.on_attribute("eps", m_eps); visitor.on_attribute("across_channels", m_across_channels); @@ -70,23 +65,23 @@ bool op::v0::MVN::visit_attributes(AttributeVisitor& visitor) { namespace ov { template <> -NGRAPH_API EnumNames& EnumNames::get() { - static auto enum_names = EnumNames( +OPENVINO_API EnumNames& EnumNames::get() { + static auto enum_names = EnumNames( "op::MVNEpsMode", - {{"OUTSIDE_SQRT", ngraph::op::MVNEpsMode::OUTSIDE_SQRT}, {"INSIDE_SQRT", ngraph::op::MVNEpsMode::INSIDE_SQRT}}); + {{"OUTSIDE_SQRT", ov::op::MVNEpsMode::OUTSIDE_SQRT}, {"INSIDE_SQRT", ov::op::MVNEpsMode::INSIDE_SQRT}}); return enum_names; } } // namespace ov -std::ostream& ov::op::operator<<(std::ostream& s, const ngraph::op::MVNEpsMode& type) { +std::ostream& ov::op::operator<<(std::ostream& s, const ov::op::MVNEpsMode& type) { return s << as_string(type); } -op::v6::MVN::MVN(const Output& data, - const Output& reduction_axes, - bool normalize_variance, - float eps, - MVNEpsMode eps_mode) +ov::op::v6::MVN::MVN(const Output& data, + const Output& reduction_axes, + bool normalize_variance, + float eps, + MVNEpsMode eps_mode) : Op({data, reduction_axes}), m_normalize_variance{normalize_variance}, m_eps{eps}, @@ -94,7 +89,7 @@ 
op::v6::MVN::MVN(const Output& data, constructor_validate_and_infer_types(); } -void op::v6::MVN::validate_and_infer_types() { +void ov::op::v6::MVN::validate_and_infer_types() { OV_OP_SCOPE(v6_MVN_validate_and_infer_types); const auto data = get_input_partial_shape(0); const auto axes = get_input_partial_shape(1); @@ -112,16 +107,16 @@ void op::v6::MVN::validate_and_infer_types() { set_output_type(0, get_input_element_type(0), get_input_partial_shape(0)); } -shared_ptr op::v6::MVN::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr ov::op::v6::MVN::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v6_MVN_clone_with_new_inputs); NODE_VALIDATION_CHECK(this, new_args.size() == 2, "Expected 2 element in new_args for the MVN op but got ", new_args.size()); - return make_shared(new_args.at(0), new_args.at(1), m_normalize_variance, m_eps, m_eps_mode); + return std::make_shared(new_args.at(0), new_args.at(1), m_normalize_variance, m_eps, m_eps_mode); } -bool op::v6::MVN::visit_attributes(AttributeVisitor& visitor) { +bool ov::op::v6::MVN::visit_attributes(AttributeVisitor& visitor) { OV_OP_SCOPE(v6_MVN_visit_attributes); visitor.on_attribute("eps", m_eps); visitor.on_attribute("normalize_variance", m_normalize_variance); @@ -131,18 +126,18 @@ bool op::v6::MVN::visit_attributes(AttributeVisitor& visitor) { namespace mvn { namespace { -template +template bool evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs, bool normalize_variance, float eps, ov::op::MVNEpsMode eps_mode) { - using T = typename element_type_traits::value_type; - AxisSet reduction_axes; + using T = typename ov::element_type_traits::value_type; + ov::AxisSet reduction_axes; auto rank = inputs[0].get_shape().size(); - if (inputs[1].get_element_type() == element::i64) { + if (inputs[1].get_element_type() == ov::element::i64) { reduction_axes = ov::reference::mvn_6_reduction_axes(inputs[1], rank); - } else if (inputs[1].get_element_type() == element::i32) { + } else if (inputs[1].get_element_type() == ov::element::i32) { reduction_axes = ov::reference::mvn_6_reduction_axes(inputs[1], rank); } else { OPENVINO_THROW("Unexpected indices type"); @@ -164,7 +159,7 @@ bool evaluate_mvn(ov::TensorVector& outputs, ov::op::MVNEpsMode eps_mode) { bool rc = true; switch (inputs[0].get_element_type()) { - NGRAPH_TYPE_CASE(evaluate_mvn, f32, outputs, inputs, normalize_variance, eps, eps_mode); + OPENVINO_TYPE_CASE(evaluate_mvn, f32, outputs, inputs, normalize_variance, eps, eps_mode); default: rc = false; break; @@ -174,12 +169,12 @@ bool evaluate_mvn(ov::TensorVector& outputs, } // namespace } // namespace mvn -bool op::v6::MVN::evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const { +bool ov::op::v6::MVN::evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const { OV_OP_SCOPE(v6_MVN_evaluate); return mvn::evaluate_mvn(outputs, inputs, get_normalize_variance(), get_eps(), get_eps_mode()); } -bool op::v6::MVN::has_evaluate() const { +bool ov::op::v6::MVN::has_evaluate() const { OV_OP_SCOPE(v6_MVN_has_evaluate); switch (get_input_element_type(0)) { case ov::element::f32: diff --git a/src/core/src/op/negative.cpp b/src/core/src/op/negative.cpp index 5b86e98e6ad2a1..1cbe44de659a4b 100644 --- a/src/core/src/op/negative.cpp +++ b/src/core/src/op/negative.cpp @@ -42,11 +42,11 @@ bool evaluate_negative(const HostTensorPtr& arg0, const HostTensorPtr& out, cons out->set_unary(arg0); switch (arg0->get_element_type()) { - NGRAPH_TYPE_CASE(evaluate_negative, 
i32, arg0, out, count); - NGRAPH_TYPE_CASE(evaluate_negative, i64, arg0, out, count); - NGRAPH_TYPE_CASE(evaluate_negative, bf16, arg0, out, count); - NGRAPH_TYPE_CASE(evaluate_negative, f16, arg0, out, count); - NGRAPH_TYPE_CASE(evaluate_negative, f32, arg0, out, count); + OPENVINO_TYPE_CASE(evaluate_negative, i32, arg0, out, count); + OPENVINO_TYPE_CASE(evaluate_negative, i64, arg0, out, count); + OPENVINO_TYPE_CASE(evaluate_negative, bf16, arg0, out, count); + OPENVINO_TYPE_CASE(evaluate_negative, f16, arg0, out, count); + OPENVINO_TYPE_CASE(evaluate_negative, f32, arg0, out, count); default: rc = false; break; diff --git a/src/core/src/op/non_max_suppression.cpp b/src/core/src/op/non_max_suppression.cpp index 91d2c087fe8595..6e5357e9a79bce 100644 --- a/src/core/src/op/non_max_suppression.cpp +++ b/src/core/src/op/non_max_suppression.cpp @@ -550,7 +550,7 @@ std::ostream& operator<<(std::ostream& s, const op::v5::NonMaxSuppression::BoxEn } template <> -NGRAPH_API EnumNames& +OPENVINO_API EnumNames& EnumNames::get() { static auto enum_names = EnumNames( "op::v5::NonMaxSuppression::BoxEncodingType", @@ -787,7 +787,7 @@ std::ostream& operator<<(std::ostream& s, const op::v9::NonMaxSuppression::BoxEn } template <> -NGRAPH_API EnumNames& +OPENVINO_API EnumNames& EnumNames::get() { static auto enum_names = EnumNames( "op::v9::NonMaxSuppression::BoxEncodingType", diff --git a/src/core/src/op/non_zero.cpp b/src/core/src/op/non_zero.cpp index b24ce5c77b383e..a281cdf2645268 100644 --- a/src/core/src/op/non_zero.cpp +++ b/src/core/src/op/non_zero.cpp @@ -130,19 +130,19 @@ bool evaluate_nonzero(const HostTensorPtr& input, const HostTensorPtr& output) { bool rc = true; switch (input->get_element_type()) { - NGRAPH_TYPE_CASE(evaluate_nonzero, boolean, input, output); - NGRAPH_TYPE_CASE(evaluate_nonzero, i8, input, output); - NGRAPH_TYPE_CASE(evaluate_nonzero, i16, input, output); - NGRAPH_TYPE_CASE(evaluate_nonzero, i32, input, output); - NGRAPH_TYPE_CASE(evaluate_nonzero, i64, input, output); - NGRAPH_TYPE_CASE(evaluate_nonzero, u8, input, output); - NGRAPH_TYPE_CASE(evaluate_nonzero, u16, input, output); - NGRAPH_TYPE_CASE(evaluate_nonzero, u32, input, output); - NGRAPH_TYPE_CASE(evaluate_nonzero, u64, input, output); - NGRAPH_TYPE_CASE(evaluate_nonzero, bf16, input, output); - NGRAPH_TYPE_CASE(evaluate_nonzero, f16, input, output); - NGRAPH_TYPE_CASE(evaluate_nonzero, f32, input, output); - NGRAPH_TYPE_CASE(evaluate_nonzero, f64, input, output); + OPENVINO_TYPE_CASE(evaluate_nonzero, boolean, input, output); + OPENVINO_TYPE_CASE(evaluate_nonzero, i8, input, output); + OPENVINO_TYPE_CASE(evaluate_nonzero, i16, input, output); + OPENVINO_TYPE_CASE(evaluate_nonzero, i32, input, output); + OPENVINO_TYPE_CASE(evaluate_nonzero, i64, input, output); + OPENVINO_TYPE_CASE(evaluate_nonzero, u8, input, output); + OPENVINO_TYPE_CASE(evaluate_nonzero, u16, input, output); + OPENVINO_TYPE_CASE(evaluate_nonzero, u32, input, output); + OPENVINO_TYPE_CASE(evaluate_nonzero, u64, input, output); + OPENVINO_TYPE_CASE(evaluate_nonzero, bf16, input, output); + OPENVINO_TYPE_CASE(evaluate_nonzero, f16, input, output); + OPENVINO_TYPE_CASE(evaluate_nonzero, f32, input, output); + OPENVINO_TYPE_CASE(evaluate_nonzero, f64, input, output); default: rc = false; break; diff --git a/src/core/src/op/normalize_l2.cpp b/src/core/src/op/normalize_l2.cpp index 65456dafef86b0..4b33d5ee93e63e 100644 --- a/src/core/src/op/normalize_l2.cpp +++ b/src/core/src/op/normalize_l2.cpp @@ -2,35 +2,28 @@ // SPDX-License-Identifier: Apache-2.0 // 
-#include "ngraph/op/normalize_l2.hpp" +#include "openvino/op/normalize_l2.hpp" -#include -#include -#include - -#include "bound_evaluate.hpp" #include "itt.hpp" -#include "ngraph/attribute_visitor.hpp" -#include "ngraph/op/util/op_types.hpp" +#include "openvino/core/validation_util.hpp" using namespace std; -using namespace ngraph; -op::v0::NormalizeL2::NormalizeL2(const Output& data, const Output& axes, float eps, EpsMode eps_mode) +ov::op::v0::NormalizeL2::NormalizeL2(const Output& data, const Output& axes, float eps, EpsMode eps_mode) : Op({data, axes}), m_eps(eps), m_eps_mode(eps_mode) { constructor_validate_and_infer_types(); } -bool op::v0::NormalizeL2::visit_attributes(AttributeVisitor& visitor) { +bool ov::op::v0::NormalizeL2::visit_attributes(AttributeVisitor& visitor) { OV_OP_SCOPE(v0_NormalizeL2_visit_attributes); visitor.on_attribute("eps", m_eps); visitor.on_attribute("eps_mode", m_eps_mode); return true; } -void op::v0::NormalizeL2::validate_and_infer_types() { +void ov::op::v0::NormalizeL2::validate_and_infer_types() { OV_OP_SCOPE(v0_NormalizeL2_validate_and_infer_types); auto axes_node = input_value(1).get_node_shared_ptr(); const auto& input_pshape = get_input_partial_shape(0); @@ -62,10 +55,10 @@ void op::v0::NormalizeL2::validate_and_infer_types() { set_output_type(0, get_input_element_type(0), get_input_partial_shape(0)); } -AxisSet op::v0::NormalizeL2::get_reduction_axes() const { +ov::AxisSet ov::op::v0::NormalizeL2::get_reduction_axes() const { AxisSet axes; OPENVINO_SUPPRESS_DEPRECATED_START - if (auto const_op = get_constant_from_source(input_value(1))) { + if (auto const_op = ov::get_constant_from_source(input_value(1))) { OPENVINO_SUPPRESS_DEPRECATED_END const auto const_data = const_op->cast_vector(); const auto input_data_rank = get_input_partial_shape(0).rank(); @@ -77,7 +70,7 @@ AxisSet op::v0::NormalizeL2::get_reduction_axes() const { return axes; } -shared_ptr op::v0::NormalizeL2::clone_with_new_inputs(const OutputVector& new_args) const { +shared_ptr ov::op::v0::NormalizeL2::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v0_NormalizeL2_clone_with_new_inputs); if (new_args.size() != 2) { OPENVINO_THROW("Incorrect number of new arguments"); diff --git a/src/core/src/op/not_equal.cpp b/src/core/src/op/not_equal.cpp index 80c77cfa58f1c1..68da0abeaa6e0e 100644 --- a/src/core/src/op/not_equal.cpp +++ b/src/core/src/op/not_equal.cpp @@ -36,13 +36,13 @@ bool evaluate_not_equal(const HostTensorPtr& arg0, bool rc = true; out->set_broadcast(broadcast_spec, arg0, arg1, element::boolean); switch (arg0->get_element_type()) { - NGRAPH_TYPE_CASE(evaluate_not_equal, boolean, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_not_equal, i32, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_not_equal, i64, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_not_equal, u32, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_not_equal, u64, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_not_equal, f16, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_not_equal, f32, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_not_equal, boolean, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_not_equal, i32, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_not_equal, i64, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_not_equal, u32, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_not_equal, u64, arg0, arg1, out, 
broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_not_equal, f16, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_not_equal, f32, arg0, arg1, out, broadcast_spec); default: rc = false; break; diff --git a/src/core/src/op/one_hot.cpp b/src/core/src/op/one_hot.cpp index 81186f506e10dd..6ca4eb10931c5b 100644 --- a/src/core/src/op/one_hot.cpp +++ b/src/core/src/op/one_hot.cpp @@ -94,8 +94,8 @@ bool evaluate_onehot(const HostTensorVector& output_values, const HostTensorVect bool rc = true; const auto& indices = input_values[0]; switch (indices->get_element_type()) { - NGRAPH_TYPE_CASE(evaluate_onehot, i32, output_values, input_values, axis); - NGRAPH_TYPE_CASE(evaluate_onehot, i64, output_values, input_values, axis); + OPENVINO_TYPE_CASE(evaluate_onehot, i32, output_values, input_values, axis); + OPENVINO_TYPE_CASE(evaluate_onehot, i64, output_values, input_values, axis); default: rc = false; } diff --git a/src/core/src/op/power.cpp b/src/core/src/op/power.cpp index 6c6f41c78cec0e..f9847066940afd 100644 --- a/src/core/src/op/power.cpp +++ b/src/core/src/op/power.cpp @@ -38,13 +38,13 @@ bool evaluate_power(const HostTensorPtr& arg0, bool rc = true; out->set_broadcast(broadcast_spec, arg0, arg1); switch (arg0->get_element_type()) { - NGRAPH_TYPE_CASE(evaluate_power, i32, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_power, i64, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_power, u32, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_power, u64, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_power, f16, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_power, f32, arg0, arg1, out, broadcast_spec); - NGRAPH_TYPE_CASE(evaluate_power, bf16, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_power, i32, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_power, i64, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_power, u32, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_power, u64, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_power, f16, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_power, f32, arg0, arg1, out, broadcast_spec); + OPENVINO_TYPE_CASE(evaluate_power, bf16, arg0, arg1, out, broadcast_spec); default: rc = false; break; diff --git a/src/core/src/op/prelu.cpp b/src/core/src/op/prelu.cpp index 782ca120733722..9e1ccd3ec3c197 100644 --- a/src/core/src/op/prelu.cpp +++ b/src/core/src/op/prelu.cpp @@ -50,10 +50,10 @@ bool evaluate_prelu(const ngraph::HostTensorPtr& arg, const ngraph::HostTensorPtr& out) { bool rc = true; switch (arg->get_element_type()) { - NGRAPH_TYPE_CASE(evaluate_prelu, i8, arg, slope, out); - NGRAPH_TYPE_CASE(evaluate_prelu, bf16, arg, slope, out); - NGRAPH_TYPE_CASE(evaluate_prelu, f16, arg, slope, out); - NGRAPH_TYPE_CASE(evaluate_prelu, f32, arg, slope, out); + OPENVINO_TYPE_CASE(evaluate_prelu, i8, arg, slope, out); + OPENVINO_TYPE_CASE(evaluate_prelu, bf16, arg, slope, out); + OPENVINO_TYPE_CASE(evaluate_prelu, f16, arg, slope, out); + OPENVINO_TYPE_CASE(evaluate_prelu, f32, arg, slope, out); default: rc = false; break; diff --git a/src/core/src/op/prior_box.cpp b/src/core/src/op/prior_box.cpp index e66e3ea30bf9b1..d89286a84ecd64 100644 --- a/src/core/src/op/prior_box.cpp +++ b/src/core/src/op/prior_box.cpp @@ -44,14 +44,14 @@ bool evaluate_prior_box(const Tensor& arg0, const op::v0::PriorBox::Attributes& attrs) { bool rc = true; switch (arg0.get_element_type()) { - 
NGRAPH_TYPE_CASE(evaluate_prior_box, i8, arg0, arg1, out, attrs); - NGRAPH_TYPE_CASE(evaluate_prior_box, i16, arg0, arg1, out, attrs); - NGRAPH_TYPE_CASE(evaluate_prior_box, i32, arg0, arg1, out, attrs); - NGRAPH_TYPE_CASE(evaluate_prior_box, i64, arg0, arg1, out, attrs); - NGRAPH_TYPE_CASE(evaluate_prior_box, u8, arg0, arg1, out, attrs); - NGRAPH_TYPE_CASE(evaluate_prior_box, u16, arg0, arg1, out, attrs); - NGRAPH_TYPE_CASE(evaluate_prior_box, u32, arg0, arg1, out, attrs); - NGRAPH_TYPE_CASE(evaluate_prior_box, u64, arg0, arg1, out, attrs); + OPENVINO_TYPE_CASE(evaluate_prior_box, i8, arg0, arg1, out, attrs); + OPENVINO_TYPE_CASE(evaluate_prior_box, i16, arg0, arg1, out, attrs); + OPENVINO_TYPE_CASE(evaluate_prior_box, i32, arg0, arg1, out, attrs); + OPENVINO_TYPE_CASE(evaluate_prior_box, i64, arg0, arg1, out, attrs); + OPENVINO_TYPE_CASE(evaluate_prior_box, u8, arg0, arg1, out, attrs); + OPENVINO_TYPE_CASE(evaluate_prior_box, u16, arg0, arg1, out, attrs); + OPENVINO_TYPE_CASE(evaluate_prior_box, u32, arg0, arg1, out, attrs); + OPENVINO_TYPE_CASE(evaluate_prior_box, u64, arg0, arg1, out, attrs); default: rc = false; break; @@ -186,14 +186,14 @@ bool evaluate_prior_box(const Tensor& arg0, const op::v8::PriorBox::Attributes& attrs) { bool rc = true; switch (arg0.get_element_type()) { - NGRAPH_TYPE_CASE(evaluate_prior_box, i8, arg0, arg1, out, attrs); - NGRAPH_TYPE_CASE(evaluate_prior_box, i16, arg0, arg1, out, attrs); - NGRAPH_TYPE_CASE(evaluate_prior_box, i32, arg0, arg1, out, attrs); - NGRAPH_TYPE_CASE(evaluate_prior_box, i64, arg0, arg1, out, attrs); - NGRAPH_TYPE_CASE(evaluate_prior_box, u8, arg0, arg1, out, attrs); - NGRAPH_TYPE_CASE(evaluate_prior_box, u16, arg0, arg1, out, attrs); - NGRAPH_TYPE_CASE(evaluate_prior_box, u32, arg0, arg1, out, attrs); - NGRAPH_TYPE_CASE(evaluate_prior_box, u64, arg0, arg1, out, attrs); + OPENVINO_TYPE_CASE(evaluate_prior_box, i8, arg0, arg1, out, attrs); + OPENVINO_TYPE_CASE(evaluate_prior_box, i16, arg0, arg1, out, attrs); + OPENVINO_TYPE_CASE(evaluate_prior_box, i32, arg0, arg1, out, attrs); + OPENVINO_TYPE_CASE(evaluate_prior_box, i64, arg0, arg1, out, attrs); + OPENVINO_TYPE_CASE(evaluate_prior_box, u8, arg0, arg1, out, attrs); + OPENVINO_TYPE_CASE(evaluate_prior_box, u16, arg0, arg1, out, attrs); + OPENVINO_TYPE_CASE(evaluate_prior_box, u32, arg0, arg1, out, attrs); + OPENVINO_TYPE_CASE(evaluate_prior_box, u64, arg0, arg1, out, attrs); default: rc = false; break; diff --git a/src/core/src/op/prior_box_clustered.cpp b/src/core/src/op/prior_box_clustered.cpp index 5dc0521b48cd96..f31671922d7aa1 100644 --- a/src/core/src/op/prior_box_clustered.cpp +++ b/src/core/src/op/prior_box_clustered.cpp @@ -72,14 +72,14 @@ bool evaluate_prior_box(const Tensor& arg0, const op::v0::PriorBoxClustered::Attributes& attrs) { bool rc = true; switch (arg0.get_element_type()) { - NGRAPH_TYPE_CASE(evaluate_prior_box, i8, arg0, arg1, out, attrs); - NGRAPH_TYPE_CASE(evaluate_prior_box, i16, arg0, arg1, out, attrs); - NGRAPH_TYPE_CASE(evaluate_prior_box, i32, arg0, arg1, out, attrs); - NGRAPH_TYPE_CASE(evaluate_prior_box, i64, arg0, arg1, out, attrs); - NGRAPH_TYPE_CASE(evaluate_prior_box, u8, arg0, arg1, out, attrs); - NGRAPH_TYPE_CASE(evaluate_prior_box, u16, arg0, arg1, out, attrs); - NGRAPH_TYPE_CASE(evaluate_prior_box, u32, arg0, arg1, out, attrs); - NGRAPH_TYPE_CASE(evaluate_prior_box, u64, arg0, arg1, out, attrs); + OPENVINO_TYPE_CASE(evaluate_prior_box, i8, arg0, arg1, out, attrs); + OPENVINO_TYPE_CASE(evaluate_prior_box, i16, arg0, arg1, out, attrs); + 
OPENVINO_TYPE_CASE(evaluate_prior_box, i32, arg0, arg1, out, attrs); + OPENVINO_TYPE_CASE(evaluate_prior_box, i64, arg0, arg1, out, attrs); + OPENVINO_TYPE_CASE(evaluate_prior_box, u8, arg0, arg1, out, attrs); + OPENVINO_TYPE_CASE(evaluate_prior_box, u16, arg0, arg1, out, attrs); + OPENVINO_TYPE_CASE(evaluate_prior_box, u32, arg0, arg1, out, attrs); + OPENVINO_TYPE_CASE(evaluate_prior_box, u64, arg0, arg1, out, attrs); default: rc = false; break; diff --git a/src/core/src/op/random_uniform.cpp b/src/core/src/op/random_uniform.cpp index c22c18ae03081e..296b115979c8f7 100644 --- a/src/core/src/op/random_uniform.cpp +++ b/src/core/src/op/random_uniform.cpp @@ -24,7 +24,7 @@ inline bool out_et(const element::Type& et) { RandomUniform::RandomUniform(const Output& out_shape, const Output& min_val, const Output& max_val, - const ngraph::element::Type& out_type, + const ov::element::Type& out_type, uint64_t global_seed, uint64_t op_seed) : Op({out_shape, min_val, max_val}), diff --git a/src/core/src/op/range.cpp b/src/core/src/op/range.cpp index 557a88822646b6..6285391ae56e06 100644 --- a/src/core/src/op/range.cpp +++ b/src/core/src/op/range.cpp @@ -182,18 +182,18 @@ bool evaluate_power(const HostTensorPtr& out, int version) { bool rc = true; switch (output_type) { - NGRAPH_TYPE_CASE(evaluate_range, bf16, out, start, stop, step, version); - NGRAPH_TYPE_CASE(evaluate_range, f16, out, start, stop, step, version); - NGRAPH_TYPE_CASE(evaluate_range, f32, out, start, stop, step, version); - NGRAPH_TYPE_CASE(evaluate_range, f64, out, start, stop, step, version); - NGRAPH_TYPE_CASE(evaluate_range, i8, out, start, stop, step, version); - NGRAPH_TYPE_CASE(evaluate_range, i16, out, start, stop, step, version); - NGRAPH_TYPE_CASE(evaluate_range, i32, out, start, stop, step, version); - NGRAPH_TYPE_CASE(evaluate_range, i64, out, start, stop, step, version); - NGRAPH_TYPE_CASE(evaluate_range, u8, out, start, stop, step, version); - NGRAPH_TYPE_CASE(evaluate_range, u16, out, start, stop, step, version); - NGRAPH_TYPE_CASE(evaluate_range, u32, out, start, stop, step, version); - NGRAPH_TYPE_CASE(evaluate_range, u64, out, start, stop, step, version); + OPENVINO_TYPE_CASE(evaluate_range, bf16, out, start, stop, step, version); + OPENVINO_TYPE_CASE(evaluate_range, f16, out, start, stop, step, version); + OPENVINO_TYPE_CASE(evaluate_range, f32, out, start, stop, step, version); + OPENVINO_TYPE_CASE(evaluate_range, f64, out, start, stop, step, version); + OPENVINO_TYPE_CASE(evaluate_range, i8, out, start, stop, step, version); + OPENVINO_TYPE_CASE(evaluate_range, i16, out, start, stop, step, version); + OPENVINO_TYPE_CASE(evaluate_range, i32, out, start, stop, step, version); + OPENVINO_TYPE_CASE(evaluate_range, i64, out, start, stop, step, version); + OPENVINO_TYPE_CASE(evaluate_range, u8, out, start, stop, step, version); + OPENVINO_TYPE_CASE(evaluate_range, u16, out, start, stop, step, version); + OPENVINO_TYPE_CASE(evaluate_range, u32, out, start, stop, step, version); + OPENVINO_TYPE_CASE(evaluate_range, u64, out, start, stop, step, version); default: rc = false; break; diff --git a/src/core/src/op/relu.cpp b/src/core/src/op/relu.cpp index d0795cb646e9b9..30395883d9b4c7 100644 --- a/src/core/src/op/relu.cpp +++ b/src/core/src/op/relu.cpp @@ -40,12 +40,12 @@ bool evaluate_relu(const HostTensorPtr& arg0, const HostTensorPtr& out) { out->set_unary(arg0); switch (arg0->get_element_type()) { - NGRAPH_TYPE_CASE(evaluate_relu, i32, arg0, out, count); - NGRAPH_TYPE_CASE(evaluate_relu, i64, arg0, out, count); - 
NGRAPH_TYPE_CASE(evaluate_relu, u32, arg0, out, count); - NGRAPH_TYPE_CASE(evaluate_relu, u64, arg0, out, count); - NGRAPH_TYPE_CASE(evaluate_relu, f16, arg0, out, count); - NGRAPH_TYPE_CASE(evaluate_relu, f32, arg0, out, count); + OPENVINO_TYPE_CASE(evaluate_relu, i32, arg0, out, count); + OPENVINO_TYPE_CASE(evaluate_relu, i64, arg0, out, count); + OPENVINO_TYPE_CASE(evaluate_relu, u32, arg0, out, count); + OPENVINO_TYPE_CASE(evaluate_relu, u64, arg0, out, count); + OPENVINO_TYPE_CASE(evaluate_relu, f16, arg0, out, count); + OPENVINO_TYPE_CASE(evaluate_relu, f32, arg0, out, count); default: rc = false; break; diff --git a/src/core/src/op/reshape.cpp b/src/core/src/op/reshape.cpp index 52b78bfbf8a37e..a20a4b5d3a06fe 100644 --- a/src/core/src/op/reshape.cpp +++ b/src/core/src/op/reshape.cpp @@ -2,48 +2,40 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/reshape.hpp" +#include "openvino/op/reshape.hpp" #include -#include +#include #include "bound_evaluate.hpp" #include "compare.hpp" #include "itt.hpp" -#include "ngraph/op/constant.hpp" #include "ngraph/runtime/opt_kernel/reshape.hpp" +#include "ngraph/util.hpp" #include "openvino/core/dimension_tracker.hpp" +#include "openvino/core/validation_util.hpp" +#include "openvino/op/constant.hpp" #include "openvino/op/util/precision_sensitive_attribute.hpp" #include "openvino/reference/reshape.hpp" using namespace std; -using namespace ngraph; +using namespace ov; -OPENVINO_SUPPRESS_DEPRECATED_START namespace reshapeop { namespace { -bool evaluate_reshape(const HostTensorPtr& arg0, const HostTensorPtr& out, const AxisVector& order) { - runtime::opt_kernel::reshape(arg0->get_data_ptr(), - out->get_data_ptr(), - arg0->get_shape(), - order, - out->get_shape(), - arg0->get_element_type().size()); - return true; -} template -void compute_output_shape(const HostTensorPtr& shape_pattern, std::vector& output_shape) { +void compute_output_shape(const ov::Tensor& shape_pattern, std::vector& output_shape) { size_t output_rank; - if (shape_pattern->get_partial_shape().is_static()) { - output_rank = shape_pattern->get_shape().empty() ? 0 : shape_pattern->get_shape()[0]; + if (shape_pattern.get_size() != 0) { + output_rank = shape_pattern.get_shape().empty() ? 
0 : shape_pattern.get_shape()[0]; } else { // Can be dynamic during shape infer as conversion result from empty ov::Tensor output_rank = 0; } for (size_t i = 0; i < output_rank; i++) { - output_shape.push_back(shape_pattern->get_data_ptr()[i]); + output_shape.push_back(shape_pattern.data::value_type>()[i]); } } } // namespace @@ -151,12 +143,12 @@ shared_ptr op::v1::Reshape::clone_with_new_inputs(const OutputVector& new_ reshapeop::compute_output_shape(__VA_ARGS__); \ } break; -bool op::v1::Reshape::evaluate_reshape(const HostTensorVector& outputs, const HostTensorVector& inputs) const { +bool op::v1::Reshape::evaluate_reshape(ov::TensorVector& outputs, const ov::TensorVector& inputs) const { // infer and set output shape if the output shape contain -1 // and zero value dimension std::vector out_shape_val; - switch (inputs[1]->get_element_type()) { + switch (inputs[1].get_element_type()) { COMPUTE_OUT_SHAPE_CASE(i8, inputs[1], out_shape_val); COMPUTE_OUT_SHAPE_CASE(i16, inputs[1], out_shape_val); COMPUTE_OUT_SHAPE_CASE(i32, inputs[1], out_shape_val); @@ -181,36 +173,43 @@ bool op::v1::Reshape::evaluate_reshape(const HostTensorVector& outputs, const Ho } std::vector output_shape(out_shape_val.size()); - calculate_output_shape(reshape_pattern, minus_one_idx, inputs[0]->get_partial_shape(), output_shape); + calculate_output_shape(reshape_pattern, minus_one_idx, inputs[0].get_shape(), output_shape); OPENVINO_ASSERT(ov::PartialShape(output_shape).is_static()); - outputs[0]->set_shape(ov::PartialShape(output_shape).to_shape()); + outputs[0].set_shape(ov::PartialShape(output_shape).to_shape()); OPENVINO_SUPPRESS_DEPRECATED_START - const AxisVector order = get_default_order(inputs[0]->get_shape()); + const AxisVector order = ngraph::get_default_order(inputs[0].get_shape()); OPENVINO_SUPPRESS_DEPRECATED_END - return reshapeop::evaluate_reshape(inputs[0], outputs[0], order); + ngraph::runtime::opt_kernel::reshape(static_cast(inputs[0].data()), + static_cast(outputs[0].data()), + inputs[0].get_shape(), + order, + outputs[0].get_shape(), + inputs[0].get_element_type().size()); + return true; } -bool op::v1::Reshape::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { +bool op::v1::Reshape::evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const { OV_OP_SCOPE(v1_Reshape_evaluate); - OPENVINO_SUPPRESS_DEPRECATED_START - OPENVINO_ASSERT(validate_host_tensor_vector(inputs, 2)); - OPENVINO_ASSERT(validate_host_tensor_vector(outputs, 1)); - OPENVINO_SUPPRESS_DEPRECATED_END + OPENVINO_ASSERT(inputs.size() == 2); + if (outputs.empty()) + outputs.emplace_back(ov::Tensor(inputs[0].get_element_type(), {0})); + else + OPENVINO_ASSERT(outputs.size() == 1); return evaluate_reshape(outputs, inputs); } bool op::v1::Reshape::has_evaluate() const { OV_OP_SCOPE(v1_Reshape_has_evaluate); switch (get_input_element_type(1)) { - case ngraph::element::i8: - case ngraph::element::i16: - case ngraph::element::i32: - case ngraph::element::i64: - case ngraph::element::u8: - case ngraph::element::u16: - case ngraph::element::u32: - case ngraph::element::u64: + case ov::element::i8: + case ov::element::i16: + case ov::element::i32: + case ov::element::i64: + case ov::element::u8: + case ov::element::u16: + case ov::element::u32: + case ov::element::u64: return true; default: break; @@ -230,7 +229,7 @@ bool op::v1::Reshape::evaluate_label(TensorLabelVector& output_labels) const { if (!get_input_tensor(1).has_and_set_bound()) return false; OPENVINO_SUPPRESS_DEPRECATED_START - return 
default_label_evaluator(this, output_labels); + return ov::default_label_evaluator(this, output_labels); OPENVINO_SUPPRESS_DEPRECATED_END } diff --git a/src/core/src/op/result.cpp b/src/core/src/op/result.cpp index 61e8537b99a200..f7669e4346d549 100644 --- a/src/core/src/op/result.cpp +++ b/src/core/src/op/result.cpp @@ -2,29 +2,26 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/result.hpp" +#include "openvino/op/result.hpp" #include #include #include #include "itt.hpp" -#include "ngraph/node.hpp" -#include "ngraph/runtime/host_tensor.hpp" -using namespace std; -using namespace ngraph; +using namespace ov; -op::Result::Result(const Output& arg) : Op({arg}) { +op::v0::Result::Result(const Output& arg) : Op({arg}) { constructor_validate_and_infer_types(); } -bool ngraph::op::v0::Result::visit_attributes(AttributeVisitor& visitor) { +bool op::v0::Result::visit_attributes(AttributeVisitor& visitor) { OV_OP_SCOPE(v0_Result_visit_attributes); return true; } -void op::Result::validate_and_infer_types() { +void op::v0::Result::validate_and_infer_types() { OV_OP_SCOPE(v0_Result_validate_and_infer_types); NODE_VALIDATION_CHECK(this, get_input_size() == 1, "Argument has ", get_input_size(), " outputs (1 expected)."); @@ -34,40 +31,46 @@ void op::Result::validate_and_infer_types() { output.set_tensor_ptr(input.get_tensor_ptr()); } -shared_ptr op::Result::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr op::v0::Result::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v0_Result_clone_with_new_inputs); check_new_args_count(this, new_args); - auto res = make_shared(new_args.at(0)); + auto res = std::make_shared(new_args.at(0)); return std::move(res); } -OPENVINO_SUPPRESS_DEPRECATED_START -bool op::Result::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { +bool op::v0::Result::evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const { OV_OP_SCOPE(v0_Result_evaluate); - outputs[0]->set_unary(inputs[0]); - void* output = outputs[0]->get_data_ptr(); - void* input = inputs[0]->get_data_ptr(); - memcpy(output, input, outputs[0]->get_size_in_bytes()); + OPENVINO_ASSERT(inputs.size() == 1); + if (outputs.empty()) + outputs.emplace_back(ov::Tensor(inputs[0].get_element_type(), inputs[0].get_shape())); + else + OPENVINO_ASSERT(outputs.size() == 1); + if (!outputs[0]) + outputs[0] = ov::Tensor(inputs[0].get_element_type(), inputs[0].get_shape()); + if (inputs[0].get_shape() != outputs[0].get_shape()) + outputs[0].set_shape(inputs[0].get_shape()); + void* output = outputs[0].data(); + void* input = inputs[0].data(); + memcpy(output, input, outputs[0].get_byte_size()); return true; } -OPENVINO_SUPPRESS_DEPRECATED_END -bool op::Result::has_evaluate() const { +bool op::v0::Result::has_evaluate() const { OV_OP_SCOPE(v0_Result_has_evaluate); return true; } -bool op::Result::constant_fold(OutputVector& output_values, const OutputVector& inputs_values) { +bool op::v0::Result::constant_fold(OutputVector& output_values, const OutputVector& inputs_values) { return false; } -ov::Layout op::Result::get_layout() const { +ov::Layout op::v0::Result::get_layout() const { return ov::layout::get_layout(output(0)); } -void op::Result::set_layout(const ov::Layout& layout) { +void op::v0::Result::set_layout(const ov::Layout& layout) { ov::layout::set_layout(output(0), layout); } @@ -79,17 +82,17 @@ bool ov::AttributeAdapter::visit_attributes(AttributeVisitor& visi if (size != m_ref.size()) { m_ref.resize(size); } - 
ostringstream index; + std::ostringstream index; for (size_t i = 0; i < size; i++) { index.str(""); index << i; - string id; + std::string id; if (m_ref[i]) { id = visitor.get_registered_node_id(m_ref[i]); } visitor.on_attribute(index.str(), id); if (!m_ref[i]) { - m_ref[i] = ov::as_type_ptr(visitor.get_registered_node(id)); + m_ref[i] = ov::as_type_ptr(visitor.get_registered_node(id)); } } return true; diff --git a/src/core/src/op/reverse.cpp b/src/core/src/op/reverse.cpp index 7b2f386cb5661a..670120047a1345 100644 --- a/src/core/src/op/reverse.cpp +++ b/src/core/src/op/reverse.cpp @@ -16,16 +16,13 @@ #include "openvino/reference/reverse.hpp" #include "reverse_shape_inference.hpp" -using namespace std; -using namespace ngraph; - -op::v1::Reverse::Reverse(const Output& data, const Output& reversed_axes, const std::string& mode) +ov::op::v1::Reverse::Reverse(const Output& data, const Output& reversed_axes, const std::string& mode) : Op({data, reversed_axes}), m_mode{mode_from_string(mode)} { constructor_validate_and_infer_types(); } -op::v1::Reverse::Reverse(const Output& data, const Output& reversed_axes, const Mode mode) +ov::op::v1::Reverse::Reverse(const Output& data, const Output& reversed_axes, const Mode mode) : Op({data, reversed_axes}), m_mode{mode} { constructor_validate_and_infer_types(); @@ -37,7 +34,7 @@ bool ngraph::op::v1::Reverse::visit_attributes(AttributeVisitor& visitor) { return true; } -void op::v1::Reverse::validate_and_infer_types() { +void ov::op::v1::Reverse::validate_and_infer_types() { OV_OP_SCOPE(v1_Reverse_validate_and_infer_types); if (m_mode == Mode::MASK) { NODE_VALIDATION_CHECK(this, @@ -56,13 +53,13 @@ void op::v1::Reverse::validate_and_infer_types() { set_output_type(0, get_input_element_type(0), output_shape); } -shared_ptr op::v1::Reverse::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr ov::op::v1::Reverse::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v1_Reverse_clone_with_new_inputs); check_new_args_count(this, new_args); - return make_shared(new_args.at(0), new_args.at(1), m_mode); + return std::make_shared(new_args.at(0), new_args.at(1), m_mode); } -op::v1::Reverse::Mode op::v1::Reverse::mode_from_string(const std::string& mode) const { +ov::op::v1::Reverse::Mode ov::op::v1::Reverse::mode_from_string(const std::string& mode) const { static const std::map allowed_values = {{"index", Mode::INDEX}, {"mask", Mode::MASK}}; NODE_VALIDATION_CHECK(this, allowed_values.count(mode) > 0, "Invalid 'mode' value passed in."); @@ -72,8 +69,8 @@ op::v1::Reverse::Mode op::v1::Reverse::mode_from_string(const std::string& mode) OPENVINO_SUPPRESS_DEPRECATED_START namespace reverseop { -template -void get_axes(AxisSet& axes, const HostTensorPtr& in) { +template +void get_axes(ov::AxisSet& axes, const ngraph::HostTensorPtr& in) { auto axes_indices = in->get_data_ptr(); size_t axes_rank = in->get_element_count(); std::copy(axes_indices, axes_indices + axes_rank, std::inserter(axes, axes.end())); @@ -86,7 +83,7 @@ void get_axes(AxisSet& axes, const HostTensorPtr& in) { reverseop::get_axes(__VA_ARGS__); \ } break; -bool op::v1::Reverse::evaluate_reverse(const HostTensorVector& outputs, const HostTensorVector& inputs) const { +bool ov::op::v1::Reverse::evaluate_reverse(const HostTensorVector& outputs, const HostTensorVector& inputs) const { AxisSet axes{}; if (get_mode() == op::v1::Reverse::Mode::INDEX) { switch (inputs[1]->get_element_type()) { @@ -119,12 +116,12 @@ bool op::v1::Reverse::evaluate_reverse(const 
HostTensorVector& outputs, const Ho return true; } -bool op::v1::Reverse::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { +bool ov::op::v1::Reverse::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { OV_OP_SCOPE(v1_Reverse_evaluate); return evaluate_reverse(outputs, inputs); } -bool op::v1::Reverse::has_evaluate() const { +bool ov::op::v1::Reverse::has_evaluate() const { OV_OP_SCOPE(v1_Reverse_has_evaluate); if (get_mode() == op::v1::Reverse::Mode::INDEX) { diff --git a/src/core/src/op/roi_align.cpp b/src/core/src/op/roi_align.cpp index c57d1d13dd396c..2c2ed8f83ef129 100644 --- a/src/core/src/op/roi_align.cpp +++ b/src/core/src/op/roi_align.cpp @@ -260,45 +260,45 @@ bool evaluate(const TensorVector& args, bool rc; switch (feature_maps.get_element_type()) { - NGRAPH_TYPE_CASE(evaluate_roi_align, - bf16, - feature_maps, - rois, - batch_indices_vec_scaled_up, - out, - pooled_height, - pooled_width, - sampling_ratio, - spatial_scale, - pooling_mode, - batch_indices.get_shape(), - aligned_mode); - NGRAPH_TYPE_CASE(evaluate_roi_align, - f16, - feature_maps, - rois, - batch_indices_vec_scaled_up, - out, - pooled_height, - pooled_width, - sampling_ratio, - spatial_scale, - pooling_mode, - batch_indices.get_shape(), - aligned_mode); - NGRAPH_TYPE_CASE(evaluate_roi_align, - f32, - feature_maps, - rois, - batch_indices_vec_scaled_up, - out, - pooled_height, - pooled_width, - sampling_ratio, - spatial_scale, - pooling_mode, - batch_indices.get_shape(), - aligned_mode); + OPENVINO_TYPE_CASE(evaluate_roi_align, + bf16, + feature_maps, + rois, + batch_indices_vec_scaled_up, + out, + pooled_height, + pooled_width, + sampling_ratio, + spatial_scale, + pooling_mode, + batch_indices.get_shape(), + aligned_mode); + OPENVINO_TYPE_CASE(evaluate_roi_align, + f16, + feature_maps, + rois, + batch_indices_vec_scaled_up, + out, + pooled_height, + pooled_width, + sampling_ratio, + spatial_scale, + pooling_mode, + batch_indices.get_shape(), + aligned_mode); + OPENVINO_TYPE_CASE(evaluate_roi_align, + f32, + feature_maps, + rois, + batch_indices_vec_scaled_up, + out, + pooled_height, + pooled_width, + sampling_ratio, + spatial_scale, + pooling_mode, + batch_indices.get_shape(), + aligned_mode); default: rc = false; break; diff --git a/src/core/src/op/scatter_elements_update.cpp b/src/core/src/op/scatter_elements_update.cpp index fd6c52951cb34b..faeeed96d819e9 100644 --- a/src/core/src/op/scatter_elements_update.cpp +++ b/src/core/src/op/scatter_elements_update.cpp @@ -205,86 +205,86 @@ bool evaluate_scatter_elements_update( bool rc = true; switch (out->get_element_type()) { - NGRAPH_TYPE_CASE(evaluate_scatter_element_update, - i16, - arg0, - arg1, - arg2, - arg3, - out, - normalized_axis, - reduction_type, - use_init_value); - NGRAPH_TYPE_CASE(evaluate_scatter_element_update, - i32, - arg0, - arg1, - arg2, - arg3, - out, - normalized_axis, - reduction_type, - use_init_value); - NGRAPH_TYPE_CASE(evaluate_scatter_element_update, - i64, - arg0, - arg1, - arg2, - arg3, - out, - normalized_axis, - reduction_type, - use_init_value); - NGRAPH_TYPE_CASE(evaluate_scatter_element_update, - u32, - arg0, - arg1, - arg2, - arg3, - out, - normalized_axis, - reduction_type, - use_init_value); - NGRAPH_TYPE_CASE(evaluate_scatter_element_update, - u64, - arg0, - arg1, - arg2, - arg3, - out, - normalized_axis, - reduction_type, - use_init_value); - NGRAPH_TYPE_CASE(evaluate_scatter_element_update, - f16, - arg0, - arg1, - arg2, - arg3, - out, - normalized_axis, - 
reduction_type, - use_init_value); - NGRAPH_TYPE_CASE(evaluate_scatter_element_update, - f32, - arg0, - arg1, - arg2, - arg3, - out, - normalized_axis, - reduction_type, - use_init_value); - NGRAPH_TYPE_CASE(evaluate_scatter_element_update, - boolean, - arg0, - arg1, - arg2, - arg3, - out, - normalized_axis, - reduction_type, - use_init_value); + OPENVINO_TYPE_CASE(evaluate_scatter_element_update, + i16, + arg0, + arg1, + arg2, + arg3, + out, + normalized_axis, + reduction_type, + use_init_value); + OPENVINO_TYPE_CASE(evaluate_scatter_element_update, + i32, + arg0, + arg1, + arg2, + arg3, + out, + normalized_axis, + reduction_type, + use_init_value); + OPENVINO_TYPE_CASE(evaluate_scatter_element_update, + i64, + arg0, + arg1, + arg2, + arg3, + out, + normalized_axis, + reduction_type, + use_init_value); + OPENVINO_TYPE_CASE(evaluate_scatter_element_update, + u32, + arg0, + arg1, + arg2, + arg3, + out, + normalized_axis, + reduction_type, + use_init_value); + OPENVINO_TYPE_CASE(evaluate_scatter_element_update, + u64, + arg0, + arg1, + arg2, + arg3, + out, + normalized_axis, + reduction_type, + use_init_value); + OPENVINO_TYPE_CASE(evaluate_scatter_element_update, + f16, + arg0, + arg1, + arg2, + arg3, + out, + normalized_axis, + reduction_type, + use_init_value); + OPENVINO_TYPE_CASE(evaluate_scatter_element_update, + f32, + arg0, + arg1, + arg2, + arg3, + out, + normalized_axis, + reduction_type, + use_init_value); + OPENVINO_TYPE_CASE(evaluate_scatter_element_update, + boolean, + arg0, + arg1, + arg2, + arg3, + out, + normalized_axis, + reduction_type, + use_init_value); default: rc = false; break; diff --git a/src/core/src/op/scatter_nd_update.cpp b/src/core/src/op/scatter_nd_update.cpp index 1195b76fa3f7dc..343da7c6b38c13 100644 --- a/src/core/src/op/scatter_nd_update.cpp +++ b/src/core/src/op/scatter_nd_update.cpp @@ -64,13 +64,13 @@ bool evaluate_scatter(const HostTensorPtr& arg0, bool rc = true; switch (out->get_element_type()) { - NGRAPH_TYPE_CASE(evaluate_scatter, i32, arg0, arg1, arg2, out); - NGRAPH_TYPE_CASE(evaluate_scatter, i64, arg0, arg1, arg2, out); - NGRAPH_TYPE_CASE(evaluate_scatter, u32, arg0, arg1, arg2, out); - NGRAPH_TYPE_CASE(evaluate_scatter, u64, arg0, arg1, arg2, out); - NGRAPH_TYPE_CASE(evaluate_scatter, f16, arg0, arg1, arg2, out); - NGRAPH_TYPE_CASE(evaluate_scatter, f32, arg0, arg1, arg2, out); - NGRAPH_TYPE_CASE(evaluate_scatter, boolean, arg0, arg1, arg2, out); + OPENVINO_TYPE_CASE(evaluate_scatter, i32, arg0, arg1, arg2, out); + OPENVINO_TYPE_CASE(evaluate_scatter, i64, arg0, arg1, arg2, out); + OPENVINO_TYPE_CASE(evaluate_scatter, u32, arg0, arg1, arg2, out); + OPENVINO_TYPE_CASE(evaluate_scatter, u64, arg0, arg1, arg2, out); + OPENVINO_TYPE_CASE(evaluate_scatter, f16, arg0, arg1, arg2, out); + OPENVINO_TYPE_CASE(evaluate_scatter, f32, arg0, arg1, arg2, out); + OPENVINO_TYPE_CASE(evaluate_scatter, boolean, arg0, arg1, arg2, out); default: rc = false; break; diff --git a/src/core/src/op/select.cpp b/src/core/src/op/select.cpp index 5108aecdedd9b4..106d0f3b967205 100644 --- a/src/core/src/op/select.cpp +++ b/src/core/src/op/select.cpp @@ -92,19 +92,19 @@ bool evaluate_select(const HostTensorVector& output_values, bool rc = false; switch (et) { - NGRAPH_TYPE_CASE(evaluate_select, i8, output_values, input_values, autob); - NGRAPH_TYPE_CASE(evaluate_select, i16, output_values, input_values, autob); - NGRAPH_TYPE_CASE(evaluate_select, i32, output_values, input_values, autob); - NGRAPH_TYPE_CASE(evaluate_select, i64, output_values, input_values, autob); - 
NGRAPH_TYPE_CASE(evaluate_select, u8, output_values, input_values, autob); - NGRAPH_TYPE_CASE(evaluate_select, u16, output_values, input_values, autob); - NGRAPH_TYPE_CASE(evaluate_select, u32, output_values, input_values, autob); - NGRAPH_TYPE_CASE(evaluate_select, u64, output_values, input_values, autob); - NGRAPH_TYPE_CASE(evaluate_select, bf16, output_values, input_values, autob); - NGRAPH_TYPE_CASE(evaluate_select, f16, output_values, input_values, autob); - NGRAPH_TYPE_CASE(evaluate_select, f32, output_values, input_values, autob); - NGRAPH_TYPE_CASE(evaluate_select, f64, output_values, input_values, autob); - NGRAPH_TYPE_CASE(evaluate_select, boolean, output_values, input_values, autob); + OPENVINO_TYPE_CASE(evaluate_select, i8, output_values, input_values, autob); + OPENVINO_TYPE_CASE(evaluate_select, i16, output_values, input_values, autob); + OPENVINO_TYPE_CASE(evaluate_select, i32, output_values, input_values, autob); + OPENVINO_TYPE_CASE(evaluate_select, i64, output_values, input_values, autob); + OPENVINO_TYPE_CASE(evaluate_select, u8, output_values, input_values, autob); + OPENVINO_TYPE_CASE(evaluate_select, u16, output_values, input_values, autob); + OPENVINO_TYPE_CASE(evaluate_select, u32, output_values, input_values, autob); + OPENVINO_TYPE_CASE(evaluate_select, u64, output_values, input_values, autob); + OPENVINO_TYPE_CASE(evaluate_select, bf16, output_values, input_values, autob); + OPENVINO_TYPE_CASE(evaluate_select, f16, output_values, input_values, autob); + OPENVINO_TYPE_CASE(evaluate_select, f32, output_values, input_values, autob); + OPENVINO_TYPE_CASE(evaluate_select, f64, output_values, input_values, autob); + OPENVINO_TYPE_CASE(evaluate_select, boolean, output_values, input_values, autob); default: rc = false; break; diff --git a/src/core/src/op/shape_of.cpp b/src/core/src/op/shape_of.cpp index 3fa225cea017fc..f7ae99f10c096c 100644 --- a/src/core/src/op/shape_of.cpp +++ b/src/core/src/op/shape_of.cpp @@ -70,10 +70,10 @@ bool evaluate_shape_of(const HostTensorPtr& output_value, const HostTensorPtr& i ov::Shape shape = input_value->get_shape(); output_value->set_shape(ov::Shape{shape.size()}); switch (output_value->get_element_type()) { - NGRAPH_TYPE_CASE(evaluate_shape_of, i32, shape, output_value); - NGRAPH_TYPE_CASE(evaluate_shape_of, i64, shape, output_value); - NGRAPH_TYPE_CASE(evaluate_shape_of, u32, shape, output_value); - NGRAPH_TYPE_CASE(evaluate_shape_of, u64, shape, output_value); + OPENVINO_TYPE_CASE(evaluate_shape_of, i32, shape, output_value); + OPENVINO_TYPE_CASE(evaluate_shape_of, i64, shape, output_value); + OPENVINO_TYPE_CASE(evaluate_shape_of, u32, shape, output_value); + OPENVINO_TYPE_CASE(evaluate_shape_of, u64, shape, output_value); default: rc = false; break; @@ -85,10 +85,10 @@ bool evaluate_shape_of(ov::Tensor& output_value, const Shape& input_shape) { bool rc; output_value.set_shape(ov::Shape{input_shape.size()}); switch (output_value.get_element_type()) { - NGRAPH_TYPE_CASE(evaluate_shape_of, i32, input_shape, output_value); - NGRAPH_TYPE_CASE(evaluate_shape_of, i64, input_shape, output_value); - NGRAPH_TYPE_CASE(evaluate_shape_of, u32, input_shape, output_value); - NGRAPH_TYPE_CASE(evaluate_shape_of, u64, input_shape, output_value); + OPENVINO_TYPE_CASE(evaluate_shape_of, i32, input_shape, output_value); + OPENVINO_TYPE_CASE(evaluate_shape_of, i64, input_shape, output_value); + OPENVINO_TYPE_CASE(evaluate_shape_of, u32, input_shape, output_value); + OPENVINO_TYPE_CASE(evaluate_shape_of, u64, input_shape, output_value); default: rc = 
false; break; diff --git a/src/core/src/op/sigmoid.cpp b/src/core/src/op/sigmoid.cpp index c5c872ebc80bb7..9966dbcab8d69b 100644 --- a/src/core/src/op/sigmoid.cpp +++ b/src/core/src/op/sigmoid.cpp @@ -41,12 +41,12 @@ bool evaluate_sigmoid(const HostTensorPtr& arg0, const HostTensorPtr& out) { out->set_unary(arg0); switch (arg0->get_element_type()) { - NGRAPH_TYPE_CASE(evaluate_sigmoid, i32, arg0, out, count); - NGRAPH_TYPE_CASE(evaluate_sigmoid, i64, arg0, out, count); - NGRAPH_TYPE_CASE(evaluate_sigmoid, u32, arg0, out, count); - NGRAPH_TYPE_CASE(evaluate_sigmoid, u64, arg0, out, count); - NGRAPH_TYPE_CASE(evaluate_sigmoid, f16, arg0, out, count); - NGRAPH_TYPE_CASE(evaluate_sigmoid, f32, arg0, out, count); + OPENVINO_TYPE_CASE(evaluate_sigmoid, i32, arg0, out, count); + OPENVINO_TYPE_CASE(evaluate_sigmoid, i64, arg0, out, count); + OPENVINO_TYPE_CASE(evaluate_sigmoid, u32, arg0, out, count); + OPENVINO_TYPE_CASE(evaluate_sigmoid, u64, arg0, out, count); + OPENVINO_TYPE_CASE(evaluate_sigmoid, f16, arg0, out, count); + OPENVINO_TYPE_CASE(evaluate_sigmoid, f32, arg0, out, count); default: rc = false; break; diff --git a/src/core/src/op/sign.cpp b/src/core/src/op/sign.cpp index 8044ffef21d6f6..9e22a7f75d4643 100644 --- a/src/core/src/op/sign.cpp +++ b/src/core/src/op/sign.cpp @@ -42,12 +42,12 @@ bool evaluate_sign(const HostTensorPtr& arg0, const HostTensorPtr& out, const si out->set_unary(arg0); switch (arg0->get_element_type()) { - NGRAPH_TYPE_CASE(evaluate_sign, i32, arg0, out, count); - NGRAPH_TYPE_CASE(evaluate_sign, i64, arg0, out, count); - NGRAPH_TYPE_CASE(evaluate_sign, u32, arg0, out, count); - NGRAPH_TYPE_CASE(evaluate_sign, u64, arg0, out, count); - NGRAPH_TYPE_CASE(evaluate_sign, f16, arg0, out, count); - NGRAPH_TYPE_CASE(evaluate_sign, f32, arg0, out, count); + OPENVINO_TYPE_CASE(evaluate_sign, i32, arg0, out, count); + OPENVINO_TYPE_CASE(evaluate_sign, i64, arg0, out, count); + OPENVINO_TYPE_CASE(evaluate_sign, u32, arg0, out, count); + OPENVINO_TYPE_CASE(evaluate_sign, u64, arg0, out, count); + OPENVINO_TYPE_CASE(evaluate_sign, f16, arg0, out, count); + OPENVINO_TYPE_CASE(evaluate_sign, f32, arg0, out, count); default: rc = false; break; diff --git a/src/core/src/op/softmax.cpp b/src/core/src/op/softmax.cpp index 126c6d82fc7252..775f88419c4fe9 100644 --- a/src/core/src/op/softmax.cpp +++ b/src/core/src/op/softmax.cpp @@ -28,10 +28,10 @@ bool evaluate_softmax(const HostTensorPtr& arg, const HostTensorPtr& out, const bool rc = true; switch (arg->get_element_type()) { - NGRAPH_TYPE_CASE(evaluate_softmax, bf16, arg, out, shape, axes); - NGRAPH_TYPE_CASE(evaluate_softmax, f16, arg, out, shape, axes); - NGRAPH_TYPE_CASE(evaluate_softmax, f32, arg, out, shape, axes); - NGRAPH_TYPE_CASE(evaluate_softmax, f64, arg, out, shape, axes); + OPENVINO_TYPE_CASE(evaluate_softmax, bf16, arg, out, shape, axes); + OPENVINO_TYPE_CASE(evaluate_softmax, f16, arg, out, shape, axes); + OPENVINO_TYPE_CASE(evaluate_softmax, f32, arg, out, shape, axes); + OPENVINO_TYPE_CASE(evaluate_softmax, f64, arg, out, shape, axes); default: rc = false; break; diff --git a/src/core/src/op/softplus.cpp b/src/core/src/op/softplus.cpp index 70d1e63b17c1ee..43ce8d9720b67c 100644 --- a/src/core/src/op/softplus.cpp +++ b/src/core/src/op/softplus.cpp @@ -58,9 +58,9 @@ bool evaluate_softplus(const HostTensorPtr& arg, const HostTensorPtr& out) { size_t count = shape_size(arg->get_shape()); switch (arg->get_element_type()) { - NGRAPH_TYPE_CASE(evaluate_softplus, bf16, arg, out, count); - NGRAPH_TYPE_CASE(evaluate_softplus, 
f16, arg, out, count); - NGRAPH_TYPE_CASE(evaluate_softplus, f32, arg, out, count); + OPENVINO_TYPE_CASE(evaluate_softplus, bf16, arg, out, count); + OPENVINO_TYPE_CASE(evaluate_softplus, f16, arg, out, count); + OPENVINO_TYPE_CASE(evaluate_softplus, f32, arg, out, count); default: rc = false; break; diff --git a/src/core/src/op/softsign.cpp b/src/core/src/op/softsign.cpp index 9ec3e8038ea3f7..fdb90e97f88fe0 100644 --- a/src/core/src/op/softsign.cpp +++ b/src/core/src/op/softsign.cpp @@ -24,10 +24,10 @@ bool evaluate_softsign(const ov::Tensor& arg, const ov::Tensor& out) { size_t count = arg.get_size(); switch (arg.get_element_type()) { - NGRAPH_TYPE_CASE(evaluate_softsign, bf16, arg, out, count); - NGRAPH_TYPE_CASE(evaluate_softsign, f16, arg, out, count); - NGRAPH_TYPE_CASE(evaluate_softsign, f32, arg, out, count); - NGRAPH_TYPE_CASE(evaluate_softsign, f64, arg, out, count); + OPENVINO_TYPE_CASE(evaluate_softsign, bf16, arg, out, count); + OPENVINO_TYPE_CASE(evaluate_softsign, f16, arg, out, count); + OPENVINO_TYPE_CASE(evaluate_softsign, f32, arg, out, count); + OPENVINO_TYPE_CASE(evaluate_softsign, f64, arg, out, count); default: rc = false; break; diff --git a/src/core/src/op/sqrt.cpp b/src/core/src/op/sqrt.cpp index 4758be3442b6d8..fe9c3830e9cc5c 100644 --- a/src/core/src/op/sqrt.cpp +++ b/src/core/src/op/sqrt.cpp @@ -42,13 +42,13 @@ bool evaluate_sqrt(const HostTensorPtr& arg0, const HostTensorPtr& out, const si bool rc = true; out->set_unary(arg0); switch (arg0->get_element_type()) { - NGRAPH_TYPE_CASE(evaluate_sqrt, i32, arg0, out, count); - NGRAPH_TYPE_CASE(evaluate_sqrt, i64, arg0, out, count); - NGRAPH_TYPE_CASE(evaluate_sqrt, u32, arg0, out, count); - NGRAPH_TYPE_CASE(evaluate_sqrt, u64, arg0, out, count); - NGRAPH_TYPE_CASE(evaluate_sqrt, f16, arg0, out, count); - NGRAPH_TYPE_CASE(evaluate_sqrt, f32, arg0, out, count); - NGRAPH_TYPE_CASE(evaluate_sqrt, f64, arg0, out, count); + OPENVINO_TYPE_CASE(evaluate_sqrt, i32, arg0, out, count); + OPENVINO_TYPE_CASE(evaluate_sqrt, i64, arg0, out, count); + OPENVINO_TYPE_CASE(evaluate_sqrt, u32, arg0, out, count); + OPENVINO_TYPE_CASE(evaluate_sqrt, u64, arg0, out, count); + OPENVINO_TYPE_CASE(evaluate_sqrt, f16, arg0, out, count); + OPENVINO_TYPE_CASE(evaluate_sqrt, f32, arg0, out, count); + OPENVINO_TYPE_CASE(evaluate_sqrt, f64, arg0, out, count); default: rc = false; break; diff --git a/src/core/src/op/tile.cpp b/src/core/src/op/tile.cpp index de6ced0e97a445..6696ec8676a5f1 100644 --- a/src/core/src/op/tile.cpp +++ b/src/core/src/op/tile.cpp @@ -2,30 +2,26 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/tile.hpp" +#include "openvino/op/tile.hpp" #include #include "bound_evaluate.hpp" #include "itt.hpp" -#include "ngraph/op/constant.hpp" #include "openvino/op/util/precision_sensitive_attribute.hpp" #include "openvino/reference/tile.hpp" -using namespace std; -using namespace ngraph; - -op::v0::Tile::Tile(const Output& data, const Output& repeats) : Op({data, repeats}) { +ov::op::v0::Tile::Tile(const Output& data, const Output& repeats) : Op({data, repeats}) { ov::mark_as_precision_sensitive(input(1)); constructor_validate_and_infer_types(); } -bool ngraph::op::v0::Tile::visit_attributes(AttributeVisitor& visitor) { +bool ov::op::v0::Tile::visit_attributes(ov::AttributeVisitor& visitor) { OV_OP_SCOPE(v0_Tile_visit_attributes); return true; } -void op::v0::Tile::validate_and_infer_types() { +void ov::op::v0::Tile::validate_and_infer_types() { OV_OP_SCOPE(v0_Tile_validate_and_infer_types); // Repeats should have integer 
data type. For now we only allow i64 @@ -44,39 +40,13 @@ void op::v0::Tile::validate_and_infer_types() { set_input_is_relevant_to_shape(1); } -shared_ptr op::v0::Tile::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr ov::op::v0::Tile::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v0_Tile_clone_with_new_inputs); check_new_args_count(this, new_args); - return make_shared(new_args.at(0), new_args.at(1)); + return std::make_shared(new_args.at(0), new_args.at(1)); } -OPENVINO_SUPPRESS_DEPRECATED_START -bool op::v0::Tile::evaluate_tile(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - const auto& data = inputs[0]; - const auto& axis = inputs[1]; - auto& output = outputs[0]; - OPENVINO_SUPPRESS_DEPRECATED_START - auto repeats_val = read_index_vector(axis); - OPENVINO_SUPPRESS_DEPRECATED_END - const auto repeats_rank = repeats_val.size(); - - const auto input_shapes = std::vector{data->get_shape(), axis->get_shape()}; - const auto& output_shape = shape_infer(this, input_shapes, make_tensor_accessor(inputs)).front().to_shape(); - if (!output->get_is_allocated()) { - output->set_shape(output_shape); - } - repeats_val.insert(repeats_val.begin(), output_shape.size() - repeats_rank, 1); - ov::reference::tile(data->get_data_ptr(), - output->get_data_ptr(), - data->get_shape(), - output_shape, - data->get_element_type().size(), - repeats_val); - - return true; -} - -bool op::v0::Tile::evaluate(ov::TensorVector& output_values, const ov::TensorVector& input_values) const { +bool ov::op::v0::Tile::evaluate(ov::TensorVector& output_values, const ov::TensorVector& input_values) const { OV_OP_SCOPE(v0_Tile_evaluate); const auto& data = input_values[0]; const auto& axis = input_values[1]; @@ -99,32 +69,24 @@ bool op::v0::Tile::evaluate(ov::TensorVector& output_values, const ov::TensorVec return true; } -bool op::v0::Tile::has_evaluate() const { +bool ov::op::v0::Tile::has_evaluate() const { OV_OP_SCOPE(v0_Tile_has_evaluate); return true; } -bool op::v0::Tile::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - // This duplicate version for ov::Tensor because template plugin and shape inference utils - // are not ready for usage with ov::Tensor when it happens this function can be removed. 
- OV_OP_SCOPE(v0_Tile_evaluate); - return evaluate_tile(outputs, inputs); -} -OPENVINO_SUPPRESS_DEPRECATED_END - -bool op::v0::Tile::evaluate_lower(ov::TensorVector& output_values) const { +bool ov::op::v0::Tile::evaluate_lower(ov::TensorVector& output_values) const { OV_OP_SCOPE(v0_Tile_evaluate_lower); return get_input_tensor(1).has_and_set_bound() && default_lower_bound_evaluator(this, output_values); } -bool op::v0::Tile::evaluate_upper(ov::TensorVector& output_values) const { +bool ov::op::v0::Tile::evaluate_upper(ov::TensorVector& output_values) const { OV_OP_SCOPE(v0_Tile_evaluate_upper); return get_input_tensor(1).has_and_set_bound() && default_upper_bound_evaluator(this, output_values); } -bool op::v0::Tile::evaluate_label(TensorLabelVector& output_labels) const { +bool ov::op::v0::Tile::evaluate_label(TensorLabelVector& output_labels) const { OV_OP_SCOPE(v0_Tile_evaluate_label); OPENVINO_ASSERT(output_labels.size() == 1); diff --git a/src/core/src/op/topk.cpp b/src/core/src/op/topk.cpp index 485dc4e91fea5a..da56c6bb7494c7 100644 --- a/src/core/src/op/topk.cpp +++ b/src/core/src/op/topk.cpp @@ -91,12 +91,12 @@ bool evaluate_topk(const ngraph::HostTensorPtr& arg, const element::Type index_et) { bool rc = true; switch (arg->get_element_type()) { - NGRAPH_TYPE_CASE(evaluate_topk, i32, arg, out_indices, out_values, out_shape, axis, k, max, sort, index_et); - NGRAPH_TYPE_CASE(evaluate_topk, i64, arg, out_indices, out_values, out_shape, axis, k, max, sort, index_et); - NGRAPH_TYPE_CASE(evaluate_topk, u32, arg, out_indices, out_values, out_shape, axis, k, max, sort, index_et); - NGRAPH_TYPE_CASE(evaluate_topk, u64, arg, out_indices, out_values, out_shape, axis, k, max, sort, index_et); - NGRAPH_TYPE_CASE(evaluate_topk, f16, arg, out_indices, out_values, out_shape, axis, k, max, sort, index_et); - NGRAPH_TYPE_CASE(evaluate_topk, f32, arg, out_indices, out_values, out_shape, axis, k, max, sort, index_et); + OPENVINO_TYPE_CASE(evaluate_topk, i32, arg, out_indices, out_values, out_shape, axis, k, max, sort, index_et); + OPENVINO_TYPE_CASE(evaluate_topk, i64, arg, out_indices, out_values, out_shape, axis, k, max, sort, index_et); + OPENVINO_TYPE_CASE(evaluate_topk, u32, arg, out_indices, out_values, out_shape, axis, k, max, sort, index_et); + OPENVINO_TYPE_CASE(evaluate_topk, u64, arg, out_indices, out_values, out_shape, axis, k, max, sort, index_et); + OPENVINO_TYPE_CASE(evaluate_topk, f16, arg, out_indices, out_values, out_shape, axis, k, max, sort, index_et); + OPENVINO_TYPE_CASE(evaluate_topk, f32, arg, out_indices, out_values, out_shape, axis, k, max, sort, index_et); default: rc = false; break; diff --git a/src/core/src/op/unique.cpp b/src/core/src/op/unique.cpp index 3361d02b2b4001..21d685584105a9 100644 --- a/src/core/src/op/unique.cpp +++ b/src/core/src/op/unique.cpp @@ -6,7 +6,7 @@ #include "element_visitor.hpp" #include "itt.hpp" -#include "ngraph/validation_util.hpp" +#include "openvino/core/validation_util.hpp" #include "openvino/op/unique.hpp" #include "openvino/op/util/op_types.hpp" @@ -142,7 +142,7 @@ void op::v10::Unique::validate_and_infer_types() { if (input_shape.rank().is_static()) { OPENVINO_SUPPRESS_DEPRECATED_START - const auto normalized_axis = ngraph::normalize_axis(this, axis, input_shape.rank()); + const auto normalized_axis = ov::normalize_axis(this, axis, input_shape.rank()); OPENVINO_SUPPRESS_DEPRECATED_END const auto dim_at_axis = input_shape[normalized_axis]; diff --git a/src/core/src/op/util/gather_base.cpp b/src/core/src/op/util/gather_base.cpp index 
d89b6c8c30d29d..be0d0158dce9fd 100644 --- a/src/core/src/op/util/gather_base.cpp +++ b/src/core/src/op/util/gather_base.cpp @@ -118,15 +118,15 @@ bool evaluate_gather(const ngraph::HostTensorPtr& arg0, using ov::element::Type_t; switch (out->get_element_type()) { - NGRAPH_TYPE_CASE(evaluate_gather, i32, arg0, arg1, out, axis, batch_dims); - NGRAPH_TYPE_CASE(evaluate_gather, i64, arg0, arg1, out, axis, batch_dims); - NGRAPH_TYPE_CASE(evaluate_gather, i8, arg0, arg1, out, axis, batch_dims); - NGRAPH_TYPE_CASE(evaluate_gather, u8, arg0, arg1, out, axis, batch_dims); - NGRAPH_TYPE_CASE(evaluate_gather, u32, arg0, arg1, out, axis, batch_dims); - NGRAPH_TYPE_CASE(evaluate_gather, u64, arg0, arg1, out, axis, batch_dims); - NGRAPH_TYPE_CASE(evaluate_gather, f16, arg0, arg1, out, axis, batch_dims); - NGRAPH_TYPE_CASE(evaluate_gather, f32, arg0, arg1, out, axis, batch_dims); - NGRAPH_TYPE_CASE(evaluate_gather, boolean, arg0, arg1, out, axis, batch_dims); + OPENVINO_TYPE_CASE(evaluate_gather, i32, arg0, arg1, out, axis, batch_dims); + OPENVINO_TYPE_CASE(evaluate_gather, i64, arg0, arg1, out, axis, batch_dims); + OPENVINO_TYPE_CASE(evaluate_gather, i8, arg0, arg1, out, axis, batch_dims); + OPENVINO_TYPE_CASE(evaluate_gather, u8, arg0, arg1, out, axis, batch_dims); + OPENVINO_TYPE_CASE(evaluate_gather, u32, arg0, arg1, out, axis, batch_dims); + OPENVINO_TYPE_CASE(evaluate_gather, u64, arg0, arg1, out, axis, batch_dims); + OPENVINO_TYPE_CASE(evaluate_gather, f16, arg0, arg1, out, axis, batch_dims); + OPENVINO_TYPE_CASE(evaluate_gather, f32, arg0, arg1, out, axis, batch_dims); + OPENVINO_TYPE_CASE(evaluate_gather, boolean, arg0, arg1, out, axis, batch_dims); default: rc = false; break; diff --git a/src/core/tests/eval.cpp b/src/core/tests/eval.cpp index 3950546f5c96db..86b3cc2ecf82ce 100644 --- a/src/core/tests/eval.cpp +++ b/src/core/tests/eval.cpp @@ -15,6 +15,7 @@ #include "gmock/gmock.h" #include "gtest/gtest.h" #include "ngraph/validation_util.hpp" +#include "openvino/core/except.hpp" #include "openvino/core/model.hpp" #include "openvino/core/shape.hpp" #include "openvino/core/type/element_type.hpp" @@ -2664,3 +2665,15 @@ TEST(eval, evaluate_cum_sum_v0_exclusive_reversed) { EXPECT_EQ(outputs[0].get_shape(), data->get_shape()); EXPECT_EQ(memcmp(outputs[0].data(), out_expected, sizeof(out_expected)), 0); } + +TEST(eval, invalid_shape) { + auto p1 = make_shared(element::f32, PartialShape{1, 2}); + auto p2 = make_shared(element::f32, PartialShape{1, 2}); + auto add = make_shared(p1, p2); + auto model = make_shared(OutputVector{add}, ParameterVector{p1, p2}); + auto result_tensor = ov::Tensor(element::f32, {1, 2}); + auto out_vector = ov::TensorVector{result_tensor}; + auto in_vector = ov::TensorVector{make_tensor({1, 3}, {1.0f, 1.0f, 1.0f}), + make_tensor({1, 3}, {7.0f, 6.0f, 1.0f})}; + ASSERT_THROW(model->evaluate(out_vector, in_vector), ov::Exception); +} From 5764b5c108d65129869abb156c643f277421933f Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Fri, 6 Oct 2023 13:50:44 +0400 Subject: [PATCH 089/257] Use RPATH instead of RUNPATH for wheel package (#20239) --- src/bindings/python/wheel/setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/bindings/python/wheel/setup.py b/src/bindings/python/wheel/setup.py index 94de22103d3b70..4b056912212de0 100644 --- a/src/bindings/python/wheel/setup.py +++ b/src/bindings/python/wheel/setup.py @@ -512,7 +512,7 @@ def set_rpath(rpath, binary): log.warn(f"WARNING: {binary}: missed ELF header") return rpath_tool = "patchelf" - cmd = 
[rpath_tool, "--set-rpath", rpath, binary] + cmd = [rpath_tool, "--set-rpath", rpath, binary, "--force-rpath"] elif sys.platform == "darwin": rpath_tool = "install_name_tool" cmd = [rpath_tool, "-add_rpath", rpath, binary] From 27ae3fb217b1e2150e29ec51d1155b669e424a74 Mon Sep 17 00:00:00 2001 From: Mikhail Ryzhov Date: Fri, 6 Oct 2023 15:05:03 +0200 Subject: [PATCH 090/257] changed script path (#20285) --- .github/workflows/windows.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index d4e05a91943006..e4b2f912d23a94 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -647,7 +647,7 @@ jobs: env: INSTALL_DIR: "${{ github.workspace }}\\install" INSTALL_TEST_DIR: "${{ github.workspace }}\\install\\tests" - PARALLEL_TEST_SCRIPT: "${{ github.workspace }}\\install\\tests\\functional_test_utils\\run_parallel.py" + PARALLEL_TEST_SCRIPT: "${{ github.workspace }}\\install\\tests\\functional_test_utils\\layer_tests_summary\\run_parallel.py" PARALLEL_TEST_CACHE: "${{ github.workspace }}\\install\\tests\\test_cache.lst" steps: @@ -687,7 +687,7 @@ jobs: shell: cmd run: | python3 -m pip install --upgrade pip - python3 -m pip install -r ${{ github.workspace }}\install\tests\functional_test_utils\requirements.txt + python3 -m pip install -r ${{ github.workspace }}\install\tests\functional_test_utils\layer_tests_summary\requirements.txt - name: Restore tests execution time uses: actions/cache/restore@v3 From 2fe549f9fda8898d23dfa3ac53c97a8570e53d56 Mon Sep 17 00:00:00 2001 From: Ilya Churaev Date: Fri, 6 Oct 2023 17:06:04 +0400 Subject: [PATCH 091/257] Throw an error if SELECTIVE_BUILD_STAT doesn't contain csv files (#20283) * Throw an error if SELECTIVE_BUILD_STAT doesn't contain csv files * Fixed detection STAT files for Windows * Remove regex --- src/common/conditional_compilation/CMakeLists.txt | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/common/conditional_compilation/CMakeLists.txt b/src/common/conditional_compilation/CMakeLists.txt index b0a0f2835fbabc..876558cea5e474 100644 --- a/src/common/conditional_compilation/CMakeLists.txt +++ b/src/common/conditional_compilation/CMakeLists.txt @@ -22,7 +22,13 @@ elseif(SELECTIVE_BUILD STREQUAL "ON") endif() find_host_package (Python3 REQUIRED COMPONENTS Interpreter) - file(GLOB STAT_FILES ${SELECTIVE_BUILD_STAT}) + file(TO_CMAKE_PATH ${SELECTIVE_BUILD_STAT} CMAKE_SELECTIVE_BUILD_STAT) + + file(GLOB STAT_FILES ${CMAKE_SELECTIVE_BUILD_STAT}) + + if(NOT STAT_FILES) + message(FATAL_ERROR "SELECTIVE_BUILD_STAT (${SELECTIVE_BUILD_STAT}) path doesn't contain valid csv files!") + endif() target_compile_definitions(${TARGET_NAME} INTERFACE SELECTIVE_BUILD) From 03dca446110c469fdfa86d159bd5be5a53bf7f5e Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Fri, 6 Oct 2023 23:11:03 +0400 Subject: [PATCH 092/257] Small improvements in conan, vcpkg docs (#20272) --- .../installing-openvino-conan.md | 24 +++++++++---------- .../installing-openvino-vcpkg.md | 4 ++-- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/docs/articles_en/get started/installing-openvino-overview/installing-openvino-shared/installing-openvino-conan.md b/docs/articles_en/get started/installing-openvino-overview/installing-openvino-shared/installing-openvino-conan.md index 5f444196c338fd..1748a9086ab6c3 100644 --- a/docs/articles_en/get started/installing-openvino-overview/installing-openvino-shared/installing-openvino-conan.md +++ 
b/docs/articles_en/get started/installing-openvino-overview/installing-openvino-shared/installing-openvino-conan.md @@ -39,15 +39,15 @@ Installing OpenVINO Runtime with Conan Package Manager ############################################################ -1. Install Conan 2.0.8 or higher: - - .. code-block:: console +1. `Install Conan `__ 2.0.8 or higher, for example, using pip: + + .. code-block:: sh python3 -m pip install conan 2. Create a ``conanfile.txt`` file for your OpenVINO project and add "*openvino*" dependency in there: - .. code-block:: console + .. code-block:: sh [requires] openvino/2023.1.0 @@ -58,8 +58,8 @@ Installing OpenVINO Runtime with Conan Package Manager cmake_layout Run the command below to create ``conan_toolchain.cmake`` file, which will be used to compile your project with OpenVINO: - - .. code-block:: console + + .. code-block:: sh conan install conanfile.txt --build=missing @@ -68,20 +68,20 @@ Installing OpenVINO Runtime with Conan Package Manager what options there are on the `Conan Package Manager page for OpenVINO `__ and extend the command, like so: - .. code-block:: console + .. code-block:: sh - conan install conanfile.txt --build=missing -o:h openvino/*:enable_intel_gpu=False -o:h openvino/*:enable_onnx_frontend=False' -o:h openvino/*:shared=True. - + conan install conanfile.txt --build=missing -o:h 'openvino/*:enable_intel_gpu=False' -o:h 'openvino/*:enable_onnx_frontend=False' -o:h 'openvino/*:shared=True' + 3. Configure and compile your project with OpenVINO: - - .. code-block:: console + + .. code-block:: sh cmake -DCMAKE_TOOLCHAIN_FILE= -DCMAKE_BUILD_TYPE=Release -S -B cmake --build --parallel .. note:: - OpenVINO can be used with any build interface, as long as it is supported by Conan 2.0. + OpenVINO can be used with any build interface, as long as it is supported by Conan 2.0. Read `more `__. Additional Resources ######################## diff --git a/docs/articles_en/get started/installing-openvino-overview/installing-openvino-shared/installing-openvino-vcpkg.md b/docs/articles_en/get started/installing-openvino-overview/installing-openvino-shared/installing-openvino-vcpkg.md index 8202d3f7fc1fb7..39c82520907d35 100644 --- a/docs/articles_en/get started/installing-openvino-overview/installing-openvino-shared/installing-openvino-vcpkg.md +++ b/docs/articles_en/get started/installing-openvino-overview/installing-openvino-shared/installing-openvino-vcpkg.md @@ -60,7 +60,7 @@ Installing OpenVINO Runtime .. code-block:: sh - vcpkg install openvino:x64-windows-static + vcpkg install 'openvino:x64-windows-static' Note that the vcpkg installation means building all packages and dependencies from source, which means the compiler stage will require additional time to complete the process. @@ -93,7 +93,7 @@ which means the compiler stage will require additional time to complete the proc .. 
code-block:: sh - vcpkg install openvino:x64-linux-release-dynamic + vcpkg install 'openvino:x64-linux-release-dynamic' After installation, you can use OpenVINO in your product's cmake scripts: From 7e7b6482aa66dafa8d384b17fc3481aa6233b3bf Mon Sep 17 00:00:00 2001 From: Mikhail Ryzhov Date: Sat, 7 Oct 2023 00:22:32 +0200 Subject: [PATCH 093/257] [GHA][HF] Switch python nightly tests to github runners (#20286) * switched runners to GHA * excluded timm models from nightly scope * added sudo steps * switched precommit too --- .github/workflows/linux.yml | 41 +++++++++++-------- .../model_hub_tests/torch_tests/test_timm.py | 1 - 2 files changed, 23 insertions(+), 19 deletions(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index dd10243d8d2a35..010d5fdb4b8411 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -155,12 +155,12 @@ jobs: - name: Pack Artifacts run: | - - # Add the ONNX Runtime version and skip tests list to the archive to use in the ONNX Runtime Job + + # Add the ONNX Runtime version and skip tests list to the archive to use in the ONNX Runtime Job # w/o the need to checkout repository - + cp -R ${ONNX_RUNTIME_UTILS} ${INSTALL_DIR} - + pushd ${INSTALL_DIR} tar -czvf ${BUILD_DIR}/openvino_package.tar.gz * popd @@ -432,10 +432,10 @@ jobs: - name: Install Dependencies run: | sudo -E ${INSTALL_DIR}/install_dependencies/install_openvino_dependencies.sh -c=core -y - + # Needed for downloading IRs from storage.openvinotoolkit with Python urllib sudo apt-get update && sudo apt-get install --assume-yes --no-install-recommends ca-certificates - + python3 -m pip install -r ${CONFORMANCE_TOOLS_DIR}/requirements.txt # @@ -560,7 +560,7 @@ jobs: run: | source ${INSTALL_DIR}/setupvars.sh skip_tests=$(tr -s '\n ' ':' < ${ONNX_RUNTIME_UTILS}/skip_tests) - + ./onnxruntime_test_all --gtest_filter=-$skip_tests working-directory: ${{ env.ONNX_RUNTIME_BUILD_DIR }}/RelWithDebInfo/RelWithDebInfo @@ -1090,19 +1090,23 @@ jobs: defaults: run: shell: bash - runs-on: ${{ github.event_name == 'schedule' && 'aks-linux-16-cores' || 'aks-linux-4-cores-16gb'}} - container: - image: openvinogithubactions.azurecr.io/dockerhub/ubuntu:20.04 - volumes: - - /mount/caches:/mount/caches + runs-on: ${{ github.event_name == 'schedule' && 'ubuntu-20.04-16-cores' || 'ubuntu-20.04-8-cores'}} + # TODO: Switch back to self-hosted runners + # container: + # image: openvinogithubactions.azurecr.io/dockerhub/ubuntu:20.04 + # volumes: + # - /mount/caches:/mount/caches env: INSTALL_DIR: ${{ github.workspace }}/install INSTALL_TEST_DIR: ${{ github.workspace }}/install/tests MODEL_HUB_TESTS_INSTALL_DIR: ${{ github.workspace }}/install/tests/model_hub_tests steps: + - name: Check sudo + run: if [ "$(id -u)" -eq 0 ]; then apt update && apt --assume-yes install sudo; fi + - name: Install 'actions/setup-python@v4' dependencies - run: apt-get update && apt-get install -y libssl1.1 ca-certificates + run: sudo apt-get update && sudo apt-get install -y libssl1.1 ca-certificates - uses: actions/setup-python@v4 with: @@ -1165,11 +1169,12 @@ jobs: defaults: run: shell: bash - runs-on: ${{ github.event_name == 'schedule' && 'aks-linux-16-cores' || 'aks-linux-8-cores'}} - container: - image: openvinogithubactions.azurecr.io/dockerhub/ubuntu:20.04 - volumes: - - /mount/caches:/mount/caches + runs-on: ${{ github.event_name == 'schedule' && 'ubuntu-20.04-16-cores' || 'ubuntu-20.04-8-cores'}} + # TODO: Switch back to self-hosted runners + # container: + # image: 
openvinogithubactions.azurecr.io/dockerhub/ubuntu:20.04 + # volumes: + # - /mount/caches:/mount/caches env: INSTALL_DIR: ${{ github.workspace }}/install INSTALL_TEST_DIR: ${{ github.workspace }}/install/tests diff --git a/tests/model_hub_tests/torch_tests/test_timm.py b/tests/model_hub_tests/torch_tests/test_timm.py index 4294000740759c..d08f6a8c4a9a40 100644 --- a/tests/model_hub_tests/torch_tests/test_timm.py +++ b/tests/model_hub_tests/torch_tests/test_timm.py @@ -62,6 +62,5 @@ def test_convert_model_precommit(self, name, ie_device): self.run(name, None, ie_device) @pytest.mark.parametrize("name", get_all_models()) - @pytest.mark.nightly def test_convert_model_all_models(self, name, ie_device): self.run(name, None, ie_device) From ad41d0f52f94dfd326c5a30958ce7d19a20c45b4 Mon Sep 17 00:00:00 2001 From: yanlan song Date: Sat, 7 Oct 2023 18:44:25 +0800 Subject: [PATCH 094/257] rework auto test cases (#19862) * initial commit Signed-off-by: fishbell * clean up Signed-off-by: fishbell * fix windows build failure Signed-off-by: fishbell * enable auto func tests Signed-off-by: fishbell * enable auto_func_test to ci Signed-off-by: fishbell * some clean up in gpu case Signed-off-by: fishbell * clang Signed-off-by: fishbell * fix build warning Signed-off-by: fishbell * enable new tests Signed-off-by: fishbell * fix build warning Signed-off-by: fishbell * enable consistency test Signed-off-by: fishbell * try fix build error on manylinux Signed-off-by: fishbell * enable cpplint Signed-off-by: fishbell * enable clang-format Signed-off-by: fishbell enable some tests Signed-off-by: fishbell * fix typo Signed-off-by: fishbell * clang for unit tests Signed-off-by: fishbell * fix merge conflict Signed-off-by: fishbell --------- Signed-off-by: fishbell --- .ci/azure/linux.yml | 3 + .ci/azure/windows.yml | 3 + .github/workflows/linux.yml | 6 + .github/workflows/windows.yml | 5 + src/inference/src/infer_request.cpp | 2 + src/plugins/auto/src/plugin.cpp | 5 +- src/plugins/auto/tests/CMakeLists.txt | 1 + .../auto/tests/functional/CMakeLists.txt | 34 + ...sync_compiled_for_multiple_device_test.cpp | 97 +++ .../functional/behavior/auto_func_test.cpp | 790 ++++++++++++++++++ .../functional/behavior/auto_func_test.hpp | 133 +++ .../functional/behavior/caching_test.cpp | 60 ++ .../functional/behavior/callback_test.cpp | 116 +++ .../behavior/infer_consistency_test.cpp | 25 + .../behavior/infer_consistency_test.hpp | 105 +++ .../behavior/infer_multi_threading_tests.cpp | 114 +++ .../tests/functional/behavior/io_tensor.cpp | 172 ++++ .../tests/functional/behavior/io_tensor.hpp | 51 ++ .../behavior/life_time_batch_enabled_test.cpp | 57 ++ .../functional/behavior/property_test.cpp | 99 +++ .../behavior/remote_tensor_test.cpp | 104 +++ .../tests/functional/behavior/wait_test.cpp | 73 ++ .../executable_network/exec_network_base.cpp | 44 + .../executable_network/get_metric.cpp | 36 + .../behavior/infer_request/callback.cpp | 23 + .../behavior/infer_request/io_blob.cpp | 29 + .../behavior/infer_request/memory_states.cpp | 20 +- .../behavior/infer_request/multitheading.cpp | 27 + .../behavior/infer_request/perf_counters.cpp | 24 + .../infer_request/set_blob_by_type.cpp | 33 + .../behavior/infer_request/wait.cpp | 28 + .../core_integration.cpp | 30 + .../exec_network_base.cpp | 35 + .../ov_exec_net_import_export.cpp | 32 + .../ov_executable_network/properties.cpp | 144 ++++ .../behavior/ov_infer_request/callback.cpp | 25 + .../infer_request_dynamic.cpp | 47 ++ .../ov_infer_request/inference_chaining.cpp | 25 + 
.../behavior/ov_infer_request/io_tensor.cpp | 74 ++ .../ov_infer_request/multithreading.cpp | 26 + .../ov_infer_request/perf_counters.cpp | 26 + .../behavior/ov_infer_request/wait.cpp | 26 + .../behavior/ov_plugin/caching_tests.cpp | 47 ++ .../behavior/ov_plugin/core_integration.cpp | 60 ++ .../behavior/ov_plugin/life_time.cpp | 22 + .../behavior/ov_plugin/properties_tests.cpp | 165 ++++ .../behavior/plugin/configuration_tests.cpp | 191 +++++ .../behavior/plugin/core_integration.cpp | 46 + .../behavior/plugin/core_threading_tests.cpp | 37 + .../behavior/plugin/set_preprocess.cpp | 84 ++ .../behavior/plugin/version.cpp | 18 + .../shared_tests_instances/core_config.cpp | 17 + .../set_device_name.cpp | 17 + .../skip_tests_config.cpp | 80 ++ .../auto/tests/unit/auto_unit_test.cpp | 321 +++---- .../tests/unit/compile_model_metric_test.cpp | 268 +++--- .../unit/compile_model_property_test.cpp | 132 +-- src/plugins/auto/tests/unit/ctput_test.cpp | 63 +- .../tests/unit/default_perf_hint_test.cpp | 173 ++-- .../auto/tests/unit/dynamic_output_test.cpp | 71 +- .../auto/tests/unit/get_device_list.cpp | 131 +-- .../tests/unit/include/auto_unit_test.hpp | 121 ++- .../auto/tests/unit/include/gmock_plugin.hpp | 53 +- .../auto/tests/unit/include/mock_common.hpp | 145 ---- .../tests/unit/include/mock_log_utils.hpp | 10 +- .../tests/unit/key_network_priority_test.cpp | 391 +++++---- .../auto/tests/unit/life_time_test.cpp | 84 ++ .../auto/tests/unit/log_utils_format_test.cpp | 134 +-- .../auto/tests/unit/log_utils_test.cpp | 95 +-- src/plugins/auto/tests/unit/mock_common.cpp | 62 -- .../tests/unit/parse_meta_device_test.cpp | 72 +- src/plugins/auto/tests/unit/property_test.cpp | 100 --- .../auto/tests/unit/release_helper_test.cpp | 87 +- .../auto/tests/unit/runtime_fallback_test.cpp | 228 +++-- .../tests/unit/select_device_failed_test.cpp | 120 +-- .../auto/tests/unit/select_device_test.cpp | 102 ++- .../auto/tests/unit/set_log_level_test.cpp | 19 +- .../unit/startup_fallback_property_test.cpp | 47 +- .../auto_batch/src/sync_infer_request.cpp | 8 +- .../executable_network/exec_network_base.cpp | 39 - .../executable_network/get_metric.cpp | 10 +- .../behavior/infer_request/callback.cpp | 16 - .../behavior/infer_request/config.cpp | 35 - .../behavior/infer_request/io_blob.cpp | 21 - .../behavior/infer_request/memory_states.cpp | 22 - .../behavior/infer_request/multitheading.cpp | 17 - .../behavior/infer_request/perf_counters.cpp | 21 - .../infer_request/set_blob_by_type.cpp | 15 - .../behavior/infer_request/wait.cpp | 21 - .../core_integration.cpp | 4 +- .../exec_network_base.cpp | 37 - .../ov_exec_net_import_export.cpp | 10 - .../ov_executable_network/properties.cpp | 117 +-- .../behavior/ov_infer_request/callback.cpp | 16 - .../ov_infer_request/infer_consistency.cpp | 31 - .../infer_request_dynamic.cpp | 15 - .../ov_infer_request/inference_chaining.cpp | 16 - .../behavior/ov_infer_request/io_tensor.cpp | 48 -- .../ov_infer_request/iteration_chaining.cpp | 11 - .../ov_infer_request/multithreading.cpp | 17 - .../ov_infer_request/perf_counters.cpp | 20 - .../behavior/ov_infer_request/wait.cpp | 21 - .../behavior/ov_plugin/caching_tests.cpp | 47 -- .../behavior/ov_plugin/core_integration.cpp | 21 - .../behavior/ov_plugin/life_time.cpp | 4 +- .../behavior/ov_plugin/properties_tests.cpp | 125 +-- .../behavior/plugin/configuration_tests.cpp | 150 ---- .../behavior/plugin/core_integration.cpp | 12 +- .../behavior/plugin/core_threading_tests.cpp | 7 - .../behavior/plugin/set_preprocess.cpp | 76 -- 
.../behavior/plugin/version.cpp | 8 - .../multi/cpu_remote_blob_tests.cpp | 15 - .../skip_tests_config.cpp | 19 +- .../intel_gpu/tests/functional/CMakeLists.txt | 11 - .../executable_network/exec_net_base.cpp | 28 - .../executable_network/get_metric.cpp | 10 +- .../behavior/infer_request/callback.cpp | 24 - .../behavior/infer_request/config.cpp | 12 - .../behavior/infer_request/io_blob.cpp | 25 - .../behavior/infer_request/multithreading.cpp | 25 - .../behavior/infer_request/perf_counters.cpp | 26 - .../infer_request/set_blob_by_type.cpp | 16 +- .../behavior/infer_request/wait.cpp | 24 - .../ov_executable_network/exec_net_base.cpp | 14 - .../ov_executable_network/get_metric.cpp | 137 +-- .../behavior/ov_infer_request/callback.cpp | 16 - .../ov_infer_request/infer_consistency.cpp | 61 -- .../infer_request_dynamic.cpp | 64 -- .../behavior/ov_infer_request/io_tensor.cpp | 34 - .../ov_infer_request/multithreading.cpp | 16 - .../ov_infer_request/perf_counters.cpp | 50 -- .../behavior/ov_infer_request/wait.cpp | 20 - .../behavior/ov_plugin/caching_tests.cpp | 38 - .../behavior/ov_plugin/life_time.cpp | 18 +- .../behavior/ov_plugin/properties_tests.cpp | 105 +-- .../behavior/ov_plugin/remote.cpp | 12 - .../behavior/plugin/caching_tests.cpp | 21 - .../behavior/plugin/configuration_tests.cpp | 112 --- .../behavior/plugin/core_integration.cpp | 10 +- .../behavior/plugin/set_preprocess.cpp | 26 - .../behavior/plugin/version.cpp | 8 - .../multi/gpu_remote_blob_tests.cpp | 154 ---- .../skip_tests_config.cpp | 14 +- 143 files changed, 5065 insertions(+), 3760 deletions(-) create mode 100644 src/plugins/auto/tests/functional/CMakeLists.txt create mode 100644 src/plugins/auto/tests/functional/behavior/async_compiled_for_multiple_device_test.cpp create mode 100644 src/plugins/auto/tests/functional/behavior/auto_func_test.cpp create mode 100644 src/plugins/auto/tests/functional/behavior/auto_func_test.hpp create mode 100644 src/plugins/auto/tests/functional/behavior/caching_test.cpp create mode 100644 src/plugins/auto/tests/functional/behavior/callback_test.cpp create mode 100644 src/plugins/auto/tests/functional/behavior/infer_consistency_test.cpp create mode 100644 src/plugins/auto/tests/functional/behavior/infer_consistency_test.hpp create mode 100644 src/plugins/auto/tests/functional/behavior/infer_multi_threading_tests.cpp create mode 100644 src/plugins/auto/tests/functional/behavior/io_tensor.cpp create mode 100644 src/plugins/auto/tests/functional/behavior/io_tensor.hpp create mode 100644 src/plugins/auto/tests/functional/behavior/life_time_batch_enabled_test.cpp create mode 100644 src/plugins/auto/tests/functional/behavior/property_test.cpp create mode 100644 src/plugins/auto/tests/functional/behavior/remote_tensor_test.cpp create mode 100644 src/plugins/auto/tests/functional/behavior/wait_test.cpp create mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/behavior/executable_network/exec_network_base.cpp create mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/behavior/executable_network/get_metric.cpp create mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/callback.cpp create mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/io_blob.cpp rename src/plugins/{intel_gpu => auto}/tests/functional/shared_tests_instances/behavior/infer_request/memory_states.cpp (53%) create mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/multitheading.cpp 
create mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/perf_counters.cpp create mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/set_blob_by_type.cpp create mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/wait.cpp create mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/core_integration.cpp create mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/exec_network_base.cpp create mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/ov_exec_net_import_export.cpp create mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/properties.cpp create mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_infer_request/callback.cpp create mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp create mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_infer_request/inference_chaining.cpp create mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp create mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_infer_request/multithreading.cpp create mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_infer_request/perf_counters.cpp create mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_infer_request/wait.cpp create mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_plugin/caching_tests.cpp create mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_plugin/core_integration.cpp create mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_plugin/life_time.cpp create mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp create mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/behavior/plugin/configuration_tests.cpp create mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/behavior/plugin/core_integration.cpp create mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/behavior/plugin/core_threading_tests.cpp create mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/behavior/plugin/set_preprocess.cpp create mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/behavior/plugin/version.cpp create mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/core_config.cpp create mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/set_device_name.cpp create mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/skip_tests_config.cpp delete mode 100644 src/plugins/auto/tests/unit/include/mock_common.hpp create mode 100644 src/plugins/auto/tests/unit/life_time_test.cpp delete mode 100644 src/plugins/auto/tests/unit/mock_common.cpp delete mode 100644 src/plugins/auto/tests/unit/property_test.cpp delete mode 100644 src/plugins/intel_cpu/tests/functional/shared_tests_instances/multi/cpu_remote_blob_tests.cpp delete mode 100644 src/plugins/intel_gpu/tests/functional/shared_tests_instances/multi/gpu_remote_blob_tests.cpp diff --git a/.ci/azure/linux.yml b/.ci/azure/linux.yml index 
a4d710bafc2112..8626f9d609ed0e 100644 --- a/.ci/azure/linux.yml +++ b/.ci/azure/linux.yml @@ -411,6 +411,9 @@ jobs: - script: $(RUN_PREFIX) $(INSTALL_TEST_DIR)/ov_auto_unit_tests --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-ov_auto_unit_tests.xml displayName: 'AUTO UT' + - script: $(RUN_PREFIX) $(INSTALL_TEST_DIR)/ov_auto_func_tests --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-ov_auto_func_tests.xml + displayName: 'AUTO FuncTests' + - script: $(RUN_PREFIX) $(INSTALL_TEST_DIR)/ov_auto_batch_unit_tests --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-ov_auto_batch_unit_tests.xml displayName: 'AutoBatch UT' diff --git a/.ci/azure/windows.yml b/.ci/azure/windows.yml index cbff7caa7533cd..a36238fca4a874 100644 --- a/.ci/azure/windows.yml +++ b/.ci/azure/windows.yml @@ -305,6 +305,9 @@ jobs: - script: call $(SETUPVARS) && $(INSTALL_TEST_DIR)\ov_auto_unit_tests --gtest_output=xml:$(INSTALL_TEST_DIR)\TEST-ov_auto_unit_tests.xml displayName: 'AUTO UT' + - script: call $(SETUPVARS) && $(INSTALL_TEST_DIR)\ov_auto_func_tests --gtest_output=xml:$(INSTALL_TEST_DIR)\TEST-ov_auto_func_tests.xml + displayName: 'AUTO FuncTests' + - script: call $(SETUPVARS) && $(INSTALL_TEST_DIR)\ov_auto_batch_unit_tests --gtest_output=xml:$(INSTALL_TEST_DIR)\TEST-ov_auto_batch_unit_tests.xml displayName: 'AutoBatch UT' diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 010d5fdb4b8411..16affeb9bfa01c 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -733,6 +733,12 @@ jobs: ${INSTALL_TEST_DIR}/ov_auto_unit_tests --gtest_print_time=1 \ --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-ov_auto_unit_tests.xml + - name: AUTO func Tests + run: | + source ${{ env.INSTALL_DIR }}/setupvars.sh + ${{ env.INSTALL_TEST_DIR }}/ov_auto_func_tests --gtest_print_time=1 \ + --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ov_auto_func_tests.xml + - name: Template plugin func tests run: | source ${INSTALL_DIR}/setupvars.sh diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index e4b2f912d23a94..e8b539c7a1d49d 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -595,6 +595,11 @@ jobs: run: | call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_auto_unit_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ov_auto_unit_tests.xml + - name: AUTO FuncTests + shell: cmd + run: | + call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_auto_func_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ov_auto_func_tests.xml + - name: Template plugin tests shell: cmd run: | diff --git a/src/inference/src/infer_request.cpp b/src/inference/src/infer_request.cpp index 18b97cdf7b7eb8..1023479546d1a8 100644 --- a/src/inference/src/infer_request.cpp +++ b/src/inference/src/infer_request.cpp @@ -30,6 +30,8 @@ OPENVINO_SUPPRESS_DEPRECATED_START __VA_ARGS__; \ } catch (const ::InferenceEngine::RequestBusy& ex) { \ ov::Busy::create(ex.what()); \ + } catch (const ov::Busy&) { \ + throw; \ } catch (const std::exception& ex) { \ OPENVINO_THROW(ex.what()); \ } catch (...) 
{ \ diff --git a/src/plugins/auto/src/plugin.cpp b/src/plugins/auto/src/plugin.cpp index aea9af9a4c79b6..8e5131b7a3c1bb 100644 --- a/src/plugins/auto/src/plugin.cpp +++ b/src/plugins/auto/src/plugin.cpp @@ -415,8 +415,9 @@ std::shared_ptr Plugin::compile_model_impl(const std::string load_config.set_user_property(pre_process_config(properties)); load_config.apply_user_properties(); if (!work_mode_auto) { - if (iter_config != properties.end() && iter_config->second != "THROUGHPUT") { - LOG_WARNING_TAG("User set perf_hint:%s, but MULTI supports THROUGHPUT only", iter_config->second.as().c_str()); + if (iter_config != properties.end() && iter_config->second.as() != "THROUGHPUT") { + LOG_WARNING_TAG("User set perf_hint:%s, but MULTI supports THROUGHPUT only", + iter_config->second.as().c_str()); } load_config.set_property(ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)); } diff --git a/src/plugins/auto/tests/CMakeLists.txt b/src/plugins/auto/tests/CMakeLists.txt index c9273285747381..bce0f68667ca23 100644 --- a/src/plugins/auto/tests/CMakeLists.txt +++ b/src/plugins/auto/tests/CMakeLists.txt @@ -8,4 +8,5 @@ if(CMAKE_COMPILER_IS_GNUCXX AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL set(CMAKE_INTERPROCEDURAL_OPTIMIZATION_RELEASE ${ENABLE_LTO}) endif() +add_subdirectory(functional) add_subdirectory(unit) \ No newline at end of file diff --git a/src/plugins/auto/tests/functional/CMakeLists.txt b/src/plugins/auto/tests/functional/CMakeLists.txt new file mode 100644 index 00000000000000..44bef91f8fa1d9 --- /dev/null +++ b/src/plugins/auto/tests/functional/CMakeLists.txt @@ -0,0 +1,34 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +set(TARGET_NAME ov_auto_func_tests) + +if(ENABLE_AUTO_BATCH) + list(APPEND DEPENDENCIES openvino_auto_batch_plugin) + list(APPEND COMPILE_DEFINITIONS ENABLE_AUTO_BATCH) +endif() + +if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") + ie_add_compiler_flags(/wd4305) +endif() + +ov_add_test_target( + NAME ${TARGET_NAME} + ROOT ${CMAKE_CURRENT_SOURCE_DIR} + LINK_LIBRARIES + openvino::runtime::dev + gtest + gtest_main + openvino::funcSharedTests + INCLUDES + ${CMAKE_CURRENT_SOURCE_DIR} + ${TEST_COMMON_INCLUDE_DIR} + ADD_CLANG_FORMAT + LABELS + Multi + Auto +) + +target_compile_definitions(${TARGET_NAME} PRIVATE ${COMPILE_DEFINITIONS}) +set_ie_threading_interface_for(${TARGET_NAME}) \ No newline at end of file diff --git a/src/plugins/auto/tests/functional/behavior/async_compiled_for_multiple_device_test.cpp b/src/plugins/auto/tests/functional/behavior/async_compiled_for_multiple_device_test.cpp new file mode 100644 index 00000000000000..bfd26cc6ffd260 --- /dev/null +++ b/src/plugins/auto/tests/functional/behavior/async_compiled_for_multiple_device_test.cpp @@ -0,0 +1,97 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "auto_func_test.hpp" +#ifdef __GLIBC__ +# include +# if __GLIBC_MINOR__ >= 34 +# define ENABLETESTTHREADING +# endif +#endif + +using namespace ov::auto_plugin::tests; + +#ifdef ENABLETESTTHREADING +TEST_F(AutoFuncTests, can_compile_with_multiple_devices) { + ov::CompiledModel compiled_model; + ASSERT_NO_THROW(compiled_model = + core.compile_model(model_can_batch, "AUTO", {ov::device::priorities("MOCK_GPU", "MOCK_CPU")})); + compiled_model = core.compile_model(model_can_batch, + "AUTO", + {ov::device::priorities("MOCK_GPU", "MOCK_CPU"), + ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)}); +} + 
+TEST_F(AutoFuncTests, threading_test) { + ThreadingTest::runParallel( + [&]() { + (void)core.compile_model(model_can_batch, "AUTO", {ov::device::priorities("MOCK_GPU", "MOCK_CPU")}); + }, + 10, + 10); + ThreadingTest::runParallel( + [&]() { + (void)core.compile_model(model_can_batch, + "AUTO", + {ov::device::priorities("MOCK_GPU", "MOCK_CPU"), + ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)}); + }, + 10, + 10); +} + +TEST_F(AutoFuncTests, threading_test_cache_enabled) { + core.set_property(ov::cache_dir(cache_path)); + ThreadingTest::runParallel( + [&]() { + (void)core.compile_model(model_can_batch, + "AUTO", + {ov::device::priorities("MOCK_GPU", "MOCK_CPU"), + ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)}); + }, + 10, + 10); + core.set_property(ov::cache_dir("")); +} + +TEST_F(AutoFuncTests, threading_test_get_version) { + ThreadingTest::runParallel([&]() { + auto versions = core.get_versions("AUTO"); + ASSERT_LE(1u, versions.size()); + }); +} + +TEST_F(AutoFuncTests, theading_compiled_with_cpu_help) { + ThreadingTest::runParallel( + [&]() { + (void)core.compile_model(model_can_batch, "AUTO", {ov::device::priorities("MOCK_GPU", "MOCK_CPU")}); + }, + 10, + 10); +} + +TEST_F(AutoFuncTests, threading_test_hardware_slower) { + core.compile_model(model_cannot_batch, "MOCK_CPU"); + core.compile_model(model_cannot_batch, "MOCK_GPU"); // need to initialize the order of plugins in mock_engine + register_plugin_mock_gpu_compile_slower(core, "MOCK_GPU_SLOWER", {}); + ThreadingTest::runParallel( + [&]() { + (void)core.compile_model(model_can_batch, "AUTO", {ov::device::priorities("MOCK_GPU_SLOWER", "MOCK_CPU")}); + }, + 10, + 10); +} + +TEST_F(AutoFuncTests, threading_test_cpu_help_slower) { + core.compile_model(model_cannot_batch, "MOCK_CPU"); + core.compile_model(model_cannot_batch, "MOCK_GPU"); // need to initialize the order of plugins in mock_engine + register_plugin_mock_cpu_compile_slower(core, "MOCK_CPU_SLOWER", {}); + ThreadingTest::runParallel( + [&]() { + (void)core.compile_model(model_can_batch, "AUTO", {ov::device::priorities("MOCK_GPU", "MOCK_CPU_SLOWER")}); + }, + 10, + 10); +} +#endif \ No newline at end of file diff --git a/src/plugins/auto/tests/functional/behavior/auto_func_test.cpp b/src/plugins/auto/tests/functional/behavior/auto_func_test.cpp new file mode 100644 index 00000000000000..1ba14b66d57207 --- /dev/null +++ b/src/plugins/auto/tests/functional/behavior/auto_func_test.cpp @@ -0,0 +1,790 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "auto_func_test.hpp" + +#include +#include +#include + +#include "common_test_utils/file_utils.hpp" +#include "ie_plugin_config.hpp" +#include "openvino/core/any.hpp" +#include "openvino/core/except.hpp" +#include "openvino/opsets/opset11.hpp" +#include "openvino/pass/serialize.hpp" +#include "openvino/runtime/auto/properties.hpp" +#include "openvino/runtime/intel_gpu/properties.hpp" +#include "openvino/runtime/internal_properties.hpp" +#include "openvino/runtime/iplugin.hpp" +#include "openvino/runtime/iremote_context.hpp" +#include "openvino/runtime/iremote_tensor.hpp" +#include "openvino/runtime/make_tensor.hpp" +#include "openvino/runtime/properties.hpp" +#include "openvino/util/file_util.hpp" +#include "openvino/util/shared_object.hpp" + +namespace { + +std::string get_mock_engine_path() { + std::string mockEngineName("mock_engine"); + return 
ov::util::make_plugin_library_name(ov::test::utils::getExecutableDirectory(), + mockEngineName + IE_BUILD_POSTFIX); +} + +template +std::function make_std_function(const std::shared_ptr so, const std::string& functionName) { + std::function ptr(reinterpret_cast(ov::util::get_symbol(so, functionName.c_str()))); + return ptr; +} + +bool support_model(const std::shared_ptr& model, const ov::SupportedOpsMap& supported_ops) { + for (const auto& op : model->get_ops()) { + if (supported_ops.find(op->get_friendly_name()) == supported_ops.end()) + return false; + } + return true; +} + +ov::PropertyName RO_property(const std::string& propertyName) { + return ov::PropertyName(propertyName, ov::PropertyMutability::RO); +} + +ov::PropertyName RW_property(const std::string& propertyName) { + return ov::PropertyName(propertyName, ov::PropertyMutability::RW); +} + +} // namespace + +void ov::auto_plugin::tests::AutoFuncTests::SetUp() { + if (m_mock_plugins.empty()) { + register_plugin_mock_cpu(core, "MOCK_CPU", {}); + register_plugin_mock_gpu(core, "MOCK_GPU", {}); + } + model_can_batch = create_model_with_batch_possible(); + model_cannot_batch = create_model_with_reshape(); + auto hash = std::hash()(::testing::UnitTest::GetInstance()->current_test_info()->name()); + std::stringstream ss; + ss << std::this_thread::get_id(); + cache_path = + "threading_test" + std::to_string(hash) + "_" + ss.str() + "_" + ov::test::utils::GetTimestamp() + "_cache"; +} + +void ov::auto_plugin::tests::AutoFuncTests::TearDown() { + ov::test::utils::removeFilesWithExt(cache_path, "blob"); + ov::test::utils::removeDir(cache_path); +} + +ov::Tensor ov::auto_plugin::tests::AutoFuncTests::create_and_fill_tensor(const ov::element::Type& type, + const ov::Shape& shape) { + switch (type) { + case ov::element::Type_t::i64: + return create_tensor::value_type>(type, shape); + default: + break; + } + OPENVINO_THROW("Cannot generate tensor. 
Unsupported element type."); +} + +std::shared_ptr ov::auto_plugin::tests::AutoFuncTests::create_model_with_batch_possible() { + auto param = std::make_shared(ov::element::i64, ov::Shape{1, 3, 2, 2}); + param->set_friendly_name("input"); + auto const_value = ov::opset11::Constant::create(ov::element::i64, ov::Shape{1, 1, 1, 1}, {1}); + const_value->set_friendly_name("const_val"); + auto add = std::make_shared(param, const_value); + add->set_friendly_name("add"); + auto result = std::make_shared(add); + result->set_friendly_name("res"); + return std::make_shared(ov::ResultVector{result}, ov::ParameterVector{param}); +} + +std::shared_ptr ov::auto_plugin::tests::AutoFuncTests::create_model_with_reshape() { + auto param = std::make_shared(ov::element::i64, ov::Shape{1, 3, 2, 2}); + param->set_friendly_name("input"); + auto const_value = ov::opset11::Constant::create(ov::element::i64, ov::Shape{1, 1, 1, 1}, {1}); + const_value->set_friendly_name("const_val"); + auto add = std::make_shared(param, const_value); + add->set_friendly_name("add"); + auto reshape_val = ov::opset11::Constant::create(ov::element::i64, ov::Shape{1}, {-1}); + reshape_val->set_friendly_name("reshape_val"); + auto reshape = std::make_shared(add, reshape_val, true); + reshape->set_friendly_name("reshape"); + auto result = std::make_shared(reshape); + result->set_friendly_name("res"); + return std::make_shared(ov::ResultVector{result}, ov::ParameterVector{param}); +} + +// Mock plugins + +class MockCompiledModel : public ov::ICompiledModel { +public: + MockCompiledModel(const std::shared_ptr& model, + const std::shared_ptr& plugin, + const ov::AnyMap& config) + : ov::ICompiledModel(model, plugin), + m_config(config), + m_model(model), + m_has_context(false) { + try { + m_context = plugin->get_default_context(config); + } catch (ov::Exception&) { + } + } + + MockCompiledModel(const std::shared_ptr& model, + const std::shared_ptr& plugin, + const ov::AnyMap& config, + const ov::SoPtr& context) + : ov::ICompiledModel(model, plugin), + m_config(config), + m_model(model), + m_has_context(true), + m_context(context) {} + + // Methods from a base class ov::ICompiledModel + void export_model(std::ostream& model) const override { + ov::pass::StreamSerialize(model, std::function()) + .run_on_model(std::const_pointer_cast(m_model)); + } + + std::shared_ptr get_runtime_model() const override { + return m_model; + } + + void set_property(const ov::AnyMap& properties) override { + OPENVINO_NOT_IMPLEMENTED; + } + + ov::Any get_property(const std::string& name) const override { + auto prop = m_config.find(name); + if (prop != m_config.end()) + return prop->second; + if (name == ov::supported_properties) { + std::vector supportedProperties{ov::optimal_number_of_infer_requests, + ov::hint::performance_mode}; + + return decltype(ov::supported_properties)::value_type(supportedProperties); + } else if (name == ov::optimal_number_of_infer_requests.name()) { + return decltype(ov::optimal_number_of_infer_requests)::value_type(2); + } else if (name == ov::model_name) { + return decltype(ov::model_name)::value_type(m_model->get_name()); + } else if (name == ov::execution_devices) { + return decltype(ov::execution_devices)::value_type({get_plugin()->get_device_name()}); + } + OPENVINO_NOT_IMPLEMENTED; + } + + std::shared_ptr create_sync_infer_request() const override; + + const std::shared_ptr& get_model() const { + return m_model; + } + + ov::SoPtr get_context() const { + return m_context; + } + + bool has_context() const { + return m_has_context; + 
} + +private: + ov::AnyMap m_config; + std::shared_ptr m_model; + bool m_has_context; + ov::SoPtr m_context; +}; + +class MockInferRequest : public ov::ISyncInferRequest { +public: + MockInferRequest(const std::shared_ptr& compiled_model) + : ov::ISyncInferRequest(compiled_model) { + OPENVINO_ASSERT(compiled_model); + m_model = compiled_model->get_model(); + m_has_context = compiled_model->get_context() != nullptr; + // Allocate input/output tensors + for (const auto& input : get_inputs()) { + allocate_tensor(input, [this, input, compiled_model](ov::SoPtr& tensor) { + // Can add a check to avoid double work in case of shared tensors + allocate_tensor_impl(tensor, + input.get_element_type(), + input.get_partial_shape().is_dynamic() ? ov::Shape{0} : input.get_shape(), + compiled_model->has_context(), + compiled_model->get_context()); + }); + } + for (const auto& output : get_outputs()) { + allocate_tensor(output, [this, output, compiled_model](ov::SoPtr& tensor) { + // Can add a check to avoid double work in case of shared tensors + allocate_tensor_impl(tensor, + output.get_element_type(), + output.get_partial_shape().is_dynamic() ? ov::Shape{0} : output.get_shape(), + compiled_model->has_context(), + compiled_model->get_context()); + }); + } + } + ~MockInferRequest() = default; + + void infer() override { + ov::TensorVector input_tensors; + bool evaludate_flag = true; + for (const auto& input : get_inputs()) { + auto tensor = get_tensor(input); + // check if valid if remote tensor + if (std::dynamic_pointer_cast(tensor._ptr) && m_has_context) { + evaludate_flag = false; + auto remote_tensor = std::dynamic_pointer_cast(tensor._ptr); + if (remote_tensor->get_device_name() != get_compiled_model()->get_context()->get_device_name()) + OPENVINO_THROW("cannot consume the buffer!"); + } + input_tensors.emplace_back(ov::make_tensor(tensor)); + } + ov::TensorVector output_tensors; + for (const auto& output : get_outputs()) { + auto tensor = get_tensor(output); + // check if valid if remote tensor + if (std::dynamic_pointer_cast(tensor._ptr) && m_has_context) { + evaludate_flag = false; + auto remote_tensor = std::dynamic_pointer_cast(tensor._ptr); + if (remote_tensor->get_device_name() != get_compiled_model()->get_context()->get_device_name()) + OPENVINO_THROW("cannot consume the buffer!"); + } + output_tensors.emplace_back(ov::make_tensor(tensor)); + } + if (evaludate_flag) { + std::this_thread::sleep_for(std::chrono::milliseconds(100)); // add delay for test + m_model->evaluate(output_tensors, input_tensors); + } + } + std::vector> query_state() const override { + OPENVINO_NOT_IMPLEMENTED; + } + std::vector get_profiling_info() const override { + OPENVINO_NOT_IMPLEMENTED; + } + +private: + void allocate_tensor_impl(ov::SoPtr& tensor, + const ov::element::Type& element_type, + const ov::Shape& shape, + bool has_context, + ov::SoPtr context) { + if (!tensor || tensor->get_element_type() != element_type) { + if (has_context) { + tensor = context->create_tensor(element_type, shape, {}); + } else { + tensor = ov::SoPtr(ov::make_tensor(element_type, shape), nullptr); + } + } else { + tensor->set_shape(shape); + } + } + std::shared_ptr m_model; + bool m_has_context; +}; + +std::shared_ptr MockCompiledModel::create_sync_infer_request() const { + return std::make_shared(std::dynamic_pointer_cast(shared_from_this())); +} + +class MockRemoteTensor : public ov::IRemoteTensor { + ov::AnyMap m_properties; + std::string m_dev_name; + ov::element::Type m_element_type; + ov::Shape m_shape; + +public: + 
MockRemoteTensor(const std::string& name, + const ov::AnyMap& props, + const ov::element::Type& type, + const ov::Shape& shape) + : m_properties(props), + m_dev_name(name), + m_element_type(type), + m_shape(shape) {} + + const ov::AnyMap& get_properties() const override { + return m_properties; + } + const std::string& get_device_name() const override { + return m_dev_name; + } + void set_shape(ov::Shape shape) override { + OPENVINO_NOT_IMPLEMENTED; + } + + const ov::element::Type& get_element_type() const override { + return m_element_type; + } + + const ov::Shape& get_shape() const override { + return m_shape; + } + + const ov::Strides& get_strides() const override { + OPENVINO_NOT_IMPLEMENTED; + } +}; + +class MockRemoteContext : public ov::IRemoteContext { + ov::AnyMap m_property = {{"IS_DEFAULT", true}}; + std::string m_dev_name; + +public: + MockRemoteContext(const std::string& dev_name) : m_dev_name(dev_name) {} + const std::string& get_device_name() const override { + return m_dev_name; + } + + const ov::AnyMap& get_property() const override { + return m_property; + } + + ov::SoPtr create_tensor(const ov::element::Type& type, + const ov::Shape& shape, + const ov::AnyMap& params = {}) override { + auto remote_tensor = std::make_shared(m_dev_name, m_property, type, shape); + return {remote_tensor, nullptr}; + } +}; + +class MockCustomRemoteContext : public ov::IRemoteContext { + ov::AnyMap m_property = {{"IS_DEFAULT", false}}; + std::string m_dev_name; + +public: + MockCustomRemoteContext(const std::string& dev_name) : m_dev_name(dev_name) {} + const std::string& get_device_name() const override { + return m_dev_name; + } + + const ov::AnyMap& get_property() const override { + return m_property; + } + + ov::SoPtr create_tensor(const ov::element::Type& type, + const ov::Shape& shape, + const ov::AnyMap& params = {}) override { + auto remote_tensor = std::make_shared(m_dev_name, m_property, type, shape); + return {remote_tensor, nullptr}; + } +}; + +class MockPluginBase : public ov::IPlugin { +public: + std::shared_ptr compile_model(const std::shared_ptr& model, + const ov::AnyMap& properties) const override { + OPENVINO_ASSERT(model); + if (!support_model(model, query_model(model, properties))) + OPENVINO_THROW("Unsupported model"); + + return std::make_shared(model, shared_from_this(), properties); + } + + std::shared_ptr compile_model(const std::string& model_path, + const ov::AnyMap& properties) const override { + OPENVINO_NOT_IMPLEMENTED; + } + + std::shared_ptr compile_model(const std::shared_ptr& model, + const ov::AnyMap& properties, + const ov::SoPtr& context) const override { + if (!support_model(model, query_model(model, properties))) + OPENVINO_THROW("Unsupported model"); + + return std::make_shared(model, shared_from_this(), properties, context); + } + + void set_property(const ov::AnyMap& properties) override { + OPENVINO_NOT_IMPLEMENTED; + } + + ov::Any get_property(const std::string& name, const ov::AnyMap& arguments) const override { + OPENVINO_NOT_IMPLEMENTED; + } + + ov::SoPtr create_context(const ov::AnyMap& remote_properties) const override { + OPENVINO_NOT_IMPLEMENTED; + } + + ov::SoPtr get_default_context(const ov::AnyMap& remote_properties) const override { + OPENVINO_NOT_IMPLEMENTED; + } + + std::shared_ptr import_model(std::istream& model, const ov::AnyMap& properties) const override { + std::string xmlString, xmlInOutString; + ov::Tensor weights; + + ov::pass::StreamSerialize::DataHeader hdr = {}; + model.read(reinterpret_cast(&hdr), sizeof hdr); + + // read 
CNNNetwork input/output precisions + model.seekg(hdr.custom_data_offset); + xmlInOutString.resize(hdr.custom_data_size); + model.read(const_cast(xmlInOutString.c_str()), hdr.custom_data_size); + + // read blob content + model.seekg(hdr.consts_offset); + if (hdr.consts_size) { + weights = ov::Tensor(ov::element::i8, ov::Shape{hdr.consts_size}); + char* data = static_cast(weights.data()); + model.read(data, hdr.consts_size); + } + + // read XML content + model.seekg(hdr.model_offset); + xmlString.resize(hdr.model_size); + model.read(const_cast(xmlString.c_str()), hdr.model_size); + + ov::Core core; + auto ov_model = core.read_model(xmlString, weights); + return compile_model(ov_model, properties); + } + + std::shared_ptr import_model(std::istream& model, + const ov::SoPtr& context, + const ov::AnyMap& properties) const override { + std::string xmlString, xmlInOutString; + ov::Tensor weights; + + ov::pass::StreamSerialize::DataHeader hdr = {}; + model.read(reinterpret_cast(&hdr), sizeof hdr); + + // read CNNNetwork input/output precisions + model.seekg(hdr.custom_data_offset); + xmlInOutString.resize(hdr.custom_data_size); + model.read(const_cast(xmlInOutString.c_str()), hdr.custom_data_size); + + // read blob content + model.seekg(hdr.consts_offset); + if (hdr.consts_size) { + weights = ov::Tensor(ov::element::i8, ov::Shape{hdr.consts_size}); + char* data = static_cast(weights.data()); + model.read(data, hdr.consts_size); + } + + // read XML content + model.seekg(hdr.model_offset); + xmlString.resize(hdr.model_size); + model.read(const_cast(xmlString.c_str()), hdr.model_size); + + ov::Core core; + auto ov_model = core.read_model(xmlString, weights); + return compile_model(ov_model, properties, context); + } + + ov::SupportedOpsMap query_model(const std::shared_ptr& model, + const ov::AnyMap& properties) const override { + OPENVINO_NOT_IMPLEMENTED; + } +}; + +class MockPluginSupportBatchAndContext : public MockPluginBase { +public: + ov::SupportedOpsMap query_model(const std::shared_ptr& model, + const ov::AnyMap& properties) const override { + OPENVINO_ASSERT(model); + + std::unordered_set supported_ops = {"Parameter", "Result", "Add", "Constant", "Reshape"}; + + ov::SupportedOpsMap res; + for (const auto& op : model->get_ordered_ops()) { + if (supported_ops.find(op->get_type_info().name) == supported_ops.end()) + continue; + res.emplace(op->get_friendly_name(), get_device_name()); + } + return res; + } + + ov::SoPtr create_context(const ov::AnyMap& remote_properties) const override { + if (remote_properties.find("CUSTOM_CTX") == remote_properties.end()) + return std::make_shared(get_device_name()); + return std::make_shared(get_device_name()); + } + + ov::SoPtr get_default_context(const ov::AnyMap& remote_properties) const override { + std::string device_name = get_device_name(); + if (remote_properties.find(ov::device::id.name()) != remote_properties.end()) + device_name = device_name + "." 
+ remote_properties.at(ov::device::id.name()).as(); + + return std::make_shared(device_name); + } + + void set_property(const ov::AnyMap& properties) override { + for (const auto& it : properties) { + if (it.first == ov::num_streams.name()) + num_streams = it.second.as(); + else if (it.first == ov::enable_profiling.name()) + m_profiling = it.second.as(); + else if (it.first == ov::hint::performance_mode.name()) + m_perf_hint = it.second.as(); + else if (it.first == ov::hint::num_requests.name()) + m_request = it.second.as(); + else if (it.first == ov::device::id.name()) + m_id = it.second.as(); + else if (it.first == ov::cache_dir.name()) + continue; + else + OPENVINO_THROW(get_device_name(), " set config: " + it.first); + } + } + + ov::Any get_property(const std::string& name, const ov::AnyMap& arguments) const override { + const std::vector roProperties{RO_property(ov::supported_properties.name()), + RO_property(ov::optimal_batch_size.name()), + RO_property(ov::device::capabilities.name()), + RO_property(ov::device::type.name()), + RO_property(ov::device::uuid.name()), + RO_property(ov::device::id.name()), + RO_property(ov::intel_gpu::memory_statistics.name())}; + // the whole config is RW before network is loaded. + const std::vector rwProperties{RW_property(ov::num_streams.name()), + RW_property(ov::enable_profiling.name()), + RW_property(ov::compilation_num_threads.name()), + RW_property(ov::hint::performance_mode.name()), + RW_property(ov::hint::num_requests.name())}; + if (name == ov::supported_properties) { + std::vector supportedProperties; + supportedProperties.reserve(roProperties.size() + rwProperties.size()); + supportedProperties.insert(supportedProperties.end(), roProperties.begin(), roProperties.end()); + supportedProperties.insert(supportedProperties.end(), rwProperties.begin(), rwProperties.end()); + + return decltype(ov::supported_properties)::value_type(supportedProperties); + } else if (name == ov::hint::num_requests.name()) { + return decltype(ov::hint::num_requests)::value_type(1); + } else if (name == ov::hint::performance_mode.name()) { + return decltype(ov::hint::performance_mode)::value_type(ov::hint::PerformanceMode::LATENCY); + } else if (name == ov::optimal_batch_size.name()) { + return decltype(ov::optimal_batch_size)::value_type(4); + } else if (name == ov::device::capabilities.name()) { + return decltype(ov::device::capabilities)::value_type( + {"FP32", "FP16", "BATCHED_BLOB", "BIN", "INT8", ov::device::capability::EXPORT_IMPORT}); + } else if (name == ov::device::type.name()) { + return decltype(ov::device::type)::value_type(ov::device::Type::INTEGRATED); + } else if (name == ov::loaded_from_cache.name()) { + return false; + } else if (name == ov::enable_profiling.name()) { + return decltype(ov::enable_profiling)::value_type{false}; + } else if (name == ov::streams::num.name()) { + return decltype(ov::streams::num)::value_type{2}; + } else if (name == ov::compilation_num_threads.name()) { + return decltype(ov::compilation_num_threads)::value_type{4}; + } else if (name == "SUPPORTED_CONFIG_KEYS") { // TODO: Remove this key + std::vector configs; + for (const auto& property : rwProperties) { + configs.emplace_back(property); + } + return configs; + } else if (name == "SUPPORTED_METRICS") { // TODO: Remove this key + std::vector configs; + for (const auto& property : roProperties) { + configs.emplace_back(property); + } + return configs; + } else if (name == ov::internal::supported_properties) { + return 
decltype(ov::internal::supported_properties)::value_type( + {ov::PropertyName{ov::internal::caching_properties.name(), ov::PropertyMutability::RO}}); + } else if (ov::internal::caching_properties == name) { + std::vector caching_properties = {ov::device::uuid, ov::device::id}; + return decltype(ov::internal::caching_properties)::value_type(caching_properties); + } else if (name == ov::device::uuid) { + ov::device::UUID uuid = {}; + return decltype(ov::device::uuid)::value_type{uuid}; + } else if (name == ov::device::id) { + return decltype(ov::device::id)::value_type{m_id}; + } else if (name == ov::loaded_from_cache.name()) { + return m_loaded_from_cache; + } else if (name == ov::intel_gpu::memory_statistics) { + return decltype(ov::intel_gpu::memory_statistics)::value_type{{}}; + } + OPENVINO_NOT_IMPLEMENTED; + } + +private: + int32_t num_streams{0}; + bool m_profiling = false; + bool m_loaded_from_cache{false}; + ov::hint::PerformanceMode m_perf_hint = ov::hint::PerformanceMode::THROUGHPUT; + uint32_t m_request = 0; + std::string m_id; +}; + +void ov::auto_plugin::tests::AutoFuncTests::reg_plugin(ov::Core& core, + std::shared_ptr& plugin, + const std::string& device_name, + const ov::AnyMap& properties) { + std::string libraryPath = get_mock_engine_path(); + if (!m_so) + m_so = ov::util::load_shared_object(libraryPath.c_str()); + plugin->set_device_name(device_name); + std::function injectProxyEngine = make_std_function(m_so, "InjectPlugin"); + + injectProxyEngine(plugin.get()); + core.register_plugin(ov::util::make_plugin_library_name(ov::test::utils::getExecutableDirectory(), + std::string("mock_engine") + IE_BUILD_POSTFIX), + device_name, + properties); + m_mock_plugins.emplace_back(plugin); +} + +// test +void ov::auto_plugin::tests::AutoFuncTests::register_plugin_mock_gpu(ov::Core& core, + const std::string& device_name, + const ov::AnyMap& properties) { + std::shared_ptr base_plugin = std::make_shared(); + reg_plugin(core, base_plugin, device_name, properties); +} + +class MockPlugin : public MockPluginBase { +public: + ov::SupportedOpsMap query_model(const std::shared_ptr& model, + const ov::AnyMap& properties) const override { + OPENVINO_ASSERT(model); + + std::unordered_set supported_ops = {"Parameter", "Result", "Add", "Constant", "Reshape"}; + + ov::SupportedOpsMap res; + for (const auto& op : model->get_ordered_ops()) { + if (supported_ops.find(op->get_type_info().name) == supported_ops.end()) + continue; + res.emplace(op->get_friendly_name(), get_device_name()); + } + return res; + } + + void set_property(const ov::AnyMap& properties) override { + for (const auto& it : properties) { + if (it.first == ov::num_streams.name()) + num_streams = it.second.as(); + else if (it.first == ov::enable_profiling.name()) + m_profiling = it.second.as(); + else if (it.first == ov::device::id.name()) + continue; + else if (it.first == ov::cache_dir.name()) + continue; + else + OPENVINO_THROW(get_device_name(), " set config: " + it.first); + } + } + + ov::Any get_property(const std::string& name, const ov::AnyMap& arguments) const override { + const std::vector roProperties{RO_property(ov::supported_properties.name()), + RO_property(ov::device::uuid.name()), + RO_property(ov::device::capabilities.name())}; + // the whole config is RW before network is loaded. 
+ const std::vector rwProperties{RW_property(ov::num_streams.name()), + RW_property(ov::enable_profiling.name()), + RW_property(ov::hint::performance_mode.name())}; + if (name == ov::supported_properties) { + std::vector supportedProperties; + supportedProperties.reserve(roProperties.size() + rwProperties.size()); + supportedProperties.insert(supportedProperties.end(), roProperties.begin(), roProperties.end()); + supportedProperties.insert(supportedProperties.end(), rwProperties.begin(), rwProperties.end()); + + return decltype(ov::supported_properties)::value_type(supportedProperties); + } else if (name == ov::loaded_from_cache.name()) { + return false; + } else if (name == ov::enable_profiling.name()) { + return decltype(ov::enable_profiling)::value_type{false}; + } else if (name == ov::streams::num.name()) { + return decltype(ov::streams::num)::value_type{2}; + } else if (name == "SUPPORTED_CONFIG_KEYS") { // TODO: Remove this key + std::vector configs; + for (const auto& property : rwProperties) { + configs.emplace_back(property); + } + return configs; + } else if (name == "SUPPORTED_METRICS") { // TODO: Remove this key + std::vector configs; + for (const auto& property : roProperties) { + configs.emplace_back(property); + } + return configs; + } else if (name == ov::internal::supported_properties) { + return decltype(ov::internal::supported_properties)::value_type( + {ov::PropertyName{ov::internal::caching_properties.name(), ov::PropertyMutability::RO}}); + } else if (name == ov::device::capabilities) { + std::vector capabilities; + capabilities.push_back(ov::device::capability::EXPORT_IMPORT); + return decltype(ov::device::capabilities)::value_type(capabilities); + } else if (ov::internal::caching_properties == name) { + std::vector caching_properties = {ov::device::uuid}; + return decltype(ov::internal::caching_properties)::value_type(caching_properties); + } else if (name == ov::device::uuid) { + ov::device::UUID uuid = {}; + return decltype(ov::device::uuid)::value_type{uuid}; + } else if (name == ov::loaded_from_cache.name()) { + return m_loaded_from_cache; + } + OPENVINO_NOT_IMPLEMENTED; + } + +private: + int32_t num_streams{0}; + bool m_profiling = false; + bool m_loaded_from_cache{false}; +}; + +void ov::auto_plugin::tests::AutoFuncTests::register_plugin_mock_cpu(ov::Core& core, + const std::string& device_name, + const ov::AnyMap& properties) { + std::shared_ptr base_plugin = std::make_shared(); + + reg_plugin(core, base_plugin, device_name, properties); +} + +void ov::auto_plugin::tests::AutoFuncTests::register_plugin_mock_gpu_compile_slower(ov::Core& core, + const std::string& device_name, + const ov::AnyMap& properties) { + class MockPluginCompileSlower : public MockPluginSupportBatchAndContext { + public: + std::shared_ptr compile_model(const std::shared_ptr& model, + const ov::AnyMap& properties) const override { + OPENVINO_ASSERT(model); + if (!support_model(model, query_model(model, properties))) + OPENVINO_THROW("Unsupported model"); + std::this_thread::sleep_for(std::chrono::milliseconds(1000)); // add delay for test + return std::make_shared(model, shared_from_this(), properties); + } + std::shared_ptr compile_model(const std::shared_ptr& model, + const ov::AnyMap& properties, + const ov::SoPtr& context) const override { + if (!support_model(model, query_model(model, properties))) + OPENVINO_THROW("Unsupported model"); + std::this_thread::sleep_for(std::chrono::milliseconds(1000)); // add delay for test + return std::make_shared(model, shared_from_this(), 
properties, context); + } + }; + + std::shared_ptr base_plugin = std::make_shared(); + reg_plugin(core, base_plugin, device_name, properties); +} + +void ov::auto_plugin::tests::AutoFuncTests::register_plugin_mock_cpu_compile_slower(ov::Core& core, + const std::string& device_name, + const ov::AnyMap& properties) { + class MockCPUPluginCompileSlower : public MockPlugin { + public: + std::shared_ptr compile_model(const std::shared_ptr& model, + const ov::AnyMap& properties) const override { + OPENVINO_ASSERT(model); + if (!support_model(model, query_model(model, properties))) + OPENVINO_THROW("Unsupported model"); + std::this_thread::sleep_for(std::chrono::milliseconds(1000)); // add delay for test + return std::make_shared(model, shared_from_this(), properties); + } + std::shared_ptr compile_model(const std::shared_ptr& model, + const ov::AnyMap& properties, + const ov::SoPtr& context) const override { + if (!support_model(model, query_model(model, properties))) + OPENVINO_THROW("Unsupported model"); + std::this_thread::sleep_for(std::chrono::milliseconds(1000)); // add delay for test + return std::make_shared(model, shared_from_this(), properties, context); + } + }; + + std::shared_ptr base_plugin = std::make_shared(); + reg_plugin(core, base_plugin, device_name, properties); +} \ No newline at end of file diff --git a/src/plugins/auto/tests/functional/behavior/auto_func_test.hpp b/src/plugins/auto/tests/functional/behavior/auto_func_test.hpp new file mode 100644 index 00000000000000..711355315b4516 --- /dev/null +++ b/src/plugins/auto/tests/functional/behavior/auto_func_test.hpp @@ -0,0 +1,133 @@ +// Copyright (C) 2018-2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#pragma once + +#include +#include + +#include +#include + +#include "openvino/runtime/auto/properties.hpp" +#include "openvino/runtime/core.hpp" +#include "openvino/runtime/iplugin.hpp" + +namespace ov { +namespace auto_plugin { +namespace tests { + +#define ASSERT_THROW_WITH_MESSAGE(code, expected_exception, expected_message) \ + do { \ + try { \ + { code; } \ + FAIL() << "no exception occured" << std::endl; \ + } catch (const expected_exception& e) { \ + EXPECT_THAT(e.what(), testing::HasSubstr(expected_message)); \ + } catch (const std::exception& e) { \ + FAIL() << "an unexpected exception occured: " << e.what() << std::endl; \ + } catch (...) 
{ \ + FAIL() << "an unknown exception occured" << std::endl; \ + } \ + } while (0); + +class PluginRemoteTensor : public ov::RemoteTensor { +public: + /** + * @brief Checks that type defined runtime parameters are presented in remote object + * @param tensor a tensor to check + */ + static void type_check(const Tensor& tensor) { + RemoteTensor::type_check(tensor, {{"IS_DEFAULT", {}}}); + } + + bool is_default() { + return get_params().at("IS_DEFAULT").as(); + } +}; + +class PluginRemoteContext : public ov::RemoteContext { +public: + // Needed to make create_tensor overloads from base class visible for user + using RemoteContext::create_host_tensor; + using RemoteContext::create_tensor; + /** + * @brief Checks that type defined runtime parameters are presented in remote object + * @param remote_context A remote context to check + */ + static void type_check(const RemoteContext& remote_context) { + RemoteContext::type_check(remote_context, {{"IS_DEFAULT", {}}}); + } + + bool is_default() { + return get_params().at("IS_DEFAULT").as(); + } +}; + +class AutoFuncTests : public ::testing::Test { +public: + ov::Core core; + + void SetUp() override; + void TearDown() override; + + ov::Tensor create_and_fill_tensor(const ov::element::Type& type, const ov::Shape& shape); + +protected: + void register_plugin_mock_cpu(ov::Core& core, const std::string& device_name, const ov::AnyMap& properties); + void register_plugin_mock_cpu_compile_slower(ov::Core& core, + const std::string& device_name, + const ov::AnyMap& properties); + void register_plugin_mock_gpu(ov::Core& core, const std::string& device_name, const ov::AnyMap& properties); + void register_plugin_mock_gpu_compile_slower(ov::Core& core, + const std::string& device_name, + const ov::AnyMap& properties); + std::shared_ptr model_can_batch; + std::shared_ptr model_cannot_batch; + std::string cache_path; + +private: + template + ov::Tensor create_tensor(const ov::element::Type& type, const ov::Shape& shape) { + ov::Tensor tensor(type, shape); + T* data = tensor.data(); + for (size_t i = 0; i < tensor.get_size(); i++) { + data[i] = static_cast(i); + } + return tensor; + } + std::vector> m_mock_plugins; + std::shared_ptr m_so; + + void reg_plugin(ov::Core& core, + std::shared_ptr& plugin, + const std::string& device_name, + const ov::AnyMap& properties); + std::shared_ptr create_model_with_batch_possible(); + std::shared_ptr create_model_with_reshape(); +}; + +class ThreadingTest { +public: + static void runParallel(std::function func, + const unsigned int iterations = 100, + const unsigned int threadsNum = 8) { + std::vector threads(threadsNum); + + for (auto& thread : threads) { + thread = std::thread([&]() { + for (unsigned int i = 0; i < iterations; ++i) { + func(); + } + }); + } + + for (auto& thread : threads) { + if (thread.joinable()) + thread.join(); + } + } +}; +} // namespace tests +} // namespace auto_plugin +} // namespace ov \ No newline at end of file diff --git a/src/plugins/auto/tests/functional/behavior/caching_test.cpp b/src/plugins/auto/tests/functional/behavior/caching_test.cpp new file mode 100644 index 00000000000000..ab34fe7b1a83f3 --- /dev/null +++ b/src/plugins/auto/tests/functional/behavior/caching_test.cpp @@ -0,0 +1,60 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "auto_func_test.hpp" +#include "common_test_utils/include/common_test_utils/file_utils.hpp" + +using namespace ov::auto_plugin::tests; + +TEST_F(AutoFuncTests, compiled_with_cache_enabled) { + 
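+    // Expected cache behaviour checked below: the first AUTO compile over the two mock
+    // devices leaves one blob per device in cache_path, a recompile with the same
+    // cache_dir reuses them, and changing the MOCK_GPU device id forces a new blob.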
core.set_property(ov::cache_dir(cache_path)); + core.set_property("MOCK_GPU", ov::device::id("test")); // device id for cache property distinguish with MOCK_CPU + auto compiled_model = + core.compile_model(model_cannot_batch, + "AUTO", + {ov::device::priorities("MOCK_GPU", "MOCK_CPU"), + ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)}); + ASSERT_EQ(ov::test::utils::listFilesWithExt(cache_path, "blob").size(), 2); + compiled_model = core.compile_model(model_cannot_batch, + "AUTO", + {ov::device::priorities("MOCK_GPU", "MOCK_CPU"), + ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)}); + // can reuse the cache, no extra cache generated + ASSERT_EQ(ov::test::utils::listFilesWithExt(cache_path, "blob").size(), 2); + core.set_property("MOCK_GPU", ov::device::id("test_regenerate")); + compiled_model = core.compile_model(model_cannot_batch, + "AUTO", + {ov::device::priorities("MOCK_GPU", "MOCK_CPU"), + ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)}); + // new cache file expected + ASSERT_EQ(ov::test::utils::listFilesWithExt(cache_path, "blob").size(), 3); + core.set_property(ov::cache_dir("")); +} + +TEST_F(AutoFuncTests, compiled_with_cache_enabled_batch_enabled) { +#ifdef ENABLE_AUTO_BATCH + core.set_property(ov::cache_dir(cache_path)); + core.set_property("MOCK_GPU", ov::device::id("test")); // device id for cache property distinguish with MOCK_CPU + auto compiled_model = + core.compile_model(model_can_batch, + "AUTO", + {ov::device::priorities("MOCK_GPU", "MOCK_CPU"), + ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)}); + ASSERT_EQ(ov::test::utils::listFilesWithExt(cache_path, "blob").size(), 3); + compiled_model = core.compile_model(model_can_batch, + "AUTO", + {ov::device::priorities("MOCK_GPU", "MOCK_CPU"), + ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)}); + // can reuse the cache, no extra cache generated + ASSERT_EQ(ov::test::utils::listFilesWithExt(cache_path, "blob").size(), 3); + core.set_property("MOCK_GPU", ov::device::id("test_regenerate")); + compiled_model = core.compile_model(model_can_batch, + "AUTO", + {ov::device::priorities("MOCK_GPU", "MOCK_CPU"), + ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)}); + // new cache file expected + ASSERT_EQ(ov::test::utils::listFilesWithExt(cache_path, "blob").size(), 5); + core.set_property(ov::cache_dir("")); +#endif +} \ No newline at end of file diff --git a/src/plugins/auto/tests/functional/behavior/callback_test.cpp b/src/plugins/auto/tests/functional/behavior/callback_test.cpp new file mode 100644 index 00000000000000..5219677b99a5cb --- /dev/null +++ b/src/plugins/auto/tests/functional/behavior/callback_test.cpp @@ -0,0 +1,116 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include + +#include "auto_func_test.hpp" + +using namespace ov::auto_plugin::tests; + +TEST_F(AutoFuncTests, can_infer_with_cpu_help) { + ov::CompiledModel compiled_model; + ASSERT_NO_THROW(compiled_model = + core.compile_model(model_can_batch, "AUTO", {ov::device::priorities("MOCK_GPU", "MOCK_CPU")})); + auto req = compiled_model.create_infer_request(); + bool is_called = false; + ASSERT_NO_THROW(req.set_callback([&](std::exception_ptr exception_ptr) { + // HSD_1805940120: Wait on starting callback return HDDL_ERROR_INVAL_TASK_HANDLE + ASSERT_EQ(exception_ptr, nullptr); + is_called = true; + })); + 
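+    // The callback only flips is_called; start_async()/wait() below are expected to run
+    // it exactly once, which the final ASSERT_TRUE(is_called) verifies.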
ASSERT_NO_THROW(req.start_async()); + ASSERT_NO_THROW(req.wait()); + ASSERT_TRUE(is_called); +} + +TEST_F(AutoFuncTests, impl_does_not_copy_callback) { + ov::CompiledModel compiled_model; + ASSERT_NO_THROW(compiled_model = + core.compile_model(model_can_batch, "AUTO", {ov::device::priorities("MOCK_GPU", "MOCK_CPU")})); + ov::InferRequest req; + ASSERT_NO_THROW(req = compiled_model.create_infer_request()); + { + auto somePtr = std::make_shared(42); + ASSERT_NO_THROW(req.set_callback([somePtr](std::exception_ptr exception_ptr) { + ASSERT_EQ(nullptr, exception_ptr); + ASSERT_EQ(1, somePtr.use_count()); + })); + } + ASSERT_NO_THROW(req.start_async()); + ASSERT_NO_THROW(req.wait()); +} + +TEST_F(AutoFuncTests, return_result_not_ready) { + ov::CompiledModel compiled_model; + ASSERT_NO_THROW(compiled_model = + core.compile_model(model_can_batch, "AUTO", {ov::device::priorities("MOCK_GPU", "MOCK_CPU")})); + ov::InferRequest req; + ASSERT_NO_THROW(req = compiled_model.create_infer_request()); + std::promise callbackTimeStamp; + auto callbackTimeStampFuture = callbackTimeStamp.get_future(); + // add a callback to the request and capture the timestamp + ASSERT_NO_THROW(req.set_callback([&](std::exception_ptr exception_ptr) { + if (exception_ptr) { + callbackTimeStamp.set_exception(exception_ptr); + } else { + callbackTimeStamp.set_value(std::chrono::system_clock::now()); + } + })); + ASSERT_NO_THROW(req.start_async()); + bool ready = false; + ASSERT_NO_THROW(ready = req.wait_for({})); + // get timestamp taken AFTER return from the wait(STATUS_ONLY) + const auto afterWaitTimeStamp = std::chrono::system_clock::now(); + if (afterWaitTimeStamp < callbackTimeStampFuture.get()) { + ASSERT_FALSE(ready); + } + ASSERT_NO_THROW(req.wait()); +} + +TEST_F(AutoFuncTests, rethrow_if_callback_throw) { + ov::CompiledModel compiled_model; + ASSERT_NO_THROW(compiled_model = + core.compile_model(model_can_batch, "AUTO", {ov::device::priorities("MOCK_GPU", "MOCK_CPU")})); + ov::InferRequest req; + ASSERT_NO_THROW(req = compiled_model.create_infer_request()); + ASSERT_NO_THROW(req.set_callback([](std::exception_ptr) { + OPENVINO_THROW("Throw"); + })); + ASSERT_NO_THROW(req.start_async()); + ASSERT_THROW(req.wait(), ov::Exception); +} + +TEST_F(AutoFuncTests, can_start_several_async_inside_completion_callback_with_safedtor) { + const int NUM_ITER = 10; + struct TestUserData { + std::atomic numIter = {0}; + std::promise promise; + }; + TestUserData data; + ov::CompiledModel compiled_model; + ASSERT_NO_THROW(compiled_model = + core.compile_model(model_can_batch, "AUTO", {ov::device::priorities("MOCK_GPU", "MOCK_CPU")})); + ov::InferRequest req; + ASSERT_NO_THROW(req = compiled_model.create_infer_request()); + ASSERT_NO_THROW(req.set_callback([&](std::exception_ptr exception_ptr) { + if (exception_ptr) { + data.promise.set_exception(exception_ptr); + } else { + if (data.numIter.fetch_add(1) != NUM_ITER) { + req.start_async(); + } else { + data.promise.set_value(true); + } + } + })); + auto future = data.promise.get_future(); + ASSERT_NO_THROW(req.start_async()); + ASSERT_NO_THROW(req.wait()); + future.wait(); + auto callbackStatus = future.get(); + ASSERT_TRUE(callbackStatus); + auto dataNumIter = data.numIter - 1; + ASSERT_EQ(NUM_ITER, dataNumIter); +} \ No newline at end of file diff --git a/src/plugins/auto/tests/functional/behavior/infer_consistency_test.cpp b/src/plugins/auto/tests/functional/behavior/infer_consistency_test.cpp new file mode 100644 index 00000000000000..2d71b8ee7d16b0 --- /dev/null +++ 
b/src/plugins/auto/tests/functional/behavior/infer_consistency_test.cpp @@ -0,0 +1,25 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "infer_consistency_test.hpp" + +using namespace ov::auto_plugin::tests; +namespace { +auto props = []() { + return std::vector{{ov::device::priorities("MOCK_GPU", "MOCK_CPU")}, + {ov::device::priorities("MOCK_GPU")}, + {ov::device::priorities("MOCK_CPU", "MOCK_GPU")}}; +}; + +const std::vector get_vs_set{true, false}; + +const std::vector target_device{"AUTO", "MULTI"}; + +INSTANTIATE_TEST_SUITE_P(AutoFuncTests, + Consistency_Test, + ::testing::Combine(::testing::ValuesIn(target_device), + ::testing::ValuesIn(get_vs_set), + ::testing::ValuesIn(props())), + Consistency_Test::getTestCaseName); +} // namespace diff --git a/src/plugins/auto/tests/functional/behavior/infer_consistency_test.hpp b/src/plugins/auto/tests/functional/behavior/infer_consistency_test.hpp new file mode 100644 index 00000000000000..6243a45b16e366 --- /dev/null +++ b/src/plugins/auto/tests/functional/behavior/infer_consistency_test.hpp @@ -0,0 +1,105 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#include +#include +#include +#include + +#include "auto_func_test.hpp" +#include "common_test_utils/include/common_test_utils/ov_tensor_utils.hpp" +#include "common_test_utils/test_common.hpp" +#include "ov_models/subgraph_builders.hpp" + +namespace ov { +namespace auto_plugin { +namespace tests { + +using consistency_test_param = std::tuple; // property + +class Consistency_Test : public AutoFuncTests, public testing::WithParamInterface { + void SetUp() override { + AutoFuncTests::SetUp(); + std::tie(target_device, use_get_tensor, property) = this->GetParam(); + }; + +public: + static std::string getTestCaseName(const testing::TestParamInfo& obj) { + ov::AnyMap property; + bool use_get_tensor; + std::string target_device; + std::tie(target_device, use_get_tensor, property) = obj.param; + std::ostringstream result; + result << "target_device=" << target_device << "_"; + result << std::string(use_get_tensor ? 
"_get_blob" : "_set_blob") << "_"; + if (!property.empty()) { + for (auto& iter : property) { + result << "priority=" << iter.first << "_" << iter.second.as(); + } + } + return result.str(); + } + +protected: + bool use_get_tensor; + ov::AnyMap property; + std::string target_device; + + void run() { + std::vector irs; + std::vector> ref; + std::map, ov::Tensor> input_data; + + auto compiled_model = core.compile_model(model_cannot_batch, target_device, property); + auto inputs = compiled_model.inputs(); + auto outputs = compiled_model.outputs(); + auto num_requests = compiled_model.get_property(ov::optimal_number_of_infer_requests); + for (size_t j = 0; j < num_requests; j++) { + auto inf_req = compiled_model.create_infer_request(); + irs.push_back(inf_req); + for (auto& iter : inputs) { + auto tensor = ov::test::utils::create_and_fill_tensor(iter.get_element_type(), iter.get_shape()); + if (use_get_tensor) + memcpy(reinterpret_cast(inf_req.get_tensor(iter).data()), + reinterpret_cast(tensor.data()), + tensor.get_byte_size()); + else + inf_req.set_tensor(iter, tensor); + auto node_ptr = iter.get_node_shared_ptr(); + input_data.insert({std::const_pointer_cast(node_ptr), tensor}); + } + for (auto& iter : outputs) { + if (!use_get_tensor) { + auto tensor = ov::Tensor(iter.get_element_type(), iter.get_shape()); + inf_req.set_tensor(iter, tensor); + } + } + auto refOutData = ngraph::helpers::interpretFunction(model_cannot_batch, input_data); + ref.push_back(refOutData); + } + for (size_t i = 0; i < 50; i++) { + for (auto ir : irs) { + ir.start_async(); + } + + for (auto ir : irs) { + ir.wait(); + } + } + for (size_t i = 0; i < irs.size(); ++i) { + for (auto& iter : outputs) { + ov::test::utils::compare(irs[i].get_tensor(iter), ref[i][0]); + } + } + } +}; + +TEST_P(Consistency_Test, infer_consistency_test) { + run(); +} + +} // namespace tests +} // namespace auto_plugin +} // namespace ov diff --git a/src/plugins/auto/tests/functional/behavior/infer_multi_threading_tests.cpp b/src/plugins/auto/tests/functional/behavior/infer_multi_threading_tests.cpp new file mode 100644 index 00000000000000..4065c5abed7b20 --- /dev/null +++ b/src/plugins/auto/tests/functional/behavior/infer_multi_threading_tests.cpp @@ -0,0 +1,114 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include "auto_func_test.hpp" + +using namespace ov::auto_plugin::tests; + +TEST_F(AutoFuncTests, can_run_3syncrequests_consistently_from_threads) { + ov::CompiledModel compiled_model; + ASSERT_NO_THROW(compiled_model = core.compile_model( + model_can_batch, + "AUTO", + {ov::device::priorities("MOCK_GPU", "MOCK_CPU"), + ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)})); + ov::InferRequest req1, req2, req3; + ASSERT_NO_THROW(req1 = compiled_model.create_infer_request()); + ASSERT_NO_THROW(req2 = compiled_model.create_infer_request()); + ASSERT_NO_THROW(req3 = compiled_model.create_infer_request()); + auto f1 = std::async(std::launch::async, [&] { + req1.infer(); + }); + auto f2 = std::async(std::launch::async, [&] { + req2.infer(); + }); + auto f3 = std::async(std::launch::async, [&] { + req3.infer(); + }); + + f1.wait(); + f2.wait(); + f3.wait(); + + ASSERT_NO_THROW(f1.get()); + ASSERT_NO_THROW(f2.get()); + ASSERT_NO_THROW(f3.get()); +} + +TEST_F(AutoFuncTests, can_run_3asyncrequests_consistently_from_threads_without_wait) { + ov::CompiledModel compiled_model; + ASSERT_NO_THROW(compiled_model = core.compile_model( + model_can_batch, + "AUTO", + 
{ov::device::priorities("MOCK_GPU", "MOCK_CPU"), + ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)})); + ov::InferRequest req1, req2, req3; + ASSERT_NO_THROW(req1 = compiled_model.create_infer_request()); + ASSERT_NO_THROW(req2 = compiled_model.create_infer_request()); + ASSERT_NO_THROW(req3 = compiled_model.create_infer_request()); + ASSERT_NO_THROW(req1.infer()); + ASSERT_NO_THROW(req2.infer()); + ASSERT_NO_THROW(req3.infer()); + + auto f1 = std::async(std::launch::async, [&] { + req1.start_async(); + }); + auto f2 = std::async(std::launch::async, [&] { + req2.start_async(); + }); + auto f3 = std::async(std::launch::async, [&] { + req3.start_async(); + }); + + f1.wait(); + f2.wait(); + f3.wait(); + + ASSERT_NO_THROW(f1.get()); + ASSERT_NO_THROW(f2.get()); + ASSERT_NO_THROW(f3.get()); +} + +TEST_F(AutoFuncTests, can_run_3asyncrequests_consistently_with_wait) { + ov::CompiledModel compiled_model; + ASSERT_NO_THROW(compiled_model = core.compile_model( + model_can_batch, + "AUTO", + {ov::device::priorities("MOCK_GPU", "MOCK_CPU"), + ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)})); + ov::InferRequest req1, req2, req3; + ASSERT_NO_THROW(req1 = compiled_model.create_infer_request()); + ASSERT_NO_THROW(req2 = compiled_model.create_infer_request()); + ASSERT_NO_THROW(req3 = compiled_model.create_infer_request()); + req1.start_async(); + ASSERT_NO_THROW(req1.wait()); + + req2.start_async(); + ASSERT_NO_THROW(req2.wait()); + + req3.start_async(); + ASSERT_NO_THROW(req3.wait()); +} + +TEST_F(AutoFuncTests, can_run_3asyncrequests_parallel_with_wait) { + ov::CompiledModel compiled_model; + ASSERT_NO_THROW(compiled_model = core.compile_model( + model_can_batch, + "AUTO", + {ov::device::priorities("MOCK_GPU", "MOCK_CPU"), + ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)})); + ov::InferRequest req1, req2, req3; + ASSERT_NO_THROW(req1 = compiled_model.create_infer_request()); + ASSERT_NO_THROW(req2 = compiled_model.create_infer_request()); + ASSERT_NO_THROW(req3 = compiled_model.create_infer_request()); + req1.start_async(); + req2.start_async(); + req3.start_async(); + + ASSERT_NO_THROW(req2.wait()); + ASSERT_NO_THROW(req1.wait()); + ASSERT_NO_THROW(req3.wait()); +} diff --git a/src/plugins/auto/tests/functional/behavior/io_tensor.cpp b/src/plugins/auto/tests/functional/behavior/io_tensor.cpp new file mode 100644 index 00000000000000..770fe04444a708 --- /dev/null +++ b/src/plugins/auto/tests/functional/behavior/io_tensor.cpp @@ -0,0 +1,172 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "io_tensor.hpp" + +#include "common_test_utils/include/common_test_utils/ov_tensor_utils.hpp" + +using namespace ov::auto_plugin::tests; + +void InferRequest_IOTensor_Test::SetUp() { + AutoFuncTests::SetUp(); + std::tie(target_device, property) = this->GetParam(); + auto compiled_model = + core.compile_model(model_cannot_batch, "AUTO", {ov::device::priorities("MOCK_GPU", "MOCK_CPU")}); + input = compiled_model.input(); + output = compiled_model.output(); +} + +void InferRequest_IOTensor_Test::TearDown() { + input = {}; + output = {}; + AutoFuncTests::TearDown(); +} + +TEST_P(InferRequest_IOTensor_Test, fail_to_set_nullptr_for_input) { + auto compiled_model = core.compile_model(model_cannot_batch, target_device, property); + req = compiled_model.create_infer_request(); + ASSERT_THROW(req.set_tensor(input, {}), ov::Exception); +} + +TEST_P(InferRequest_IOTensor_Test, 
fail_to_set_nullptr_for_output) { + auto compiled_model = core.compile_model(model_cannot_batch, target_device, property); + req = compiled_model.create_infer_request(); + ASSERT_THROW(req.set_tensor(output, {}), ov::Exception); +} + +TEST_P(InferRequest_IOTensor_Test, can_set_and_get_input) { + auto compiled_model = core.compile_model(model_cannot_batch, target_device, property); + req = compiled_model.create_infer_request(); + auto tensor = ov::test::utils::create_and_fill_tensor(input.get_element_type(), input.get_shape()); + ASSERT_NO_THROW(req.set_tensor(input, tensor)); + ov::Tensor actual_tensor; + ASSERT_NO_THROW(actual_tensor = req.get_tensor(input)); + + ASSERT_TRUE(actual_tensor); + ASSERT_NE(nullptr, actual_tensor.data()); + ASSERT_EQ(tensor.data(), actual_tensor.data()); + ASSERT_EQ(input.get_element_type(), actual_tensor.get_element_type()); + ASSERT_EQ(input.get_shape(), actual_tensor.get_shape()); +} + +TEST_P(InferRequest_IOTensor_Test, fail_to_set_tensor_with_incorrect_name) { + auto compiled_model = core.compile_model(model_cannot_batch, target_device, property); + req = compiled_model.create_infer_request(); + auto tensor = ov::test::utils::create_and_fill_tensor(input.get_element_type(), input.get_shape()); + ASSERT_THROW(req.set_tensor("incorrect_input", tensor), ov::Exception); +} + +TEST_P(InferRequest_IOTensor_Test, fail_input_set_size_incorrect) { + auto compiled_model = core.compile_model(model_cannot_batch, target_device, property); + req = compiled_model.create_infer_request(); + auto shape = input.get_shape(); + shape[0] *= 2; + auto tensor = ov::test::utils::create_and_fill_tensor(input.get_element_type(), shape); + ASSERT_THROW(req.set_tensor(input, tensor), ov::Exception); +} + +TEST_P(InferRequest_IOTensor_Test, fail_output_set_size_incorrect) { + auto compiled_model = core.compile_model(model_cannot_batch, target_device, property); + req = compiled_model.create_infer_request(); + auto shape = output.get_shape(); + shape[0] *= 2; + auto tensor = ov::test::utils::create_and_fill_tensor(output.get_element_type(), shape); + ASSERT_THROW(req.set_tensor(output, tensor), ov::Exception); +} + +TEST_P(InferRequest_IOTensor_Test, second_call_get_input) { + auto compiled_model = core.compile_model(model_cannot_batch, target_device, property); + req = compiled_model.create_infer_request(); + ov::Tensor tensor1, tensor2; + ASSERT_NO_THROW(tensor1 = req.get_tensor(input)); + ASSERT_NO_THROW(tensor2 = req.get_tensor(input)); + ASSERT_EQ(tensor1.data(), tensor2.data()); +} + +TEST_P(InferRequest_IOTensor_Test, second_call_get_output) { + auto compiled_model = core.compile_model(model_cannot_batch, target_device, property); + req = compiled_model.create_infer_request(); + ov::Tensor tensor1, tensor2; + ASSERT_NO_THROW(tensor1 = req.get_tensor(output)); + ASSERT_NO_THROW(tensor2 = req.get_tensor(output)); + ASSERT_EQ(tensor1.data(), tensor2.data()); +} + +TEST_P(InferRequest_IOTensor_Test, second_call_get_input_after_async) { + auto compiled_model = core.compile_model(model_cannot_batch, target_device, property); + req = compiled_model.create_infer_request(); + ov::Tensor tensor1, tensor2; + ASSERT_NO_THROW(req.infer()); + ASSERT_NO_THROW(tensor1 = req.get_tensor(input)); + ASSERT_NO_THROW(req.start_async()); + ASSERT_NO_THROW(req.wait()); + ASSERT_NO_THROW(tensor2 = req.get_tensor(input)); + ASSERT_EQ(tensor1.data(), tensor2.data()); +} + +TEST_P(InferRequest_IOTensor_Test, second_call_get_output_after_async) { + auto compiled_model = 
core.compile_model(model_cannot_batch, target_device, property); + req = compiled_model.create_infer_request(); + ov::Tensor tensor1, tensor2; + ASSERT_NO_THROW(req.infer()); + ASSERT_NO_THROW(tensor1 = req.get_tensor(output)); + ASSERT_NO_THROW(req.start_async()); + ASSERT_NO_THROW(req.wait()); + ASSERT_NO_THROW(tensor2 = req.get_tensor(output)); + ASSERT_EQ(tensor1.data(), tensor2.data()); +} + +TEST_P(InferRequest_IOTensor_Test, can_infer_with_set_tensor) { + auto compiled_model = core.compile_model(model_cannot_batch, target_device, property); + req = compiled_model.create_infer_request(); + auto input_tensor = ov::test::utils::create_and_fill_tensor(input.get_element_type(), input.get_shape()); + ASSERT_NO_THROW(req.set_tensor(input, input_tensor)); + auto output_tensor = ov::test::utils::create_and_fill_tensor(output.get_element_type(), output.get_shape()); + ASSERT_NO_THROW(req.set_tensor(output, output_tensor)); + ASSERT_NO_THROW(req.infer()); + + auto actual_input_tensor = req.get_tensor(input); + ASSERT_EQ(actual_input_tensor.data(), input_tensor.data()); + auto actual_output_tensor = req.get_tensor(output); + ASSERT_EQ(actual_output_tensor.data(), output_tensor.data()); +} + +TEST_P(InferRequest_IOTensor_Test, can_infer_after_io_realloc) { + auto compiled_model = core.compile_model(model_cannot_batch, target_device, property); + req = compiled_model.create_infer_request(); + ov::Tensor input_tensor, output_tensor; + auto in_shape = input.get_shape(); + auto out_shape = output.get_shape(); + + // imitates blob reallocation + ASSERT_NO_THROW(input_tensor = req.get_tensor(input)); + ASSERT_NO_THROW(input_tensor.set_shape({5, 5, 5, 5})); + ASSERT_NO_THROW(input_tensor.set_shape(in_shape)); + + ASSERT_NO_THROW(output_tensor = req.get_tensor(output)); + ASSERT_NO_THROW(output_tensor.set_shape({20, 20, 20, 20})); + ASSERT_NO_THROW(output_tensor.set_shape(out_shape)); + + ASSERT_NO_THROW(req.infer()); + ASSERT_NO_THROW(req.start_async()); + ASSERT_NO_THROW(req.wait()); + ASSERT_NO_THROW(req.get_tensor(output)); +} +namespace { +auto props = []() { + return std::vector{{ov::device::priorities("MOCK_GPU", "MOCK_CPU")}, + {ov::device::priorities("MOCK_GPU")}, + {ov::device::priorities("MOCK_CPU", "MOCK_GPU")}}; +}; + +INSTANTIATE_TEST_SUITE_P(AutoFuncTests, + InferRequest_IOTensor_Test, + ::testing::Combine(::testing::Values("AUTO"), ::testing::ValuesIn(props())), + InferRequest_IOTensor_Test::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(AutoFuncTestsCumu, + InferRequest_IOTensor_Test, + ::testing::Combine(::testing::Values("MULTI"), ::testing::ValuesIn(props())), + InferRequest_IOTensor_Test::getTestCaseName); +} // namespace \ No newline at end of file diff --git a/src/plugins/auto/tests/functional/behavior/io_tensor.hpp b/src/plugins/auto/tests/functional/behavior/io_tensor.hpp new file mode 100644 index 00000000000000..c4e000395f3eac --- /dev/null +++ b/src/plugins/auto/tests/functional/behavior/io_tensor.hpp @@ -0,0 +1,51 @@ +// Copyright (C) 2018-2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#pragma once + +#include +#include + +#include +#include + +#include "auto_func_test.hpp" +#include "openvino/runtime/auto/properties.hpp" +#include "openvino/runtime/core.hpp" +#include "openvino/runtime/iplugin.hpp" + +namespace ov { +namespace auto_plugin { +namespace tests { + +using test_params = std::tuple; + +class InferRequest_IOTensor_Test : public AutoFuncTests, public ::testing::WithParamInterface { +public: + static std::string 
getTestCaseName(testing::TestParamInfo obj) { + std::string target_device; + ov::AnyMap configuration; + std::tie(target_device, configuration) = obj.param; + std::ostringstream result; + result << "target_device=" << target_device << "_"; + if (!configuration.empty()) { + for (auto& iter : configuration) { + result << "priority=" << iter.first << "_" << iter.second.as(); + } + } + return result.str(); + } + + void SetUp() override; + void TearDown() override; + +protected: + std::string target_device; + ov::InferRequest req; + ov::Output input; + ov::Output output; + ov::AnyMap property; +}; +} // namespace tests +} // namespace auto_plugin +} // namespace ov \ No newline at end of file diff --git a/src/plugins/auto/tests/functional/behavior/life_time_batch_enabled_test.cpp b/src/plugins/auto/tests/functional/behavior/life_time_batch_enabled_test.cpp new file mode 100644 index 00000000000000..ba358be578943a --- /dev/null +++ b/src/plugins/auto/tests/functional/behavior/life_time_batch_enabled_test.cpp @@ -0,0 +1,57 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "auto_func_test.hpp" +#include "common_test_utils/include/common_test_utils/file_utils.hpp" +#include "openvino/runtime/make_tensor.hpp" + +using namespace ov::auto_plugin::tests; + +TEST_F(AutoFuncTests, tensor_life_time_with_batch_model) { + auto gpu_compiled_model = core.compile_model(model_can_batch, "MOCK_GPU"); + auto gpu_request = gpu_compiled_model.create_infer_request(); + auto input = gpu_compiled_model.input(); + auto gpu_tensor = gpu_request.get_tensor(input); + auto gpu_tensor_detail = ov::get_tensor_impl(gpu_tensor); + + auto compiled_model = core.compile_model( + model_can_batch, + "AUTO", + {ov::device::priorities("MOCK_GPU"), ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT)}); + auto request = compiled_model.create_infer_request(); + auto tensor = request.get_tensor(input); + auto tensor_detail = ov::get_tensor_impl(gpu_tensor); + ASSERT_EQ(tensor_detail._so, gpu_tensor_detail._so); +} + +TEST_F(AutoFuncTests, tensor_life_time_with_batch_model_latency_hint) { + auto gpu_compiled_model = core.compile_model(model_can_batch, "MOCK_GPU"); + auto gpu_request = gpu_compiled_model.create_infer_request(); + auto input = gpu_compiled_model.input(); + auto gpu_tensor = gpu_request.get_tensor(input); + auto gpu_tensor_detail = ov::get_tensor_impl(gpu_tensor); + + auto compiled_model = core.compile_model(model_can_batch, "AUTO", {ov::device::priorities("MOCK_GPU")}); + auto request = compiled_model.create_infer_request(); + auto tensor = request.get_tensor(input); + auto tensor_detail = ov::get_tensor_impl(gpu_tensor); + ASSERT_EQ(tensor_detail._so, gpu_tensor_detail._so); +} + +TEST_F(AutoFuncTests, tensor_life_time_with_batch_not_applicable_model) { + auto gpu_compiled_model = core.compile_model(model_cannot_batch, "MOCK_GPU"); + auto gpu_request = gpu_compiled_model.create_infer_request(); + auto input = gpu_compiled_model.input(); + auto gpu_tensor = gpu_request.get_tensor(input); + auto gpu_tensor_detail = ov::get_tensor_impl(gpu_tensor); + + auto compiled_model = core.compile_model( + model_cannot_batch, + "AUTO", + {ov::device::priorities("MOCK_GPU"), ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT)}); + auto request = compiled_model.create_infer_request(); + auto tensor = request.get_tensor(input); + auto tensor_detail = ov::get_tensor_impl(gpu_tensor); + ASSERT_EQ(tensor_detail._so, gpu_tensor_detail._so); +} \ No newline at 
end of file diff --git a/src/plugins/auto/tests/functional/behavior/property_test.cpp b/src/plugins/auto/tests/functional/behavior/property_test.cpp new file mode 100644 index 00000000000000..cfba5f0308b01b --- /dev/null +++ b/src/plugins/auto/tests/functional/behavior/property_test.cpp @@ -0,0 +1,99 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "auto_func_test.hpp" + +using namespace ov::auto_plugin::tests; + +TEST_F(AutoFuncTests, default_perfmode_for_multi) { + auto compiled_model = + core.compile_model(model_cannot_batch, "MULTI", {ov::device::priorities("MOCK_GPU", "MOCK_CPU")}); + EXPECT_EQ(compiled_model.get_property(ov::hint::performance_mode), ov::hint::PerformanceMode::THROUGHPUT); +} + +TEST_F(AutoFuncTests, respect_secondary_property_for_multi) { + auto compiled_model = core.compile_model( + model_cannot_batch, + "MULTI", + {ov::device::priorities("MOCK_GPU", "MOCK_CPU"), + ov::device::properties("MOCK_GPU", ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT)), + ov::device::properties("MOCK_CPU", ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY))}); + EXPECT_EQ(compiled_model.get_property(ov::hint::performance_mode), ov::hint::PerformanceMode::THROUGHPUT); + auto prop = compiled_model.get_property(ov::device::properties.name()).as(); + for (auto& item : prop) { + for (auto& item2 : item.second.as()) { + if (item2.first == ov::hint::performance_mode) { + if (item.first == "MOCK_CPU") { + EXPECT_EQ(item2.second, ov::hint::PerformanceMode::LATENCY); + } else if (item.first == "MOCK_GPU") { + EXPECT_EQ(item2.second, ov::hint::PerformanceMode::THROUGHPUT); + } + } + } + } +} + +TEST_F(AutoFuncTests, default_perfmode_for_auto_ctput) { + auto compiled_model = + core.compile_model(model_cannot_batch, + "AUTO", + {ov::device::priorities("MOCK_GPU", "MOCK_CPU"), + ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)}); + EXPECT_EQ(compiled_model.get_property(ov::hint::performance_mode), + ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT); + auto prop = compiled_model.get_property(ov::device::properties.name()).as(); + for (auto& item : prop) { + for (auto& item2 : item.second.as()) { + if (item2.first == ov::hint::performance_mode) { + if (item.first == "MOCK_CPU") { + EXPECT_EQ(item2.second, ov::hint::PerformanceMode::THROUGHPUT); + } else if (item.first == "MOCK_GPU") { + EXPECT_EQ(item2.second, ov::hint::PerformanceMode::THROUGHPUT); + } + } + } + } +} + +TEST_F(AutoFuncTests, default_perfmode_for_auto) { + auto compiled_model = + core.compile_model(model_cannot_batch, "AUTO", {ov::device::priorities("MOCK_GPU", "MOCK_CPU")}); + EXPECT_EQ(compiled_model.get_property(ov::hint::performance_mode), ov::hint::PerformanceMode::LATENCY); + auto prop = compiled_model.get_property(ov::device::properties.name()).as(); + for (auto& item : prop) { + for (auto& item2 : item.second.as()) { + if (item2.first == ov::hint::performance_mode) { + if (item.first == "MOCK_CPU") { + EXPECT_EQ(item2.second, ov::hint::PerformanceMode::LATENCY); + } else if (item.first == "MOCK_GPU") { + EXPECT_EQ(item2.second, ov::hint::PerformanceMode::LATENCY); + } + } + } + } +} + +TEST_F(AutoFuncTests, respect_secondary_property_auto_ctput) { + auto compiled_model = core.compile_model( + model_cannot_batch, + "AUTO", + {ov::device::priorities("MOCK_GPU", "MOCK_CPU"), + ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT), + ov::device::properties("MOCK_GPU", 
ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT)), + ov::device::properties("MOCK_CPU", ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY))}); + EXPECT_EQ(compiled_model.get_property(ov::hint::performance_mode), + ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT); + auto prop = compiled_model.get_property(ov::device::properties.name()).as(); + for (auto& item : prop) { + for (auto& item2 : item.second.as()) { + if (item2.first == ov::hint::performance_mode) { + if (item.first == "MOCK_CPU") { + EXPECT_EQ(item2.second, ov::hint::PerformanceMode::LATENCY); + } else if (item.first == "MOCK_GPU") { + EXPECT_EQ(item2.second, ov::hint::PerformanceMode::THROUGHPUT); + } + } + } + } +} \ No newline at end of file diff --git a/src/plugins/auto/tests/functional/behavior/remote_tensor_test.cpp b/src/plugins/auto/tests/functional/behavior/remote_tensor_test.cpp new file mode 100644 index 00000000000000..ef8446ee6f6efe --- /dev/null +++ b/src/plugins/auto/tests/functional/behavior/remote_tensor_test.cpp @@ -0,0 +1,104 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "auto_func_test.hpp" + +using namespace ov::auto_plugin::tests; + +TEST_F(AutoFuncTests, can_create_remotetensor_then_infer_with_affinity) { + ov::CompiledModel compiled_model; + compiled_model = core.compile_model(model_cannot_batch, "MULTI", {ov::device::priorities("MOCK_GPU")}); + auto input = model_cannot_batch->get_parameters().at(0); + auto output = model_cannot_batch->get_results().at(0); + auto fake_img_data = ov::Tensor(input->get_element_type(), input->get_shape()); + auto inf_req_regular = compiled_model.create_infer_request(); + inf_req_regular.set_tensor(input, fake_img_data); + // infer using system memory + ASSERT_NO_THROW(inf_req_regular.infer()); + auto output_tensor_regular = inf_req_regular.get_tensor(output); + auto cldnn_context = core.get_default_context("MOCK_GPU"); + auto remote_tensor = cldnn_context.create_tensor(input->get_element_type(), input->get_shape()); + + auto infer_req_remote = compiled_model.create_infer_request(); + infer_req_remote.set_tensor(input, remote_tensor); + // infer using remote tensor + ASSERT_NO_THROW(infer_req_remote.start_async()); + // no actual inference for remote tensor, due to data not able to mmap + infer_req_remote.wait(); +} + +TEST_F(AutoFuncTests, cannot_infer_remote_if_not_initialized_for_device) { + core.compile_model(model_cannot_batch, "MOCK_CPU"); + core.compile_model(model_cannot_batch, "MOCK_GPU"); // need to initialize the order of plugins in mock_engine + // simulate 2 hardware devices + register_plugin_mock_gpu(core, "MOCK_3", {}); + ov::CompiledModel compiled_model; + auto cldnn_context = core.get_default_context("MOCK_GPU"); + auto input = model_cannot_batch->get_parameters().at(0); + auto remote_tensor = cldnn_context.create_tensor(input->get_element_type(), input->get_shape()); + ASSERT_NO_THROW(compiled_model = + core.compile_model(model_cannot_batch, "MULTI", {ov::device::priorities("MOCK_3")})); + auto infer_req_remote = compiled_model.create_infer_request(); + infer_req_remote.set_tensor(input, remote_tensor); + ASSERT_NO_THROW(infer_req_remote.start_async()); + ASSERT_THROW(infer_req_remote.wait(), ov::Exception); +} + +TEST_F(AutoFuncTests, can_create_remotetensor_then_infer_with_affinity_2_devices) { + core.compile_model(model_cannot_batch, "MOCK_CPU"); + core.compile_model(model_cannot_batch, "MOCK_GPU"); // need to initialize the order of plugins in mock_engine + 
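+    // A third mock GPU-like device ("MOCK_3") is registered so that remote tensors can be
+    // created on two distinct devices and each one inferred with its own request below.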
register_plugin_mock_gpu(core, "MOCK_3", {}); + ov::CompiledModel compiled_model; + auto input = model_cannot_batch->get_parameters().at(0); + ASSERT_NO_THROW( + compiled_model = + core.compile_model(model_cannot_batch, "MULTI", {ov::device::priorities("MOCK_GPU", "MOCK_3")})); + std::vector inf_req_shared = {}; + auto cldnn_context = core.get_default_context("MOCK_GPU"); + auto remote_tensor = cldnn_context.create_tensor(input->get_element_type(), input->get_shape()); + ASSERT_EQ(remote_tensor.get_device_name(), "MOCK_GPU"); + auto cldnn_context_2 = core.get_default_context("MOCK_3"); + auto remote_tensor_2 = cldnn_context_2.create_tensor(input->get_element_type(), input->get_shape()); + ASSERT_EQ(remote_tensor_2.get_device_name(), "MOCK_3"); + auto infer_req_remote = compiled_model.create_infer_request(); + infer_req_remote.set_tensor(input, remote_tensor); + auto infer_req_remote_2 = compiled_model.create_infer_request(); + infer_req_remote_2.set_tensor(input, remote_tensor_2); + // infer using remote tensor + ASSERT_NO_THROW(infer_req_remote.start_async()); + ASSERT_NO_THROW(infer_req_remote_2.start_async()); + ASSERT_NO_THROW(infer_req_remote.wait()); + ASSERT_NO_THROW(infer_req_remote_2.wait()); +} + +TEST_F(AutoFuncTests, can_create_remotetensor_then_infer_with_affinity_2_devices_device_id) { + ov::CompiledModel compiled_model; + auto input = model_cannot_batch->get_parameters().at(0); + ASSERT_NO_THROW( + compiled_model = + core.compile_model(model_cannot_batch, "MULTI", {ov::device::priorities("MOCK_GPU.1", "MOCK_CPU")})); + auto cldnn_context = core.get_default_context("MOCK_GPU"); + auto remote_tensor = cldnn_context.create_tensor(input->get_element_type(), input->get_shape()); + ASSERT_EQ(remote_tensor.get_device_name(), "MOCK_GPU"); + auto infer_req_remote = compiled_model.create_infer_request(); + infer_req_remote.set_tensor(input, remote_tensor); + // infer using remote tensor + ASSERT_NO_THROW(infer_req_remote.start_async()); + ASSERT_THROW_WITH_MESSAGE(infer_req_remote.wait(), + ov::Exception, + "None of the devices supports a remote tensor created on the device named MOCK_GPU"); +} + +TEST_F(AutoFuncTests, can_throw_if_oversubsciption_of_inferrequest) { + ov::CompiledModel compiled_model; + ASSERT_NO_THROW(compiled_model = core.compile_model( + model_cannot_batch, + "MULTI", + {ov::device::priorities("MOCK_GPU", "MOCK_CPU"), ov::intel_auto::device_bind_buffer(true)})); + auto optimal_num = compiled_model.get_property(ov::optimal_number_of_infer_requests); + for (size_t i = 0; i < optimal_num; i++) { + compiled_model.create_infer_request(); + } + ASSERT_THROW(compiled_model.create_infer_request(), ov::Exception); +} \ No newline at end of file diff --git a/src/plugins/auto/tests/functional/behavior/wait_test.cpp b/src/plugins/auto/tests/functional/behavior/wait_test.cpp new file mode 100644 index 00000000000000..33f8ee50b56c8b --- /dev/null +++ b/src/plugins/auto/tests/functional/behavior/wait_test.cpp @@ -0,0 +1,73 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include "auto_func_test.hpp" +#include "openvino/runtime/exception.hpp" + +using namespace ov::auto_plugin::tests; + +TEST_F(AutoFuncTests, can_infer_and_wait_for_result) { + ov::CompiledModel compiled_model; + ASSERT_NO_THROW(compiled_model = core.compile_model( + model_can_batch, + "AUTO", + {ov::device::priorities("MOCK_GPU", "MOCK_CPU"), + ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)})); + auto req = 
compiled_model.create_infer_request(); + ov::Tensor tensor; + auto input = compiled_model.input(); + auto output = compiled_model.output(); + ASSERT_NO_THROW(tensor = req.get_tensor(input)); + ASSERT_NO_THROW(req.infer()); + ASSERT_NO_THROW(req.start_async()); + ASSERT_NO_THROW(req.wait()); + ASSERT_NO_THROW(tensor = req.get_tensor(output)); +} + +TEST_F(AutoFuncTests, can_wait_without_startasync) { + ov::CompiledModel compiled_model; + ASSERT_NO_THROW(compiled_model = core.compile_model( + model_can_batch, + "AUTO", + {ov::device::priorities("MOCK_GPU", "MOCK_CPU"), + ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)})); + auto req = compiled_model.create_infer_request(); + ASSERT_NO_THROW(req.wait()); + ASSERT_NO_THROW(req.wait_for({})); + ASSERT_NO_THROW(req.wait_for(std::chrono::milliseconds{1})); +} + +TEST_F(AutoFuncTests, can_throw_if_request_busy) { + ov::CompiledModel compiled_model; + ASSERT_NO_THROW(compiled_model = core.compile_model( + model_can_batch, + "AUTO", + {ov::device::priorities("MOCK_GPU", "MOCK_CPU"), + ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)})); + auto req = compiled_model.create_infer_request(); + auto input = compiled_model.input(); + auto output = compiled_model.output(); + auto output_tensor = req.get_tensor(input); + ASSERT_NO_THROW(req.wait_for({})); + ASSERT_NO_THROW(req.start_async()); + ASSERT_NO_THROW(try { req.set_tensor(input, output_tensor); } catch (const ov::Busy&){}); + ASSERT_NO_THROW(req.wait_for({})); + ASSERT_NO_THROW(req.wait()); +} + +TEST_F(AutoFuncTests, can_throw_on_get_tensor_if_request_busy) { + ov::CompiledModel compiled_model; + ASSERT_NO_THROW(compiled_model = core.compile_model( + model_can_batch, + "AUTO", + {ov::device::priorities("MOCK_GPU", "MOCK_CPU"), + ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)})); + auto req = compiled_model.create_infer_request(); + auto input = compiled_model.input(); + ASSERT_NO_THROW(req.start_async()); + ASSERT_NO_THROW(try { req.get_tensor(input); } catch (const ov::Busy&){}); + ASSERT_NO_THROW(req.wait()); +} \ No newline at end of file diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/executable_network/exec_network_base.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/executable_network/exec_network_base.cpp new file mode 100644 index 00000000000000..845f63ee4cee8c --- /dev/null +++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/executable_network/exec_network_base.cpp @@ -0,0 +1,44 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "behavior/executable_network/exec_network_base.hpp" + +#include "ie_plugin_config.hpp" + +using namespace BehaviorTestsDefinitions; +namespace { +const std::vector> auto_configs = { + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}}}; + +INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, + ExecutableNetworkBaseTest, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), + ::testing::ValuesIn(auto_configs)), + ExecutableNetworkBaseTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, + ExecutableNetworkBaseTest, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), + ::testing::ValuesIn(auto_configs)), + ExecutableNetworkBaseTest::getTestCaseName); + +const std::vector netPrecisions = {InferenceEngine::Precision::FP32, + 
InferenceEngine::Precision::U8, + InferenceEngine::Precision::I16, + InferenceEngine::Precision::U16}; + +INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, + ExecNetSetPrecision, + ::testing::Combine(::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::test::utils::DEVICE_MULTI), + ::testing::ValuesIn(auto_configs)), + ExecNetSetPrecision::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, + ExecNetSetPrecision, + ::testing::Combine(::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::test::utils::DEVICE_AUTO), + ::testing::ValuesIn(auto_configs)), + ExecNetSetPrecision::getTestCaseName); +} // namespace diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/executable_network/get_metric.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/executable_network/get_metric.cpp new file mode 100644 index 00000000000000..ca702dc66db4bc --- /dev/null +++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/executable_network/get_metric.cpp @@ -0,0 +1,36 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "behavior/executable_network/get_metric.hpp" + +using namespace BehaviorTestsDefinitions; + +using namespace InferenceEngine::PluginConfigParams; + +namespace { + +// +// Executable Network GetMetric +// + +INSTANTIATE_TEST_SUITE_P(smoke_IEClassExecutableNetworkGetMetricTest, + IEClassExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS, + ::testing::Values("AUTO:TEMPLATE", "MULTI:TEMPLATE")); + +INSTANTIATE_TEST_SUITE_P(smoke_IEClassExecutableNetworkGetMetricTest, + IEClassExecutableNetworkGetMetricTest_SUPPORTED_METRICS, + ::testing::Values("AUTO:TEMPLATE", "MULTI:TEMPLATE")); + +INSTANTIATE_TEST_SUITE_P(smoke_IEClassExecutableNetworkGetMetricTest, + IEClassExecutableNetworkGetMetricTest_NETWORK_NAME, + ::testing::Values("MULTI:TEMPLATE", "AUTO:TEMPLATE")); + +INSTANTIATE_TEST_SUITE_P(smoke_IEClassExecutableNetworkGetMetricTest, + IEClassExecutableNetworkGetMetricTest_OPTIMAL_NUMBER_OF_INFER_REQUESTS, + ::testing::Values("MULTI:TEMPLATE", "AUTO:TEMPLATE")); + +INSTANTIATE_TEST_SUITE_P(smoke_IEClassExecutableNetworkGetMetricTest, + IEClassExecutableNetworkGetMetricTest_ThrowsUnsupported, + ::testing::Values("MULTI:TEMPLATE", "AUTO:TEMPLATE")); +} // namespace diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/callback.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/callback.cpp new file mode 100644 index 00000000000000..c02a5c44c30e35 --- /dev/null +++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/callback.cpp @@ -0,0 +1,23 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "behavior/infer_request/callback.hpp" + +using namespace BehaviorTestsDefinitions; +namespace { +const std::vector> multiConfigs = { + {{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_TEMPLATE}}}; + +INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, + InferRequestCallbackTests, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), + ::testing::ValuesIn(multiConfigs)), + InferRequestCallbackTests::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, + InferRequestCallbackTests, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), + ::testing::ValuesIn(multiConfigs)), + InferRequestCallbackTests::getTestCaseName); +} // namespace diff --git 
a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/io_blob.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/io_blob.cpp new file mode 100644 index 00000000000000..483067a521c1b0 --- /dev/null +++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/io_blob.cpp @@ -0,0 +1,29 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "behavior/infer_request/io_blob.hpp" + +#include + +#include "ie_plugin_config.hpp" + +using namespace BehaviorTestsDefinitions; +namespace { +const std::vector> Autoconfigs = { + {{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_TEMPLATE}}, +}; + +INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, + InferRequestIOBBlobTest, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), + ::testing::ValuesIn(Autoconfigs)), + InferRequestIOBBlobTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, + InferRequestIOBBlobTest, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), + ::testing::ValuesIn(Autoconfigs)), + InferRequestIOBBlobTest::getTestCaseName); + +} // namespace diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/memory_states.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/memory_states.cpp similarity index 53% rename from src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/memory_states.cpp rename to src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/memory_states.cpp index 60e9d3542d41c3..5cbda535d8b2df 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/memory_states.cpp +++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/memory_states.cpp @@ -2,8 +2,10 @@ // SPDX-License-Identifier: Apache-2.0 // -#include #include "behavior/infer_request/memory_states.hpp" + +#include + #include "functional_test_utils/plugin_cache.hpp" #include "ov_models/builders.hpp" @@ -11,17 +13,17 @@ using namespace BehaviorTestsDefinitions; namespace { std::vector memoryStateTestCases = { -#ifdef ENABLE_INTEL_CPU + memoryStateParams(InferRequestVariableStateTest::getNetwork(), + {"c_1-3", "r_1-3"}, + ov::test::utils::DEVICE_AUTO, + {{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_TEMPLATE}}), memoryStateParams(InferRequestVariableStateTest::getNetwork(), {"c_1-3", "r_1-3"}, ov::test::utils::DEVICE_MULTI, - {{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), - ov::test::utils::DEVICE_GPU + std::string(",") + ov::test::utils::DEVICE_CPU}}) -#endif -}; + {{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_TEMPLATE}})}; -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, - InferRequestQueryStateExceptionTest, +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, + InferRequestVariableStateTest, ::testing::ValuesIn(memoryStateTestCases), - InferRequestQueryStateExceptionTest::getTestCaseName); + InferRequestVariableStateTest::getTestCaseName); } // namespace diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/multitheading.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/multitheading.cpp new file mode 100644 index 00000000000000..27a82693f28ff6 --- /dev/null +++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/multitheading.cpp @@ 
-0,0 +1,27 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include "behavior/infer_request/multithreading.hpp" +#include "ie_plugin_config.hpp" + +using namespace BehaviorTestsDefinitions; +namespace { +const std::vector> Multiconfigs = { + {{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_TEMPLATE}}}; + +INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, + InferRequestMultithreadingTests, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), + ::testing::ValuesIn(Multiconfigs)), + InferRequestMultithreadingTests::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, + InferRequestMultithreadingTests, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), + ::testing::ValuesIn(Multiconfigs)), + InferRequestMultithreadingTests::getTestCaseName); + +} // namespace diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/perf_counters.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/perf_counters.cpp new file mode 100644 index 00000000000000..baa0c4fe978c29 --- /dev/null +++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/perf_counters.cpp @@ -0,0 +1,24 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "behavior/infer_request/perf_counters.hpp" + +using namespace BehaviorTestsDefinitions; +namespace { +const std::vector> Autoconfigs = { + {{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_TEMPLATE}}}; + +INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, + InferRequestPerfCountersTest, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), + ::testing::ValuesIn(Autoconfigs)), + InferRequestPerfCountersTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, + InferRequestPerfCountersTest, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), + ::testing::ValuesIn(Autoconfigs)), + InferRequestPerfCountersTest::getTestCaseName); + +} // namespace diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/set_blob_by_type.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/set_blob_by_type.cpp new file mode 100644 index 00000000000000..c1037519a72f8e --- /dev/null +++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/set_blob_by_type.cpp @@ -0,0 +1,33 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "behavior/infer_request/set_blob_by_type.hpp" + +#include "common_test_utils/test_constants.hpp" + +using namespace BehaviorTestsDefinitions; +using namespace InferenceEngine; + +const std::vector BlobTypes = { + FuncTestUtils::BlobType::Compound, + FuncTestUtils::BlobType::Batched, + FuncTestUtils::BlobType::Memory, +}; + +const std::map autoConfig{ + {MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_TEMPLATE}}; + +INSTANTIATE_TEST_SUITE_P(smoke_Behavior_Multi, + InferRequestSetBlobByType, + ::testing::Combine(::testing::ValuesIn(BlobTypes), + ::testing::Values(ov::test::utils::DEVICE_MULTI), + ::testing::Values(autoConfig)), + InferRequestSetBlobByType::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Behavior_Auto, + InferRequestSetBlobByType, + ::testing::Combine(::testing::ValuesIn(BlobTypes), + ::testing::Values(ov::test::utils::DEVICE_AUTO), + ::testing::Values(autoConfig)), + 
InferRequestSetBlobByType::getTestCaseName); diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/wait.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/wait.cpp new file mode 100644 index 00000000000000..e1307f5092f6a5 --- /dev/null +++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/wait.cpp @@ -0,0 +1,28 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "behavior/infer_request/wait.hpp" + +#include + +#include "ie_plugin_config.hpp" + +using namespace BehaviorTestsDefinitions; +namespace { +const std::vector> Autoconfigs = { + {{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_TEMPLATE}}}; + +INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, + InferRequestWaitTests, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), + ::testing::ValuesIn(Autoconfigs)), + InferRequestWaitTests::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, + InferRequestWaitTests, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), + ::testing::ValuesIn(Autoconfigs)), + InferRequestWaitTests::getTestCaseName); + +} // namespace diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/core_integration.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/core_integration.cpp new file mode 100644 index 00000000000000..20cb407b3ee694 --- /dev/null +++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/core_integration.cpp @@ -0,0 +1,30 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "behavior/compiled_model/properties.hpp" +#include "openvino/runtime/core.hpp" + +using namespace ov::test::behavior; + +using namespace InferenceEngine::PluginConfigParams; + +namespace { +// +// Executable Network GetMetric +// + +INSTANTIATE_TEST_SUITE_P(smoke_OVClassCompiledModelGetPropertyTest, + OVClassCompiledModelGetPropertyTest, + ::testing::Values("MULTI:TEMPLATE", "AUTO:TEMPLATE")); + +// +// Executable Network GetConfig / SetConfig +// + +INSTANTIATE_TEST_SUITE_P(smoke_OVClassCompiledModelGetIncorrectPropertyTest, + OVClassCompiledModelGetIncorrectPropertyTest, + ::testing::Values("MULTI:TEMPLATE", "AUTO:TEMPLATE")); +////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/exec_network_base.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/exec_network_base.cpp new file mode 100644 index 00000000000000..60d1c7b6a90e3b --- /dev/null +++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/exec_network_base.cpp @@ -0,0 +1,35 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "behavior/compiled_model/compiled_model_base.hpp" +#include "ie_plugin_config.hpp" + +using namespace ov::test::behavior; +namespace { +const std::vector multiConfigs = {{ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE)}}; + +INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, + OVCompiledModelBaseTest, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), + ::testing::ValuesIn(multiConfigs)), + OVCompiledModelBaseTest::getTestCaseName); + 
+INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, + OVCompiledModelBaseTest, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), + ::testing::ValuesIn(multiConfigs)), + OVCompiledModelBaseTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, + OVCompiledModelBaseTestOptional, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), + ::testing::ValuesIn(multiConfigs)), + OVCompiledModelBaseTestOptional::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, + OVCompiledModelBaseTestOptional, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), + ::testing::ValuesIn(multiConfigs)), + OVCompiledModelBaseTestOptional::getTestCaseName); +} // namespace diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/ov_exec_net_import_export.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/ov_exec_net_import_export.cpp new file mode 100644 index 00000000000000..f264a55c667a9f --- /dev/null +++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/ov_exec_net_import_export.cpp @@ -0,0 +1,32 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#include + +#include "behavior/compiled_model/import_export.hpp" +#include "ie_plugin_config.hpp" + +using namespace ov::test::behavior; +namespace { +const std::vector netPrecisions = { + ov::element::i8, + ov::element::i16, + ov::element::i32, + ov::element::i64, + ov::element::u8, + ov::element::u16, + ov::element::u32, + ov::element::u64, + ov::element::f16, + ov::element::f32, +}; + +const std::vector auto_configs = {{ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE)}}; + +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, + OVCompiledGraphImportExportTest, + ::testing::Combine(::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::test::utils::DEVICE_AUTO), + ::testing::ValuesIn(auto_configs)), + OVCompiledGraphImportExportTest::getTestCaseName); +} // namespace diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/properties.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/properties.cpp new file mode 100644 index 00000000000000..981e8d66aa48b3 --- /dev/null +++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/properties.cpp @@ -0,0 +1,144 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "behavior/compiled_model/properties.hpp" + +#include "ie_system_conf.h" +#include "openvino/runtime/properties.hpp" + +using namespace ov::test::behavior; + +namespace { + +const std::vector inproperties = { + {ov::device::id("UNSUPPORTED_DEVICE_ID_STRING")}, +}; + +INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, + OVClassCompiledModelPropertiesIncorrectTests, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI, "AUTO:TEMPLATE"), + ::testing::ValuesIn(inproperties)), + OVClassCompiledModelPropertiesIncorrectTests::getTestCaseName); + +#if (defined(__APPLE__) || defined(_WIN32)) +auto default_affinity = [] { + auto numaNodes = InferenceEngine::getAvailableNUMANodes(); + auto coreTypes = InferenceEngine::getAvailableCoresTypes(); + if (coreTypes.size() > 1) { + return ov::Affinity::HYBRID_AWARE; + } else if (numaNodes.size() > 1) { + return ov::Affinity::NUMA; + } else { + return ov::Affinity::NONE; + } +}(); 
+#else +auto default_affinity = [] { + auto coreTypes = InferenceEngine::getAvailableCoresTypes(); + if (coreTypes.size() > 1) { + return ov::Affinity::HYBRID_AWARE; + } else { + return ov::Affinity::CORE; + } +}(); +#endif + +const std::vector multi_properties = { + {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE), ov::num_streams(ov::streams::AUTO)}, + {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE), + {InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, + InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}}, +}; + +INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, + OVClassCompiledModelPropertiesTests, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), + ::testing::ValuesIn(multi_properties)), + OVClassCompiledModelPropertiesTests::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_OVCompiledModelIncorrectDevice, + OVCompiledModelIncorrectDevice, + ::testing::Values("TEMPLATE")); + +const std::vector auto_multi_device_properties = { + {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE), ov::device::properties("TEMPLATE", ov::num_streams(4))}, + {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE), + ov::device::properties("TEMPLATE", ov::num_streams(4), ov::enable_profiling(true))}, + {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE), + ov::device::properties(ov::AnyMap{{"TEMPLATE", ov::AnyMap{{ov::num_streams(4), ov::enable_profiling(true)}}}})}}; + +INSTANTIATE_TEST_SUITE_P(smoke_AutoMultiSetAndCompileModelBehaviorTestsNoThrow, + OVClassCompiledModelPropertiesTests, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO, + ov::test::utils::DEVICE_MULTI), + ::testing::ValuesIn(auto_multi_device_properties)), + OVClassCompiledModelPropertiesTests::getTestCaseName); + +const std::vector configsWithSecondaryProperties = { + {ov::device::properties("TEMPLATE", ov::num_streams(4))}, + {ov::device::properties("TEMPLATE", + ov::num_streams(4), + ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT))}}; + +const std::vector autoConfigsWithSecondaryProperties = { + {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE), + ov::device::properties("AUTO", + ov::enable_profiling(false), + ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT))}, + {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE), + ov::device::properties("TEMPLATE", + ov::num_streams(4), + ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT))}, + {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE), + ov::device::properties("AUTO", + ov::enable_profiling(false), + ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY)), + ov::device::properties("TEMPLATE", + ov::num_streams(4), + ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT))}}; + +// IE Class Load network +INSTANTIATE_TEST_SUITE_P(smoke_CPUOVClassCompileModelWithCorrectPropertiesTest, + OVClassCompileModelWithCorrectPropertiesTest, + ::testing::Combine(::testing::Values("AUTO:TEMPLATE", "MULTI:TEMPLATE"), + ::testing::ValuesIn(configsWithSecondaryProperties))); + +INSTANTIATE_TEST_SUITE_P(smoke_Multi_OVClassCompileModelWithCorrectPropertiesTest, + OVClassCompileModelWithCorrectPropertiesTest, + ::testing::Combine(::testing::Values("MULTI"), + ::testing::ValuesIn(autoConfigsWithSecondaryProperties))); + +INSTANTIATE_TEST_SUITE_P(smoke_AUTO_OVClassCompileModelWithCorrectPropertiesTest, + OVClassCompileModelWithCorrectPropertiesTest, + ::testing::Combine(::testing::Values("AUTO"), + 
::testing::ValuesIn(autoConfigsWithSecondaryProperties))); + +const std::vector> automultiExeDeviceConfigs = { + std::make_pair(ov::AnyMap{{ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE)}}, "TEMPLATE")}; + +INSTANTIATE_TEST_SUITE_P(smoke_AutoMultiCompileModelBehaviorTests, + OVCompileModelGetExecutionDeviceTests, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO, + ov::test::utils::DEVICE_MULTI), + ::testing::ValuesIn(automultiExeDeviceConfigs)), + OVCompileModelGetExecutionDeviceTests::getTestCaseName); + +const std::vector multiDevicePriorityConfigs = {{ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE)}}; + +INSTANTIATE_TEST_SUITE_P(smoke_OVClassCompiledModelGetPropertyTest, + OVClassCompiledModelGetPropertyTest_DEVICE_PRIORITY, + ::testing::Combine(::testing::Values("MULTI", "AUTO"), + ::testing::ValuesIn(multiDevicePriorityConfigs)), + OVClassCompiledModelGetPropertyTest_DEVICE_PRIORITY::getTestCaseName); + +const std::vector multiModelPriorityConfigs = {{ov::hint::model_priority(ov::hint::Priority::HIGH)}, + {ov::hint::model_priority(ov::hint::Priority::MEDIUM)}, + {ov::hint::model_priority(ov::hint::Priority::LOW)}, + {ov::hint::model_priority(ov::hint::Priority::DEFAULT)}}; + +INSTANTIATE_TEST_SUITE_P(smoke_OVClassCompiledModelGetPropertyTest, + OVClassCompiledModelGetPropertyTest_MODEL_PRIORITY, + ::testing::Combine(::testing::Values("AUTO:TEMPLATE"), + ::testing::ValuesIn(multiModelPriorityConfigs))); +} // namespace diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_infer_request/callback.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_infer_request/callback.cpp new file mode 100644 index 00000000000000..7f274bedbfc2d2 --- /dev/null +++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_infer_request/callback.cpp @@ -0,0 +1,25 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "behavior/ov_infer_request/callback.hpp" + +#include + +using namespace ov::test::behavior; + +namespace { +const std::vector multiConfigs = {{ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE)}}; + +INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, + OVInferRequestCallbackTests, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), + ::testing::ValuesIn(multiConfigs)), + OVInferRequestCallbackTests::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, + OVInferRequestCallbackTests, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), + ::testing::ValuesIn(multiConfigs)), + OVInferRequestCallbackTests::getTestCaseName); +} // namespace diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp new file mode 100644 index 00000000000000..aa4f9957601bed --- /dev/null +++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp @@ -0,0 +1,47 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "behavior/ov_infer_request/infer_request_dynamic.hpp" + +#include + +using namespace ov::test::behavior; + +namespace { +const std::vector AutoConfigs = {{ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE)}}; + +std::shared_ptr getFunction2() { + const std::vector inputShape = {1, 4, 20, 20}; + const 
ngraph::element::Type_t ngPrc = ngraph::element::Type_t::f32; + + ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; + params.front()->set_friendly_name("Param_1"); + params.front()->get_output_tensor(0).set_names({"input_tensor"}); + auto split = ngraph::builder::makeSplit(params[0], ngPrc, 2, 1); + + auto in2add = ngraph::builder::makeConstant(ngPrc, {1, 2, 1, 1}, std::vector{}, true); + auto add = ngraph::builder::makeEltwise(split->output(0), in2add, ngraph::helpers::EltwiseTypes::ADD); + auto relu1 = std::make_shared(add); + + auto in2mult = ngraph::builder::makeConstant(ngPrc, {1, 2, 1, 1}, std::vector{}, true); + auto mult = ngraph::builder::makeEltwise(split->output(1), in2mult, ngraph::helpers::EltwiseTypes::MULTIPLY); + auto relu2 = std::make_shared(mult); + + auto concat = std::make_shared(ngraph::OutputVector{relu1->output(0), relu2->output(0)}, 3); + concat->get_output_tensor(0).set_names({"concat"}); + + return std::make_shared(concat, params, "SplitAddConcat"); +} + +INSTANTIATE_TEST_SUITE_P( + smoke_Auto_BehaviorTests, + OVInferRequestDynamicTests, + ::testing::Combine(::testing::Values(getFunction2()), + ::testing::Values(std::vector, std::vector>>{ + {{1, 4, 20, 20}, {1, 2, 20, 40}}, + {{2, 4, 20, 20}, {2, 2, 20, 40}}}), + ::testing::Values(ov::test::utils::DEVICE_AUTO), + ::testing::ValuesIn(AutoConfigs)), + OVInferRequestDynamicTests::getTestCaseName); +} // namespace diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_infer_request/inference_chaining.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_infer_request/inference_chaining.cpp new file mode 100644 index 00000000000000..6ae0a4eab6b20f --- /dev/null +++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_infer_request/inference_chaining.cpp @@ -0,0 +1,25 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "behavior/ov_infer_request/inference_chaining.hpp" + +#include "common_test_utils/test_constants.hpp" + +using namespace ov::test::behavior; + +namespace { +const std::vector AutoConfigs = {{ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE)}}; + +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, + OVInferenceChaining, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), + ::testing::ValuesIn(AutoConfigs)), + OVInferenceChaining::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, + OVInferenceChainingStatic, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), + ::testing::ValuesIn(AutoConfigs)), + OVInferenceChainingStatic::getTestCaseName); +} // namespace diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp new file mode 100644 index 00000000000000..e57698f7487f92 --- /dev/null +++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp @@ -0,0 +1,74 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "behavior/ov_infer_request/io_tensor.hpp" + +#include + +using namespace ov::test::behavior; + +namespace { +const std::vector Autoconfigs = {{ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE)}}; + +const std::vector emptyConfigs = {{}}; + +INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, + OVInferRequestIOTensorTest, + 
::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI),
+                                            ::testing::ValuesIn(Autoconfigs)),
+                         OVInferRequestIOTensorTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests,
+                         OVInferRequestIOTensorTest,
+                         ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO),
+                                            ::testing::ValuesIn(Autoconfigs)),
+                         OVInferRequestIOTensorTest::getTestCaseName);
+
+std::vector<ov::element::Type> prcs = {
+    ov::element::boolean,
+    ov::element::bf16,
+    ov::element::f16,
+    ov::element::f32,
+    ov::element::f64,
+    ov::element::i4,
+    ov::element::i8,
+    ov::element::i16,
+    ov::element::i32,
+    ov::element::i64,
+    ov::element::u1,
+    ov::element::u4,
+    ov::element::u8,
+    ov::element::u16,
+    ov::element::u32,
+    ov::element::u64,
+};
+
+INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests,
+                         OVInferRequestIOTensorSetPrecisionTest,
+                         ::testing::Combine(::testing::ValuesIn(prcs),
+                                            ::testing::Values(ov::test::utils::DEVICE_MULTI),
+                                            ::testing::ValuesIn(Autoconfigs)),
+                         OVInferRequestIOTensorSetPrecisionTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests,
+                         OVInferRequestIOTensorSetPrecisionTest,
+                         ::testing::Combine(::testing::ValuesIn(prcs),
+                                            ::testing::Values(ov::test::utils::DEVICE_AUTO),
+                                            ::testing::ValuesIn(Autoconfigs)),
+                         OVInferRequestIOTensorSetPrecisionTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests,
+                         OVInferRequestCheckTensorPrecision,
+                         ::testing::Combine(::testing::ValuesIn(prcs),
+                                            ::testing::Values(ov::test::utils::DEVICE_MULTI),
+                                            ::testing::ValuesIn(Autoconfigs)),
+                         OVInferRequestCheckTensorPrecision::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests,
+                         OVInferRequestCheckTensorPrecision,
+                         ::testing::Combine(::testing::ValuesIn(prcs),
+                                            ::testing::Values(ov::test::utils::DEVICE_AUTO),
+                                            ::testing::ValuesIn(Autoconfigs)),
+                         OVInferRequestCheckTensorPrecision::getTestCaseName);
+} // namespace
diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_infer_request/multithreading.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_infer_request/multithreading.cpp
new file mode 100644
index 00000000000000..6bbf4fe9cf7d30
--- /dev/null
+++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_infer_request/multithreading.cpp
@@ -0,0 +1,26 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior/ov_infer_request/multithreading.hpp"
+
+#include
+
+using namespace ov::test::behavior;
+
+namespace {
+const std::vector<ov::AnyMap> Multiconfigs = {{ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE)}};
+
+INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests,
+                         OVInferRequestMultithreadingTests,
+                         ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI),
+                                            ::testing::ValuesIn(Multiconfigs)),
+                         OVInferRequestMultithreadingTests::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests,
+                         OVInferRequestMultithreadingTests,
+                         ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO),
+                                            ::testing::ValuesIn(Multiconfigs)),
+                         OVInferRequestMultithreadingTests::getTestCaseName);
+
+} // namespace
diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_infer_request/perf_counters.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_infer_request/perf_counters.cpp
new file mode 100644
index 00000000000000..bcb27758486db3
--- /dev/null
+++ 
b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_infer_request/perf_counters.cpp @@ -0,0 +1,26 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "behavior/ov_infer_request/perf_counters.hpp" + +using namespace ov::test::behavior; + +namespace { +const std::vector Autoconfigs = { + {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE)}, + {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE), ov::enable_profiling(true)}, + {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE), ov::intel_auto::device_bind_buffer(true)}}; + +INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, + OVInferRequestPerfCountersTest, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), + ::testing::ValuesIn(Autoconfigs)), + OVInferRequestPerfCountersTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, + OVInferRequestPerfCountersTest, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), + ::testing::ValuesIn(Autoconfigs)), + OVInferRequestPerfCountersTest::getTestCaseName); +} // namespace diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_infer_request/wait.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_infer_request/wait.cpp new file mode 100644 index 00000000000000..0bf4c2fbaa496a --- /dev/null +++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_infer_request/wait.cpp @@ -0,0 +1,26 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "behavior/ov_infer_request/wait.hpp" + +#include + +using namespace ov::test::behavior; + +namespace { +const std::vector Autoconfigs = {{ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE)}}; + +INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, + OVInferRequestWaitTests, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), + ::testing::ValuesIn(Autoconfigs)), + OVInferRequestWaitTests::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, + OVInferRequestWaitTests, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), + ::testing::ValuesIn(Autoconfigs)), + OVInferRequestWaitTests::getTestCaseName); + +} // namespace diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_plugin/caching_tests.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_plugin/caching_tests.cpp new file mode 100644 index 00000000000000..998280199f00d1 --- /dev/null +++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_plugin/caching_tests.cpp @@ -0,0 +1,47 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "behavior/ov_plugin/caching_tests.hpp" + +#include +#include +#include + +using namespace ov::test::behavior; +using namespace ngraph; + +namespace { +static const std::vector precisionsTemplate = { + ov::element::f32, +}; + +static const std::vector batchSizesTemplate = {1, 2}; + +const std::vector autoConfigs = {{ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE)}}; + +INSTANTIATE_TEST_SUITE_P(smoke_Auto_CachingSupportCase, + CompileModelCacheTestBase, + ::testing::Combine(::testing::ValuesIn(CompileModelCacheTestBase::getStandardFunctions()), + ::testing::ValuesIn(precisionsTemplate), + ::testing::ValuesIn(batchSizesTemplate), + ::testing::Values(ov::test::utils::DEVICE_AUTO), + ::testing::ValuesIn(autoConfigs)), + 
CompileModelCacheTestBase::getTestCaseName); + +const std::vector LoadFromFileConfigs = {{ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE)}}; +const std::vector TestTargets = { + ov::test::utils::DEVICE_AUTO, + ov::test::utils::DEVICE_MULTI, +}; + +INSTANTIATE_TEST_SUITE_P(smoke_Auto_CachingSupportCase, + CompileModelLoadFromFileTestBase, + ::testing::Combine(::testing::ValuesIn(TestTargets), ::testing::ValuesIn(LoadFromFileConfigs)), + CompileModelLoadFromFileTestBase::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Auto_CachingSupportCase, + CompileModelLoadFromMemoryTestBase, + ::testing::Combine(::testing::ValuesIn(TestTargets), ::testing::ValuesIn(LoadFromFileConfigs)), + CompileModelLoadFromMemoryTestBase::getTestCaseName); +} // namespace diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_plugin/core_integration.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_plugin/core_integration.cpp new file mode 100644 index 00000000000000..c5afda521a5ca1 --- /dev/null +++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_plugin/core_integration.cpp @@ -0,0 +1,60 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "behavior/ov_plugin/core_integration.hpp" + +#include + +#include "behavior/ov_plugin/core_integration_sw.hpp" +#include "behavior/ov_plugin/query_model.hpp" +#include "openvino/core/type/element_type.hpp" +#include "openvino/runtime/core.hpp" + +using namespace ov::test::behavior; +using namespace InferenceEngine::PluginConfigParams; + +// defined in plugin_name.cpp +extern const char* cpu_plugin_file_name; + +namespace { +// +// IE Class Common tests with +// + +const std::vector configsWithEmpty = {{}}; +const std::vector configsWithMetaPlugin = {{ov::device::priorities("AUTO")}, + {ov::device::priorities("MULTI")}, + {ov::device::priorities("AUTO", "MULTI")}, + {ov::device::priorities("AUTO", "TEMPLATE")}, + {ov::device::priorities("MULTI", "TEMPLATE")}}; + +INSTANTIATE_TEST_SUITE_P( + smoke_MULTI_AUTO_DoNotSupportMetaPluginLoadingItselfRepeatedlyWithEmptyConfigTest, + OVClassCompileModelWithCondidateDeviceListContainedMetaPluginTest, + ::testing::Combine(::testing::Values("MULTI:AUTO", "AUTO:MULTI", "MULTI:AUTO,TEMPLATE", "AUTO:TEMPLATE,MULTI"), + ::testing::ValuesIn(configsWithEmpty)), + ::testing::PrintToStringParamName()); + +INSTANTIATE_TEST_SUITE_P(smoke_MULTI_AUTO_DoNotSupportMetaPluginLoadingItselfRepeatedlyTest, + OVClassCompileModelWithCondidateDeviceListContainedMetaPluginTest, + ::testing::Combine(::testing::Values("MULTI", "AUTO"), + ::testing::ValuesIn(configsWithMetaPlugin)), + ::testing::PrintToStringParamName()); + +// Several devices case +/* enable below in nightly tests*/ +/* +INSTANTIATE_TEST_SUITE_P(nightly_OVClassSeveralDevicesTest, + OVClassSeveralDevicesTestCompileModel, + ::testing::Values(std::vector({"GPU.0", "GPU.1"}))); + +INSTANTIATE_TEST_SUITE_P(nightly_OVClassSeveralDevicesTest, + OVClassSeveralDevicesTestQueryModel, + ::testing::Values(std::vector({"GPU.0", "GPU.1"}))); + +INSTANTIATE_TEST_SUITE_P(nightly_OVClassSeveralDevicesTest, + OVClassSeveralDevicesTestDefaultCore, + ::testing::Values(std::vector({"GPU.0", "GPU.1"}))); +*/ +} // namespace diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_plugin/life_time.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_plugin/life_time.cpp new file mode 100644 index 00000000000000..dc88e4b57f9a01 --- /dev/null 
+++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_plugin/life_time.cpp
@@ -0,0 +1,22 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior/ov_plugin/life_time.hpp"
+
+using namespace ov::test::behavior;
+namespace {
+INSTANTIATE_TEST_SUITE_P(smoke_VirtualPlugin_BehaviorTests,
+                         OVHoldersTest,
+                         ::testing::Values("AUTO:TEMPLATE", "MULTI:TEMPLATE"),
+                         OVHoldersTest::getTestCaseName);
+
+const std::vector<std::string> device_names_and_priorities = {
+    "MULTI:TEMPLATE",  // TEMPLATE via MULTI,
+    "AUTO:TEMPLATE",   // TEMPLATE via AUTO,
+};
+INSTANTIATE_TEST_SUITE_P(smoke_VirtualPlugin_BehaviorTests,
+                         OVHoldersTestWithConfig,
+                         ::testing::ValuesIn(device_names_and_priorities),
+                         OVHoldersTestWithConfig::getTestCaseName);
+} // namespace
diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp
new file mode 100644
index 00000000000000..39756244e9fdad
--- /dev/null
+++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp
@@ -0,0 +1,165 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior/ov_plugin/properties_tests.hpp"
+
+#include
+
+using namespace ov::test::behavior;
+using namespace InferenceEngine::PluginConfigParams;
+
+namespace {
+const std::vector<ov::AnyMap> multi_Auto_properties = {
+    {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE),
+     ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT)},
+    {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE),
+     ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY)},
+    {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE),
+     ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)},
+    {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE),
+     ov::hint::execution_mode(ov::hint::ExecutionMode::ACCURACY)},
+    {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE),
+     ov::hint::execution_mode(ov::hint::ExecutionMode::PERFORMANCE)},
+    {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE), ov::intel_auto::device_bind_buffer("YES")},
+    {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE), ov::intel_auto::device_bind_buffer("NO")},
+    {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE), ov::intel_auto::enable_startup_fallback("YES")},
+    {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE), ov::intel_auto::enable_startup_fallback("NO")},
+};
+
+INSTANTIATE_TEST_SUITE_P(smoke_AutoMultiBehaviorTests,
+                         OVPropertiesTests,
+                         ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO,
+                                                              ov::test::utils::DEVICE_MULTI),
+                                            ::testing::ValuesIn(multi_Auto_properties)),
+                         OVPropertiesTests::getTestCaseName);
+
+const std::vector<ov::AnyMap> multi_setcore_properties = {
+    {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE),
+     ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY),
+     ov::hint::model_priority(ov::hint::Priority::HIGH)}};
+
+const std::vector<ov::AnyMap> multi_compileModel_properties = {
+    {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE),
+     ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT),
+     ov::hint::model_priority(ov::hint::Priority::MEDIUM)}};
+
+INSTANTIATE_TEST_SUITE_P(smoke_MultiCompileModelBehaviorTests,
+                         OVSetPropComplieModleGetPropTests,
+                         ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), +
::testing::ValuesIn(multi_setcore_properties), + ::testing::ValuesIn(multi_compileModel_properties)), + OVSetPropComplieModleGetPropTests::getTestCaseName); + +const std::vector auto_setcore_properties = { + {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE), + ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT), + ov::hint::model_priority(ov::hint::Priority::HIGH)}, + {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE), + ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY), + ov::hint::model_priority(ov::hint::Priority::HIGH)}, + {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE), + ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT), + ov::hint::model_priority(ov::hint::Priority::HIGH)}}; + +const std::vector auto_compileModel_properties = { + {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE), + ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY), + ov::hint::model_priority(ov::hint::Priority::MEDIUM)}, + {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE), + ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT), + ov::hint::model_priority(ov::hint::Priority::MEDIUM)}, + {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE), + ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT), + ov::hint::model_priority(ov::hint::Priority::MEDIUM)}}; + +INSTANTIATE_TEST_SUITE_P(smoke_AutoCompileModelBehaviorTests, + OVSetPropComplieModleGetPropTests, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), + ::testing::ValuesIn(auto_setcore_properties), + ::testing::ValuesIn(auto_compileModel_properties)), + OVSetPropComplieModleGetPropTests::getTestCaseName); + +const std::vector default_properties = {{ov::enable_profiling(false)}, + {ov::log::level("LOG_NONE")}, + {ov::hint::model_priority(ov::hint::Priority::MEDIUM)}, + {ov::hint::execution_mode(ov::hint::ExecutionMode::PERFORMANCE)}, + {ov::intel_auto::device_bind_buffer(false)}, + {ov::intel_auto::enable_startup_fallback(true)}, + {ov::device::priorities("")}}; +INSTANTIATE_TEST_SUITE_P(smoke_AutoBehaviorTests, + OVPropertiesDefaultTests, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), + ::testing::ValuesIn(default_properties)), + OVPropertiesDefaultTests::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, + OVPropertiesDefaultSupportedTests, + ::testing::Values(ov::test::utils::DEVICE_TEMPLATE, ov::test::utils::DEVICE_AUTO)); + +const std::vector auto_multi_incorrect_device_properties = { + {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE), + ov::num_streams(4), + ov::device::properties("TEMPLATE", ov::num_streams(4))}, + {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE), + ov::num_streams(4), + ov::device::properties("TEMPLATE", ov::num_streams(4), ov::enable_profiling(true))}}; + +INSTANTIATE_TEST_SUITE_P(smoke_AutoMultiSetAndCompileModelBehaviorTestsThrow, + OVSetUnsupportPropCompileModelWithoutConfigTests, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO, + ov::test::utils::DEVICE_MULTI), + ::testing::ValuesIn(auto_multi_incorrect_device_properties)), + OVSetUnsupportPropCompileModelWithoutConfigTests::getTestCaseName); + +// +// IE Class GetMetric +// + +INSTANTIATE_TEST_SUITE_P(smoke_AutoOVGetMetricPropsTest, OVGetMetricPropsTest, ::testing::Values("MULTI", "AUTO")); + +INSTANTIATE_TEST_SUITE_P( + smoke_AutoOVCheckGetSupportedROMetricsPropsTests, + OVCheckGetSupportedROMetricsPropsTests, + 
::testing::Combine(::testing::Values("MULTI", "AUTO"), + ::testing::ValuesIn(OVCheckGetSupportedROMetricsPropsTests::configureProperties( + {ov::device::full_name.name()}))), + OVCheckGetSupportedROMetricsPropsTests::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P( + OVCheckSetSupportedRWMandatoryMetricsPropsTests, + OVCheckSetSupportedRWMetricsPropsTests, + ::testing::Combine(::testing::Values("MULTI:TEMPLATE", "AUTO:TEMPLATE"), + ::testing::ValuesIn(OVCheckSetSupportedRWMetricsPropsTests::getRWMandatoryPropertiesValues( + {ov::hint::model_priority.name(), ov::log::level.name()}))), + OVCheckSetSupportedRWMetricsPropsTests::getTestCaseName); + +const std::vector multiConfigs = {{ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE)}}; + +INSTANTIATE_TEST_SUITE_P(smoke_OVClassSetDevicePriorityConfigPropsTest, + OVClassSetDevicePriorityConfigPropsTest, + ::testing::Combine(::testing::Values("MULTI", "AUTO"), ::testing::ValuesIn(multiConfigs))); + +const std::vector auto_properties = {{ov::device::priorities("TEMPLATE")}, + {ov::device::priorities("TEMPLATE(1)")}}; + +INSTANTIATE_TEST_SUITE_P(smoke_AutoBehaviorTests, + OVPropertiesTests, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), + ::testing::ValuesIn(auto_properties)), + OVPropertiesTests::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_MultiBehaviorTests, + OVPropertiesTests, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), + ::testing::ValuesIn(auto_properties)), + OVPropertiesTests::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P( + smoke_MultiAutoOVCheckSetSupportedRWMetricsPropsTests, + OVCheckSetSupportedRWMetricsPropsTests, + ::testing::Combine(::testing::Values("MULTI:TEMPLATE", "AUTO:TEMPLATE"), + ::testing::ValuesIn(OVCheckSetSupportedRWMetricsPropsTests::getRWMandatoryPropertiesValues( + {ov::hint::model_priority.name(), ov::log::level.name()}))), + OVCheckSetSupportedRWMetricsPropsTests::getTestCaseName); +} // namespace diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/plugin/configuration_tests.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/plugin/configuration_tests.cpp new file mode 100644 index 00000000000000..bad8c61b42cf27 --- /dev/null +++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/plugin/configuration_tests.cpp @@ -0,0 +1,191 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "behavior/plugin/configuration_tests.hpp" + +#include "ie_plugin_config.hpp" +#include "ie_system_conf.h" + +using namespace BehaviorTestsDefinitions; + +namespace { +#if (defined(__APPLE__) || defined(_WIN32)) +auto defaultBindThreadParameter = InferenceEngine::Parameter{[] { + auto numaNodes = InferenceEngine::getAvailableNUMANodes(); + auto coreTypes = InferenceEngine::getAvailableCoresTypes(); + if (coreTypes.size() > 1) { + return std::string{CONFIG_VALUE(HYBRID_AWARE)}; + } else if (numaNodes.size() > 1) { + return std::string{CONFIG_VALUE(NUMA)}; + } else { + return std::string{CONFIG_VALUE(NO)}; + } +}()}; +#else +auto defaultBindThreadParameter = InferenceEngine::Parameter{[] { + auto coreTypes = InferenceEngine::getAvailableCoresTypes(); + if (coreTypes.size() > 1) { + return std::string{CONFIG_VALUE(HYBRID_AWARE)}; + } else { + return std::string{CONFIG_VALUE(YES)}; + } +}()}; +#endif + +const std::vector netPrecisions = {InferenceEngine::Precision::FP32, + InferenceEngine::Precision::FP16}; + +const std::vector> conf = {{}}; + +const std::vector> 
MultiConfigs = { + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT}}, + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}}, + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + {InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, InferenceEngine::PluginConfigParams::YES}}, + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + {InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, InferenceEngine::PluginConfigParams::NO}}, + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}, + {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "1"}}}; + +const std::vector> AutoConfigs = { + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT}}, + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}}, + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}, + {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "1"}}, + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}}, + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + {InferenceEngine::PluginConfigParams::KEY_LOG_LEVEL, InferenceEngine::PluginConfigParams::LOG_NONE}}, + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + {InferenceEngine::PluginConfigParams::KEY_LOG_LEVEL, InferenceEngine::PluginConfigParams::LOG_ERROR}}, + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + {InferenceEngine::PluginConfigParams::KEY_LOG_LEVEL, InferenceEngine::PluginConfigParams::LOG_WARNING}}, + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + {InferenceEngine::PluginConfigParams::KEY_LOG_LEVEL, InferenceEngine::PluginConfigParams::LOG_INFO}}, + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + {InferenceEngine::PluginConfigParams::KEY_LOG_LEVEL, InferenceEngine::PluginConfigParams::LOG_DEBUG}}, + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + {InferenceEngine::PluginConfigParams::KEY_LOG_LEVEL, InferenceEngine::PluginConfigParams::LOG_TRACE}}, + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + {InferenceEngine::PluginConfigParams::KEY_MODEL_PRIORITY, + 
InferenceEngine::PluginConfigParams::MODEL_PRIORITY_HIGH}}, + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + {InferenceEngine::PluginConfigParams::KEY_MODEL_PRIORITY, + InferenceEngine::PluginConfigParams::MODEL_PRIORITY_MED}}, + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + {InferenceEngine::PluginConfigParams::KEY_MODEL_PRIORITY, + InferenceEngine::PluginConfigParams::MODEL_PRIORITY_LOW}}}; + +INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, + CorrectConfigTests, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), + ::testing::ValuesIn(MultiConfigs)), + CorrectConfigTests::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, + CorrectConfigTests, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), + ::testing::ValuesIn(AutoConfigs)), + CorrectConfigTests::getTestCaseName); + +const std::vector> multiinconfigs = { + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, "DOESN'T EXIST"}}, + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}, + {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "-1"}}, + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT}, + {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "should be int"}}, + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + {InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, "OFF"}}, + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + {InferenceEngine::PluginConfigParams::KEY_EXCLUSIVE_ASYNC_REQUESTS, "OFF"}}, +}; + +const std::vector> autoinconfigs = { + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, "DOESN'T EXIST"}}, + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}, + {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "-1"}}, + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT}, + {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "should be int"}}, + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + {InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "OFF"}}, + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + {InferenceEngine::PluginConfigParams::KEY_CPU_BIND_THREAD, "OFF"}}, + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + 
{InferenceEngine::PluginConfigParams::KEY_MODEL_PRIORITY, "-1"}}, + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + {InferenceEngine::PluginConfigParams::KEY_MODEL_PRIORITY, "ABC"}}, + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + {InferenceEngine::PluginConfigParams::KEY_LOG_LEVEL, "NAN"}}}; + +const std::vector> multiconf = { + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT}}, + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}}, + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}, + {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "1"}}, + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}}}; + +INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, + IncorrectConfigTests, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), + ::testing::ValuesIn(multiinconfigs)), + IncorrectConfigTests::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, + IncorrectConfigTests, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), + ::testing::ValuesIn(autoinconfigs)), + IncorrectConfigTests::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, + IncorrectConfigAPITests, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), + ::testing::ValuesIn(multiinconfigs)), + IncorrectConfigAPITests::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, + IncorrectConfigAPITests, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), + ::testing::ValuesIn(autoinconfigs)), + IncorrectConfigAPITests::getTestCaseName); + +const std::vector> auto_multi_prop_config = { + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT}, + {InferenceEngine::PluginConfigParams::KEY_MODEL_PRIORITY, + InferenceEngine::PluginConfigParams::MODEL_PRIORITY_MED}}}; + +const std::vector> auto_multi_loadNetWork_config = { + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT}, + {InferenceEngine::PluginConfigParams::KEY_MODEL_PRIORITY, + InferenceEngine::PluginConfigParams::MODEL_PRIORITY_HIGH}}}; + +INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, + SetPropLoadNetWorkGetPropTests, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), + ::testing::ValuesIn(auto_multi_prop_config), + ::testing::ValuesIn(auto_multi_loadNetWork_config)), + SetPropLoadNetWorkGetPropTests::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, + SetPropLoadNetWorkGetPropTests, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), + 
::testing::ValuesIn(auto_multi_prop_config), + ::testing::ValuesIn(auto_multi_loadNetWork_config)), + SetPropLoadNetWorkGetPropTests::getTestCaseName); +} // namespace diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/plugin/core_integration.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/plugin/core_integration.cpp new file mode 100644 index 00000000000000..29097f845f876d --- /dev/null +++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/plugin/core_integration.cpp @@ -0,0 +1,46 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "behavior/plugin/core_integration.hpp" + +using namespace BehaviorTestsDefinitions; + +using namespace InferenceEngine::PluginConfigParams; + +// defined in plugin_name.cpp +extern const char* cpu_plugin_file_name; + +namespace { +// +// IE Class Common tests with +// +// +// IE Class GetMetric +// + +INSTANTIATE_TEST_SUITE_P(smoke_IEClassGetMetricTest, + IEClassGetMetricTest_SUPPORTED_CONFIG_KEYS, + ::testing::Values("MULTI", "AUTO")); + +INSTANTIATE_TEST_SUITE_P(smoke_IEClassGetMetricTest, + IEClassGetMetricTest_SUPPORTED_METRICS, + ::testing::Values("MULTI", "AUTO")); + +INSTANTIATE_TEST_SUITE_P(smoke_IEClassGetMetricTest, + IEClassGetMetricTest_FULL_DEVICE_NAME, + ::testing::Values("MULTI", "AUTO")); + +INSTANTIATE_TEST_SUITE_P(smoke_IEClassGetMetricTest, + IEClassGetMetricTest_OPTIMIZATION_CAPABILITIES, + ::testing::Values("MULTI", "AUTO")); + +INSTANTIATE_TEST_SUITE_P(smoke_IEClassGetMetricTest, + IEClassGetMetricTest_ThrowUnsupported, + ::testing::Values("MULTI", "AUTO")); + +INSTANTIATE_TEST_SUITE_P(smoke_IEClassGetConfigTest, + IEClassGetConfigTest_ThrowUnsupported, + ::testing::Values("MULTI", "AUTO")); +////////////////////////////////////////////////////////////////////////////////////////// +} // namespace diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/plugin/core_threading_tests.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/plugin/core_threading_tests.cpp new file mode 100644 index 00000000000000..12553dbab98b03 --- /dev/null +++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/plugin/core_threading_tests.cpp @@ -0,0 +1,37 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#ifdef __GLIBC__ +# include +# if __GLIBC_MINOR__ >= 34 +# define ENABLETESTMULTI +# endif +#else +# define ENABLETESTMULTI +#endif + +namespace { + +const Params params[] = { + std::tuple{ov::test::utils::DEVICE_TEMPLATE, {{CONFIG_KEY(PERF_COUNT), CONFIG_VALUE(YES)}}}, +#ifdef ENABLETESTMULTI + std::tuple{ov::test::utils::DEVICE_MULTI, + {{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_TEMPLATE}}}, + std::tuple{ov::test::utils::DEVICE_AUTO, + {{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_TEMPLATE}}}, +#endif +}; +} // namespace +/* +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, CoreThreadingTests, testing::ValuesIn(params), +CoreThreadingTests::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, CoreThreadingTestsWithIterations, + testing::Combine(testing::ValuesIn(params), + testing::Values(4), + testing::Values(50), + testing::Values(ModelClass::Default)), + CoreThreadingTestsWithIterations::getTestCaseName); +*/ diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/plugin/set_preprocess.cpp 
b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/plugin/set_preprocess.cpp new file mode 100644 index 00000000000000..b75c4a4a93c51e --- /dev/null +++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/plugin/set_preprocess.cpp @@ -0,0 +1,84 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "behavior/plugin/set_preprocess.hpp" + +#ifdef ENABLE_GAPI_PREPROCESSING + +using namespace BehaviorTestsDefinitions; +namespace { +const std::vector netPrecisions = {InferenceEngine::Precision::FP32, + InferenceEngine::Precision::FP16}; + +const std::vector> multiConfigs = { + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}}}; + +const std::vector ioPrecisions = {InferenceEngine::Precision::FP32, + InferenceEngine::Precision::U8}; +const std::vector netLayouts = { + InferenceEngine::Layout::NCHW, + // InferenceEngine::Layout::NHWC +}; + +const std::vector ioLayouts = {InferenceEngine::Layout::NCHW, InferenceEngine::Layout::NHWC}; + +INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, + InferRequestPreprocessConversionTest, + ::testing::Combine(::testing::ValuesIn(netPrecisions), + ::testing::ValuesIn(ioPrecisions), + ::testing::ValuesIn(ioPrecisions), + ::testing::ValuesIn(netLayouts), + ::testing::ValuesIn(ioLayouts), + ::testing::ValuesIn(ioLayouts), + ::testing::Bool(), + ::testing::Bool(), + ::testing::Values(ov::test::utils::DEVICE_MULTI), + ::testing::ValuesIn(multiConfigs)), + InferRequestPreprocessConversionTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, + InferRequestPreprocessDynamicallyInSetBlobTest, + ::testing::Combine(::testing::ValuesIn(netPrecisions), + ::testing::Bool(), + ::testing::Bool(), + ::testing::ValuesIn(netLayouts), + ::testing::Bool(), + ::testing::Bool(), + ::testing::Values(true), // only SetBlob + ::testing::Values(true), // only SetBlob + ::testing::Values(ov::test::utils::DEVICE_MULTI), + ::testing::ValuesIn(multiConfigs)), + InferRequestPreprocessDynamicallyInSetBlobTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, + InferRequestPreprocessConversionTest, + ::testing::Combine(::testing::ValuesIn(netPrecisions), + ::testing::ValuesIn(ioPrecisions), + ::testing::ValuesIn(ioPrecisions), + ::testing::ValuesIn(netLayouts), + ::testing::ValuesIn(ioLayouts), + ::testing::ValuesIn(ioLayouts), + ::testing::Bool(), + ::testing::Bool(), + ::testing::Values(ov::test::utils::DEVICE_AUTO), + ::testing::ValuesIn(multiConfigs)), + InferRequestPreprocessConversionTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, + InferRequestPreprocessDynamicallyInSetBlobTest, + ::testing::Combine(::testing::ValuesIn(netPrecisions), + ::testing::Bool(), + ::testing::Bool(), + ::testing::ValuesIn(netLayouts), + ::testing::Bool(), + ::testing::Bool(), + ::testing::Values(true), // only SetBlob + ::testing::Values(true), // only SetBlob + ::testing::Values(ov::test::utils::DEVICE_AUTO), + ::testing::ValuesIn(multiConfigs)), + InferRequestPreprocessDynamicallyInSetBlobTest::getTestCaseName); + +} // namespace + +#endif // ENABLE_GAPI_PREPROCESSING diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/plugin/version.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/plugin/version.cpp new file mode 100644 index 00000000000000..796149e7cb1e76 --- /dev/null +++ 
b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/plugin/version.cpp @@ -0,0 +1,18 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "behavior/plugin/version.hpp" + +using namespace BehaviorTestsDefinitions; +namespace { +INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, + VersionTest, + ::testing::Values(ov::test::utils::DEVICE_MULTI), + VersionTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, + VersionTest, + ::testing::Values(ov::test::utils::DEVICE_AUTO), + VersionTest::getTestCaseName); +} // namespace diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/core_config.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/core_config.cpp new file mode 100644 index 00000000000000..2c54a0d17b2f8d --- /dev/null +++ b/src/plugins/auto/tests/functional/shared_tests_instances/core_config.cpp @@ -0,0 +1,17 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "functional_test_utils/core_config.hpp" + +#include "shared_test_classes/base/ov_subgraph.hpp" + +void CoreConfiguration(LayerTestsUtils::LayerTestsCommon* test) {} + +namespace ov { +namespace test { + +void core_configuration(ov::test::SubgraphBaseTest* test) {} + +} // namespace test +} // namespace ov diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/set_device_name.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/set_device_name.cpp new file mode 100644 index 00000000000000..564307d2daad46 --- /dev/null +++ b/src/plugins/auto/tests/functional/shared_tests_instances/set_device_name.cpp @@ -0,0 +1,17 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#include "set_device_name.hpp" + +#include +#include + +namespace ov { +namespace test { +void set_device_suffix(const std::string& suffix) { + if (!suffix.empty()) { + throw std::runtime_error("The suffix can't be used for CPU device!"); + } +} +} // namespace test +} // namespace ov diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/skip_tests_config.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/skip_tests_config.cpp new file mode 100644 index 00000000000000..bf32bfb031b4b2 --- /dev/null +++ b/src/plugins/auto/tests/functional/shared_tests_instances/skip_tests_config.cpp @@ -0,0 +1,80 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "functional_test_utils/skip_tests_config.hpp" + +#include + +#include +#include + +#include "ie_parallel.hpp" + +std::vector disabledTestPatterns() { + std::vector retVector{ + // TODO: Issue: 43793 + R"(.*InferRequestPreprocessDynamicallyInSetBlobTest.*iPRC=0.*_iLT=1.*)", + R"(.*InferRequestPreprocessDynamicallyInSetBlobTest.*oPRC=0.*_oLT=1.*)", + + // Not expected behavior + R"(.*Behavior.*InferRequestSetBlobByType.*Batched.*)", + R"(.*Auto.*Behavior.*ExecutableNetworkBaseTest.*canLoadCorrectNetworkToGetExecutableWithIncorrectConfig.*)", + + // Not implemented yet: + R"(.*Behavior.*ExecutableNetworkBaseTest.*canSetConfigToExecNet.*)", + R"(.*Behavior.*OVCompiledModelBaseTest.*canSetConfigToCompiledModel.*)", + R"(.*Behavior.*ExecutableNetworkBaseTest.*canExport.*)", + R"(.*Behavior.*OVCompiledModelBaseTest.*canExportModel.*)", + R"(.*Behavior.*ExecutableNetworkBaseTest.*canSetConfigToExecNetWithIncorrectConfig.*)", + R"(.*Behavior.*OVCompiledModelBaseTest.*canSetConfigToCompiledModelWithIncorrectConfig.*)", + + // 
TODO: CVS-104942 + R"(.*(Auto|Multi).*Behavior.*ExecutableNetworkBaseTest.*canLoadCorrectNetworkToGetExecutableAndCheckConfig.*)", + R"(.*(Auto|Multi).*SetPropLoadNetWorkGetPropTests.*)", + + // CPU does not support dynamic rank + // Issue: CVS-66778 + R"(.*smoke_Auto_BehaviorTests.*InferFullyDynamicNetworkWith(S|G)etTensor.*)", + R"(.*smoke_Auto_BehaviorTests.*DynamicOutputToDynamicInput.*)", + R"(.*smoke_Auto_BehaviorTests.*DynamicInputToDynamicOutput.*)", + // unsupported metrics + R"(.*smoke_AutoOVGetMetricPropsTest.*OVGetMetricPropsTest.*(AVAILABLE_DEVICES|OPTIMIZATION_CAPABILITIES|RANGE_FOR_ASYNC_INFER_REQUESTS|RANGE_FOR_STREAMS).*)", + + // Issue: + // New API tensor tests + R"(.*OVInferRequestCheckTensorPrecision.*type=i4.*)", + R"(.*OVInferRequestCheckTensorPrecision.*type=u1.*)", + R"(.*OVInferRequestCheckTensorPrecision.*type=u4.*)", + + // AUTO does not support import / export + R"(.*smoke_Auto_BehaviorTests/OVCompiledGraphImportExportTest.*(mportExport|readFromV10IR).*/targetDevice=(AUTO).*)", + + // New plugin API doesn't support changes of pre-processing + R"(.*(Auto|Multi).*InferRequestPreprocessTest.*SetPreProcessToInputInfo.*)", + R"(.*(Auto|Multi).*InferRequestPreprocessTest.*SetPreProcessToInferRequest.*)", + // New plugin work with tensors, so it means that blob in old API can have different pointers + R"(.*(Auto|Multi).*InferRequestIOBBlobTest.*secondCallGetInputDoNotReAllocateData.*)", + R"(.*(Auto|Multi).*InferRequestIOBBlobTest.*secondCallGetOutputDoNotReAllocateData.*)", + R"(.*(Auto|Multi).*InferRequestIOBBlobTest.*secondCallGetInputAfterInferSync.*)", + R"(.*(Auto|Multi).*InferRequestIOBBlobTest.*secondCallGetOutputAfterInferSync.*)", + // TODO Issue 100145 + R"(.*Behavior.*InferRequestIOBBlobTest.*canReallocateExternalBlobViaGet.*)", + R"(.*Behavior.*OVInferRequestIOTensorTest.*canInferAfterIOBlobReallocation.*)", + R"(.*Behavior.*OVInferRequestDynamicTests.*InferUpperBoundNetworkAfterIOTensorsReshaping.*)", + // Not expected behavior + R"(.*Behavior.*(Multi|Auto).*InferRequestSetBlobByType.*Batched.*)", + R"(.*(Multi|Auto).*Behavior.*InferRequestIOBBlobTest.*canProcessDeallocatedOutputBlobAfterGetAndSetBlob.*)", + // template plugin doesn't support this case + R"(.*OVInferRequestPerfCountersTest.*CheckOperationInProfilingInfo.*)"}; + +#if !defined(OPENVINO_ARCH_X86_64) + // very time-consuming test + retVector.emplace_back(R"(.*OVInferConsistencyTest.*)"); +#endif + +#if defined(_WIN32) + retVector.emplace_back(R"(.*LoadNetworkCompiledKernelsCacheTest.*CanCreateCacheDirAndDumpBinariesUnicodePath.*)"); +#endif + return retVector; +} diff --git a/src/plugins/auto/tests/unit/auto_unit_test.cpp b/src/plugins/auto/tests/unit/auto_unit_test.cpp index 64de772b599bdd..139533bc378bba 100644 --- a/src/plugins/auto/tests/unit/auto_unit_test.cpp +++ b/src/plugins/auto/tests/unit/auto_unit_test.cpp @@ -3,11 +3,22 @@ // #include "include/auto_unit_test.hpp" + #include "common_test_utils/file_utils.hpp" #include "openvino/core/any.hpp" +#include "openvino/opsets/opset11.hpp" +#include "openvino/runtime/make_tensor.hpp" #include "openvino/runtime/properties.hpp" #include "openvino/util/file_util.hpp" -#include "openvino/util/shared_object.hpp" + +namespace testing { +namespace internal { +template <> +void PrintTo(const ov::Any& a, std::ostream* os) { + *os << "using custom PrintTo ov::Any"; +} +} // namespace internal +} // namespace testing std::shared_ptr ov::mock_auto_plugin::tests::BaseTest::create_model() { auto param = std::make_shared(ov::element::i64, ov::Shape{1, 3, 
2, 2}); @@ -30,52 +41,56 @@ ov::mock_auto_plugin::tests::BaseTest::BaseTest() { NiceMock* mock_auto = new NiceMock(); plugin.reset(mock_auto); // construct mock plugin - mock_plugin_cpu = std::make_shared>(); - mock_plugin_gpu = std::make_shared>(); + mock_plugin_cpu = std::make_shared>(); + mock_plugin_gpu = std::make_shared>(); // prepare mockExeNetwork - mockIExeNet = std::make_shared>(model, mock_plugin_cpu); + mockIExeNet = std::make_shared>(model, mock_plugin_cpu); mockExeNetwork = {mockIExeNet, {}}; - mockIExeNetActual = std::make_shared>(model, mock_plugin_gpu); + mockIExeNetActual = std::make_shared>(model, mock_plugin_gpu); mockExeNetworkActual = {mockIExeNetActual, {}}; - inferReqInternal = std::make_shared(mockIExeNet); + ON_CALL(*mockIExeNet.get(), inputs()).WillByDefault(ReturnRefOfCopy(model->inputs())); + ON_CALL(*mockIExeNet.get(), outputs()).WillByDefault(ReturnRefOfCopy(model->outputs())); + ON_CALL(*mockIExeNetActual.get(), inputs()).WillByDefault(ReturnRefOfCopy(model->inputs())); + ON_CALL(*mockIExeNetActual.get(), outputs()).WillByDefault(ReturnRefOfCopy(model->outputs())); + inferReqInternal = std::make_shared(mockIExeNet); + ON_CALL(*mockIExeNet.get(), create_sync_infer_request()).WillByDefault(Return(inferReqInternal)); optimalNum = (uint32_t)1; ON_CALL(*mockIExeNet.get(), get_property(StrEq(ov::optimal_number_of_infer_requests.name()))) .WillByDefault(Return(optimalNum)); - inferReqInternalActual = std::make_shared(mockIExeNetActual); + inferReqInternalActual = std::make_shared(mockIExeNetActual); + ON_CALL(*mockIExeNetActual.get(), create_sync_infer_request()).WillByDefault(Return(inferReqInternalActual)); ON_CALL(*mockIExeNetActual.get(), get_property(StrEq(ov::optimal_number_of_infer_requests.name()))) .WillByDefault(Return(optimalNum)); ON_CALL(*mockIExeNet.get(), create_infer_request()).WillByDefault([this]() { - return mockIExeNet->ICompiledModel::create_infer_request(); - }); + return mockIExeNet->ICompiledModel::create_infer_request(); + }); ON_CALL(*mockIExeNetActual.get(), create_infer_request()).WillByDefault([this]() { - return mockIExeNetActual->ICompiledModel::create_infer_request(); - }); + return mockIExeNetActual->ICompiledModel::create_infer_request(); + }); std::vector supported_props = {ov::hint::num_requests}; ON_CALL(*mockIExeNet.get(), get_property(StrEq(ov::supported_properties.name()))) .WillByDefault(Return(ov::Any(supported_props))); ON_CALL(*mockIExeNetActual.get(), get_property(StrEq(ov::supported_properties.name()))) .WillByDefault(Return(ov::Any(supported_props))); unsigned int num = 1; - ON_CALL(*mockIExeNet.get(), get_property(StrEq(ov::hint::num_requests.name()))) - .WillByDefault(Return(ov::Any(num))); + ON_CALL(*mockIExeNet.get(), get_property(StrEq(ov::hint::num_requests.name()))).WillByDefault(Return(ov::Any(num))); ON_CALL(*mockIExeNetActual.get(), get_property(StrEq(ov::hint::num_requests.name()))) .WillByDefault(Return(ov::Any(num))); ON_CALL(*plugin, get_device_list).WillByDefault([this](const ov::AnyMap& config) { return plugin->Plugin::get_device_list(config); }); ON_CALL(*plugin, parse_meta_devices) - .WillByDefault( - [this](const std::string& priorityDevices, const ov::AnyMap& config) { + .WillByDefault([this](const std::string& priorityDevices, const ov::AnyMap& config) { return plugin->Plugin::parse_meta_devices(priorityDevices, config); }); ON_CALL(*plugin, select_device) .WillByDefault([this](const std::vector& metaDevices, - const std::string& netPrecision, - unsigned int priority) { + const std::string& 
netPrecision, + unsigned int priority) { return plugin->Plugin::select_device(metaDevices, netPrecision, priority); }); @@ -115,47 +130,46 @@ ov::mock_auto_plugin::tests::AutoTest::AutoTest() { ON_CALL(*core, get_property(_, StrEq(ov::supported_properties.name()), _)) .WillByDefault(RETURN_MOCK_VALUE(supportedProps)); ON_CALL(*core, get_property(_, StrEq(ov::compilation_num_threads.name()), _)).WillByDefault(Return(12)); - std::vector cpuCability = {"FP32", "FP16", "INT8", "BIN"}; - std::vector gpuCability = {"FP32", "FP16", "BATCHED_BLOB", "BIN", "INT8"}; - std::vector othersCability = {"FP32", "FP16"}; + std::vector cpuCability = {"FP32", "FP16", "INT8", "BIN"}; + std::vector gpuCability = {"FP32", "FP16", "BATCHED_BLOB", "BIN", "INT8"}; + std::vector othersCability = {"FP32", "FP16"}; std::string igpuArchitecture = "GPU: vendor=0x8086 arch=0"; std::string dgpuArchitecture = "GPU: vendor=0x8086 arch=1"; auto iGpuType = ov::device::Type::INTEGRATED; auto dGpuType = ov::device::Type::DISCRETE; - ON_CALL(*core, get_property(StrEq(ov::test::utils::DEVICE_CPU), - StrEq(ov::device::capabilities.name()), _)).WillByDefault(RETURN_MOCK_VALUE(cpuCability)); - ON_CALL(*core, get_property(HasSubstr("GPU"), - StrEq(ov::device::capabilities.name()), _)).WillByDefault(RETURN_MOCK_VALUE(gpuCability)); - ON_CALL(*core, get_property(StrEq("OTHERS"), - StrEq(ov::device::capabilities.name()), _)).WillByDefault(RETURN_MOCK_VALUE(othersCability)); - ON_CALL(*core, get_property(StrEq("GPU"), - StrEq(ov::device::architecture.name()), _)).WillByDefault(RETURN_MOCK_VALUE(igpuArchitecture)); - ON_CALL(*core, get_property(StrEq("GPU.0"), - StrEq(ov::device::architecture.name()), _)).WillByDefault(RETURN_MOCK_VALUE(igpuArchitecture)); - ON_CALL(*core, get_property(StrEq("GPU.1"), - StrEq(ov::device::architecture.name()), _)).WillByDefault(RETURN_MOCK_VALUE(dgpuArchitecture)); - ON_CALL(*core, get_property(StrEq("GPU"), - StrEq(ov::device::type.name()), _)).WillByDefault(RETURN_MOCK_VALUE(iGpuType)); - ON_CALL(*core, get_property(StrEq("GPU.0"), - StrEq(ov::device::type.name()), _)).WillByDefault(RETURN_MOCK_VALUE(iGpuType)); - ON_CALL(*core, get_property(StrEq("GPU.1"), - StrEq(ov::device::type.name()), _)).WillByDefault(RETURN_MOCK_VALUE(dGpuType)); - const std::vector metrics = {METRIC_KEY(SUPPORTED_CONFIG_KEYS), ov::device::full_name.name(), ov::device::id.name()}; + ON_CALL(*core, get_property(StrEq(ov::test::utils::DEVICE_CPU), StrEq(ov::device::capabilities.name()), _)) + .WillByDefault(RETURN_MOCK_VALUE(cpuCability)); + ON_CALL(*core, get_property(HasSubstr("GPU"), StrEq(ov::device::capabilities.name()), _)) + .WillByDefault(RETURN_MOCK_VALUE(gpuCability)); + ON_CALL(*core, get_property(StrEq("OTHERS"), StrEq(ov::device::capabilities.name()), _)) + .WillByDefault(RETURN_MOCK_VALUE(othersCability)); + ON_CALL(*core, get_property(StrEq("GPU"), StrEq(ov::device::architecture.name()), _)) + .WillByDefault(RETURN_MOCK_VALUE(igpuArchitecture)); + ON_CALL(*core, get_property(StrEq("GPU.0"), StrEq(ov::device::architecture.name()), _)) + .WillByDefault(RETURN_MOCK_VALUE(igpuArchitecture)); + ON_CALL(*core, get_property(StrEq("GPU.1"), StrEq(ov::device::architecture.name()), _)) + .WillByDefault(RETURN_MOCK_VALUE(dgpuArchitecture)); + ON_CALL(*core, get_property(StrEq("GPU"), StrEq(ov::device::type.name()), _)) + .WillByDefault(RETURN_MOCK_VALUE(iGpuType)); + ON_CALL(*core, get_property(StrEq("GPU.0"), StrEq(ov::device::type.name()), _)) + .WillByDefault(RETURN_MOCK_VALUE(iGpuType)); + ON_CALL(*core, 
get_property(StrEq("GPU.1"), StrEq(ov::device::type.name()), _)) + .WillByDefault(RETURN_MOCK_VALUE(dGpuType)); + const std::vector metrics = {METRIC_KEY(SUPPORTED_CONFIG_KEYS), + ov::device::full_name.name(), + ov::device::id.name()}; const char igpuFullDeviceName[] = "Intel(R) Gen9 HD Graphics (iGPU)"; const char dgpuFullDeviceName[] = "Intel(R) Iris(R) Xe MAX Graphics (dGPU)"; - ON_CALL(*core, get_property(_, StrEq(METRIC_KEY(SUPPORTED_METRICS)), _)) - .WillByDefault(RETURN_MOCK_VALUE(metrics)); - ON_CALL(*core, get_property(_, ov::supported_properties.name(), _)) - .WillByDefault(Return(ov::Any(supportedProps))); - ON_CALL(*core, get_property(StrEq("GPU"), - StrEq(ov::device::full_name.name()), _)).WillByDefault(RETURN_MOCK_VALUE(igpuFullDeviceName)); - ON_CALL(*core, get_property(StrEq("GPU"), - StrEq(ov::device::id.name()), _)).WillByDefault(Return(ov::Any("0"))); - ON_CALL(*core, get_property(StrEq("GPU.0"), - StrEq(ov::device::full_name.name()), _)).WillByDefault(RETURN_MOCK_VALUE(igpuFullDeviceName)); - ON_CALL(*core, get_property(StrEq("GPU.1"), - StrEq(ov::device::full_name.name()), _)).WillByDefault(RETURN_MOCK_VALUE(dgpuFullDeviceName)); - const std::vector availableDevs = {"CPU", "GPU.0", "GPU.1"}; + ON_CALL(*core, get_property(_, StrEq(METRIC_KEY(SUPPORTED_METRICS)), _)).WillByDefault(RETURN_MOCK_VALUE(metrics)); + ON_CALL(*core, get_property(_, ov::supported_properties.name(), _)).WillByDefault(Return(ov::Any(supportedProps))); + ON_CALL(*core, get_property(StrEq("GPU"), StrEq(ov::device::full_name.name()), _)) + .WillByDefault(RETURN_MOCK_VALUE(igpuFullDeviceName)); + ON_CALL(*core, get_property(StrEq("GPU"), StrEq(ov::device::id.name()), _)).WillByDefault(Return(ov::Any("0"))); + ON_CALL(*core, get_property(StrEq("GPU.0"), StrEq(ov::device::full_name.name()), _)) + .WillByDefault(RETURN_MOCK_VALUE(igpuFullDeviceName)); + ON_CALL(*core, get_property(StrEq("GPU.1"), StrEq(ov::device::full_name.name()), _)) + .WillByDefault(RETURN_MOCK_VALUE(dgpuFullDeviceName)); + const std::vector availableDevs = {"CPU", "GPU.0", "GPU.1"}; ON_CALL(*core, get_available_devices()).WillByDefault(Return(availableDevs)); ON_CALL(*core, get_supported_property).WillByDefault([](const std::string& device, const ov::AnyMap& fullConfigs) { auto item = fullConfigs.find(ov::device::properties.name()); @@ -186,180 +200,35 @@ ov::mock_auto_plugin::tests::AutoTest::~AutoTest() { core.reset(); } -namespace { - -std::string get_mock_engine_path() { - std::string mockEngineName("mock_engine"); - return ov::util::make_plugin_library_name(ov::test::utils::getExecutableDirectory(), - mockEngineName + IE_BUILD_POSTFIX); -} - -template -std::function make_std_function(const std::shared_ptr so, const std::string& functionName) { - std::function ptr(reinterpret_cast(ov::util::get_symbol(so, functionName.c_str()))); - return ptr; -} - -ov::PropertyName RO_property(const std::string& propertyName) { - return ov::PropertyName(propertyName, ov::PropertyMutability::RO); -} - -ov::PropertyName RW_property(const std::string& propertyName) { - return ov::PropertyName(propertyName, ov::PropertyMutability::RW); -} - -} // namespace - -ov::mock_auto_plugin::tests::AutoTestWithRealCore::AutoTestWithRealCore() { - register_plugin_simple(core, "MOCK_CPU", {}); - // validate the mock plugin, to ensure the order as well - core.get_property("MOCK_CPU", ov::supported_properties); - register_plugin_support_batch_and_context(core, "MOCK_GPU", {}); - // validate the mock plugin - core.get_property("MOCK_GPU", 
ov::supported_properties); - ov::Any optimalNum = (uint32_t)1; - ON_CALL(*mock_plugin_cpu.get(), compile_model(::testing::Matcher&>(_), _)) - .WillByDefault(Return(mockIExeNet)); - ON_CALL(*mock_plugin_cpu.get(), compile_model(::testing::Matcher&>(_), _)) - .WillByDefault(Return(mockIExeNetActual)); -} - -void ov::mock_auto_plugin::tests::AutoTestWithRealCore::reg_plugin(ov::Core& core, - std::shared_ptr plugin, - const std::string& device_name, - const ov::AnyMap& properties) { - std::string libraryPath = get_mock_engine_path(); - if (!m_so) - m_so = ov::util::load_shared_object(libraryPath.c_str()); - if (device_name.find("MULTI") == std::string::npos && device_name.find("AUTO") == std::string::npos) - plugin->set_device_name(device_name); - std::function inject_mock_plugin = make_std_function(m_so, "InjectPlugin"); - - inject_mock_plugin(plugin.get()); - core.register_plugin(ov::util::make_plugin_library_name(ov::test::utils::getExecutableDirectory(), - std::string("mock_engine") + IE_BUILD_POSTFIX), - device_name, - properties); -} - -// test -void ov::mock_auto_plugin::tests::AutoTestWithRealCore::register_plugin_support_batch_and_context(ov::Core& core, - const std::string& device_name, - const ov::AnyMap& properties) { - auto remote_context = std::make_shared(mock_plugin_gpu->get_device_name()); - m_mock_contexts.push_back(remote_context); - ON_CALL(*mock_plugin_gpu, compile_model(_, _)).WillByDefault(Return(mockIExeNetActual)); - ON_CALL(*mock_plugin_gpu, create_context).WillByDefault(Return(ov::SoPtr(remote_context, nullptr))); - ON_CALL(*mock_plugin_gpu, get_default_context).WillByDefault(Return(ov::SoPtr(remote_context, nullptr))); - ON_CALL(*mock_plugin_gpu, get_property).WillByDefault([](const std::string& name, const ov::AnyMap& property) -> ov::Any { - const std::vector roProperties{ - RO_property(ov::supported_properties.name()), - RO_property(ov::optimal_batch_size.name()), - RO_property(ov::optimal_number_of_infer_requests.name()), - RO_property(ov::device::capabilities.name()), - RO_property(ov::device::type.name()), - RO_property(ov::device::uuid.name()), - }; - // the whole config is RW before network is loaded. 
- const std::vector rwProperties{ - RW_property(ov::num_streams.name()), - RW_property(ov::enable_profiling.name()), - RW_property(ov::compilation_num_threads.name()), - RW_property(ov::hint::performance_mode.name()), - RW_property(ov::hint::num_requests.name()) - }; - if (name == ov::supported_properties) { - std::vector supportedProperties; - supportedProperties.reserve(roProperties.size() + rwProperties.size()); - supportedProperties.insert(supportedProperties.end(), roProperties.begin(), roProperties.end()); - supportedProperties.insert(supportedProperties.end(), rwProperties.begin(), rwProperties.end()); - - return decltype(ov::supported_properties)::value_type(supportedProperties); - } else if (name == ov::optimal_number_of_infer_requests.name()) { - return decltype(ov::optimal_number_of_infer_requests)::value_type(1); - } else if (name == ov::optimal_batch_size.name()) { - return decltype(ov::optimal_batch_size)::value_type(4); - } else if (name == ov::device::capabilities.name()) { - return decltype(ov::device::capabilities)::value_type({"FP32", "FP16", "BATCHED_BLOB", "BIN", "INT8"}); - } else if (name == ov::device::type.name()) { - return decltype(ov::device::type)::value_type(ov::device::Type::INTEGRATED); - } else if (name == ov::loaded_from_cache.name()) { - return false; - } else if (name == ov::enable_profiling.name()) { - return decltype(ov::enable_profiling)::value_type{false}; - } else if (name == ov::streams::num.name()) { - return decltype(ov::streams::num)::value_type{2}; - } else if (name == ov::compilation_num_threads.name()) { - return decltype(ov::compilation_num_threads)::value_type{4}; - } else if (name == "SUPPORTED_CONFIG_KEYS") { // TODO: Remove this key - std::vector configs; - for (const auto& property : rwProperties) { - configs.emplace_back(property); - } - return configs; - } else if (name == "SUPPORTED_METRICS") { // TODO: Remove this key - std::vector configs; - for (const auto& property : roProperties) { - configs.emplace_back(property); - } - return configs; - } else if (name == ov::internal::supported_properties) { - return decltype(ov::internal::supported_properties)::value_type({}); - } - OPENVINO_NOT_IMPLEMENTED; - }); - std::shared_ptr base_plugin = mock_plugin_gpu; - reg_plugin(core, base_plugin, device_name, properties); +void ov::mock_auto_plugin::MockISyncInferRequest::allocate_tensor_impl(ov::SoPtr& tensor, + const element::Type& element_type, + const Shape& shape) { + if (!tensor || tensor->get_element_type() != element_type) { + tensor = ov::make_tensor(element_type, shape); + } else { + tensor->set_shape(shape); + } } -void ov::mock_auto_plugin::tests::AutoTestWithRealCore::register_plugin_simple(ov::Core& core, - const std::string& device_name, - const ov::AnyMap& properties) { - ON_CALL(*mock_plugin_cpu, compile_model(_, _)).WillByDefault(Return(mockIExeNet)); - ON_CALL(*mock_plugin_cpu, create_context).WillByDefault(Throw(ov::Exception{"NotImplemented"})); - ON_CALL(*mock_plugin_cpu, get_default_context).WillByDefault(Throw(ov::Exception{"NotImplemented"})); - ON_CALL(*mock_plugin_cpu, get_property).WillByDefault([](const std::string& name, const ov::AnyMap& property) -> ov::Any { - const std::vector roProperties{ - RO_property(ov::supported_properties.name()), - RO_property(ov::device::uuid.name()), - }; - // the whole config is RW before network is loaded. 
- const std::vector rwProperties{ - RW_property(ov::num_streams.name()), - RW_property(ov::enable_profiling.name()), - RW_property(ov::hint::performance_mode.name()) - }; - if (name == ov::supported_properties) { - std::vector supportedProperties; - supportedProperties.reserve(roProperties.size() + rwProperties.size()); - supportedProperties.insert(supportedProperties.end(), roProperties.begin(), roProperties.end()); - supportedProperties.insert(supportedProperties.end(), rwProperties.begin(), rwProperties.end()); - - return decltype(ov::supported_properties)::value_type(supportedProperties); - } else if (name == ov::loaded_from_cache.name()) { - return false; - } else if (name == ov::enable_profiling.name()) { - return decltype(ov::enable_profiling)::value_type{false}; - } else if (name == ov::streams::num.name()) { - return decltype(ov::streams::num)::value_type{2}; - } else if (name == "SUPPORTED_CONFIG_KEYS") { // TODO: Remove this key - std::vector configs; - for (const auto& property : rwProperties) { - configs.emplace_back(property); - } - return configs; - } else if (name == "SUPPORTED_METRICS") { // TODO: Remove this key - std::vector configs; - for (const auto& property : roProperties) { - configs.emplace_back(property); - } - return configs; - } else if (name == ov::internal::supported_properties) { - return decltype(ov::internal::supported_properties)::value_type({}); - } - OPENVINO_NOT_IMPLEMENTED; - }); - std::shared_ptr base_plugin = mock_plugin_cpu; - - reg_plugin(core, base_plugin, device_name, properties); +ov::mock_auto_plugin::MockISyncInferRequest::MockISyncInferRequest( + const std::shared_ptr& compiled_model) + : ov::ISyncInferRequest(compiled_model) { + OPENVINO_ASSERT(compiled_model); + // Allocate input/output tensors + for (const auto& input : get_inputs()) { + allocate_tensor(input, [this, input](ov::SoPtr& tensor) { + // Can add a check to avoid double work in case of shared tensors + allocate_tensor_impl(tensor, + input.get_element_type(), + input.get_partial_shape().is_dynamic() ? ov::Shape{0} : input.get_shape()); + }); + } + for (const auto& output : get_outputs()) { + allocate_tensor(output, [this, output](ov::SoPtr& tensor) { + // Can add a check to avoid double work in case of shared tensors + allocate_tensor_impl(tensor, + output.get_element_type(), + output.get_partial_shape().is_dynamic() ? 
ov::Shape{0} : output.get_shape()); + }); + } } \ No newline at end of file diff --git a/src/plugins/auto/tests/unit/compile_model_metric_test.cpp b/src/plugins/auto/tests/unit/compile_model_metric_test.cpp index 698c7deb03d990..772dca30497ae4 100644 --- a/src/plugins/auto/tests/unit/compile_model_metric_test.cpp +++ b/src/plugins/auto/tests/unit/compile_model_metric_test.cpp @@ -97,11 +97,11 @@ class ExecNetworkget_propertyOptimalNumInferReq : public tests::AutoTest, } }; -using modelPrioPerfHintTestParams = std::tuple; class ExecNetworkget_propertyOtherTest : public tests::AutoTest, @@ -113,11 +113,7 @@ class ExecNetworkget_propertyOtherTest : public tests::AutoTest, std::string actualDeviceName; std::string performanceMode; ov::Any modelPriority; - std::tie(isNewAPI, - actualSleep, - actualDeviceName, - performanceMode, - modelPriority) = obj.param; + std::tie(isNewAPI, actualSleep, actualDeviceName, performanceMode, modelPriority) = obj.param; std::ostringstream result; if (isNewAPI) { result << "_isNewAPI_" @@ -227,47 +223,60 @@ TEST_P(ExecNetworkget_propertyOptimalNumInferReq, OPTIMAL_NUMBER_OF_INFER_REQUES EXPECT_CALL(*plugin, select_device(_, _, _)).Times(1); if (cpuSleep) { - ON_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq(ov::test::utils::DEVICE_CPU)), _)) - .WillByDefault(InvokeWithoutArgs([this]() { - std::this_thread::sleep_for(std::chrono::milliseconds(100)); - return mockExeNetwork; - })); + ON_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(StrEq(ov::test::utils::DEVICE_CPU)), + _)) + .WillByDefault(InvokeWithoutArgs([this]() { + std::this_thread::sleep_for(std::chrono::milliseconds(100)); + return mockExeNetwork; + })); } else { - ON_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq(ov::test::utils::DEVICE_CPU)), _)) - .WillByDefault(Return(mockExeNetwork)); + ON_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(StrEq(ov::test::utils::DEVICE_CPU)), + _)) + .WillByDefault(Return(mockExeNetwork)); } if (actualSleep) { - ON_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq(actualDeviceName)), _)) - .WillByDefault(InvokeWithoutArgs([this]() { - std::this_thread::sleep_for(std::chrono::milliseconds(100)); - return mockExeNetworkActual; - })); + ON_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(StrEq(actualDeviceName)), + _)) + .WillByDefault(InvokeWithoutArgs([this]() { + std::this_thread::sleep_for(std::chrono::milliseconds(100)); + return mockExeNetworkActual; + })); } else { - ON_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq(actualDeviceName)), _)) - .WillByDefault(Return(mockExeNetworkActual)); + ON_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(StrEq(actualDeviceName)), + _)) + .WillByDefault(Return(mockExeNetworkActual)); } ON_CALL(*mockIExeNet.get(), get_property(StrEq(ov::optimal_number_of_infer_requests.name()))) - .WillByDefault(RETURN_MOCK_VALUE(cpuOptimalNum)); + .WillByDefault(RETURN_MOCK_VALUE(cpuOptimalNum)); ON_CALL(*mockIExeNetActual.get(), get_property(StrEq(ov::optimal_number_of_infer_requests.name()))) - .WillByDefault(RETURN_MOCK_VALUE(actualOptimalNum)); + .WillByDefault(RETURN_MOCK_VALUE(actualOptimalNum)); - EXPECT_CALL(*mockIExeNet.get(), get_property(StrEq(ov::optimal_number_of_infer_requests.name()))) - .Times(AtLeast(1)); + EXPECT_CALL(*mockIExeNet.get(), 
get_property(StrEq(ov::optimal_number_of_infer_requests.name()))).Times(AtLeast(1)); EXPECT_CALL(*mockIExeNetActual.get(), get_property(StrEq(ov::optimal_number_of_infer_requests.name()))) - .Times(AtLeast(1)); + .Times(AtLeast(1)); - EXPECT_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq(ov::test::utils::DEVICE_CPU)), _)).Times(1); + EXPECT_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(StrEq(ov::test::utils::DEVICE_CPU)), + _)) + .Times(1); - EXPECT_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq(actualDeviceName)), _)).Times(1); + EXPECT_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(StrEq(actualDeviceName)), + _)) + .Times(1); if (cpuCustomerNum == -1) { EXPECT_CALL(*mockIExeNet.get(), create_sync_infer_request()).Times(cpuOptimalNum); @@ -281,7 +290,7 @@ TEST_P(ExecNetworkget_propertyOptimalNumInferReq, OPTIMAL_NUMBER_OF_INFER_REQUES EXPECT_CALL(*mockIExeNetActual.get(), create_sync_infer_request()).Times(actualCustomerNum); } - auto AutoExecNetwork = plugin->compile_model(model, config); + auto AutoExecNetwork = plugin->compile_model(model, config); auto result = AutoExecNetwork->get_property(ov::optimal_number_of_infer_requests.name()).as(); EXPECT_EQ(result, expectOptimalNum); } @@ -292,57 +301,58 @@ TEST_P(ExecNetworkget_propertyOptimalNumInferReq, OPTIMAL_NUMBER_OF_INFER_REQUES // every element for ConfigParams // {is throughput mode, cpuOptimalNum, customer hope for cpu infer requset num, if cpu sleep when load, // actualOptimalNum, customer hope for actual infer requset num, if actual sleep when load, actual device Name -// expectOptimalNum of Auto ExecNetwork, gpu Number of requests, if actual supported OptimalNum, default Value of OptimalNum} +// expectOptimalNum of Auto ExecNetwork, gpu Number of requests, if actual supported OptimalNum, default Value of +// OptimalNum} // const std::vector testConfigs = { - ConfigParams {false, 3, -1, false, 2, -1, true, ov::test::utils::DEVICE_GPU, 1, 0, false, true}, - ConfigParams {true, 3, -1, false, 2, -1, true, ov::test::utils::DEVICE_GPU, 48, 0, false, true}, - ConfigParams {false, 3, -1, true, 2, -1, false, ov::test::utils::DEVICE_GPU, 2, 0, false, true}, - ConfigParams {true, 3, -1, true, 2, -1, false, ov::test::utils::DEVICE_GPU, 2, 0, false, true}, - ConfigParams {false, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_GPU, 1, 0, false, true}, - ConfigParams {true, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_GPU, 48, 0, false, true}, - ConfigParams {false, 3, 5, true, 2, 5, false, ov::test::utils::DEVICE_GPU, 2, 0, false, true}, - ConfigParams {true, 3, 5, true, 2, 5, false, ov::test::utils::DEVICE_GPU, 2, 0, false, true}, - ConfigParams {true, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_GPU, 48, 48, false, true}, - ConfigParams {true, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_GPU, 6, 6, false, true}, - ConfigParams {true, 3, 5, false, 0, 5, true, ov::test::utils::DEVICE_GPU, 6, 6, false, true}, - ConfigParams {true, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_GPU, 2, 0, true, true}, - ConfigParams {true, 3, 5, false, 0, 5, true, ov::test::utils::DEVICE_GPU, 48, 0, false, true}, - ConfigParams {true, 3, 5, false, 0, 5, true, ov::test::utils::DEVICE_GPU, 48, 0, true, true}, - ConfigParams {true, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_GPU, 6, 6, false, false}, - ConfigParams {true, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_GPU, 8, 10, false, false}, - ConfigParams {true, 3, 5, 
false, 0, 5, true, ov::test::utils::DEVICE_GPU, 6, 6, true, false}, - ConfigParams {true, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_GPU, 2, 6, true, false}, - ConfigParams {true, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_GPU, 8, 0, false, false}, - ConfigParams {true, 3, 5, false, 0, 5, true, ov::test::utils::DEVICE_GPU, 8, 0, true, false}, - ConfigParams {true, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_GPU, 2, 0, true, false}, - ConfigParams {true, 3, 5, false, 0, 5, true, ov::test::utils::DEVICE_GPU, 6, 6, true, true}, - ConfigParams {false, 3, 5, false, 0, 5, true, ov::test::utils::DEVICE_GPU, 1, 6, true, true}, - ConfigParams {true, 3, 5, false, 6, 5, true, ov::test::utils::DEVICE_GPU, 6, 6, true, true}, - ConfigParams {false, 3, 5, false, 6, 5, true, ov::test::utils::DEVICE_GPU, 6, 6, true, true}, - ConfigParams {false, 3, -1, false, 2, -1, true, ov::test::utils::DEVICE_KEEMBAY, 1, 0, false, true}, - ConfigParams {true, 3, -1, false, 2, -1, true, ov::test::utils::DEVICE_KEEMBAY, 8, 0, false, true}, - ConfigParams {true, 3, -1, false, 2, -1, true, ov::test::utils::DEVICE_KEEMBAY, 8, 0, false, false}, - ConfigParams {true, 3, -1, false, 0, -1, true, ov::test::utils::DEVICE_KEEMBAY, 8, 0, true, false}, - ConfigParams {true, 3, -1, false, 2, -1, true, ov::test::utils::DEVICE_KEEMBAY, 2, 0, true, false}, - ConfigParams {true, 3, -1, false, 2, -1, true, ov::test::utils::DEVICE_KEEMBAY, 2, 1, true, false}, - ConfigParams {true, 3, -1, false, 2, -1, true, ov::test::utils::DEVICE_KEEMBAY, 6, 6, false, false}, - ConfigParams {true, 3, -1, false, 2, -1, true, ov::test::utils::DEVICE_KEEMBAY, 8, 10, false, false}, - ConfigParams {true, 3, -1, false, 4, -1, true, ov::test::utils::DEVICE_KEEMBAY, 4, 6, true, true}, - ConfigParams {true, 3, -1, false, 4, -1, true, ov::test::utils::DEVICE_KEEMBAY, 2, 2, true, true}, - ConfigParams {true, 3, -1, false, 0, -1, true, ov::test::utils::DEVICE_KEEMBAY, 8, 10, true, true}, - ConfigParams {true, 3, -1, false, 0, -1, true, ov::test::utils::DEVICE_KEEMBAY, 6, 6, true, true}, - ConfigParams {false, 3, -1, false, 0, -1, true, ov::test::utils::DEVICE_KEEMBAY, 1, 0, true, true}, - ConfigParams {true, 3, -1, false, 0, -1, true, ov::test::utils::DEVICE_KEEMBAY, 8, 0, true, true}, - ConfigParams {false, 3, -1, false, 2, -1, true, ov::test::utils::DEVICE_KEEMBAY, 2, 0, true, true}, - ConfigParams {true, 3, -1, false, 2, -1, true, ov::test::utils::DEVICE_KEEMBAY, 2, 0, true, true}, - ConfigParams {false, 3, -1, true, 2, -1, false, ov::test::utils::DEVICE_KEEMBAY, 2, 0, false, true}, - ConfigParams {true, 3, -1, true, 2, -1, false, ov::test::utils::DEVICE_KEEMBAY, 2, 0, false, true}, - ConfigParams {false, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_KEEMBAY, 1, 0, false, true}, - ConfigParams {true, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_KEEMBAY, 8, 0, false, true}, - ConfigParams {false, 3, 5, true, 2, 5, false, ov::test::utils::DEVICE_KEEMBAY, 2, 0, false, true}, - ConfigParams {true, 3, 5, true, 2, 5, false, ov::test::utils::DEVICE_KEEMBAY, 2, 0, false, true}, - }; + ConfigParams{false, 3, -1, false, 2, -1, true, ov::test::utils::DEVICE_GPU, 1, 0, false, true}, + ConfigParams{true, 3, -1, false, 2, -1, true, ov::test::utils::DEVICE_GPU, 48, 0, false, true}, + ConfigParams{false, 3, -1, true, 2, -1, false, ov::test::utils::DEVICE_GPU, 2, 0, false, true}, + ConfigParams{true, 3, -1, true, 2, -1, false, ov::test::utils::DEVICE_GPU, 2, 0, false, true}, + ConfigParams{false, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_GPU, 1, 0, 
false, true}, + ConfigParams{true, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_GPU, 48, 0, false, true}, + ConfigParams{false, 3, 5, true, 2, 5, false, ov::test::utils::DEVICE_GPU, 2, 0, false, true}, + ConfigParams{true, 3, 5, true, 2, 5, false, ov::test::utils::DEVICE_GPU, 2, 0, false, true}, + ConfigParams{true, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_GPU, 48, 48, false, true}, + ConfigParams{true, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_GPU, 6, 6, false, true}, + ConfigParams{true, 3, 5, false, 0, 5, true, ov::test::utils::DEVICE_GPU, 6, 6, false, true}, + ConfigParams{true, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_GPU, 2, 0, true, true}, + ConfigParams{true, 3, 5, false, 0, 5, true, ov::test::utils::DEVICE_GPU, 48, 0, false, true}, + ConfigParams{true, 3, 5, false, 0, 5, true, ov::test::utils::DEVICE_GPU, 48, 0, true, true}, + ConfigParams{true, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_GPU, 6, 6, false, false}, + ConfigParams{true, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_GPU, 8, 10, false, false}, + ConfigParams{true, 3, 5, false, 0, 5, true, ov::test::utils::DEVICE_GPU, 6, 6, true, false}, + ConfigParams{true, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_GPU, 2, 6, true, false}, + ConfigParams{true, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_GPU, 8, 0, false, false}, + ConfigParams{true, 3, 5, false, 0, 5, true, ov::test::utils::DEVICE_GPU, 8, 0, true, false}, + ConfigParams{true, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_GPU, 2, 0, true, false}, + ConfigParams{true, 3, 5, false, 0, 5, true, ov::test::utils::DEVICE_GPU, 6, 6, true, true}, + ConfigParams{false, 3, 5, false, 0, 5, true, ov::test::utils::DEVICE_GPU, 1, 6, true, true}, + ConfigParams{true, 3, 5, false, 6, 5, true, ov::test::utils::DEVICE_GPU, 6, 6, true, true}, + ConfigParams{false, 3, 5, false, 6, 5, true, ov::test::utils::DEVICE_GPU, 6, 6, true, true}, + ConfigParams{false, 3, -1, false, 2, -1, true, ov::test::utils::DEVICE_KEEMBAY, 1, 0, false, true}, + ConfigParams{true, 3, -1, false, 2, -1, true, ov::test::utils::DEVICE_KEEMBAY, 8, 0, false, true}, + ConfigParams{true, 3, -1, false, 2, -1, true, ov::test::utils::DEVICE_KEEMBAY, 8, 0, false, false}, + ConfigParams{true, 3, -1, false, 0, -1, true, ov::test::utils::DEVICE_KEEMBAY, 8, 0, true, false}, + ConfigParams{true, 3, -1, false, 2, -1, true, ov::test::utils::DEVICE_KEEMBAY, 2, 0, true, false}, + ConfigParams{true, 3, -1, false, 2, -1, true, ov::test::utils::DEVICE_KEEMBAY, 2, 1, true, false}, + ConfigParams{true, 3, -1, false, 2, -1, true, ov::test::utils::DEVICE_KEEMBAY, 6, 6, false, false}, + ConfigParams{true, 3, -1, false, 2, -1, true, ov::test::utils::DEVICE_KEEMBAY, 8, 10, false, false}, + ConfigParams{true, 3, -1, false, 4, -1, true, ov::test::utils::DEVICE_KEEMBAY, 4, 6, true, true}, + ConfigParams{true, 3, -1, false, 4, -1, true, ov::test::utils::DEVICE_KEEMBAY, 2, 2, true, true}, + ConfigParams{true, 3, -1, false, 0, -1, true, ov::test::utils::DEVICE_KEEMBAY, 8, 10, true, true}, + ConfigParams{true, 3, -1, false, 0, -1, true, ov::test::utils::DEVICE_KEEMBAY, 6, 6, true, true}, + ConfigParams{false, 3, -1, false, 0, -1, true, ov::test::utils::DEVICE_KEEMBAY, 1, 0, true, true}, + ConfigParams{true, 3, -1, false, 0, -1, true, ov::test::utils::DEVICE_KEEMBAY, 8, 0, true, true}, + ConfigParams{false, 3, -1, false, 2, -1, true, ov::test::utils::DEVICE_KEEMBAY, 2, 0, true, true}, + ConfigParams{true, 3, -1, false, 2, -1, true, ov::test::utils::DEVICE_KEEMBAY, 2, 0, true, true}, + 
ConfigParams{false, 3, -1, true, 2, -1, false, ov::test::utils::DEVICE_KEEMBAY, 2, 0, false, true}, + ConfigParams{true, 3, -1, true, 2, -1, false, ov::test::utils::DEVICE_KEEMBAY, 2, 0, false, true}, + ConfigParams{false, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_KEEMBAY, 1, 0, false, true}, + ConfigParams{true, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_KEEMBAY, 8, 0, false, true}, + ConfigParams{false, 3, 5, true, 2, 5, false, ov::test::utils::DEVICE_KEEMBAY, 2, 0, false, true}, + ConfigParams{true, 3, 5, true, 2, 5, false, ov::test::utils::DEVICE_KEEMBAY, 2, 0, false, true}, +}; INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, ExecNetworkget_propertyOptimalNumInferReq, @@ -357,11 +367,7 @@ class ExecNetworkGetMetricOtherTest : public tests::AutoTest, std::string actualDeviceName; std::string performanceMode; ov::Any modelPriority; - std::tie(isNewAPI, - actualSleep, - actualDeviceName, - performanceMode, - modelPriority) = obj.param; + std::tie(isNewAPI, actualSleep, actualDeviceName, performanceMode, modelPriority) = obj.param; std::ostringstream result; if (isNewAPI) { result << "_isNewAPI_" @@ -392,11 +398,7 @@ TEST_P(ExecNetworkGetMetricOtherTest, modelPriority_perfHint_exclusiveAsyncReq_t std::string actualDeviceName; std::string performanceHint; ov::Any modelPriority; - std::tie(isNewAPI, - actualSleep, - actualDeviceName, - performanceHint, - modelPriority) = this->GetParam(); + std::tie(isNewAPI, actualSleep, actualDeviceName, performanceHint, modelPriority) = this->GetParam(); config.insert(ov::device::priorities(ov::test::utils::DEVICE_CPU + std::string(",") + actualDeviceName)); config.insert(ov::hint::performance_mode(performanceHint)); config.insert({ov::hint::model_priority.name(), modelPriority.as()}); @@ -418,14 +420,17 @@ TEST_P(ExecNetworkGetMetricOtherTest, modelPriority_perfHint_exclusiveAsyncReq_t EXPECT_CALL(*plugin, select_device(_, _, _)).Times(1); ON_CALL(*core, get_property(_, StrEq(ov::compilation_num_threads.name()), _)).WillByDefault(Return(8)); - ON_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq(ov::test::utils::DEVICE_CPU)), _)) - .WillByDefault(Return(mockExeNetwork)); + ON_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(StrEq(ov::test::utils::DEVICE_CPU)), + _)) + .WillByDefault(Return(mockExeNetwork)); if (actualSleep) { ON_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq(actualDeviceName)), _)) + ::testing::Matcher(StrEq(actualDeviceName)), + _)) .WillByDefault(InvokeWithoutArgs([this]() { std::this_thread::sleep_for(std::chrono::milliseconds(5000)); return mockExeNetworkActual; @@ -433,14 +438,15 @@ TEST_P(ExecNetworkGetMetricOtherTest, modelPriority_perfHint_exclusiveAsyncReq_t } else { ON_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq(actualDeviceName)), _)) + ::testing::Matcher(StrEq(actualDeviceName)), + _)) .WillByDefault(Return(mockExeNetworkActual)); } ON_CALL(*mockIExeNet.get(), get_property(StrEq(ov::optimal_number_of_infer_requests.name()))) - .WillByDefault(RETURN_MOCK_VALUE(cpuOptimalNum)); + .WillByDefault(RETURN_MOCK_VALUE(cpuOptimalNum)); ON_CALL(*mockIExeNetActual.get(), get_property(StrEq(ov::optimal_number_of_infer_requests.name()))) - .WillByDefault(RETURN_MOCK_VALUE(actualOptimalNum)); + .WillByDefault(RETURN_MOCK_VALUE(actualOptimalNum)); auto AutoExecNetwork = plugin->compile_model(model, config); auto result = AutoExecNetwork->get_property(ov::hint::performance_mode.name()).as(); @@ -455,61 +461,25 
@@ const std::vector modelPrioPerfHintConfig = { ov::test::utils::DEVICE_GPU, "THROUGHPUT", CONFIG_VALUE(MODEL_PRIORITY_LOW)}, - modelPrioPerfHintTestParams{false, - true, - ov::test::utils::DEVICE_GPU, - "LATENCY", - CONFIG_VALUE(MODEL_PRIORITY_LOW)}, + modelPrioPerfHintTestParams{false, true, ov::test::utils::DEVICE_GPU, "LATENCY", CONFIG_VALUE(MODEL_PRIORITY_LOW)}, modelPrioPerfHintTestParams{false, true, ov::test::utils::DEVICE_GPU, "THROUGHPUT", CONFIG_VALUE(MODEL_PRIORITY_MED)}, - modelPrioPerfHintTestParams{false, - true, - ov::test::utils::DEVICE_GPU, - "LATENCY", - CONFIG_VALUE(MODEL_PRIORITY_MED)}, + modelPrioPerfHintTestParams{false, true, ov::test::utils::DEVICE_GPU, "LATENCY", CONFIG_VALUE(MODEL_PRIORITY_MED)}, modelPrioPerfHintTestParams{false, true, ov::test::utils::DEVICE_GPU, CONFIG_VALUE(THROUGHPUT), CONFIG_VALUE(MODEL_PRIORITY_HIGH)}, - modelPrioPerfHintTestParams{false, - true, - ov::test::utils::DEVICE_GPU, - "LATENCY", - CONFIG_VALUE(MODEL_PRIORITY_HIGH)}, - modelPrioPerfHintTestParams{true, - true, - ov::test::utils::DEVICE_GPU, - "THROUGHPUT", - "LOW"}, - modelPrioPerfHintTestParams{true, - true, - ov::test::utils::DEVICE_GPU, - "LATENCY", - "LOW"}, - modelPrioPerfHintTestParams{true, - true, - ov::test::utils::DEVICE_GPU, - "THROUGHPUT", - "MEDIUM"}, - modelPrioPerfHintTestParams{true, - true, - ov::test::utils::DEVICE_GPU, - "LATENCY", - "MEDIUM"}, - modelPrioPerfHintTestParams{true, - true, - ov::test::utils::DEVICE_GPU, - "THROUGHPUT", - "HIGH"}, - modelPrioPerfHintTestParams{true, - true, - ov::test::utils::DEVICE_GPU, - "LATENCY", - "HIGH"}}; + modelPrioPerfHintTestParams{false, true, ov::test::utils::DEVICE_GPU, "LATENCY", CONFIG_VALUE(MODEL_PRIORITY_HIGH)}, + modelPrioPerfHintTestParams{true, true, ov::test::utils::DEVICE_GPU, "THROUGHPUT", "LOW"}, + modelPrioPerfHintTestParams{true, true, ov::test::utils::DEVICE_GPU, "LATENCY", "LOW"}, + modelPrioPerfHintTestParams{true, true, ov::test::utils::DEVICE_GPU, "THROUGHPUT", "MEDIUM"}, + modelPrioPerfHintTestParams{true, true, ov::test::utils::DEVICE_GPU, "LATENCY", "MEDIUM"}, + modelPrioPerfHintTestParams{true, true, ov::test::utils::DEVICE_GPU, "THROUGHPUT", "HIGH"}, + modelPrioPerfHintTestParams{true, true, ov::test::utils::DEVICE_GPU, "LATENCY", "HIGH"}}; INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, ExecNetworkGetMetricOtherTest, diff --git a/src/plugins/auto/tests/unit/compile_model_property_test.cpp b/src/plugins/auto/tests/unit/compile_model_property_test.cpp index 7dacdae919cc04..278c3dbbfe3363 100644 --- a/src/plugins/auto/tests/unit/compile_model_property_test.cpp +++ b/src/plugins/auto/tests/unit/compile_model_property_test.cpp @@ -24,7 +24,7 @@ using namespace ov::mock_auto_plugin; using ConfigParams = std::tuple, // hardware device name to expect loading network on - ov::AnyMap>; // secondary property setting to device + ov::AnyMap>; // secondary property setting to device static std::vector testConfigs; @@ -52,51 +52,72 @@ class LoadNetworkWithSecondaryConfigsMockTest : public tests::AutoTest, public : static std::vector CreateConfigs() { testConfigs.clear(); testConfigs.push_back( - ConfigParams{"AUTO", {"CPU"}, {{"DEVICE_PROPERTIES", "{CPU:{NUM_STREAMS:3}}"}, {"MULTI_DEVICE_PRIORITIES", "CPU,GPU"}}}); + ConfigParams{"AUTO", + {"CPU"}, + {{"DEVICE_PROPERTIES", "{CPU:{NUM_STREAMS:3}}"}, {"MULTI_DEVICE_PRIORITIES", "CPU,GPU"}}}); testConfigs.push_back(ConfigParams{"AUTO", {"CPU"}, {{"NUM_STREAMS", "12"}, {"DEVICE_PROPERTIES", "{CPU:{NUM_STREAMS:3}}"}, {"MULTI_DEVICE_PRIORITIES", "CPU,GPU"}}}); 
testConfigs.push_back( - ConfigParams{"AUTO", {"CPU", "GPU"}, {{"DEVICE_PROPERTIES", "{GPU:{NUM_STREAMS:3}}"}, {"MULTI_DEVICE_PRIORITIES", "GPU,CPU"}}}); + ConfigParams{"AUTO", + {"CPU", "GPU"}, + {{"DEVICE_PROPERTIES", "{GPU:{NUM_STREAMS:3}}"}, {"MULTI_DEVICE_PRIORITIES", "GPU,CPU"}}}); testConfigs.push_back(ConfigParams{"AUTO", {"CPU", "GPU"}, {{"NUM_STREAMS", "15"}, {"DEVICE_PROPERTIES", "{GPU:{NUM_STREAMS:3}}"}, {"MULTI_DEVICE_PRIORITIES", "GPU,CPU"}}}); testConfigs.push_back( - ConfigParams{"AUTO:CPU", {"CPU"}, {{"DEVICE_PROPERTIES", "{CPU:{NUM_STREAMS:3}}"}, {"MULTI_DEVICE_PRIORITIES", "CPU"}}}); + ConfigParams{"AUTO:CPU", + {"CPU"}, + {{"DEVICE_PROPERTIES", "{CPU:{NUM_STREAMS:3}}"}, {"MULTI_DEVICE_PRIORITIES", "CPU"}}}); testConfigs.push_back( - ConfigParams{"AUTO:CPU,GPU", {"CPU"}, {{"DEVICE_PROPERTIES", "{CPU:{NUM_STREAMS:3}}"}, {"MULTI_DEVICE_PRIORITIES", "CPU,GPU"}}}); + ConfigParams{"AUTO:CPU,GPU", + {"CPU"}, + {{"DEVICE_PROPERTIES", "{CPU:{NUM_STREAMS:3}}"}, {"MULTI_DEVICE_PRIORITIES", "CPU,GPU"}}}); testConfigs.push_back( - ConfigParams{"AUTO:GPU", {"GPU"}, {{"DEVICE_PROPERTIES", "{GPU:{NUM_STREAMS:5}}"}, {"MULTI_DEVICE_PRIORITIES", "GPU"}}}); - testConfigs.push_back(ConfigParams{"AUTO:GPU,CPU", - {"CPU", "GPU"}, - {{"DEVICE_PROPERTIES", "{GPU:{NUM_STREAMS:5}}"}, {"MULTI_DEVICE_PRIORITIES", "GPU,CPU"}}}); + ConfigParams{"AUTO:GPU", + {"GPU"}, + {{"DEVICE_PROPERTIES", "{GPU:{NUM_STREAMS:5}}"}, {"MULTI_DEVICE_PRIORITIES", "GPU"}}}); + testConfigs.push_back( + ConfigParams{"AUTO:GPU,CPU", + {"CPU", "GPU"}, + {{"DEVICE_PROPERTIES", "{GPU:{NUM_STREAMS:5}}"}, {"MULTI_DEVICE_PRIORITIES", "GPU,CPU"}}}); testConfigs.push_back( - ConfigParams{"MULTI:CPU", {"CPU"}, {{"DEVICE_PROPERTIES", "{CPU:{NUM_STREAMS:3}}"}, {"MULTI_DEVICE_PRIORITIES", "CPU"}}}); - testConfigs.push_back(ConfigParams{"MULTI:CPU,GPU", - {"CPU", "GPU"}, - {{"DEVICE_PROPERTIES", "{CPU:{NUM_STREAMS:3}}"}, {"MULTI_DEVICE_PRIORITIES", "CPU,GPU"}}}); + ConfigParams{"MULTI:CPU", + {"CPU"}, + {{"DEVICE_PROPERTIES", "{CPU:{NUM_STREAMS:3}}"}, {"MULTI_DEVICE_PRIORITIES", "CPU"}}}); testConfigs.push_back( - ConfigParams{"MULTI:GPU", {"GPU"}, {{"DEVICE_PROPERTIES", "{GPU:{NUM_STREAMS:5}}"}, {"MULTI_DEVICE_PRIORITIES", "GPU"}}}); - testConfigs.push_back(ConfigParams{"MULTI:GPU,CPU", - {"CPU", "GPU"}, - {{"DEVICE_PROPERTIES", "{GPU:{NUM_STREAMS:5}}"}, {"MULTI_DEVICE_PRIORITIES", "GPU,CPU"}}}); + ConfigParams{"MULTI:CPU,GPU", + {"CPU", "GPU"}, + {{"DEVICE_PROPERTIES", "{CPU:{NUM_STREAMS:3}}"}, {"MULTI_DEVICE_PRIORITIES", "CPU,GPU"}}}); + testConfigs.push_back( + ConfigParams{"MULTI:GPU", + {"GPU"}, + {{"DEVICE_PROPERTIES", "{GPU:{NUM_STREAMS:5}}"}, {"MULTI_DEVICE_PRIORITIES", "GPU"}}}); + testConfigs.push_back( + ConfigParams{"MULTI:GPU,CPU", + {"CPU", "GPU"}, + {{"DEVICE_PROPERTIES", "{GPU:{NUM_STREAMS:5}}"}, {"MULTI_DEVICE_PRIORITIES", "GPU,CPU"}}}); return testConfigs; } void SetUp() override { std::vector availableDevs = {"CPU", "GPU"}; ON_CALL(*core, get_available_devices()).WillByDefault(Return(availableDevs)); - ON_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq(ov::test::utils::DEVICE_CPU)), _)) + ON_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(StrEq(ov::test::utils::DEVICE_CPU)), + _)) .WillByDefault(Return(mockExeNetwork)); - ON_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrNe(ov::test::utils::DEVICE_CPU)), _)) - .WillByDefault(Return(mockExeNetworkActual)); + ON_CALL(*core, + compile_model(::testing::Matcher&>(_), + 
::testing::Matcher(StrNe(ov::test::utils::DEVICE_CPU)), + _)) + .WillByDefault(Return(mockExeNetworkActual)); } }; @@ -123,11 +144,10 @@ TEST_P(LoadNetworkWithSecondaryConfigsMockTest, LoadNetworkWithSecondaryConfigsT ov::util::Read{}(strConfigs, deviceConfigs); } } - EXPECT_CALL( - *core, - compile_model(::testing::Matcher&>(_), - ::testing::Matcher(deviceName), - ::testing::Matcher(MapContains(deviceConfigs)))) + EXPECT_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(deviceName), + ::testing::Matcher(MapContains(deviceConfigs)))) .Times(1); } @@ -144,32 +164,40 @@ TEST_P(AutoLoadExeNetworkFailedTest, checkLoadFailMassage) { if (device.find("MULTI") != std::string::npos) plugin->set_device_name("MULTI"); - ON_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq(ov::test::utils::DEVICE_GPU)), - ::testing::Matcher(_))) - .WillByDefault(Throw(ov::Exception{"Mock GPU Load Failed"})); - ON_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq(ov::test::utils::DEVICE_CPU)), - ::testing::Matcher(_))) - .WillByDefault(Throw(ov::Exception{"Mock CPU Load Failed"})); + ON_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(StrEq(ov::test::utils::DEVICE_GPU)), + ::testing::Matcher(_))) + .WillByDefault(Throw(ov::Exception{"Mock GPU Load Failed"})); + ON_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(StrEq(ov::test::utils::DEVICE_CPU)), + ::testing::Matcher(_))) + .WillByDefault(Throw(ov::Exception{"Mock CPU Load Failed"})); if (device == "AUTO") { - EXPECT_THROW_WITH_MESSAGE(plugin->compile_model(model, config), ov::Exception, - "[AUTO] compile model failed, GPU:Mock GPU Load Failed; CPU:Mock CPU Load Failed"); + EXPECT_THROW_WITH_MESSAGE(plugin->compile_model(model, config), + ov::Exception, + "[AUTO] compile model failed, GPU:Mock GPU Load Failed; CPU:Mock CPU Load Failed"); } else if (device == "AUTO:CPU") { - EXPECT_THROW_WITH_MESSAGE(plugin->compile_model(model, config), ov::Exception, - "[AUTO] compile model failed, CPU:Mock CPU Load Failed"); + EXPECT_THROW_WITH_MESSAGE(plugin->compile_model(model, config), + ov::Exception, + "[AUTO] compile model failed, CPU:Mock CPU Load Failed"); } else if (device == "AUTO:GPU") { - EXPECT_THROW_WITH_MESSAGE(plugin->compile_model(model, config), ov::Exception, - "[AUTO] compile model failed, GPU:Mock GPU Load Failed"); + EXPECT_THROW_WITH_MESSAGE(plugin->compile_model(model, config), + ov::Exception, + "[AUTO] compile model failed, GPU:Mock GPU Load Failed"); } else if (device == "MULTI") { - EXPECT_THROW_WITH_MESSAGE(plugin->compile_model(model, config), ov::Exception, - "[MULTI] compile model failed, GPU:Mock GPU Load Failed; CPU:Mock CPU Load Failed"); + EXPECT_THROW_WITH_MESSAGE(plugin->compile_model(model, config), + ov::Exception, + "[MULTI] compile model failed, GPU:Mock GPU Load Failed; CPU:Mock CPU Load Failed"); } else if (device == "MULTI:CPU") { - EXPECT_THROW_WITH_MESSAGE(plugin->compile_model(model, config), ov::Exception, - "[MULTI] compile model failed, CPU:Mock CPU Load Failed"); + EXPECT_THROW_WITH_MESSAGE(plugin->compile_model(model, config), + ov::Exception, + "[MULTI] compile model failed, CPU:Mock CPU Load Failed"); } else if (device == "MULTI:GPU") { - EXPECT_THROW_WITH_MESSAGE(plugin->compile_model(model, config), ov::Exception, - "[MULTI] compile model failed, GPU:Mock GPU Load Failed"); + EXPECT_THROW_WITH_MESSAGE(plugin->compile_model(model, config), + ov::Exception, + "[MULTI] compile model failed, GPU:Mock 
GPU Load Failed"); } } @@ -184,9 +212,9 @@ const std::vector testConfigsAutoLoadFailed = { ConfigParams{"AUTO:GPU", {"GPU"}, {{"MULTI_DEVICE_PRIORITIES", "GPU"}}}, ConfigParams{"MULTI", {"CPU", "GPU"}, {{"MULTI_DEVICE_PRIORITIES", "GPU,CPU"}}}, ConfigParams{"MULTI:CPU", {"CPU"}, {{"MULTI_DEVICE_PRIORITIES", "CPU"}}}, - ConfigParams{"MULTI:GPU", {"GPU"}, {{"MULTI_DEVICE_PRIORITIES", "GPU"}}} - }; + ConfigParams{"MULTI:GPU", {"GPU"}, {{"MULTI_DEVICE_PRIORITIES", "GPU"}}}}; -INSTANTIATE_TEST_SUITE_P(smoke_AutoLoadExeNetworkFailedTest, AutoLoadExeNetworkFailedTest, - ::testing::ValuesIn(testConfigsAutoLoadFailed), - AutoLoadExeNetworkFailedTest::getTestCaseName); \ No newline at end of file +INSTANTIATE_TEST_SUITE_P(smoke_AutoLoadExeNetworkFailedTest, + AutoLoadExeNetworkFailedTest, + ::testing::ValuesIn(testConfigsAutoLoadFailed), + AutoLoadExeNetworkFailedTest::getTestCaseName); \ No newline at end of file diff --git a/src/plugins/auto/tests/unit/ctput_test.cpp b/src/plugins/auto/tests/unit/ctput_test.cpp index 8839acd23e4dcd..4b9cfd987b0133 100644 --- a/src/plugins/auto/tests/unit/ctput_test.cpp +++ b/src/plugins/auto/tests/unit/ctput_test.cpp @@ -8,8 +8,7 @@ using namespace ov::mock_auto_plugin; using Config = std::map; using ConfigParams = std::tuple>; -class LoadNetworkWithCTPUTMockTest : public tests::AutoTest, - public ::testing::TestWithParam { +class LoadNetworkWithCTPUTMockTest : public tests::AutoTest, public ::testing::TestWithParam { public: static std::string getTestCaseName(testing::TestParamInfo obj) { std::vector targetDevices; @@ -29,12 +28,16 @@ class LoadNetworkWithCTPUTMockTest : public tests::AutoTest, void SetUp() override { std::vector availableDevs = {"CPU", "GPU"}; ON_CALL(*core, get_available_devices()).WillByDefault(Return(availableDevs)); - ON_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq(ov::test::utils::DEVICE_CPU)), _)) + ON_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(StrEq(ov::test::utils::DEVICE_CPU)), + _)) .WillByDefault(Return(mockExeNetwork)); - ON_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq(ov::test::utils::DEVICE_GPU)), _)) - .WillByDefault(Return(mockExeNetworkActual)); + ON_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(StrEq(ov::test::utils::DEVICE_GPU)), + _)) + .WillByDefault(Return(mockExeNetworkActual)); } }; @@ -51,17 +54,15 @@ TEST_P(LoadNetworkWithCTPUTMockTest, CTPUTSingleDevLogicTest) { // Call single device logic and performance hint is THROUGHPUT EXPECT_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(targetDevice), - ::testing::Matcher( - ComparePerfHint("THROUGHPUT")))) + ::testing::Matcher(targetDevice), + ::testing::Matcher(ComparePerfHint("THROUGHPUT")))) .Times(1); // if target device only has GPU, no CPU helper to be called if (targetDevice.find("GPU") != std::string::npos) { EXPECT_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(ov::test::utils::DEVICE_CPU), - ::testing::Matcher( - ComparePerfHint("LATENCY")))) + ::testing::Matcher(ov::test::utils::DEVICE_CPU), + ::testing::Matcher(ComparePerfHint("LATENCY")))) .Times(0); } } else { @@ -71,18 +72,16 @@ TEST_P(LoadNetworkWithCTPUTMockTest, CTPUTSingleDevLogicTest) { targetDev += ((deviceName == targetDevices.back()) ? 
"" : ","); EXPECT_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(deviceName), - ::testing::Matcher( - ComparePerfHint("THROUGHPUT")))) + ::testing::Matcher(deviceName), + ::testing::Matcher(ComparePerfHint("THROUGHPUT")))) .Times(1); } config.insert(ov::device::priorities(targetDev)); // no CPU helper to be called EXPECT_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(ov::test::utils::DEVICE_CPU), - ::testing::Matcher( - ComparePerfHint("LATENCY")))) + ::testing::Matcher(ov::test::utils::DEVICE_CPU), + ::testing::Matcher(ComparePerfHint("LATENCY")))) .Times(0); } @@ -150,12 +149,16 @@ class AutoCTPUTCallMulti : public tests::AutoTest, public ::testing::TestWithPar void SetUp() override { std::vector availableDevs = {"CPU", "GPU"}; ON_CALL(*core, get_available_devices()).WillByDefault(Return(availableDevs)); - ON_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq(ov::test::utils::DEVICE_CPU)), _)) + ON_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(StrEq(ov::test::utils::DEVICE_CPU)), + _)) .WillByDefault(Return(mockExeNetwork)); - ON_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq(ov::test::utils::DEVICE_GPU)), _)) - .WillByDefault(Return(mockExeNetworkActual)); + ON_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(StrEq(ov::test::utils::DEVICE_GPU)), + _)) + .WillByDefault(Return(mockExeNetworkActual)); } }; @@ -176,21 +179,21 @@ TEST_P(AutoCTPUTCallMulti, CTPUTDeviceLoadFailedNoExceptionThrowTest) { config.insert(ov::device::priorities(targetDev)); ON_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq(loadFailedDevice)), - ::testing::Matcher(_))) + ::testing::Matcher(StrEq(loadFailedDevice)), + ::testing::Matcher(_))) .WillByDefault(Throw(InferenceEngine::GeneralError{""})); if (loadFailedDevice != ov::test::utils::DEVICE_CPU) { EXPECT_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(ov::test::utils::DEVICE_CPU), - ::testing::Matcher(_))) + ::testing::Matcher(ov::test::utils::DEVICE_CPU), + ::testing::Matcher(_))) .Times(1); } if (loadFailedDevice != ov::test::utils::DEVICE_GPU) { EXPECT_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(ov::test::utils::DEVICE_GPU), - ::testing::Matcher(_))) + ::testing::Matcher(ov::test::utils::DEVICE_GPU), + ::testing::Matcher(_))) .Times(1); } ASSERT_NO_THROW(exeNetwork = plugin->compile_model(model, config)); diff --git a/src/plugins/auto/tests/unit/default_perf_hint_test.cpp b/src/plugins/auto/tests/unit/default_perf_hint_test.cpp index 731c4e5ab34e88..332d55a10ee880 100644 --- a/src/plugins/auto/tests/unit/default_perf_hint_test.cpp +++ b/src/plugins/auto/tests/unit/default_perf_hint_test.cpp @@ -7,12 +7,11 @@ using namespace ov::mock_auto_plugin; using ConfigParams = std::tuple, // hardware device name to expect loading network on - ov::AnyMap>; // secondary property setting to device + ov::AnyMap>; // secondary property setting to device static std::vector testConfigs; -class AutoDefaultPerfHintTest : public tests::AutoTest, - public ::testing::TestWithParam { +class AutoDefaultPerfHintTest : public tests::AutoTest, public ::testing::TestWithParam { public: static std::string getTestCaseName(testing::TestParamInfo obj) { std::string deviceName; @@ -37,35 +36,36 @@ class AutoDefaultPerfHintTest : public tests::AutoTest, testConfigs.clear(); testConfigs.push_back( ConfigParams{"AUTO", {"CPU"}, {{"MULTI_DEVICE_PRIORITIES", 
"CPU"}}}); // CPU: get default_hint:lantency + testConfigs.push_back(ConfigParams{"AUTO", + {"CPU"}, + {{"DEVICE_PROPERTIES", "{CPU:{NUM_STREAMS:3}}"}, + {"MULTI_DEVICE_PRIORITIES", "CPU"}}}); // CPU: no perf_hint testConfigs.push_back( ConfigParams{"AUTO", - {"CPU"}, - {{"DEVICE_PROPERTIES", "{CPU:{NUM_STREAMS:3}}"}, {"MULTI_DEVICE_PRIORITIES", "CPU"}}}); // CPU: no perf_hint + {"CPU", "GPU"}, + {{"MULTI_DEVICE_PRIORITIES", + "GPU,CPU"}}}); // CPU: as helper, get default_hint:lantency GPU:get default_hint:lantency testConfigs.push_back( ConfigParams{"AUTO", {"CPU", "GPU"}, - {{"MULTI_DEVICE_PRIORITIES", + {{"DEVICE_PROPERTIES", "{CPU:{NUM_STREAMS:3}}"}, + {"MULTI_DEVICE_PRIORITIES", "GPU,CPU"}}}); // CPU: as helper, get default_hint:lantency GPU:get default_hint:lantency - testConfigs.push_back(ConfigParams{ - "AUTO", - {"CPU", "GPU"}, - {{"DEVICE_PROPERTIES", "{CPU:{NUM_STREAMS:3}}"}, - {"MULTI_DEVICE_PRIORITIES", "GPU,CPU"}}}); // CPU: as helper, get default_hint:lantency GPU:get default_hint:lantency testConfigs.push_back(ConfigParams{ "AUTO", {"CPU", "GPU"}, {{"DEVICE_PROPERTIES", "{GPU:{NUM_STREAMS:3}}"}, {"MULTI_DEVICE_PRIORITIES", "GPU,CPU"}}}); // CPU: as helper, get default_hint:lantency GPU:no perf_hint - testConfigs.push_back( - ConfigParams{"AUTO", - {"CPU"}, - {{"DEVICE_PROPERTIES", "{CPU:{NUM_STREAMS:5}}"}, {"MULTI_DEVICE_PRIORITIES", "CPU,GPU"}}}); // CPU: no perf_hint + testConfigs.push_back(ConfigParams{"AUTO", + {"CPU"}, + {{"DEVICE_PROPERTIES", "{CPU:{NUM_STREAMS:5}}"}, + {"MULTI_DEVICE_PRIORITIES", "CPU,GPU"}}}); // CPU: no perf_hint testConfigs.push_back( ConfigParams{"AUTO", {"GPU"}, {{"MULTI_DEVICE_PRIORITIES", "GPU"}}}); // GPU: get default_hint:lantency - testConfigs.push_back( - ConfigParams{"AUTO", - {"GPU"}, - {{"DEVICE_PROPERTIES", "{GPU:{NUM_STREAMS:3}}"}, {"MULTI_DEVICE_PRIORITIES", "GPU"}}}); // GPU: no perf_hint + testConfigs.push_back(ConfigParams{"AUTO", + {"GPU"}, + {{"DEVICE_PROPERTIES", "{GPU:{NUM_STREAMS:3}}"}, + {"MULTI_DEVICE_PRIORITIES", "GPU"}}}); // GPU: no perf_hint testConfigs.push_back(ConfigParams{ "MULTI:CPU,GPU", @@ -91,30 +91,30 @@ class AutoDefaultPerfHintTest : public tests::AutoTest, static std::vector CreatePerfHintAndDefaultPerfHintTestConfigs() { testConfigs.clear(); - testConfigs.push_back(ConfigParams{ - "AUTO", - {"CPU"}, - {{"DEVICE_PROPERTIES", "{CPU:{PERFORMANCE_HINT:THROUGHPUT}}"}, {"MULTI_DEVICE_PRIORITIES", "CPU"}}}); // CPU: get perf_hint:tput + testConfigs.push_back(ConfigParams{"AUTO", + {"CPU"}, + {{"DEVICE_PROPERTIES", "{CPU:{PERFORMANCE_HINT:THROUGHPUT}}"}, + {"MULTI_DEVICE_PRIORITIES", "CPU"}}}); // CPU: get perf_hint:tput testConfigs.push_back( ConfigParams{"AUTO", {"CPU", "GPU"}, {{"DEVICE_PROPERTIES", "{CPU:{PERFORMANCE_HINT:THROUGHPUT}}"}, {"MULTI_DEVICE_PRIORITIES", "GPU,CPU"}}}); // CPU: as helper, get perf_hint:lantency GPU:get default_hint:lantency - testConfigs.push_back( - ConfigParams{"AUTO", - {"CPU", "GPU"}, - {{"DEVICE_PROPERTIES", "{CPU:{PERFORMANCE_HINT:THROUGHPUT},GPU:{PERFORMANCE_HINT:THROUGHPUT}}"}, - {"MULTI_DEVICE_PRIORITIES", - "GPU,CPU"}}}); // CPU: as helper, get perf_hint:lantency GPU:get perf_hint:tput + testConfigs.push_back(ConfigParams{ + "AUTO", + {"CPU", "GPU"}, + {{"DEVICE_PROPERTIES", "{CPU:{PERFORMANCE_HINT:THROUGHPUT},GPU:{PERFORMANCE_HINT:THROUGHPUT}}"}, + {"MULTI_DEVICE_PRIORITIES", + "GPU,CPU"}}}); // CPU: as helper, get perf_hint:lantency GPU:get perf_hint:tput testConfigs.push_back(ConfigParams{"AUTO", {"CPU"}, {{"DEVICE_PROPERTIES", "{CPU:{PERFORMANCE_HINT:THROUGHPUT}}"}, 
{"MULTI_DEVICE_PRIORITIES", "CPU,GPU"}}}); // CPU: get perf_hint:tput - testConfigs.push_back(ConfigParams{ - "AUTO", - {"GPU"}, - {{"DEVICE_PROPERTIES", "{GPU:{PERFORMANCE_HINT:THROUGHPUT}}"}, {"MULTI_DEVICE_PRIORITIES", "GPU"}}}); // GPU: get perf_hint:tput + testConfigs.push_back(ConfigParams{"AUTO", + {"GPU"}, + {{"DEVICE_PROPERTIES", "{GPU:{PERFORMANCE_HINT:THROUGHPUT}}"}, + {"MULTI_DEVICE_PRIORITIES", "GPU"}}}); // GPU: get perf_hint:tput testConfigs.push_back(ConfigParams{ "MULTI:CPU,GPU", @@ -136,30 +136,29 @@ class AutoDefaultPerfHintTest : public tests::AutoTest, static std::vector CreateSecPropAndDefaultPerfHintTestConfigs() { testConfigs.clear(); - testConfigs.push_back(ConfigParams{ - "AUTO", - {"CPU"}, - {{"DEVICE_PROPERTIES", "{CPU:{ALLOW_AUTO_BATCHING:TRUE}}"}, {"MULTI_DEVICE_PRIORITIES", "CPU"}}}); // CPU: no perf_hint + testConfigs.push_back(ConfigParams{"AUTO", + {"CPU"}, + {{"DEVICE_PROPERTIES", "{CPU:{ALLOW_AUTO_BATCHING:TRUE}}"}, + {"MULTI_DEVICE_PRIORITIES", "CPU"}}}); // CPU: no perf_hint testConfigs.push_back( ConfigParams{"AUTO", {"CPU", "GPU"}, {{"DEVICE_PROPERTIES", "{CPU:{ALLOW_AUTO_BATCHING:TRUE}}"}, {"MULTI_DEVICE_PRIORITIES", "GPU,CPU"}}}); // CPU: as helper, get perf_hint:lantency GPU:get default_hint:lantency - testConfigs.push_back( - ConfigParams{"AUTO", - {"CPU", "GPU"}, - {{"DEVICE_PROPERTIES", "{CPU:{ALLOW_AUTO_BATCHING:TRUE},GPU:{ALLOW_AUTO_BATCHING:TRUE}}"}, - {"MULTI_DEVICE_PRIORITIES", - "GPU,CPU"}}}); // CPU: as helper, get perf_hint:lantency GPU:no perf_hint + testConfigs.push_back(ConfigParams{ + "AUTO", + {"CPU", "GPU"}, + {{"DEVICE_PROPERTIES", "{CPU:{ALLOW_AUTO_BATCHING:TRUE},GPU:{ALLOW_AUTO_BATCHING:TRUE}}"}, + {"MULTI_DEVICE_PRIORITIES", "GPU,CPU"}}}); // CPU: as helper, get perf_hint:lantency GPU:no perf_hint testConfigs.push_back(ConfigParams{"AUTO", {"CPU"}, {{"DEVICE_PROPERTIES", "{CPU:{ALLOW_AUTO_BATCHING:FALSE}}"}, {"MULTI_DEVICE_PRIORITIES", "CPU,GPU"}}}); // CPU: no perf_hint - testConfigs.push_back(ConfigParams{ - "AUTO", - {"GPU"}, - {{"DEVICE_PROPERTIES", "{GPU:{ALLOW_AUTO_BATCHING:FALSE}}"}, {"MULTI_DEVICE_PRIORITIES", "GPU"}}}); // GPU: no perf_hint + testConfigs.push_back(ConfigParams{"AUTO", + {"GPU"}, + {{"DEVICE_PROPERTIES", "{GPU:{ALLOW_AUTO_BATCHING:FALSE}}"}, + {"MULTI_DEVICE_PRIORITIES", "GPU"}}}); // GPU: no perf_hint testConfigs.push_back(ConfigParams{ "MULTI:CPU,GPU", @@ -171,11 +170,11 @@ class AutoDefaultPerfHintTest : public tests::AutoTest, {"CPU", "GPU"}, {{"DEVICE_PROPERTIES", "{GPU:{ALLOW_AUTO_BATCHING:FALSE}}"}, {"MULTI_DEVICE_PRIORITIES", "CPU,GPU"}}}); // CPU: get default_hint:tput GPU: get default_hint:tput - testConfigs.push_back(ConfigParams{ - "MULTI:CPU,GPU", - {"CPU", "GPU"}, - {{"DEVICE_PROPERTIES", "{CPU:{ALLOW_AUTO_BATCHING:TRUE},GPU:{ALLOW_AUTO_BATCHING:FALSE}}"}, - {"MULTI_DEVICE_PRIORITIES", "CPU,GPU"}}}); // CPU: no perf_hint GPU: get default_hint:tput + testConfigs.push_back( + ConfigParams{"MULTI:CPU,GPU", + {"CPU", "GPU"}, + {{"DEVICE_PROPERTIES", "{CPU:{ALLOW_AUTO_BATCHING:TRUE},GPU:{ALLOW_AUTO_BATCHING:FALSE}}"}, + {"MULTI_DEVICE_PRIORITIES", "CPU,GPU"}}}); // CPU: no perf_hint GPU: get default_hint:tput return testConfigs; } @@ -183,13 +182,17 @@ class AutoDefaultPerfHintTest : public tests::AutoTest, std::vector availableDevs = {"CPU", "GPU"}; ON_CALL(*core, get_available_devices()).WillByDefault(Return(availableDevs)); - ON_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq("CPU")), _)) - .WillByDefault(Return(mockExeNetwork)); + ON_CALL(*core, + 
compile_model(::testing::Matcher&>(_), + ::testing::Matcher(StrEq("CPU")), + _)) + .WillByDefault(Return(mockExeNetwork)); - ON_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq("GPU")), _)) - .WillByDefault(Return(mockExeNetworkActual)); + ON_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(StrEq("GPU")), + _)) + .WillByDefault(Return(mockExeNetworkActual)); } }; @@ -246,21 +249,21 @@ TEST_P(NumStreamsAndDefaultPerfHintMockTest, NumStreamsAndDefaultPerfHintTest) { // do not pass default perf_hint to HW HW_PerfHint = "No PERFORMANCE_HINT"; } - EXPECT_CALL( - *core, - compile_model(::testing::Matcher&>(_), - ::testing::Matcher(deviceName), - ::testing::Matcher(ComparePerfHint(HW_PerfHint)))) + EXPECT_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(deviceName), + ::testing::Matcher(ComparePerfHint(HW_PerfHint)))) .Times(1); } ASSERT_NO_THROW(plugin->compile_model(model, config)); } -INSTANTIATE_TEST_SUITE_P(smoke_AutoMultiMock_NumStreamsAndDefaultPerfHintToHWTest, - NumStreamsAndDefaultPerfHintMockTest, - ::testing::ValuesIn(NumStreamsAndDefaultPerfHintMockTest::CreateNumStreamsAndDefaultPerfHintTestConfigs()), - NumStreamsAndDefaultPerfHintMockTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P( + smoke_AutoMultiMock_NumStreamsAndDefaultPerfHintToHWTest, + NumStreamsAndDefaultPerfHintMockTest, + ::testing::ValuesIn(NumStreamsAndDefaultPerfHintMockTest::CreateNumStreamsAndDefaultPerfHintTestConfigs()), + NumStreamsAndDefaultPerfHintMockTest::getTestCaseName); TEST_P(PerHintAndDefaultPerfHintMockTest, PerfHintAndDefaultPerfHintTest) { std::string device; @@ -309,21 +312,21 @@ TEST_P(PerHintAndDefaultPerfHintMockTest, PerfHintAndDefaultPerfHintTest) { if (itor != deviceConfigs.end() && !isCPUHelper) { HW_PerfHint = itor->second.as(); } - EXPECT_CALL( - *core, - compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq(deviceName)), - ::testing::Matcher(ComparePerfHint(HW_PerfHint)))) + EXPECT_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(StrEq(deviceName)), + ::testing::Matcher(ComparePerfHint(HW_PerfHint)))) .Times(1); } ASSERT_NO_THROW(plugin->compile_model(model, config)); } -INSTANTIATE_TEST_SUITE_P(smoke_AutoMultiMock_PerHintAndDefaultPerfHintToHWTest, - PerHintAndDefaultPerfHintMockTest, - ::testing::ValuesIn(PerHintAndDefaultPerfHintMockTest::CreatePerfHintAndDefaultPerfHintTestConfigs()), - PerHintAndDefaultPerfHintMockTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P( + smoke_AutoMultiMock_PerHintAndDefaultPerfHintToHWTest, + PerHintAndDefaultPerfHintMockTest, + ::testing::ValuesIn(PerHintAndDefaultPerfHintMockTest::CreatePerfHintAndDefaultPerfHintTestConfigs()), + PerHintAndDefaultPerfHintMockTest::getTestCaseName); TEST_P(SecPropAndDefaultPerfHintMockTest, SecPropAndDefaultPerfHintTest) { std::string device; @@ -372,18 +375,18 @@ TEST_P(SecPropAndDefaultPerfHintMockTest, SecPropAndDefaultPerfHintTest) { } } } - EXPECT_CALL( - *core, - compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq(deviceName)), - ::testing::Matcher(ComparePerfHint(HW_PerfHint)))) + EXPECT_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(StrEq(deviceName)), + ::testing::Matcher(ComparePerfHint(HW_PerfHint)))) .Times(1); } ASSERT_NO_THROW(plugin->compile_model(model, config)); } -INSTANTIATE_TEST_SUITE_P(smoke_AutoMultiMock_SecPropAndDefaultPerfHintToHWTest, - SecPropAndDefaultPerfHintMockTest, - 
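// The ::testing::Matcher<T>() wrappers that recur throughout these hunks are the
// standard gmock idiom for pinning an expectation to one particular overload of a
// mocked method. A minimal, self-contained sketch of that idiom follows; the names
// (MockCore, compile) are hypothetical and not taken from the test sources above.
#include <gmock/gmock.h>
#include <gtest/gtest.h>

#include <string>

using ::testing::Matcher;
using ::testing::Return;
using ::testing::StrEq;

struct MockCore {
    // Two overloads with the same arity: a bare _ would be ambiguous here.
    MOCK_METHOD(int, compile, (const std::string& model_path));
    MOCK_METHOD(int, compile, (int model_id));
};

TEST(MatcherOverloadSketch, DisambiguatesByArgumentType) {
    MockCore core;
    // Wrapping the argument in Matcher<T> selects the overload explicitly.
    EXPECT_CALL(core, compile(Matcher<const std::string&>(StrEq("model.xml")))).WillOnce(Return(1));
    EXPECT_CALL(core, compile(Matcher<int>(7))).WillOnce(Return(2));
    EXPECT_EQ(1, core.compile("model.xml"));
    EXPECT_EQ(2, core.compile(7));
}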
::testing::ValuesIn(SecPropAndDefaultPerfHintMockTest::CreateSecPropAndDefaultPerfHintTestConfigs()), - SecPropAndDefaultPerfHintMockTest::getTestCaseName); \ No newline at end of file +INSTANTIATE_TEST_SUITE_P( + smoke_AutoMultiMock_SecPropAndDefaultPerfHintToHWTest, + SecPropAndDefaultPerfHintMockTest, + ::testing::ValuesIn(SecPropAndDefaultPerfHintMockTest::CreateSecPropAndDefaultPerfHintTestConfigs()), + SecPropAndDefaultPerfHintMockTest::getTestCaseName); \ No newline at end of file diff --git a/src/plugins/auto/tests/unit/dynamic_output_test.cpp b/src/plugins/auto/tests/unit/dynamic_output_test.cpp index 0ff5c35c116d1d..afade1a3d1d4e8 100644 --- a/src/plugins/auto/tests/unit/dynamic_output_test.cpp +++ b/src/plugins/auto/tests/unit/dynamic_output_test.cpp @@ -2,8 +2,9 @@ // SPDX-License-Identifier: Apache-2.0 // -#include #include +#include + #include "include/auto_unit_test.hpp" using DynamicOutputConfigParams = std::tuple< @@ -11,8 +12,7 @@ using DynamicOutputConfigParams = std::tuple< ov::Any // expected device to run inference on >; -class DynamicOutputInferenceTest : public tests::AutoTest, - public ::testing::TestWithParam { +class DynamicOutputInferenceTest : public tests::AutoTest, public ::testing::TestWithParam { public: std::shared_ptr create_dynamic_output_model(); static std::string getTestCaseName(testing::TestParamInfo obj); @@ -45,27 +45,35 @@ std::shared_ptr DynamicOutputInferenceTest::create_dynamic_output_mod auto scores = std::make_shared(ov::element::f32, ov::Shape{1, 1, 2}); scores->set_friendly_name("param_2"); scores->get_output_tensor(0).set_names({"input_tensor_2"}); - auto max_output_boxes_per_class = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{}, {10}); + auto max_output_boxes_per_class = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{}, {10}); auto iou_threshold = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{}, {0.75}); auto score_threshold = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{}, {0.7}); - auto nms = std::make_shared(boxes, scores, max_output_boxes_per_class, - iou_threshold, score_threshold); + auto nms = std::make_shared(boxes, + scores, + max_output_boxes_per_class, + iou_threshold, + score_threshold); auto res = std::make_shared(nms); res->set_friendly_name("output_dynamic"); return std::make_shared(ov::NodeVector{nms}, ov::ParameterVector{boxes, scores}); } void DynamicOutputInferenceTest::SetUp() { - model = create_dynamic_output_model(); - std::tie(priorityList, targetList) = GetParam(); - ON_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq(ov::test::utils::DEVICE_GPU)), _)) - .WillByDefault(InvokeWithoutArgs([this]() { - std::this_thread::sleep_for(std::chrono::milliseconds(200)); - return mockExeNetworkActual; })); - ON_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq(ov::test::utils::DEVICE_CPU)), - (_))).WillByDefault(Return(mockExeNetwork)); + model = create_dynamic_output_model(); + std::tie(priorityList, targetList) = GetParam(); + ON_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(StrEq(ov::test::utils::DEVICE_GPU)), + _)) + .WillByDefault(InvokeWithoutArgs([this]() { + std::this_thread::sleep_for(std::chrono::milliseconds(200)); + return mockExeNetworkActual; + })); + ON_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(StrEq(ov::test::utils::DEVICE_CPU)), + (_))) + .WillByDefault(Return(mockExeNetwork)); } TEST_P(DynamicOutputInferenceTest, 
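// The SetUp just above stubs the GPU path with WillByDefault(InvokeWithoutArgs(...))
// so the mocked compile call sleeps before returning, emulating a slow device. A
// compact sketch of that stubbing pattern; MockDevice and load are hypothetical names.
#include <gmock/gmock.h>
#include <gtest/gtest.h>

#include <chrono>
#include <string>
#include <thread>

using ::testing::InvokeWithoutArgs;
using ::testing::NiceMock;

struct MockDevice {
    MOCK_METHOD(std::string, load, (const std::string& model));
};

TEST(SlowStubSketch, DefaultActionCanDelayAndReturn) {
    NiceMock<MockDevice> device;
    // InvokeWithoutArgs ignores the call arguments and runs the lambda instead.
    ON_CALL(device, load(::testing::_)).WillByDefault(InvokeWithoutArgs([] {
        std::this_thread::sleep_for(std::chrono::milliseconds(5));
        return std::string("compiled");
    }));
    EXPECT_EQ("compiled", device.load("model.xml"));
}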
CanSelectCorrectTargetDeviceandInitizeBlobWithCorrectSize) { @@ -74,27 +82,26 @@ TEST_P(DynamicOutputInferenceTest, CanSelectCorrectTargetDeviceandInitizeBlobWit config.insert(ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)); std::shared_ptr exeNetwork; for (auto& iter : targets) { - EXPECT_CALL( - *core, - compile_model(::testing::Matcher&>(_), - ::testing::Matcher(HasSubstr(iter)), - ::testing::Matcher(_))) - .Times(1); + EXPECT_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(HasSubstr(iter)), + ::testing::Matcher(_))) + .Times(1); } - EXPECT_CALL( - *core, + EXPECT_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(HasSubstr("GPU")), - ::testing::Matcher(_))) - .Times(0); + ::testing::Matcher(HasSubstr("GPU")), + ::testing::Matcher(_))) + .Times(0); ASSERT_NO_THROW(exeNetwork = plugin->compile_model(model, config)); } const std::vector testConfigs = { - DynamicOutputConfigParams {"CPU,GPU", std::vector{"CPU"}}, - DynamicOutputConfigParams {"GPU,CPU", std::vector{"CPU"}}, + DynamicOutputConfigParams{"CPU,GPU", std::vector{"CPU"}}, + DynamicOutputConfigParams{"GPU,CPU", std::vector{"CPU"}}, }; -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, DynamicOutputInferenceTest, - ::testing::ValuesIn(testConfigs), - DynamicOutputInferenceTest::getTestCaseName); \ No newline at end of file +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, + DynamicOutputInferenceTest, + ::testing::ValuesIn(testConfigs), + DynamicOutputInferenceTest::getTestCaseName); diff --git a/src/plugins/auto/tests/unit/get_device_list.cpp b/src/plugins/auto/tests/unit/get_device_list.cpp index 5fc8d4eedb4b43..59c6babb8f19c9 100644 --- a/src/plugins/auto/tests/unit/get_device_list.cpp +++ b/src/plugins/auto/tests/unit/get_device_list.cpp @@ -10,10 +10,9 @@ using namespace ov::mock_auto_plugin; const std::vector availableDevs = {"CPU", "GPU", "NPU"}; const std::vector availableDevsWithId = {"CPU", "GPU.0", "GPU.1", "NPU"}; using Params = std::tuple; -using ConfigParams = std::tuple< - std::vector, // Available devices retrieved from Core - Params // Params {devicePriority, expect metaDevices} - >; +using ConfigParams = std::tuple, // Available devices retrieved from Core + Params // Params {devicePriority, expect metaDevices} + >; class GetDeviceListTest : public tests::AutoTest, public ::testing::TestWithParam { public: static std::string getTestCaseName(testing::TestParamInfo obj) { @@ -36,10 +35,9 @@ class GetDeviceListTest : public tests::AutoTest, public ::testing::TestWithPara } void SetUp() override { - ON_CALL(*plugin, get_device_list).WillByDefault([this]( - const ov::AnyMap& config) { - return plugin->Plugin::get_device_list(config); - }); + ON_CALL(*plugin, get_device_list).WillByDefault([this](const ov::AnyMap& config) { + return plugin->Plugin::get_device_list(config); + }); } }; @@ -76,8 +74,8 @@ TEST_P(GetDeviceListTestWithNotInteldGPU, GetDeviceListTestWithExcludeList) { ON_CALL(*core, get_available_devices()).WillByDefault(Return(availableDevs)); std::string dgpuArchitecture = "GPU: vendor=0x10DE arch=0"; - ON_CALL(*core, get_property(StrEq("GPU.1"), - StrEq(ov::device::architecture.name()), _)).WillByDefault(RETURN_MOCK_VALUE(dgpuArchitecture)); + ON_CALL(*core, get_property(StrEq("GPU.1"), StrEq(ov::device::architecture.name()), _)) + .WillByDefault(RETURN_MOCK_VALUE(dgpuArchitecture)); EXPECT_CALL(*core, get_available_devices()).Times(1); if (metaDevices == "") { 
EXPECT_THROW(plugin->get_device_list({ov::device::priorities(priorityDevices)}), ov::Exception); @@ -88,29 +86,30 @@ TEST_P(GetDeviceListTestWithNotInteldGPU, GetDeviceListTestWithExcludeList) { } } -const std::vector testConfigsWithId = {Params{" ", " "}, - Params{"", "CPU,GPU.0,GPU.1"}, - Params{"CPU, ", "CPU, "}, - Params{" ,CPU", " ,CPU"}, - Params{"CPU,", "CPU"}, - Params{"CPU,,GPU", "CPU,GPU.0,GPU.1"}, - Params{"CPU, ,GPU", "CPU, ,GPU.0,GPU.1"}, - Params{"CPU,GPU,GPU.1", "CPU,GPU.0,GPU.1"}, - Params{"CPU,GPU,NPU,INVALID_DEVICE", "CPU,GPU.0,GPU.1,NPU,INVALID_DEVICE"}, - Params{"NPU,GPU,CPU,-GPU.0", "NPU,GPU.1,CPU"}, - Params{"-GPU.0,GPU,CPU", "GPU.1,CPU"}, - Params{"-GPU.0,GPU", "GPU.1"}, - Params{"-GPU,GPU.0", "GPU.0"}, - Params{"-GPU.0", "CPU,GPU.1"}, - Params{"-GPU.0,-GPU.1", "CPU"}, - Params{"-GPU.0,-GPU.1,INVALID_DEVICE", "INVALID_DEVICE"}, - Params{"-GPU.0,-GPU.1,-INVALID_DEVICE", "CPU"}, - Params{"-GPU.0,-GPU.1,-CPU", ""}, - Params{"GPU,-GPU.0", "GPU.1"}, - Params{"-GPU,CPU", "CPU"}, - Params{"-GPU,-CPU", ""}, - Params{"GPU.0,-GPU", "GPU.0"}, - Params{"-GPU.0,-CPU", "GPU.1"}}; +const std::vector testConfigsWithId = { + Params{" ", " "}, + Params{"", "CPU,GPU.0,GPU.1"}, + Params{"CPU, ", "CPU, "}, + Params{" ,CPU", " ,CPU"}, + Params{"CPU,", "CPU"}, + Params{"CPU,,GPU", "CPU,GPU.0,GPU.1"}, + Params{"CPU, ,GPU", "CPU, ,GPU.0,GPU.1"}, + Params{"CPU,GPU,GPU.1", "CPU,GPU.0,GPU.1"}, + Params{"CPU,GPU,NPU,INVALID_DEVICE", "CPU,GPU.0,GPU.1,NPU,INVALID_DEVICE"}, + Params{"NPU,GPU,CPU,-GPU.0", "NPU,GPU.1,CPU"}, + Params{"-GPU.0,GPU,CPU", "GPU.1,CPU"}, + Params{"-GPU.0,GPU", "GPU.1"}, + Params{"-GPU,GPU.0", "GPU.0"}, + Params{"-GPU.0", "CPU,GPU.1"}, + Params{"-GPU.0,-GPU.1", "CPU"}, + Params{"-GPU.0,-GPU.1,INVALID_DEVICE", "INVALID_DEVICE"}, + Params{"-GPU.0,-GPU.1,-INVALID_DEVICE", "CPU"}, + Params{"-GPU.0,-GPU.1,-CPU", ""}, + Params{"GPU,-GPU.0", "GPU.1"}, + Params{"-GPU,CPU", "CPU"}, + Params{"-GPU,-CPU", ""}, + Params{"GPU.0,-GPU", "GPU.0"}, + Params{"-GPU.0,-CPU", "GPU.1"}}; const std::vector testConfigs = {Params{" ", " "}, Params{"", "CPU,GPU"}, @@ -139,35 +138,36 @@ const std::vector testConfigs = {Params{" ", " "}, Params{"-CPU,INVALID_DEVICE", "INVALID_DEVICE"}, Params{"CPU,GPU,NPU", "CPU,GPU,NPU"}}; -const std::vector testConfigsWithIdNotInteldGPU = {Params{" ", " "}, - Params{"", "CPU,GPU.0"}, - Params{"CPU, ", "CPU, "}, - Params{" ,CPU", " ,CPU"}, - Params{"CPU,", "CPU"}, - Params{"CPU,,GPU", "CPU,GPU.0,GPU.1"}, - Params{"CPU, ,GPU", "CPU, ,GPU.0,GPU.1"}, - Params{"CPU,GPU,GPU.1", "CPU,GPU.0,GPU.1"}, - Params{"CPU,GPU,NPU,INVALID_DEVICE", "CPU,GPU.0,GPU.1,NPU,INVALID_DEVICE"}, - Params{"NPU,GPU,CPU,-GPU.0", "NPU,GPU.1,CPU"}, - Params{"-GPU.0,GPU,CPU", "GPU.1,CPU"}, - Params{"-GPU.0,GPU", "GPU.1"}, - Params{"-GPU,GPU.0", "GPU.0"}, - Params{"-GPU.0", "CPU"}, - Params{"-GPU.0,-GPU.1", "CPU"}, - Params{"-GPU.0,-GPU.1,INVALID_DEVICE", "INVALID_DEVICE"}, - Params{"-GPU.0,-GPU.1,-INVALID_DEVICE", "CPU"}, - Params{"-GPU.0,-GPU.1,-CPU", ""}, - Params{"GPU,-GPU.0", "GPU.1"}, - Params{"GPU.0,-GPU", "GPU.0"}, - Params{"GPU", "GPU.0,GPU.1"}, - Params{"GPU.0", "GPU.0"}, - Params{"GPU.1", "GPU.1"}, - Params{"-CPU", "GPU.0"}, - Params{"-CPU,-GPU", ""}, - Params{"-CPU,-GPU.0", ""}, - Params{"-CPU,-GPU.1", "GPU.0"}, - Params{"-GPU,CPU", "CPU"}, - Params{"-GPU.0,-CPU", ""}}; +const std::vector testConfigsWithIdNotInteldGPU = { + Params{" ", " "}, + Params{"", "CPU,GPU.0"}, + Params{"CPU, ", "CPU, "}, + Params{" ,CPU", " ,CPU"}, + Params{"CPU,", "CPU"}, + Params{"CPU,,GPU", "CPU,GPU.0,GPU.1"}, + 
Params{"CPU, ,GPU", "CPU, ,GPU.0,GPU.1"}, + Params{"CPU,GPU,GPU.1", "CPU,GPU.0,GPU.1"}, + Params{"CPU,GPU,NPU,INVALID_DEVICE", "CPU,GPU.0,GPU.1,NPU,INVALID_DEVICE"}, + Params{"NPU,GPU,CPU,-GPU.0", "NPU,GPU.1,CPU"}, + Params{"-GPU.0,GPU,CPU", "GPU.1,CPU"}, + Params{"-GPU.0,GPU", "GPU.1"}, + Params{"-GPU,GPU.0", "GPU.0"}, + Params{"-GPU.0", "CPU"}, + Params{"-GPU.0,-GPU.1", "CPU"}, + Params{"-GPU.0,-GPU.1,INVALID_DEVICE", "INVALID_DEVICE"}, + Params{"-GPU.0,-GPU.1,-INVALID_DEVICE", "CPU"}, + Params{"-GPU.0,-GPU.1,-CPU", ""}, + Params{"GPU,-GPU.0", "GPU.1"}, + Params{"GPU.0,-GPU", "GPU.0"}, + Params{"GPU", "GPU.0,GPU.1"}, + Params{"GPU.0", "GPU.0"}, + Params{"GPU.1", "GPU.1"}, + Params{"-CPU", "GPU.0"}, + Params{"-CPU,-GPU", ""}, + Params{"-CPU,-GPU.0", ""}, + Params{"-CPU,-GPU.1", "GPU.0"}, + Params{"-GPU,CPU", "CPU"}, + Params{"-GPU.0,-CPU", ""}}; INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests_GetDeviceListWithID, GetDeviceListTest, @@ -182,8 +182,9 @@ INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests_GetDeviceList, INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests_GetDeviceListNotInteldGPU, GetDeviceListTestWithNotInteldGPU, - ::testing::Combine(::testing::Values(availableDevsWithId), ::testing::ValuesIn(testConfigsWithIdNotInteldGPU)), + ::testing::Combine(::testing::Values(availableDevsWithId), + ::testing::ValuesIn(testConfigsWithIdNotInteldGPU)), GetDeviceListTestWithNotInteldGPU::getTestCaseName); -//toDo need add test for ParseMetaDevices(_, config) to check device config of -//return metaDevices +// toDo need add test for ParseMetaDevices(_, config) to check device config of +// return metaDevices diff --git a/src/plugins/auto/tests/unit/include/auto_unit_test.hpp b/src/plugins/auto/tests/unit/include/auto_unit_test.hpp index 1142c7d871cad0..02043f1e45a1d7 100644 --- a/src/plugins/auto/tests/unit/include/auto_unit_test.hpp +++ b/src/plugins/auto/tests/unit/include/auto_unit_test.hpp @@ -3,43 +3,31 @@ // #pragma once -#include #include +#include + +#include #include -#include "plugin.hpp" -#include "openvino/runtime/core.hpp" + #include "gmock_plugin.hpp" -#include "mock_common.hpp" -#include +#include "openvino/runtime/core.hpp" +#include "plugin.hpp" +#include "unit_test_utils/mocks/openvino/runtime/mock_icompiled_model.hpp" #include "unit_test_utils/mocks/openvino/runtime/mock_icore.hpp" +#include "unit_test_utils/mocks/openvino/runtime/mock_iplugin.hpp" +#include "unit_test_utils/mocks/openvino/runtime/mock_isync_infer_request.hpp" -using ::testing::MatcherCast; -using ::testing::AllOf; -using ::testing::Throw; -using ::testing::Matches; -using ::testing::_; -using ::testing::StrEq; -using ::testing::StrNe; -using ::testing::Return; -using ::testing::Property; -using ::testing::Eq; -using ::testing::ReturnRef; -using ::testing::AtLeast; -using ::testing::AnyNumber; -using ::testing::InvokeWithoutArgs; -using ::testing::HasSubstr; -using ::testing::NiceMock; +using namespace ::testing; using namespace ov::mock_auto_plugin; -#define EXPECT_THROW_WITH_MESSAGE(stmt, etype, whatstring) EXPECT_THROW( \ - try { \ - stmt; \ - } catch (const etype& ex) { \ +#define EXPECT_THROW_WITH_MESSAGE(stmt, etype, whatstring) \ + EXPECT_THROW( \ + try { stmt; } catch (const etype& ex) { \ EXPECT_THAT(std::string(ex.what()), HasSubstr(whatstring)); \ - throw; \ - } \ - , etype) + throw; \ + }, \ + etype) // define a matcher to check if perf hint expects MATCHER_P(ComparePerfHint, perfHint, "Check if perf hint expects.") { @@ -51,28 +39,51 @@ MATCHER_P(ComparePerfHint, perfHint, "Check if perf hint 
expects.") { return perfHint == arg_perfHint.as(); } + +#define IE_SET_METRIC(key, name, ...) \ + typename ::InferenceEngine::Metrics::MetricType<::InferenceEngine::Metrics::key>::type name = __VA_ARGS__; + +#define RETURN_MOCK_VALUE(value) \ + InvokeWithoutArgs([value]() { \ + return ov::Any(value); \ + }) + +// getMetric will return a fake ov::Any, gmock will call ostreamer << ov::Any +// it will cause core dump, so add this special implemented +namespace testing { +namespace internal { +template <> +void PrintTo(const ov::Any& a, std::ostream* os); +} +} // namespace testing + +#define ENABLE_LOG_IN_MOCK() \ + ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) { \ + std::cout << stream.str() << std::endl; \ + }); + namespace ov { namespace mock_auto_plugin { namespace tests { - class BaseTest { public: - std::shared_ptr model; - std::shared_ptr> mock_plugin_cpu; - std::shared_ptr> mock_plugin_gpu; - std::shared_ptr> plugin; - //mock exeNetwork helper - ov::SoPtr mockExeNetwork; - std::shared_ptr mockIExeNet; - //mock exeNetwork actual - ov::SoPtr mockExeNetworkActual; - std::shared_ptr mockIExeNetActual; + std::shared_ptr model; + std::shared_ptr model_can_batch; + std::shared_ptr> mock_plugin_cpu; + std::shared_ptr> mock_plugin_gpu; + std::shared_ptr> plugin; + // mock exeNetwork helper + ov::SoPtr mockExeNetwork; + std::shared_ptr mockIExeNet; + // mock exeNetwork actual + ov::SoPtr mockExeNetworkActual; + std::shared_ptr mockIExeNetActual; // config for Auto device - ov::AnyMap config; - std::vector metaDevices; - std::shared_ptr inferReqInternal; - std::shared_ptr inferReqInternalActual; + ov::AnyMap config; + std::vector metaDevices; + std::shared_ptr inferReqInternal; + std::shared_ptr inferReqInternalActual; ov::Any optimalNum; virtual ~BaseTest(); @@ -84,32 +95,10 @@ class BaseTest { // for auto unit tests which can covered by mock core, or need to test with gmock icore class AutoTest : public BaseTest { public: - std::shared_ptr> core; + std::shared_ptr> core; AutoTest(); ~AutoTest(); }; - -// for unit tests which requires real core, batch support or remote context -// mock plugin name: MOCK_CPU,MOCK_HARDWARE -// please extend as needed - -class AutoTestWithRealCore : public BaseTest { -public: - AutoTestWithRealCore(); - ~AutoTestWithRealCore() = default; - ov::Core core; - -protected: - void register_plugin_simple(ov::Core& core, const std::string& device_name, const ov::AnyMap& properties); - void register_plugin_support_batch_and_context(ov::Core& core, const std::string& device_name, const ov::AnyMap& properties); - std::vector> m_mock_contexts; - std::shared_ptr m_so; - std::shared_ptr compiled_model; - void reg_plugin(ov::Core& core, - std::shared_ptr plugin, - const std::string& device_name, - const ov::AnyMap& properties); -}; } // namespace tests } // namespace mock_auto_plugin } // namespace ov diff --git a/src/plugins/auto/tests/unit/include/gmock_plugin.hpp b/src/plugins/auto/tests/unit/include/gmock_plugin.hpp index 01d46dddf5d001..71b9f3269bb9d7 100644 --- a/src/plugins/auto/tests/unit/include/gmock_plugin.hpp +++ b/src/plugins/auto/tests/unit/include/gmock_plugin.hpp @@ -4,9 +4,11 @@ #pragma once #include + +#include + #include "openvino/runtime/core.hpp" #include "plugin.hpp" -#include using namespace ov::mock_auto_plugin; namespace ov { @@ -19,10 +21,47 @@ class MockAutoPlugin : public Plugin { get_valid_device, ((const std::vector&), const std::string&), (const, override)); - MOCK_METHOD(DeviceInformation, select_device, ((const 
std::vector&), - const std::string&, unsigned int), (override)); - MOCK_METHOD((std::vector), parse_meta_devices, - (const std::string&, const ov::AnyMap&), (const, override)); + MOCK_METHOD(DeviceInformation, + select_device, + ((const std::vector&), const std::string&, unsigned int), + (override)); + MOCK_METHOD((std::vector), + parse_meta_devices, + (const std::string&, const ov::AnyMap&), + (const, override)); +}; + +class MockISyncInferRequest : public ISyncInferRequest { +public: + MockISyncInferRequest(const std::shared_ptr& compiled_model); + MOCK_METHOD(std::vector, get_profiling_info, (), (const, override)); + MOCK_METHOD(void, infer, (), (override)); + MOCK_METHOD(std::vector>, query_state, (), (const, override)); + ~MockISyncInferRequest() = default; + +private: + void allocate_tensor_impl(ov::SoPtr& tensor, + const ov::element::Type& element_type, + const ov::Shape& shape); +}; + +class MockAsyncInferRequest : public IAsyncInferRequest { +public: + MockAsyncInferRequest(const std::shared_ptr& request, + const std::shared_ptr& task_executor, + const std::shared_ptr& callback_executor, + bool ifThrow) + : IAsyncInferRequest(request, task_executor, callback_executor), + m_throw(ifThrow) { + m_pipeline = {}; + m_pipeline.push_back({task_executor, [this] { + if (m_throw) + OPENVINO_THROW("runtime inference failure"); + }}); + } + +private: + bool m_throw; }; -} // namespace mock_auto_plugin -} // namespace ov +} // namespace mock_auto_plugin +} // namespace ov diff --git a/src/plugins/auto/tests/unit/include/mock_common.hpp b/src/plugins/auto/tests/unit/include/mock_common.hpp deleted file mode 100644 index 0bbf58d68b0f53..00000000000000 --- a/src/plugins/auto/tests/unit/include/mock_common.hpp +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once -#include -#include -#include "openvino/runtime/iplugin.hpp" -#include "openvino/opsets/opset11.hpp" -#include "openvino/runtime/iasync_infer_request.hpp" -#include "openvino/runtime/iplugin.hpp" -#include "openvino/runtime/iremote_context.hpp" -#include "openvino/runtime/iremote_tensor.hpp" -#include "openvino/runtime/make_tensor.hpp" - -#define IE_SET_METRIC(key, name, ...) 
\ - typename ::InferenceEngine::Metrics::MetricType<::InferenceEngine::Metrics::key>::type name = \ - __VA_ARGS__; - -#define RETURN_MOCK_VALUE(value) \ - InvokeWithoutArgs([value](){return ov::Any(value);}) - -// getMetric will return a fake ov::Any, gmock will call ostreamer << ov::Any -// it will cause core dump, so add this special implemented -namespace testing { -namespace internal { - template<> - void PrintTo(const ov::Any& a, std::ostream* os); -} -} - -#define ENABLE_LOG_IN_MOCK() \ - ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) { \ - std::cout << stream.str() << std::endl; \ - }); - -namespace ov { -class MockPluginBase : public ov::IPlugin { -public: - MOCK_METHOD(std::shared_ptr, compile_model, ((const std::shared_ptr&), (const ov::AnyMap&)), (const, override)); - MOCK_METHOD(std::shared_ptr, compile_model, - ((const std::shared_ptr&), (const ov::AnyMap&), (const ov::SoPtr&)), (const, override)); - MOCK_METHOD(void, set_property, (const AnyMap&), (override)); - MOCK_METHOD(ov::Any, get_property, ((const std::string&), (const ov::AnyMap&)), (const, override)); - MOCK_METHOD(ov::SoPtr, create_context, (const ov::AnyMap&), (const, override)); - MOCK_METHOD(ov::SoPtr, get_default_context, (const ov::AnyMap&), (const, override)); - MOCK_METHOD(std::shared_ptr, import_model, ((std::istream&), (const ov::AnyMap&)), (const, override)); - MOCK_METHOD(std::shared_ptr, import_model, - ((std::istream&), (const ov::SoPtr&), (const ov::AnyMap&)), (const, override)); - MOCK_METHOD(ov::SupportedOpsMap, query_model, ((const std::shared_ptr&), (const ov::AnyMap&)), (const, override)); -}; - -class MockCompiledModel : public ICompiledModel { -public: - MockCompiledModel(const std::shared_ptr& model, const std::shared_ptr& plugin) - : ICompiledModel(model, plugin) {} - MOCK_METHOD(std::shared_ptr, create_sync_infer_request, (), (const, override)); - MOCK_METHOD(Any, get_property, (const std::string&), (const, override)); - MOCK_METHOD(void, set_property, (const AnyMap&), (override)); - MOCK_METHOD(void, export_model, (std::ostream&), (const, override)); - MOCK_METHOD(std::shared_ptr, get_runtime_model, (), (const, override)); - MOCK_METHOD(std::shared_ptr, create_infer_request, (), (const, override)); -}; - -class MockAsyncInferRequest : public IAsyncInferRequest { -public: - MockAsyncInferRequest(const std::shared_ptr& request, - const std::shared_ptr& task_executor, - const std::shared_ptr& callback_executor, - bool ifThrow); -private: - bool m_throw; -}; - -class MockSyncInferRequest : public ISyncInferRequest { -public: - MockSyncInferRequest(const std::shared_ptr& compiled_model); - MOCK_METHOD(std::vector, get_profiling_info, (), (const, override)); - //MOCK_METHOD(Tensor, get_tensor, (const Output&), (const, override)); - //MOCK_METHOD(void, set_tensor, (const Output&, const Tensor&), (override)); - //MOCK_METHOD(std::vector, get_tensors, (const Output&), (const, override)); - //MOCK_METHOD(void, set_tensors, (const Output&, const std::vector&), (override)); - MOCK_METHOD(void, infer, (), (override)); - MOCK_METHOD(std::vector>, query_state, (), (const, override)); - //MOCK_METHOD(const std::shared_ptr&, get_compiled_model, (), (const, override)); - //MOCK_METHOD(const std::vector>&, get_inputs, (), (const, override)); - //MOCK_METHOD(const std::vector>&, get_outputs, (), (const, override)); - //MOCK_METHOD(void, check_tensors, (), (const, override)); - ~MockSyncInferRequest() = default; - -private: - void allocate_tensor_impl(ov::SoPtr& tensor, const 
ov::element::Type& element_type, const ov::Shape& shape); -}; - -class MockRemoteTensor : public ov::IRemoteTensor { - ov::AnyMap m_properties; - std::string m_dev_name; - -public: - MockRemoteTensor(const std::string& name, const ov::AnyMap& props) : m_properties(props), m_dev_name(name) {} - const ov::AnyMap& get_properties() const override { - return m_properties; - } - const std::string& get_device_name() const override { - return m_dev_name; - } - void set_shape(ov::Shape shape) override { - OPENVINO_NOT_IMPLEMENTED; - } - - const ov::element::Type& get_element_type() const override { - OPENVINO_NOT_IMPLEMENTED; - } - - const ov::Shape& get_shape() const override { - OPENVINO_NOT_IMPLEMENTED; - } - - const ov::Strides& get_strides() const override { - OPENVINO_NOT_IMPLEMENTED; - } -}; - -class MockRemoteContext : public ov::IRemoteContext { - ov::AnyMap m_property = {{"IS_DEFAULT", true}}; - std::string m_dev_name; - -public: - MockRemoteContext(const std::string& dev_name) : m_dev_name(dev_name) {} - const std::string& get_device_name() const override { - return m_dev_name; - } - - const ov::AnyMap& get_property() const override { - OPENVINO_NOT_IMPLEMENTED; - } - - ov::SoPtr create_tensor(const ov::element::Type& type, - const ov::Shape& shape, - const ov::AnyMap& params = {}) override { - auto remote_tensor = std::make_shared(m_dev_name, m_property); - return {remote_tensor, nullptr}; - } -}; -} // namespace ov diff --git a/src/plugins/auto/tests/unit/include/mock_log_utils.hpp b/src/plugins/auto/tests/unit/include/mock_log_utils.hpp index 80383b42ff6fc0..51b0a7f4e622ad 100644 --- a/src/plugins/auto/tests/unit/include/mock_log_utils.hpp +++ b/src/plugins/auto/tests/unit/include/mock_log_utils.hpp @@ -4,15 +4,15 @@ #pragma once #include + #include "utils/log.hpp" namespace ov { namespace mock_auto_plugin { class MockLog : public Log { public: - MOCK_METHOD(void, print, (std::stringstream& stream), (override)); - MockLog(std::string unittest):Log(unittest) { - } + MOCK_METHOD(void, print, (std::stringstream & stream), (override)); + MockLog(std::string unittest) : Log(unittest) {} static MockLog* get_instance() { if (m_mocklog == NULL) { m_mocklog = new MockLog("unittest"); @@ -27,5 +27,5 @@ class MockLog : public Log { } static MockLog* m_mocklog; }; -}// namespace mock_auto_plugin -} //namespace ov +} // namespace mock_auto_plugin +} // namespace ov diff --git a/src/plugins/auto/tests/unit/key_network_priority_test.cpp b/src/plugins/auto/tests/unit/key_network_priority_test.cpp index e284aedc6572c7..616f14040486b6 100644 --- a/src/plugins/auto/tests/unit/key_network_priority_test.cpp +++ b/src/plugins/auto/tests/unit/key_network_priority_test.cpp @@ -7,16 +7,15 @@ using Config = std::map; using namespace ov::mock_auto_plugin; -using PriorityParams = std::tuple; //{modelpriority, deviceUniquName} +using PriorityParams = std::tuple; //{modelpriority, deviceUniquName} -using ConfigParams = std::tuple< - std::string, // netPrecision - bool, // enable device priority - std::vector // {{modelpriority, expect device unique_name}} - >; +using ConfigParams = std::tuple // {{modelpriority, expect device unique_name}} + >; class KeyNetworkPriorityTest : public tests::AutoTest, public ::testing::TestWithParam { public: - std::vector metaDevices; + std::vector metaDevices; public: static std::string getTestCaseName(testing::TestParamInfo obj) { @@ -31,8 +30,8 @@ class KeyNetworkPriorityTest : public tests::AutoTest, public ::testing::TestWit result << "_enableDevicePriority_false"; } for (auto& 
item : PriorityConfigs) { - result << "_priority_" << std::get<0>(item); - result << "_return_" << std::get<1>(item); + result << "_priority_" << std::get<0>(item); + result << "_return_" << std::get<1>(item); } result << "netPrecision_" << netPrecision; return result.str(); @@ -45,9 +44,9 @@ class KeyNetworkPriorityTest : public tests::AutoTest, public ::testing::TestWit void SetUp() override { std::tie(netPrecision, enableDevicePriority, PriorityConfigs) = GetParam(); sizeOfConfigs = static_cast(PriorityConfigs.size()); - std::vector gpuCability = {"FP32", "FP16", "BATCHED_BLOB", "BIN"}; - ON_CALL(*core, get_property(HasSubstr("GPU"), - StrEq(ov::device::capabilities.name()), _)).WillByDefault(RETURN_MOCK_VALUE(gpuCability)); + std::vector gpuCability = {"FP32", "FP16", "BATCHED_BLOB", "BIN"}; + ON_CALL(*core, get_property(HasSubstr("GPU"), StrEq(ov::device::capabilities.name()), _)) + .WillByDefault(RETURN_MOCK_VALUE(gpuCability)); std::vector otherCability = {"INT8"}; ON_CALL(*core, get_property(HasSubstr("OTHER"), StrEq(ov::device::capabilities.name()), _)) @@ -69,14 +68,14 @@ TEST_P(KeyNetworkPriorityTest, SelectDevice) { std::vector resDevInfo; if (enableDevicePriority) { metaDevices = {{ov::test::utils::DEVICE_CPU, {}, 2, "", "CPU_01", 0}, - {"GPU.0", {}, 2, "01", "iGPU_01", 1}, - {"GPU.1", {}, 2, "01", "dGPU_01", 2}, - {"OTHER", {}, 2, "01", "OTHER_01", 3}}; + {"GPU.0", {}, 2, "01", "iGPU_01", 1}, + {"GPU.1", {}, 2, "01", "dGPU_01", 2}, + {"OTHER", {}, 2, "01", "OTHER_01", 3}}; } else { metaDevices = {{ov::test::utils::DEVICE_CPU, {}, 2, "", "CPU_01", 0}, - {"GPU.0", {}, 2, "01", "iGPU_01", 0}, - {"GPU.1", {}, 2, "01", "dGPU_01", 0}, - {"OTHER", {}, 2, "01", "OTHER_01", 0}}; + {"GPU.0", {}, 2, "01", "iGPU_01", 0}, + {"GPU.1", {}, 2, "01", "dGPU_01", 0}, + {"OTHER", {}, 2, "01", "OTHER_01", 0}}; } EXPECT_CALL(*plugin, select_device(_, _, _)).Times(sizeOfConfigs); @@ -110,16 +109,16 @@ TEST_P(KeyNetworkPriorityTest, MultiThreadsSelectDevice) { // selectdevice in multi threads, and UnregisterPriority them all, should not affect the // Priority Map for (auto& item : PriorityConfigs) { - unsigned int priority = std::get<0>(item); - auto future = std::async(std::launch::async, [this, priority] { - auto deviceInfo = plugin->select_device(metaDevices, netPrecision, priority); - plugin->unregister_priority(priority, deviceInfo.unique_name); - }); - futureVect.push_back(std::move(future)); + unsigned int priority = std::get<0>(item); + auto future = std::async(std::launch::async, [this, priority] { + auto deviceInfo = plugin->select_device(metaDevices, netPrecision, priority); + plugin->unregister_priority(priority, deviceInfo.unique_name); + }); + futureVect.push_back(std::move(future)); } for (auto& item : futureVect) { - item.get(); + item.get(); } for (auto& item : PriorityConfigs) { @@ -138,152 +137,206 @@ TEST_P(KeyNetworkPriorityTest, MultiThreadsSelectDevice) { // {netPrecision, enableDevicePriority, PriorityParamsVector{{modelpriority, expect device unique_name}}} const std::vector testConfigs = { - ConfigParams {"FP32", false, {PriorityParams {0, "dGPU_01"}, - PriorityParams {1, "iGPU_01"}, - PriorityParams {2, "CPU_01"}, - PriorityParams {2, "CPU_01"}}}, - ConfigParams {"FP32", false, {PriorityParams {2, "dGPU_01"}, - PriorityParams {3, "iGPU_01"}, - PriorityParams {4, "CPU_01"}}}, - ConfigParams {"FP32", false, {PriorityParams {2, "dGPU_01"}, - PriorityParams {0, "dGPU_01"}, - PriorityParams {2, "iGPU_01"}, - PriorityParams {2, "iGPU_01"}}}, - ConfigParams {"FP32", false, 
{PriorityParams {2, "dGPU_01"}, - PriorityParams {0, "dGPU_01"}, - PriorityParams {2, "iGPU_01"}, - PriorityParams {3, "CPU_01"}}}, - ConfigParams {"FP32", false, {PriorityParams {0, "dGPU_01"}, - PriorityParams {1, "iGPU_01"}, - PriorityParams {2, "CPU_01"}, - PriorityParams {0, "dGPU_01"}, - PriorityParams {1, "iGPU_01"}, - PriorityParams {2, "CPU_01"}}}, - ConfigParams {"INT8", false, {PriorityParams {0, "OTHER_01"}, - PriorityParams {1, "CPU_01"}, - PriorityParams {2, "CPU_01"}, - PriorityParams {2, "CPU_01"}}}, - ConfigParams {"INT8", false, {PriorityParams {2, "OTHER_01"}, - PriorityParams {3, "CPU_01"}, - PriorityParams {4, "CPU_01"}, - PriorityParams {5, "CPU_01"}}}, - ConfigParams {"INT8", false, {PriorityParams {2, "OTHER_01"}, - PriorityParams {0, "OTHER_01"}, - PriorityParams {2, "CPU_01"}, - PriorityParams {2, "CPU_01"}}}, - ConfigParams {"INT8", false, {PriorityParams {2, "OTHER_01"}, - PriorityParams {0, "OTHER_01"}, - PriorityParams {2, "CPU_01"}, - PriorityParams {3, "CPU_01"}}}, - ConfigParams {"INT8", false, {PriorityParams {0, "OTHER_01"}, - PriorityParams {1, "CPU_01"}, - PriorityParams {2, "CPU_01"}, - PriorityParams {3, "CPU_01"}, - PriorityParams {0, "OTHER_01"}, - PriorityParams {1, "CPU_01"}, - PriorityParams {2, "CPU_01"}, - PriorityParams {3, "CPU_01"}}}, - ConfigParams {"BIN", false, {PriorityParams {0, "dGPU_01"}, - PriorityParams {1, "iGPU_01"}, - PriorityParams {2, "CPU_01"}, - PriorityParams {2, "CPU_01"}}}, - ConfigParams {"BIN", false, {PriorityParams {2, "dGPU_01"}, - PriorityParams {3, "iGPU_01"}, - PriorityParams {4, "CPU_01"}, - PriorityParams {5, "CPU_01"}}}, - ConfigParams {"BIN", false, {PriorityParams {2, "dGPU_01"}, - PriorityParams {0, "dGPU_01"}, - PriorityParams {2, "iGPU_01"}, - PriorityParams {2, "iGPU_01"}}}, - ConfigParams {"BIN", false, {PriorityParams {2, "dGPU_01"}, - PriorityParams {0, "dGPU_01"}, - PriorityParams {2, "iGPU_01"}, - PriorityParams {3, "CPU_01"}}}, - ConfigParams {"BIN", false, {PriorityParams {0, "dGPU_01"}, - PriorityParams {1, "iGPU_01"}, - PriorityParams {2, "CPU_01"}, - PriorityParams {3, "CPU_01"}, - PriorityParams {0, "dGPU_01"}, - PriorityParams {1, "iGPU_01"}, - PriorityParams {2, "CPU_01"}, - PriorityParams {3, "CPU_01"}}}, + ConfigParams{"FP32", + false, + {PriorityParams{0, "dGPU_01"}, + PriorityParams{1, "iGPU_01"}, + PriorityParams{2, "CPU_01"}, + PriorityParams{2, "CPU_01"}}}, + ConfigParams{"FP32", + false, + {PriorityParams{2, "dGPU_01"}, PriorityParams{3, "iGPU_01"}, PriorityParams{4, "CPU_01"}}}, + ConfigParams{"FP32", + false, + {PriorityParams{2, "dGPU_01"}, + PriorityParams{0, "dGPU_01"}, + PriorityParams{2, "iGPU_01"}, + PriorityParams{2, "iGPU_01"}}}, + ConfigParams{"FP32", + false, + {PriorityParams{2, "dGPU_01"}, + PriorityParams{0, "dGPU_01"}, + PriorityParams{2, "iGPU_01"}, + PriorityParams{3, "CPU_01"}}}, + ConfigParams{"FP32", + false, + {PriorityParams{0, "dGPU_01"}, + PriorityParams{1, "iGPU_01"}, + PriorityParams{2, "CPU_01"}, + PriorityParams{0, "dGPU_01"}, + PriorityParams{1, "iGPU_01"}, + PriorityParams{2, "CPU_01"}}}, + ConfigParams{"INT8", + false, + {PriorityParams{0, "OTHER_01"}, + PriorityParams{1, "CPU_01"}, + PriorityParams{2, "CPU_01"}, + PriorityParams{2, "CPU_01"}}}, + ConfigParams{"INT8", + false, + {PriorityParams{2, "OTHER_01"}, + PriorityParams{3, "CPU_01"}, + PriorityParams{4, "CPU_01"}, + PriorityParams{5, "CPU_01"}}}, + ConfigParams{"INT8", + false, + {PriorityParams{2, "OTHER_01"}, + PriorityParams{0, "OTHER_01"}, + PriorityParams{2, "CPU_01"}, + PriorityParams{2, 
"CPU_01"}}}, + ConfigParams{"INT8", + false, + {PriorityParams{2, "OTHER_01"}, + PriorityParams{0, "OTHER_01"}, + PriorityParams{2, "CPU_01"}, + PriorityParams{3, "CPU_01"}}}, + ConfigParams{"INT8", + false, + {PriorityParams{0, "OTHER_01"}, + PriorityParams{1, "CPU_01"}, + PriorityParams{2, "CPU_01"}, + PriorityParams{3, "CPU_01"}, + PriorityParams{0, "OTHER_01"}, + PriorityParams{1, "CPU_01"}, + PriorityParams{2, "CPU_01"}, + PriorityParams{3, "CPU_01"}}}, + ConfigParams{"BIN", + false, + {PriorityParams{0, "dGPU_01"}, + PriorityParams{1, "iGPU_01"}, + PriorityParams{2, "CPU_01"}, + PriorityParams{2, "CPU_01"}}}, + ConfigParams{"BIN", + false, + {PriorityParams{2, "dGPU_01"}, + PriorityParams{3, "iGPU_01"}, + PriorityParams{4, "CPU_01"}, + PriorityParams{5, "CPU_01"}}}, + ConfigParams{"BIN", + false, + {PriorityParams{2, "dGPU_01"}, + PriorityParams{0, "dGPU_01"}, + PriorityParams{2, "iGPU_01"}, + PriorityParams{2, "iGPU_01"}}}, + ConfigParams{"BIN", + false, + {PriorityParams{2, "dGPU_01"}, + PriorityParams{0, "dGPU_01"}, + PriorityParams{2, "iGPU_01"}, + PriorityParams{3, "CPU_01"}}}, + ConfigParams{"BIN", + false, + {PriorityParams{0, "dGPU_01"}, + PriorityParams{1, "iGPU_01"}, + PriorityParams{2, "CPU_01"}, + PriorityParams{3, "CPU_01"}, + PriorityParams{0, "dGPU_01"}, + PriorityParams{1, "iGPU_01"}, + PriorityParams{2, "CPU_01"}, + PriorityParams{3, "CPU_01"}}}, // metaDevices = {{ov::test::utils::DEVICE_CPU, {}, 2, "", "CPU_01", 0}, // {ov::test::utils::DEVICE_GPU, {}, 2, "01", "iGPU_01", 1}, // {ov::test::utils::DEVICE_GPU, {}, 2, "01", "dGPU_01", 2}, // cpu > igpu > dgpu > OTHER - ConfigParams {"FP32", true, {PriorityParams {0, "CPU_01"}, - PriorityParams {1, "iGPU_01"}, - PriorityParams {2, "dGPU_01"}, - PriorityParams {2, "dGPU_01"}}}, - ConfigParams {"FP32", true, {PriorityParams {2, "CPU_01"}, - PriorityParams {3, "iGPU_01"}, - PriorityParams {4, "dGPU_01"}}}, - ConfigParams {"FP32", true, {PriorityParams {2, "CPU_01"}, - PriorityParams {0, "CPU_01"}, - PriorityParams {2, "iGPU_01"}, - PriorityParams {2, "iGPU_01"}}}, - ConfigParams {"FP32", true, {PriorityParams {2, "CPU_01"}, - PriorityParams {0, "CPU_01"}, - PriorityParams {2, "iGPU_01"}, - PriorityParams {3, "dGPU_01"}}}, - ConfigParams {"FP32", true, {PriorityParams {0, "CPU_01"}, - PriorityParams {1, "iGPU_01"}, - PriorityParams {2, "dGPU_01"}, - PriorityParams {0, "CPU_01"}, - PriorityParams {1, "iGPU_01"}, - PriorityParams {2, "dGPU_01"}}}, - ConfigParams {"INT8", true, {PriorityParams {0, "CPU_01"}, - PriorityParams {1, "OTHER_01"}, - PriorityParams {2, "OTHER_01"}, - PriorityParams {2, "OTHER_01"}}}, - ConfigParams {"INT8", true, {PriorityParams {2, "CPU_01"}, - PriorityParams {3, "OTHER_01"}, - PriorityParams {4, "OTHER_01"}, - PriorityParams {5, "OTHER_01"}}}, - ConfigParams {"INT8", true, {PriorityParams {2, "CPU_01"}, - PriorityParams {0, "CPU_01"}, - PriorityParams {2, "OTHER_01"}, - PriorityParams {2, "OTHER_01"}}}, - ConfigParams {"INT8", true, {PriorityParams {2, "CPU_01"}, - PriorityParams {0, "CPU_01"}, - PriorityParams {2, "OTHER_01"}, - PriorityParams {3, "OTHER_01"}}}, - ConfigParams {"INT8", true, {PriorityParams {0, "CPU_01"}, - PriorityParams {1, "OTHER_01"}, - PriorityParams {2, "OTHER_01"}, - PriorityParams {3, "OTHER_01"}, - PriorityParams {0, "CPU_01"}, - PriorityParams {1, "OTHER_01"}, - PriorityParams {2, "OTHER_01"}, - PriorityParams {3, "OTHER_01"}}}, - ConfigParams {"BIN", true, {PriorityParams {0, "CPU_01"}, - PriorityParams {1, "iGPU_01"}, - PriorityParams {2, "dGPU_01"}, - PriorityParams 
{2, "dGPU_01"}}}, - ConfigParams {"BIN", true, {PriorityParams {2, "CPU_01"}, - PriorityParams {3, "iGPU_01"}, - PriorityParams {4, "dGPU_01"}, - PriorityParams {5, "dGPU_01"}}}, - ConfigParams {"BIN", true, {PriorityParams {2, "CPU_01"}, - PriorityParams {0, "CPU_01"}, - PriorityParams {2, "iGPU_01"}, - PriorityParams {2, "iGPU_01"}}}, - ConfigParams {"BIN", true, {PriorityParams {2, "CPU_01"}, - PriorityParams {0, "CPU_01"}, - PriorityParams {2, "iGPU_01"}, - PriorityParams {3, "dGPU_01"}}}, - ConfigParams {"BIN", true, {PriorityParams {0, "CPU_01"}, - PriorityParams {1, "iGPU_01"}, - PriorityParams {2, "dGPU_01"}, - PriorityParams {3, "dGPU_01"}, - PriorityParams {0, "CPU_01"}, - PriorityParams {1, "iGPU_01"}, - PriorityParams {2, "dGPU_01"}, - PriorityParams {3, "dGPU_01"}}} -}; - - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, KeyNetworkPriorityTest, - ::testing::ValuesIn(testConfigs), - KeyNetworkPriorityTest::getTestCaseName); + ConfigParams{"FP32", + true, + {PriorityParams{0, "CPU_01"}, + PriorityParams{1, "iGPU_01"}, + PriorityParams{2, "dGPU_01"}, + PriorityParams{2, "dGPU_01"}}}, + ConfigParams{"FP32", + true, + {PriorityParams{2, "CPU_01"}, PriorityParams{3, "iGPU_01"}, PriorityParams{4, "dGPU_01"}}}, + ConfigParams{"FP32", + true, + {PriorityParams{2, "CPU_01"}, + PriorityParams{0, "CPU_01"}, + PriorityParams{2, "iGPU_01"}, + PriorityParams{2, "iGPU_01"}}}, + ConfigParams{"FP32", + true, + {PriorityParams{2, "CPU_01"}, + PriorityParams{0, "CPU_01"}, + PriorityParams{2, "iGPU_01"}, + PriorityParams{3, "dGPU_01"}}}, + ConfigParams{"FP32", + true, + {PriorityParams{0, "CPU_01"}, + PriorityParams{1, "iGPU_01"}, + PriorityParams{2, "dGPU_01"}, + PriorityParams{0, "CPU_01"}, + PriorityParams{1, "iGPU_01"}, + PriorityParams{2, "dGPU_01"}}}, + ConfigParams{"INT8", + true, + {PriorityParams{0, "CPU_01"}, + PriorityParams{1, "OTHER_01"}, + PriorityParams{2, "OTHER_01"}, + PriorityParams{2, "OTHER_01"}}}, + ConfigParams{"INT8", + true, + {PriorityParams{2, "CPU_01"}, + PriorityParams{3, "OTHER_01"}, + PriorityParams{4, "OTHER_01"}, + PriorityParams{5, "OTHER_01"}}}, + ConfigParams{"INT8", + true, + {PriorityParams{2, "CPU_01"}, + PriorityParams{0, "CPU_01"}, + PriorityParams{2, "OTHER_01"}, + PriorityParams{2, "OTHER_01"}}}, + ConfigParams{"INT8", + true, + {PriorityParams{2, "CPU_01"}, + PriorityParams{0, "CPU_01"}, + PriorityParams{2, "OTHER_01"}, + PriorityParams{3, "OTHER_01"}}}, + ConfigParams{"INT8", + true, + {PriorityParams{0, "CPU_01"}, + PriorityParams{1, "OTHER_01"}, + PriorityParams{2, "OTHER_01"}, + PriorityParams{3, "OTHER_01"}, + PriorityParams{0, "CPU_01"}, + PriorityParams{1, "OTHER_01"}, + PriorityParams{2, "OTHER_01"}, + PriorityParams{3, "OTHER_01"}}}, + ConfigParams{"BIN", + true, + {PriorityParams{0, "CPU_01"}, + PriorityParams{1, "iGPU_01"}, + PriorityParams{2, "dGPU_01"}, + PriorityParams{2, "dGPU_01"}}}, + ConfigParams{"BIN", + true, + {PriorityParams{2, "CPU_01"}, + PriorityParams{3, "iGPU_01"}, + PriorityParams{4, "dGPU_01"}, + PriorityParams{5, "dGPU_01"}}}, + ConfigParams{"BIN", + true, + {PriorityParams{2, "CPU_01"}, + PriorityParams{0, "CPU_01"}, + PriorityParams{2, "iGPU_01"}, + PriorityParams{2, "iGPU_01"}}}, + ConfigParams{"BIN", + true, + {PriorityParams{2, "CPU_01"}, + PriorityParams{0, "CPU_01"}, + PriorityParams{2, "iGPU_01"}, + PriorityParams{3, "dGPU_01"}}}, + ConfigParams{"BIN", + true, + {PriorityParams{0, "CPU_01"}, + PriorityParams{1, "iGPU_01"}, + PriorityParams{2, "dGPU_01"}, + PriorityParams{3, "dGPU_01"}, + PriorityParams{0, 
"CPU_01"}, + PriorityParams{1, "iGPU_01"}, + PriorityParams{2, "dGPU_01"}, + PriorityParams{3, "dGPU_01"}}}}; +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, + KeyNetworkPriorityTest, + ::testing::ValuesIn(testConfigs), + KeyNetworkPriorityTest::getTestCaseName); diff --git a/src/plugins/auto/tests/unit/life_time_test.cpp b/src/plugins/auto/tests/unit/life_time_test.cpp new file mode 100644 index 00000000000000..a014505ddfcd58 --- /dev/null +++ b/src/plugins/auto/tests/unit/life_time_test.cpp @@ -0,0 +1,84 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#include "include/auto_unit_test.hpp" +#include "unit_test_utils/mocks/openvino/runtime/mock_ivariable_state.hpp" +using namespace ov::mock_auto_plugin; + +using ConfigParams = std::tuple; + +class AutoLifeTimeTest : public tests::AutoTest, public ::testing::Test { +public: + void SetUp() override { + plugin->set_device_name("AUTO"); + mock_compiled_model = {mockIExeNetActual, std::make_shared("for test")}; + ON_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher("GPU.0"), + _)) + .WillByDefault(Return(mock_compiled_model)); + mock_states = {ov::SoPtr(std::make_shared>(), + std::make_shared("for test"))}; + EXPECT_CALL(*inferReqInternalActual, query_state()).WillRepeatedly(Return(mock_states)); + } + + void TearDown() override { + testing::Mock::AllowLeak(mock_states.front()._ptr.get()); + testing::Mock::AllowLeak(inferReqInternalActual.get()); + } + +protected: + ov::SoPtr mock_compiled_model; + std::vector> mock_states; +}; + +TEST_F(AutoLifeTimeTest, loaded_tensor) { + // get Parameter + config.insert(ov::device::priorities("GPU.0")); + std::shared_ptr compiled_model; + ASSERT_NO_THROW(compiled_model = plugin->compile_model(model, config)); + auto request = compiled_model->create_infer_request(); + for (auto& iter : request->get_inputs()) { + auto tensor = request->get_tensor(iter); + ASSERT_EQ(tensor._so, mock_compiled_model._so); + } +} + +TEST_F(AutoLifeTimeTest, loaded_states) { + // get Parameter + config.insert(ov::device::priorities("GPU.0")); + std::shared_ptr compiled_model; + ASSERT_NO_THROW(compiled_model = plugin->compile_model(model, config)); + auto request = compiled_model->create_infer_request(); + auto states = request->query_state(); + auto res_so = mock_states.front()._so; + for (auto& state : states) + ASSERT_EQ(state._so, res_so); +} + +TEST_F(AutoLifeTimeTest, loaded_tensor_multi) { + plugin->set_device_name("MULTI"); + // get Parameter + config.insert(ov::device::priorities("GPU.0")); + std::shared_ptr compiled_model; + ASSERT_NO_THROW(compiled_model = plugin->compile_model(model, config)); + auto request = compiled_model->create_infer_request(); + for (auto& iter : request->get_inputs()) { + auto tensor = request->get_tensor(iter); + ASSERT_EQ(tensor._so, mock_compiled_model._so); + } +} + +TEST_F(AutoLifeTimeTest, loaded_states_bind_buffer) { + // get Parameter + config.insert(ov::device::priorities("GPU.0")); + config.insert(ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)); + config.insert(ov::intel_auto::device_bind_buffer(true)); + std::shared_ptr compiled_model; + ASSERT_NO_THROW(compiled_model = plugin->compile_model(model, config)); + auto request = compiled_model->create_infer_request(); + auto states = request->query_state(); + auto res_so = mock_states.front()._so; + for (auto& state : states) + ASSERT_EQ(state._so, res_so); +} \ No newline at end of file diff --git 
a/src/plugins/auto/tests/unit/log_utils_format_test.cpp b/src/plugins/auto/tests/unit/log_utils_format_test.cpp index 74d6cd9f09317e..a7a8498affee69 100644 --- a/src/plugins/auto/tests/unit/log_utils_format_test.cpp +++ b/src/plugins/auto/tests/unit/log_utils_format_test.cpp @@ -2,11 +2,12 @@ // SPDX-License-Identifier: Apache-2.0 // - -#include #include -#include "utils/log_util.hpp" +#include + #include + +#include "utils/log_util.hpp" using namespace ov::mock_auto_plugin; using ::testing::_; class LogUtilsFormatTest : public ::testing::Test { @@ -19,7 +20,7 @@ class LogUtilsFormatTest : public ::testing::Test { MockLog::release(); } - void traceCallStacksTest(){ + void traceCallStacksTest() { TraceCallStacks("test"); } }; @@ -34,8 +35,8 @@ TEST_F(LogUtilsFormatTest, format_s) { std::string pattern{"abc"}; std::regex regex(pattern); ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) { - printResult = stream.str(); - }); + printResult = stream.str(); + }); EXPECT_CALL(*(HLogger), print(_)).Times(1); LOG_DEBUG("%sabc", "DEBUG"); EXPECT_TRUE(std::regex_search(printResult, regex)); @@ -45,8 +46,8 @@ TEST_F(LogUtilsFormatTest, format_d) { std::string pattern{"abc"}; std::regex regex(pattern); ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) { - printResult = stream.str(); - }); + printResult = stream.str(); + }); EXPECT_CALL(*(HLogger), print(_)).Times(1); LOG_DEBUG("%dabc", -1); EXPECT_TRUE(std::regex_search(printResult, regex)); @@ -57,8 +58,8 @@ TEST_F(LogUtilsFormatTest, format_ld) { std::string pattern{"abc"}; std::regex regex(pattern); ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) { - printResult = stream.str(); - }); + printResult = stream.str(); + }); EXPECT_CALL(*(HLogger), print(_)).Times(1); LOG_DEBUG("%ldabc", -3); EXPECT_TRUE(std::regex_search(printResult, regex)); @@ -69,8 +70,8 @@ TEST_F(LogUtilsFormatTest, format_u) { std::string pattern{"abc"}; std::regex regex(pattern); ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) { - printResult = stream.str(); - }); + printResult = stream.str(); + }); EXPECT_CALL(*(HLogger), print(_)).Times(1); LOG_DEBUG("%uabc", 1); EXPECT_TRUE(std::regex_search(printResult, regex)); @@ -81,8 +82,8 @@ TEST_F(LogUtilsFormatTest, format_lu) { std::string pattern{"abc"}; std::regex regex(pattern); ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) { - printResult = stream.str(); - }); + printResult = stream.str(); + }); EXPECT_CALL(*(HLogger), print(_)).Times(1); LOG_DEBUG("%luabc", 3); EXPECT_TRUE(std::regex_search(printResult, regex)); @@ -93,8 +94,8 @@ TEST_F(LogUtilsFormatTest, format_s_d_ld_u_lu) { std::string pattern{"abc"}; std::regex regex(pattern); ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) { - printResult = stream.str(); - }); + printResult = stream.str(); + }); EXPECT_CALL(*(HLogger), print(_)).Times(1); LOG_DEBUG("%s,%d,%ld,%u,%lu,abc", "DEBUG", -1, -3, 1, 3); EXPECT_TRUE(std::regex_search(printResult, regex)); @@ -105,8 +106,8 @@ TEST_F(LogUtilsFormatTest, format_s_d_ld_u_lu2) { std::string pattern{"abc"}; std::regex regex(pattern); ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) { - printResult = stream.str(); - }); + printResult = stream.str(); + }); EXPECT_CALL(*(HLogger), print(_)).Times(1); LOG_DEBUG("%s%d%ld%u%luabc", "DEBUG", -1, -3, 1, 3); EXPECT_TRUE(std::regex_search(printResult, regex)); @@ -117,8 +118,8 @@ TEST_F(LogUtilsFormatTest, format_lf) { 
std::string pattern{"abc"}; std::regex regex(pattern); ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) { - printResult = stream.str(); - }); + printResult = stream.str(); + }); EXPECT_CALL(*(HLogger), print(_)).Times(1); LOG_DEBUG("%lfabc", 1.33); EXPECT_TRUE(std::regex_search(printResult, regex)); @@ -129,8 +130,8 @@ TEST_F(LogUtilsFormatTest, format_p) { std::string pattern{"not valid"}; std::regex regex(pattern); ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) { - printResult = stream.str(); - }); + printResult = stream.str(); + }); EXPECT_CALL(*(HLogger), print(_)).Times(1); LOG_DEBUG("%p", MockLog::m_mocklog); EXPECT_TRUE(std::regex_search(printResult, regex)); @@ -141,8 +142,8 @@ TEST_F(LogUtilsFormatTest, format_x) { std::string pattern{"not valid"}; std::regex regex(pattern); ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) { - printResult = stream.str(); - }); + printResult = stream.str(); + }); EXPECT_CALL(*(HLogger), print(_)).Times(1); LOG_DEBUG("%x", 3); EXPECT_TRUE(std::regex_search(printResult, regex)); @@ -153,8 +154,8 @@ TEST_F(LogUtilsFormatTest, format_X) { std::string pattern{"not valid"}; std::regex regex(pattern); ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) { - printResult = stream.str(); - }); + printResult = stream.str(); + }); EXPECT_CALL(*(HLogger), print(_)).Times(1); LOG_DEBUG("%X", 3); EXPECT_TRUE(std::regex_search(printResult, regex)); @@ -165,8 +166,8 @@ TEST_F(LogUtilsFormatTest, format_o) { std::string pattern{"not valid"}; std::regex regex(pattern); ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) { - printResult = stream.str(); - }); + printResult = stream.str(); + }); EXPECT_CALL(*(HLogger), print(_)).Times(1); LOG_DEBUG("%o", 3); EXPECT_TRUE(std::regex_search(printResult, regex)); @@ -177,8 +178,8 @@ TEST_F(LogUtilsFormatTest, format_e) { std::string pattern{"not valid"}; std::regex regex(pattern); ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) { - printResult = stream.str(); - }); + printResult = stream.str(); + }); EXPECT_CALL(*(HLogger), print(_)).Times(1); LOG_DEBUG("%e", 3); EXPECT_TRUE(std::regex_search(printResult, regex)); @@ -189,8 +190,8 @@ TEST_F(LogUtilsFormatTest, format_E) { std::string pattern{"not valid"}; std::regex regex(pattern); ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) { - printResult = stream.str(); - }); + printResult = stream.str(); + }); EXPECT_CALL(*(HLogger), print(_)).Times(1); LOG_DEBUG("%E", 3); EXPECT_TRUE(std::regex_search(printResult, regex)); @@ -201,8 +202,8 @@ TEST_F(LogUtilsFormatTest, format_f) { std::string pattern{"not valid"}; std::regex regex(pattern); ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) { - printResult = stream.str(); - }); + printResult = stream.str(); + }); EXPECT_CALL(*(HLogger), print(_)).Times(1); LOG_DEBUG("%f", 3); EXPECT_TRUE(std::regex_search(printResult, regex)); @@ -213,8 +214,8 @@ TEST_F(LogUtilsFormatTest, format_F) { std::string pattern{"not valid"}; std::regex regex(pattern); ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) { - printResult = stream.str(); - }); + printResult = stream.str(); + }); EXPECT_CALL(*(HLogger), print(_)).Times(1); LOG_DEBUG("%F", 3); EXPECT_TRUE(std::regex_search(printResult, regex)); @@ -225,8 +226,8 @@ TEST_F(LogUtilsFormatTest, format_g) { std::string pattern{"not valid"}; std::regex regex(pattern); 
ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) { - printResult = stream.str(); - }); + printResult = stream.str(); + }); EXPECT_CALL(*(HLogger), print(_)).Times(1); LOG_DEBUG("%g", 3); EXPECT_TRUE(std::regex_search(printResult, regex)); @@ -237,21 +238,20 @@ TEST_F(LogUtilsFormatTest, format_G) { std::string pattern{"not valid"}; std::regex regex(pattern); ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) { - printResult = stream.str(); - }); + printResult = stream.str(); + }); EXPECT_CALL(*(HLogger), print(_)).Times(1); LOG_DEBUG("%G", 3); EXPECT_TRUE(std::regex_search(printResult, regex)); } - TEST_F(LogUtilsFormatTest, format_a) { std::string printResult = ""; std::string pattern{"not valid"}; std::regex regex(pattern); ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) { - printResult = stream.str(); - }); + printResult = stream.str(); + }); EXPECT_CALL(*(HLogger), print(_)).Times(1); LOG_DEBUG("%a", 3); EXPECT_TRUE(std::regex_search(printResult, regex)); @@ -262,8 +262,8 @@ TEST_F(LogUtilsFormatTest, format_A) { std::string pattern{"not valid"}; std::regex regex(pattern); ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) { - printResult = stream.str(); - }); + printResult = stream.str(); + }); EXPECT_CALL(*(HLogger), print(_)).Times(1); LOG_DEBUG("%A", 3); EXPECT_TRUE(std::regex_search(printResult, regex)); @@ -274,8 +274,8 @@ TEST_F(LogUtilsFormatTest, format_c) { std::string pattern{"not valid"}; std::regex regex(pattern); ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) { - printResult = stream.str(); - }); + printResult = stream.str(); + }); EXPECT_CALL(*(HLogger), print(_)).Times(1); LOG_DEBUG("%c", 3); EXPECT_TRUE(std::regex_search(printResult, regex)); @@ -287,8 +287,8 @@ TEST_F(LogUtilsFormatTest, format_n) { std::string pattern{"not valid"}; std::regex regex(pattern); ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) { - printResult = stream.str(); - }); + printResult = stream.str(); + }); EXPECT_CALL(*(HLogger), print(_)).Times(1); LOG_DEBUG("%n", &num); EXPECT_TRUE(std::regex_search(printResult, regex)); @@ -299,8 +299,8 @@ TEST_F(LogUtilsFormatTest, format__) { std::string pattern{"not valid"}; std::regex regex(pattern); ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) { - printResult = stream.str(); - }); + printResult = stream.str(); + }); EXPECT_CALL(*(HLogger), print(_)).Times(1); LOG_DEBUG("%%"); EXPECT_TRUE(std::regex_search(printResult, regex)); @@ -311,8 +311,8 @@ TEST_F(LogUtilsFormatTest, format_s__) { std::string pattern{"not valid"}; std::regex regex(pattern); ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) { - printResult = stream.str(); - }); + printResult = stream.str(); + }); EXPECT_CALL(*(HLogger), print(_)).Times(1); LOG_DEBUG("%s%%", "DEBUG"); EXPECT_TRUE(std::regex_search(printResult, regex)); @@ -324,8 +324,8 @@ TEST_F(LogUtilsFormatTest, format_dn) { std::string pattern{"not valid"}; std::regex regex(pattern); ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) { - printResult = stream.str(); - }); + printResult = stream.str(); + }); EXPECT_CALL(*(HLogger), print(_)).Times(1); LOG_DEBUG("%d%n", num, &num); EXPECT_TRUE(std::regex_search(printResult, regex)); @@ -337,8 +337,8 @@ TEST_F(LogUtilsFormatTest, format_ccccdn) { std::string pattern{"not valid"}; std::regex regex(pattern); ON_CALL(*(HLogger), 
print(_)).WillByDefault([&](std::stringstream& stream) { - printResult = stream.str(); - }); + printResult = stream.str(); + }); EXPECT_CALL(*(HLogger), print(_)).Times(1); LOG_DEBUG("cccc%d%n", num, &num); EXPECT_TRUE(std::regex_search(printResult, regex)); @@ -349,8 +349,8 @@ TEST_F(LogUtilsFormatTest, logPrintFormat_error) { std::string pattern{"\\[[0-9]+:[0-9]+:[0-9]+\\.[0-9]+\\]ERROR\\[.+:[0-9]+\\].*"}; std::regex regex(pattern); ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) { - printResult = stream.str(); - }); + printResult = stream.str(); + }); EXPECT_CALL(*(HLogger), print(_)).Times(1); LOG_ERROR("test"); EXPECT_TRUE(std::regex_search(printResult, regex)); @@ -361,8 +361,8 @@ TEST_F(LogUtilsFormatTest, logPrintFormat_warning) { std::string pattern{"\\[[0-9]+:[0-9]+:[0-9]+\\.[0-9]+\\]W\\[.+:[0-9]+\\].*"}; std::regex regex(pattern); ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) { - printResult = stream.str(); - }); + printResult = stream.str(); + }); EXPECT_CALL(*(HLogger), print(_)).Times(1); LOG_WARNING("test"); EXPECT_TRUE(std::regex_search(printResult, regex)); @@ -373,8 +373,8 @@ TEST_F(LogUtilsFormatTest, logPrintFormat_info) { std::string pattern{"\\[[0-9]+:[0-9]+:[0-9]+\\.[0-9]+\\]I\\[.+:[0-9]+\\].*"}; std::regex regex(pattern); ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) { - printResult = stream.str(); - }); + printResult = stream.str(); + }); EXPECT_CALL(*(HLogger), print(_)).Times(1); LOG_INFO("test"); EXPECT_TRUE(std::regex_search(printResult, regex)); @@ -385,8 +385,8 @@ TEST_F(LogUtilsFormatTest, logPrintFormat_debug) { std::string pattern{"\\[[0-9]+:[0-9]+:[0-9]+\\.[0-9]+\\]D\\[.+:[0-9]+\\].*"}; std::regex regex(pattern); ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) { - printResult = stream.str(); - }); + printResult = stream.str(); + }); EXPECT_CALL(*(HLogger), print(_)).Times(1); LOG_DEBUG("test"); EXPECT_TRUE(std::regex_search(printResult, regex)); @@ -398,8 +398,8 @@ TEST_F(LogUtilsFormatTest, logPrintFormat_trace) { std::string pattern{"\\[[0-9]+:[0-9]+:[0-9]+\\.[0-9]+\\]T\\[.+:[0-9]+\\].*"}; std::regex regex(pattern); ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) { - printResult = stream.str(); - }); + printResult = stream.str(); + }); EXPECT_CALL(*(HLogger), print(_)).Times(1); LOG_TRACE(true, "test", "TRACE"); EXPECT_TRUE(std::regex_search(printResult, regex)); diff --git a/src/plugins/auto/tests/unit/log_utils_test.cpp b/src/plugins/auto/tests/unit/log_utils_test.cpp index 53dc7c64fec4d6..6a9b154225d377 100644 --- a/src/plugins/auto/tests/unit/log_utils_test.cpp +++ b/src/plugins/auto/tests/unit/log_utils_test.cpp @@ -2,31 +2,31 @@ // SPDX-License-Identifier: Apache-2.0 // - -#include #include -#include "utils/log_util.hpp" +#include + #include + +#include "utils/log_util.hpp" using ::testing::_; using namespace ov::mock_auto_plugin; // disable using windows.h #if 0 -#if defined(_WIN32) -#include -#elif defined(__linux__) -#include -#elif defined(__APPLE__) -#include -#else -#endif +# if defined(_WIN32) +# include +# elif defined(__linux__) +# include +# elif defined(__APPLE__) +# include +# else +# endif #endif MockLog* MockLog::m_mocklog = NULL; -using ConfigParams = std::tuple< - std::string, // logLevel - std::string, // envlogLevel - int // expectCallNum - >; +using ConfigParams = std::tuple; class LogUtilsTest : public ::testing::TestWithParam { public: std::string _logLevel; @@ -40,21 +40,20 @@ class 
LogUtilsTest : public ::testing::TestWithParam { int expectCallNum; std::tie(logLevel, envLogLevel, expectCallNum) = obj.param; std::ostringstream result; - result << "logLevel_" << logLevel << "_expectCallNum_" << expectCallNum - << "envlogLevel" << envLogLevel; + result << "logLevel_" << logLevel << "_expectCallNum_" << expectCallNum << "envlogLevel" << envLogLevel; return result.str(); } #if 0 void SetTestEnv(std::string key, std::string value) { -#ifdef WIN32 +# ifdef WIN32 SetEnvironmentVariable(key.c_str(), value.c_str()); -#elif defined(__linux__) +# elif defined(__linux__) ::setenv(key.c_str(), value.c_str(), true); -#elif defined(__APPLE__) +# elif defined(__APPLE__) ::setenv(key.c_str(), value.c_str(), true); -#else -#endif +# else +# endif } #endif void SetUp() override { @@ -88,9 +87,10 @@ TEST_P(LogUtilsTest, set_log_level) { TEST_P(LogUtilsTest, INFO_RUN) { set_log_level(_logLevel); int a = 0; - INFO_RUN([&a](){a++;}); - if (_logLevel == "LOG_INFO" || _logLevel == "LOG_DEBUG" || - _logLevel == "LOG_TRACE") { + INFO_RUN([&a]() { + a++; + }); + if (_logLevel == "LOG_INFO" || _logLevel == "LOG_DEBUG" || _logLevel == "LOG_TRACE") { EXPECT_EQ(a, 1); } else { EXPECT_EQ(a, 0); @@ -100,7 +100,9 @@ TEST_P(LogUtilsTest, INFO_RUN) { TEST_P(LogUtilsTest, DEBUG_RUN) { set_log_level(_logLevel); int a = 0; - DEBUG_RUN([&a](){a++;}); + DEBUG_RUN([&a]() { + a++; + }); if (_logLevel == "LOG_DEBUG" || _logLevel == "LOG_TRACE") { EXPECT_EQ(a, 1); } else { @@ -117,10 +119,10 @@ TEST_P(LogUtilsTest, setEnvNotAffectset_log_level) { } #endif -//can not test ENV case. because of the ENV variable is readed at the -//beginning of test application and modify it in runtime is not valid -//still need to test it in different platform manully -//TEST_P(LogUtilsTest, setEnvLogLevel) { +// can not test ENV case. 
because of the ENV variable is readed at the +// beginning of test application and modify it in runtime is not valid +// still need to test it in different platform manully +// TEST_P(LogUtilsTest, setEnvLogLevel) { // SetTestEnv("AUTO_LOG_LEVEL", _envLogLevel); // EXPECT_CALL(*(HLogger), print(_)).Times(_expectCallNum); // printLog(); @@ -132,8 +134,8 @@ TEST(smoke_Auto_BehaviorTests, LogUtilsSingleton) { std::shared_ptr instanceVector[20]; for (unsigned int i = 0; i < 20; i++) { auto future = std::async(std::launch::async, [&instanceVector, i] { - instanceVector[i] = Log::instance(); - }); + instanceVector[i] = Log::instance(); + }); futureVect.push_back(std::move(future)); } @@ -143,20 +145,19 @@ TEST(smoke_Auto_BehaviorTests, LogUtilsSingleton) { for (unsigned int i = 0; i < 19; i++) { EXPECT_NE(instanceVector[i].get(), nullptr); - EXPECT_EQ(instanceVector[i].get(), instanceVector[i+1].get()); + EXPECT_EQ(instanceVector[i].get(), instanceVector[i + 1].get()); } } -const std::vector testConfigs = -{ConfigParams {"LOG_NONE", "0", 0}, - ConfigParams {"LOG_NONE", "1", 0}, - ConfigParams {"LOG_ERROR", "2", 2}, - ConfigParams {"LOG_WARNING", "3", 4}, - ConfigParams {"LOG_INFO", "4", 6}, - ConfigParams {"LOG_DEBUG", "5", 8}, - ConfigParams {"LOG_TRACE", "6", 10}}; - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, LogUtilsTest, - ::testing::ValuesIn(testConfigs), - LogUtilsTest::getTestCaseName); - +const std::vector testConfigs = {ConfigParams{"LOG_NONE", "0", 0}, + ConfigParams{"LOG_NONE", "1", 0}, + ConfigParams{"LOG_ERROR", "2", 2}, + ConfigParams{"LOG_WARNING", "3", 4}, + ConfigParams{"LOG_INFO", "4", 6}, + ConfigParams{"LOG_DEBUG", "5", 8}, + ConfigParams{"LOG_TRACE", "6", 10}}; + +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, + LogUtilsTest, + ::testing::ValuesIn(testConfigs), + LogUtilsTest::getTestCaseName); diff --git a/src/plugins/auto/tests/unit/mock_common.cpp b/src/plugins/auto/tests/unit/mock_common.cpp deleted file mode 100644 index 122fe8b9ecc58a..00000000000000 --- a/src/plugins/auto/tests/unit/mock_common.cpp +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "include/mock_common.hpp" -#include "openvino/runtime/make_tensor.hpp" - -// getMetric will return a fake ov::Any, gmock will call ostreamer << ov::Any -// it will cause core dump, so add this special implemented -namespace testing { -namespace internal { - template<> - void PrintTo(const ov::Any& a, std::ostream* os) { - *os << "using custom PrintTo ov::Any"; - } -} -} - -namespace ov { -MockAsyncInferRequest::MockAsyncInferRequest(const std::shared_ptr& request, - const std::shared_ptr& task_executor, - const std::shared_ptr& callback_executor, - bool ifThrow) - : IAsyncInferRequest(request, task_executor, callback_executor), m_throw(ifThrow) { - m_pipeline = {}; - m_pipeline.push_back({task_executor, - [this] { - if (m_throw) - OPENVINO_THROW("runtime inference failure"); - } }); -} - -void MockSyncInferRequest::allocate_tensor_impl(ov::SoPtr& tensor, const element::Type& element_type, const Shape& shape) { - if (!tensor || tensor->get_element_type() != element_type) { - tensor = ov::make_tensor(element_type, shape); - } else { - tensor->set_shape(shape); - } -} - -MockSyncInferRequest::MockSyncInferRequest(const std::shared_ptr& compiled_model) - : ov::ISyncInferRequest(compiled_model) { - OPENVINO_ASSERT(compiled_model); - // Allocate input/output tensors - for (const auto& input : get_inputs()) { - allocate_tensor(input, 
[this, input](ov::SoPtr& tensor) { - // Can add a check to avoid double work in case of shared tensors - allocate_tensor_impl(tensor, - input.get_element_type(), - input.get_partial_shape().is_dynamic() ? ov::Shape{0} : input.get_shape()); - }); - } - for (const auto& output : get_outputs()) { - allocate_tensor(output, [this, output](ov::SoPtr& tensor) { - // Can add a check to avoid double work in case of shared tensors - allocate_tensor_impl(tensor, - output.get_element_type(), - output.get_partial_shape().is_dynamic() ? ov::Shape{0} : output.get_shape()); - }); - } -} -} //namespace ov diff --git a/src/plugins/auto/tests/unit/parse_meta_device_test.cpp b/src/plugins/auto/tests/unit/parse_meta_device_test.cpp index 422eff1a8d325a..42b6d3de2ca97e 100644 --- a/src/plugins/auto/tests/unit/parse_meta_device_test.cpp +++ b/src/plugins/auto/tests/unit/parse_meta_device_test.cpp @@ -38,18 +38,18 @@ class ParseMetaDeviceTest : public tests::AutoTest, public ::testing::TestWithPa void SetUp() override { ON_CALL(*core, get_supported_property(StrEq("INVALID_DEVICE"), _)).WillByDefault(Throw(ov::Exception(""))); ON_CALL(*core, get_property(StrEq("GPU.2"), ov::supported_properties.name(), _)) - .WillByDefault(Throw(ov::Exception(""))); - ON_CALL(*plugin, parse_meta_devices).WillByDefault([this](const std::string& priorityDevices, - const ov::AnyMap& config) { - return plugin->Plugin::parse_meta_devices(priorityDevices, config); - }); - std::tie(priorityDevices, metaDevices, throwException, expectedTimes) = GetParam(); + .WillByDefault(Throw(ov::Exception(""))); + ON_CALL(*plugin, parse_meta_devices) + .WillByDefault([this](const std::string& priorityDevices, const ov::AnyMap& config) { + return plugin->Plugin::parse_meta_devices(priorityDevices, config); + }); + std::tie(priorityDevices, metaDevices, throwException, expectedTimes) = GetParam(); } void compare(std::vector& result, std::vector& expect) { EXPECT_EQ(result.size(), expect.size()); if (result.size() == expect.size()) { - for (unsigned int i = 0 ; i < result.size(); i++) { + for (unsigned int i = 0; i < result.size(); i++) { EXPECT_EQ(result[i].device_name, expect[i].device_name); EXPECT_EQ(result[i].unique_name, expect[i].unique_name); EXPECT_EQ(result[i].num_requests_per_devices, expect[i].num_requests_per_devices); @@ -61,7 +61,7 @@ class ParseMetaDeviceTest : public tests::AutoTest, public ::testing::TestWithPa void compareDevicePriority(std::vector& result, std::vector& expect) { EXPECT_EQ(result.size(), expect.size()); if (result.size() == expect.size()) { - for (unsigned int i = 0 ; i < result.size(); i++) { + for (unsigned int i = 0; i < result.size(); i++) { EXPECT_EQ(result[i].device_priority, expect[i].device_priority); } } @@ -84,9 +84,9 @@ TEST_P(ParseMetaDeviceTest, ParseMetaDevicesWithPriority) { if (throwException) { ASSERT_ANY_THROW(plugin->parse_meta_devices(priorityDevices, {})); } else { - auto result = plugin->parse_meta_devices(priorityDevices, {ov::device::priorities(priorityDevices)}); - compare(result, metaDevices); - compareDevicePriority(result, metaDevices); + auto result = plugin->parse_meta_devices(priorityDevices, {ov::device::priorities(priorityDevices)}); + compare(result, metaDevices); + compareDevicePriority(result, metaDevices); } } @@ -97,16 +97,16 @@ TEST_P(ParseMetaDeviceTest, ParseMetaDevicesNotWithPriority) { if (throwException) { ASSERT_ANY_THROW(plugin->parse_meta_devices(priorityDevices, {})); } else { - auto result = plugin->parse_meta_devices(priorityDevices, {}); - compare(result, metaDevices); 
- for (unsigned int i = 0 ; i < result.size(); i++) { - EXPECT_EQ(result[i].device_priority, 0); - } - auto result2 = plugin->parse_meta_devices(priorityDevices, {ov::device::priorities("")}); - compare(result2, metaDevices); - for (unsigned int i = 0 ; i < result.size(); i++) { - EXPECT_EQ(result2[i].device_priority, 0); - } + auto result = plugin->parse_meta_devices(priorityDevices, {}); + compare(result, metaDevices); + for (unsigned int i = 0; i < result.size(); i++) { + EXPECT_EQ(result[i].device_priority, 0); + } + auto result2 = plugin->parse_meta_devices(priorityDevices, {ov::device::priorities("")}); + compare(result2, metaDevices); + for (unsigned int i = 0; i < result.size(); i++) { + EXPECT_EQ(result2[i].device_priority, 0); + } } } @@ -119,9 +119,9 @@ TEST_P(ParseMetaDeviceNoIDTest, ParseMetaDevices) { if (throwException) { ASSERT_ANY_THROW(plugin->parse_meta_devices(priorityDevices, {})); } else { - auto result = plugin->parse_meta_devices(priorityDevices, {ov::device::priorities(priorityDevices)}); - compare(result, metaDevices); - compareDevicePriority(result, metaDevices); + auto result = plugin->parse_meta_devices(priorityDevices, {ov::device::priorities(priorityDevices)}); + compare(result, metaDevices); + compareDevicePriority(result, metaDevices); } } // ConfigParams details @@ -129,11 +129,7 @@ TEST_P(ParseMetaDeviceNoIDTest, ParseMetaDevices) { // ConfigParams {devicePriority, expect metaDevices, ifThrowException} const std::vector testConfigs = { - ConfigParams{"CPU,GPU.2,OTHER", - {{"CPU", {}, -1, "", "CPU_", 0}, - {"OTHER", {}, -1, "", "OTHER_", 2}}, - false, - 3}, + ConfigParams{"CPU,GPU.2,OTHER", {{"CPU", {}, -1, "", "CPU_", 0}, {"OTHER", {}, -1, "", "OTHER_", 2}}, false, 3}, ConfigParams{"CPU,GPU,OTHER", {{"CPU", {}, -1, "", "CPU_", 0}, {"GPU.0", {}, -1, "", std::string(igpuFullDeviceName) + "_0", 1}, @@ -189,13 +185,15 @@ const std::vector testConfigsNoID = { 3}, }; -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, ParseMetaDeviceTest, - ::testing::ValuesIn(testConfigs), - ParseMetaDeviceTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, + ParseMetaDeviceTest, + ::testing::ValuesIn(testConfigs), + ParseMetaDeviceTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, ParseMetaDeviceNoIDTest, - ::testing::ValuesIn(testConfigsNoID), - ParseMetaDeviceTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, + ParseMetaDeviceNoIDTest, + ::testing::ValuesIn(testConfigsNoID), + ParseMetaDeviceTest::getTestCaseName); -//toDo need add test for ParseMetaDevices(_, config) to check device config of -//return metaDevices +// toDo need add test for ParseMetaDevices(_, config) to check device config of +// return metaDevices diff --git a/src/plugins/auto/tests/unit/property_test.cpp b/src/plugins/auto/tests/unit/property_test.cpp deleted file mode 100644 index 0639830f7357fd..00000000000000 --- a/src/plugins/auto/tests/unit/property_test.cpp +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "include/auto_unit_test.hpp" -using namespace ov::mock_auto_plugin::tests; - -class MultiPropertyTest : public tests::AutoTestWithRealCore, public ::testing::Test { -public: - void SetUp() override { - plugin->set_device_name("MULTI"); - std::shared_ptr base_plugin = plugin; - reg_plugin(core, base_plugin, "MOCK_MULTI", {}); - // validate mock plugin - core.get_property("MOCK_MULTI", ov::supported_properties); - } -}; - -class 
AutoPropertyTest : public tests::AutoTestWithRealCore, public ::testing::Test { -public: - void SetUp() override { - plugin->set_device_name("AUTO"); - std::shared_ptr base_plugin = plugin; - reg_plugin(core, base_plugin, "MOCK_AUTO", {}); - core.get_property("MOCK_AUTO", ov::supported_properties); - } -}; - - -/* to be enabled if expect multi throw for latency mode -TEST_F(PropertyTest, tputmodeonly_for_multi) { - EXPECT_THROW_WITH_MESSAGE(core.compile_model(model, "MULTI", ov::device::priorities("MOCK_GPU", "MOCK_CPU"), - ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY)), ov::Exception, - "MULTI does not support perf mode"); - ASSERT_NO_THROW(compiled_model = core.compile_model(model, "MULTI", ov::device::priorities("MOCK_GPU", "MOCK_CPU"))); - EXPECT_EQ(compiled_model.get_property(ov::hint::performance_mode), ov::hint::PerformanceMode::THROUGHPUT); -} - -TEST_F(PropertyTest, tputmodeonly_for_multi_propertyset) { - ASSERT_NO_THROW(core.get_property("MULTI", ov::supported_properties)); - EXPECT_THROW_WITH_MESSAGE(core.set_property("MULTI", ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY)), ov::Exception, - "MULTI does not support perf mode"); -} -*/ -/* -TEST_F(PropertyTest, default_perfmode_for_auto) { - ov::CompiledModel compiled_model; - EXPECT_NO_THROW(compiled_model = core.compile_model(model, "AUTO", ov::device::priorities("MOCK_GPU", "MOCK_CPU"))); - EXPECT_EQ(compiled_model.get_property(ov::hint::performance_mode), ov::hint::PerformanceMode::LATENCY); -} -*/ - -TEST_F(MultiPropertyTest, default_perfmode_for_multi) { - EXPECT_CALL(*mock_plugin_cpu.get(), compile_model(::testing::Matcher&>(_), - ::testing::Matcher(ComparePerfHint("THROUGHPUT")))).Times(1); - EXPECT_CALL(*mock_plugin_gpu.get(), compile_model(::testing::Matcher&>(_), - ::testing::Matcher(ComparePerfHint("THROUGHPUT")))).Times(1); - ASSERT_NO_THROW(compiled_model = plugin->compile_model(model, {ov::device::priorities("MOCK_GPU", "MOCK_CPU")})); - EXPECT_EQ(compiled_model->get_property(ov::hint::performance_mode.name()), ov::hint::PerformanceMode::THROUGHPUT); -} - -TEST_F(MultiPropertyTest, respect_secondary_property) { - EXPECT_CALL(*mock_plugin_cpu.get(), compile_model(::testing::Matcher&>(_), - ::testing::Matcher(ComparePerfHint("LATENCY")))).Times(1); - EXPECT_CALL(*mock_plugin_gpu.get(), compile_model(::testing::Matcher&>(_), - ::testing::Matcher(ComparePerfHint("LATENCY")))).Times(1); - ASSERT_NO_THROW(compiled_model = plugin->compile_model(model, {ov::device::priorities("MOCK_GPU", "MOCK_CPU"), - {"DEVICE_PROPERTIES", "{MOCK_CPU:{PERFORMANCE_HINT:LATENCY},MOCK_GPU:{PERFORMANCE_HINT:LATENCY}"}})); - EXPECT_EQ(compiled_model->get_property(ov::hint::performance_mode.name()), ov::hint::PerformanceMode::THROUGHPUT); -} - -TEST_F(AutoPropertyTest, default_perfmode_for_auto_ctput) { - EXPECT_CALL(*mock_plugin_cpu.get(), compile_model(::testing::Matcher&>(_), - ::testing::Matcher(ComparePerfHint("THROUGHPUT")))).Times(1); - EXPECT_CALL(*mock_plugin_gpu.get(), compile_model(::testing::Matcher&>(_), - ::testing::Matcher(ComparePerfHint("THROUGHPUT")))).Times(1); - ASSERT_NO_THROW(compiled_model = plugin->compile_model(model, {ov::device::priorities("MOCK_GPU", "MOCK_CPU"), - ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)})); - EXPECT_EQ(compiled_model->get_property(ov::hint::performance_mode.name()), ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT); -} - -TEST_F(AutoPropertyTest, default_perfmode_for_auto) { - EXPECT_CALL(*mock_plugin_cpu.get(), 
compile_model(::testing::Matcher&>(_), - ::testing::Matcher(ComparePerfHint("LATENCY")))).Times(1); - EXPECT_CALL(*mock_plugin_gpu.get(), compile_model(::testing::Matcher&>(_), - ::testing::Matcher(ComparePerfHint("LATENCY")))).Times(1); - compiled_model = plugin->compile_model(model, {ov::device::priorities("MOCK_GPU", "MOCK_CPU")}); - EXPECT_EQ(compiled_model->get_property(ov::hint::performance_mode.name()), ov::hint::PerformanceMode::LATENCY); -} - -TEST_F(AutoPropertyTest, respect_secondary_property_auto_ctput) { - EXPECT_CALL(*mock_plugin_cpu.get(), compile_model(::testing::Matcher&>(_), - ::testing::Matcher(ComparePerfHint("LATENCY")))).Times(1); - EXPECT_CALL(*mock_plugin_gpu.get(), compile_model(::testing::Matcher&>(_), - ::testing::Matcher(ComparePerfHint("THROUGHPUT")))).Times(1); - ASSERT_NO_THROW(compiled_model = plugin->compile_model(model, {ov::device::priorities("MOCK_GPU", "MOCK_CPU"), - ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT), - {"DEVICE_PROPERTIES", "{MOCK_CPU:{PERFORMANCE_HINT:LATENCY},MOCK_GPU:{PERFORMANCE_HINT:THROUGHPUT}"}})); - EXPECT_EQ(compiled_model->get_property(ov::hint::performance_mode.name()), ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT); -} \ No newline at end of file diff --git a/src/plugins/auto/tests/unit/release_helper_test.cpp b/src/plugins/auto/tests/unit/release_helper_test.cpp index c90139bdd8f244..77f35a9cefe9f1 100644 --- a/src/plugins/auto/tests/unit/release_helper_test.cpp +++ b/src/plugins/auto/tests/unit/release_helper_test.cpp @@ -2,18 +2,17 @@ // SPDX-License-Identifier: Apache-2.0 // +#include #include -#include "common_test_utils/test_constants.hpp" #include "include/auto_unit_test.hpp" using Config = std::map; using namespace ov::mock_auto_plugin; -using ConfigParams = std::tuple< - bool, // cpu load success - bool // hw device load success - >; +using ConfigParams = std::tuple; class AutoReleaseHelperTest : public tests::AutoTest, public ::testing::TestWithParam { public: static std::string getTestCaseName(testing::TestParamInfo obj) { @@ -21,7 +20,7 @@ class AutoReleaseHelperTest : public tests::AutoTest, public ::testing::TestWith bool accSuccess; std::tie(cpuSuccess, accSuccess) = obj.param; std::ostringstream result; - if (!cpuSuccess) { + if (!cpuSuccess) { result << "cpuLoadFailure_"; } else { result << "cpuLoadSuccess_"; @@ -43,33 +42,42 @@ TEST_P(AutoReleaseHelperTest, releaseResource) { size_t decreaseCount = 0; // test auto plugin plugin->set_device_name("AUTO"); - const std::string strDevices = ov::test::utils::DEVICE_GPU + std::string(",") + - ov::test::utils::DEVICE_CPU; + const std::string strDevices = ov::test::utils::DEVICE_GPU + std::string(",") + ov::test::utils::DEVICE_CPU; if (accSuccess) { - ON_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq(ov::test::utils::DEVICE_GPU)), _)) - .WillByDefault(InvokeWithoutArgs([this]() { - std::this_thread::sleep_for(std::chrono::milliseconds(200)); - return mockExeNetworkActual; })); + ON_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(StrEq(ov::test::utils::DEVICE_GPU)), + _)) + .WillByDefault(InvokeWithoutArgs([this]() { + std::this_thread::sleep_for(std::chrono::milliseconds(200)); + return mockExeNetworkActual; + })); } else { - ON_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq(ov::test::utils::DEVICE_GPU)), _)) - .WillByDefault(InvokeWithoutArgs([this]() { - std::this_thread::sleep_for(std::chrono::milliseconds(200)); - OPENVINO_THROW(""); - return 
mockExeNetworkActual; })); + ON_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(StrEq(ov::test::utils::DEVICE_GPU)), + _)) + .WillByDefault(InvokeWithoutArgs([this]() { + std::this_thread::sleep_for(std::chrono::milliseconds(200)); + OPENVINO_THROW(""); + return mockExeNetworkActual; + })); } if (cpuSuccess) { - ON_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq(ov::test::utils::DEVICE_CPU)), _)) - .WillByDefault(Return(mockExeNetwork)); + ON_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(StrEq(ov::test::utils::DEVICE_CPU)), + _)) + .WillByDefault(Return(mockExeNetwork)); if (accSuccess) decreaseCount++; } else { - ON_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq(ov::test::utils::DEVICE_CPU)), _)) - .WillByDefault(Throw(InferenceEngine::GeneralError{""})); + ON_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(StrEq(ov::test::utils::DEVICE_CPU)), + _)) + .WillByDefault(Throw(InferenceEngine::GeneralError{""})); } metaDevices = {{ov::test::utils::DEVICE_CPU, {}, -1}, {ov::test::utils::DEVICE_GPU, {}, -1}}; DeviceInformation devInfo; @@ -80,15 +88,16 @@ TEST_P(AutoReleaseHelperTest, releaseResource) { return devices; }); ON_CALL(*plugin, select_device(Property(&std::vector::size, Eq(2)), _, _)) - .WillByDefault(Return(metaDevices[1])); + .WillByDefault(Return(metaDevices[1])); ON_CALL(*plugin, select_device(Property(&std::vector::size, Eq(1)), _, _)) - .WillByDefault(Return(metaDevices[0])); + .WillByDefault(Return(metaDevices[0])); config.insert(ov::device::priorities(ov::test::utils::DEVICE_CPU + std::string(",") + ov::test::utils::DEVICE_GPU)); std::shared_ptr exeNetwork; if (cpuSuccess || accSuccess) { ASSERT_NO_THROW(exeNetwork = plugin->compile_model(model, config)); if (!cpuSuccess) - EXPECT_EQ(exeNetwork->get_property(ov::execution_devices.name()).as(), ov::test::utils::DEVICE_GPU); + EXPECT_EQ(exeNetwork->get_property(ov::execution_devices.name()).as(), + ov::test::utils::DEVICE_GPU); else EXPECT_EQ(exeNetwork->get_property(ov::execution_devices.name()).as(), "(CPU)"); } else { @@ -101,19 +110,21 @@ TEST_P(AutoReleaseHelperTest, releaseResource) { EXPECT_EQ(inferReqInternal.use_count(), requestsharedcount - decreaseCount); if (cpuSuccess || accSuccess) { if (accSuccess) - EXPECT_EQ(exeNetwork->get_property(ov::execution_devices.name()).as(), ov::test::utils::DEVICE_GPU); + EXPECT_EQ(exeNetwork->get_property(ov::execution_devices.name()).as(), + ov::test::utils::DEVICE_GPU); else - EXPECT_EQ(exeNetwork->get_property(ov::execution_devices.name()).as(), ov::test::utils::DEVICE_CPU); + EXPECT_EQ(exeNetwork->get_property(ov::execution_devices.name()).as(), + ov::test::utils::DEVICE_CPU); } } // -const std::vector testConfigs = {ConfigParams {true, true}, - ConfigParams {true, false}, - ConfigParams {false, true}, - ConfigParams {false, false} - }; +const std::vector testConfigs = {ConfigParams{true, true}, + ConfigParams{true, false}, + ConfigParams{false, true}, + ConfigParams{false, false}}; -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, AutoReleaseHelperTest, - ::testing::ValuesIn(testConfigs), - AutoReleaseHelperTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, + AutoReleaseHelperTest, + ::testing::ValuesIn(testConfigs), + AutoReleaseHelperTest::getTestCaseName); diff --git a/src/plugins/auto/tests/unit/runtime_fallback_test.cpp b/src/plugins/auto/tests/unit/runtime_fallback_test.cpp index 
12158884c3fd64..bf837b7aaf4dcf 100644 --- a/src/plugins/auto/tests/unit/runtime_fallback_test.cpp +++ b/src/plugins/auto/tests/unit/runtime_fallback_test.cpp @@ -5,29 +5,28 @@ #include #include "include/auto_unit_test.hpp" -#include "openvino/runtime/threading/immediate_executor.hpp" #include "openvino/runtime/auto/properties.hpp" +#include "openvino/runtime/threading/immediate_executor.hpp" using namespace ov::mock_auto_plugin; using ConfigParams = std::tuple>, int, bool, bool, bool, bool>; -class AutoRuntimeFallback : public tests::AutoTest, - public ::testing::TestWithParam { +class AutoRuntimeFallback : public tests::AutoTest, public ::testing::TestWithParam { public: - ov::SoPtr mockExeNetworkGPU_1; - ov::SoPtr mockExeNetworkOTHER; + ov::SoPtr mockExeNetworkGPU_1; + ov::SoPtr mockExeNetworkOTHER; - std::shared_ptr> inferReqInternalGPU_1; - std::shared_ptr> inferReqInternalOTHER; + std::shared_ptr> inferReqInternalGPU_1; + std::shared_ptr> inferReqInternalOTHER; - std::shared_ptr> mockIExeNetGPU_1; - std::shared_ptr> mockIExeNetOTHER; + std::shared_ptr> mockIExeNetGPU_1; + std::shared_ptr> mockIExeNetOTHER; - std::shared_ptr mockInferrequest; - std::shared_ptr mockInferrequestGPU_0; - std::shared_ptr mockInferrequestGPU_1; - std::shared_ptr mockInferrequestOTHER; + std::shared_ptr mockInferrequest; + std::shared_ptr mockInferrequestGPU_0; + std::shared_ptr mockInferrequestGPU_1; + std::shared_ptr mockInferrequestOTHER; std::shared_ptr mockExecutor; std::shared_ptr mockExecutorGPU_0; @@ -42,7 +41,12 @@ class AutoRuntimeFallback : public tests::AutoTest, bool expectThrow; bool loadNetworkFail; bool generateWorkersFail; - std::tie(targetDevices, loadNetworkNum, enableRumtimeFallback, expectThrow, loadNetworkFail, generateWorkersFail) = obj.param; + std::tie(targetDevices, + loadNetworkNum, + enableRumtimeFallback, + expectThrow, + loadNetworkFail, + generateWorkersFail) = obj.param; std::ostringstream result; result << "auto_runtime_fallback_"; for (auto deviceInfo : targetDevices) { @@ -82,40 +86,59 @@ class AutoRuntimeFallback : public tests::AutoTest, void SetUp() override { // prepare extra mockExeNetwork - mockIExeNetGPU_1 = std::make_shared>(model, plugin); + mockIExeNetGPU_1 = std::make_shared>(model, plugin); mockExeNetworkGPU_1 = {mockIExeNetGPU_1, {}}; - mockIExeNetOTHER = std::make_shared>(model, plugin); + mockIExeNetOTHER = std::make_shared>(model, plugin); mockExeNetworkOTHER = {mockIExeNetOTHER, {}}; - + ON_CALL(*mockIExeNetGPU_1.get(), inputs()).WillByDefault(ReturnRefOfCopy(model->inputs())); + ON_CALL(*mockIExeNetGPU_1.get(), outputs()).WillByDefault(ReturnRefOfCopy(model->outputs())); + ON_CALL(*mockIExeNetOTHER.get(), inputs()).WillByDefault(ReturnRefOfCopy(model->inputs())); + ON_CALL(*mockIExeNetOTHER.get(), outputs()).WillByDefault(ReturnRefOfCopy(model->outputs())); // prepare mockicore and cnnNetwork for loading - ON_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq("GPU.0")), _)).WillByDefault(InvokeWithoutArgs([this]() { - std::this_thread::sleep_for(std::chrono::milliseconds(200)); - return mockExeNetworkActual; })); - ON_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq("GPU.1")), _)).WillByDefault(InvokeWithoutArgs([this]() { - std::this_thread::sleep_for(std::chrono::milliseconds(200)); - return mockExeNetworkGPU_1; })); - ON_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq("OTHER")), _)).WillByDefault(InvokeWithoutArgs([this]() { - 
std::this_thread::sleep_for(std::chrono::milliseconds(200)); - return mockExeNetworkOTHER; })); + ON_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(StrEq("GPU.0")), + _)) + .WillByDefault(InvokeWithoutArgs([this]() { + std::this_thread::sleep_for(std::chrono::milliseconds(200)); + return mockExeNetworkActual; + })); + ON_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(StrEq("GPU.1")), + _)) + .WillByDefault(InvokeWithoutArgs([this]() { + std::this_thread::sleep_for(std::chrono::milliseconds(200)); + return mockExeNetworkGPU_1; + })); + ON_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(StrEq("OTHER")), + _)) + .WillByDefault(InvokeWithoutArgs([this]() { + std::this_thread::sleep_for(std::chrono::milliseconds(200)); + return mockExeNetworkOTHER; + })); - ON_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq(ov::test::utils::DEVICE_CPU)), - (_))).WillByDefault(Return(mockExeNetwork)); + ON_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(StrEq(ov::test::utils::DEVICE_CPU)), + (_))) + .WillByDefault(Return(mockExeNetwork)); mockExecutor = std::make_shared(); mockExecutorGPU_0 = std::make_shared(); - inferReqInternalGPU_1 = std::make_shared>(mockIExeNetGPU_1); + inferReqInternalGPU_1 = + std::make_shared>(mockIExeNetGPU_1); mockExecutorGPU_1 = std::make_shared(); ON_CALL(*mockIExeNetGPU_1, get_property(StrEq(ov::optimal_number_of_infer_requests.name()))) - .WillByDefault(Return(optimalNum)); + .WillByDefault(Return(optimalNum)); - inferReqInternalOTHER = std::make_shared>(mockIExeNetOTHER); + inferReqInternalOTHER = + std::make_shared>(mockIExeNetOTHER); mockExecutorOTHER = std::make_shared(); ON_CALL(*mockIExeNetOTHER, get_property(StrEq(ov::optimal_number_of_infer_requests.name()))) .WillByDefault(Return(optimalNum)); @@ -132,11 +155,14 @@ TEST_P(AutoRuntimeFallback, releaseResource) { bool expectThrow; bool loadNetworkFail; bool generateWorkersFail; - std::tie(targetDevices, loadNetworkNum, enableRumtimeFallback, expectThrow, loadNetworkFail, generateWorkersFail) = this->GetParam(); + std::tie(targetDevices, loadNetworkNum, enableRumtimeFallback, expectThrow, loadNetworkFail, generateWorkersFail) = + this->GetParam(); if (loadNetworkFail) { - ON_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq("GPU.1")), - _)).WillByDefault(Throw(ov::Exception{"compile model error"})); + ON_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(StrEq("GPU.1")), + _)) + .WillByDefault(Throw(ov::Exception{"compile model error"})); } for (auto& deviceInfo : targetDevices) { std::string deviceName; @@ -145,30 +171,45 @@ TEST_P(AutoRuntimeFallback, releaseResource) { targetDev += deviceName; targetDev += ((deviceInfo == targetDevices.back()) ? 
"" : ","); if (deviceName == "CPU") { - mockInferrequest = std::make_shared( - inferReqInternal, mockExecutor, nullptr, ifThrow); + mockInferrequest = std::make_shared(inferReqInternal, + mockExecutor, + nullptr, + ifThrow); ON_CALL(*mockIExeNet.get(), create_infer_request()).WillByDefault(Return(mockInferrequest)); } else if (deviceName == "GPU.0") { - mockInferrequestGPU_0 = std::make_shared( - inferReqInternalActual, mockExecutorGPU_0, nullptr, ifThrow); + mockInferrequestGPU_0 = + std::make_shared(inferReqInternalActual, + mockExecutorGPU_0, + nullptr, + ifThrow); ON_CALL(*mockIExeNetActual.get(), create_infer_request()).WillByDefault(InvokeWithoutArgs([this]() { - std::this_thread::sleep_for(std::chrono::milliseconds(0)); - return mockInferrequestGPU_0; })); + std::this_thread::sleep_for(std::chrono::milliseconds(0)); + return mockInferrequestGPU_0; + })); } else if (deviceName == "GPU.1") { if (generateWorkersFail) { - mockInferrequestGPU_1 = std::make_shared( - inferReqInternalGPU_1, mockExecutorGPU_1, nullptr, ifThrow); + mockInferrequestGPU_1 = + std::make_shared(inferReqInternalGPU_1, + mockExecutorGPU_1, + nullptr, + ifThrow); ON_CALL(*mockIExeNetGPU_1.get(), create_infer_request()).WillByDefault(Throw(ov::Exception{"error"})); } else { - mockInferrequestGPU_1 = std::make_shared( - inferReqInternalGPU_1, mockExecutorGPU_1, nullptr, ifThrow); + mockInferrequestGPU_1 = + std::make_shared(inferReqInternalGPU_1, + mockExecutorGPU_1, + nullptr, + ifThrow); ON_CALL(*mockIExeNetGPU_1.get(), create_infer_request()).WillByDefault(InvokeWithoutArgs([this]() { - std::this_thread::sleep_for(std::chrono::milliseconds(0)); - return mockInferrequestGPU_1; })); + std::this_thread::sleep_for(std::chrono::milliseconds(0)); + return mockInferrequestGPU_1; + })); } } else if (deviceName == "OTHER") { - mockInferrequestOTHER = - std::make_shared(inferReqInternalOTHER, mockExecutorOTHER, nullptr, ifThrow); + mockInferrequestOTHER = std::make_shared(inferReqInternalOTHER, + mockExecutorOTHER, + nullptr, + ifThrow); ON_CALL(*mockIExeNetOTHER.get(), create_infer_request()).WillByDefault(InvokeWithoutArgs([this]() { std::this_thread::sleep_for(std::chrono::milliseconds(0)); return mockInferrequestOTHER; @@ -185,8 +226,8 @@ TEST_P(AutoRuntimeFallback, releaseResource) { EXPECT_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(_), - ::testing::Matcher(_))) + ::testing::Matcher(_), + ::testing::Matcher(_))) .Times(loadNetworkNum); std::shared_ptr exeNetwork; @@ -206,10 +247,10 @@ const std::vector testConfigs = { ConfigParams{{{"GPU.0", true}, {"GPU.1", false}}, 2, true, false, false, false}, ConfigParams{{{"GPU.0", false}, {"GPU.1", true}}, 1, true, false, false, false}, ConfigParams{{{"GPU.0", false}, {"GPU.1", false}}, 1, true, false, false, false}, - //CPU_HELP does not throw + // CPU_HELP does not throw ConfigParams{{{"GPU.0", false}, {"CPU", false}}, 2, true, false, false, false}, ConfigParams{{{"GPU.0", true}, {"CPU", false}}, 2, true, false, false, false}, - //CPU_HELP throw + // CPU_HELP throw ConfigParams{{{"GPU.0", false}, {"CPU", true}}, 2, true, false, false, false}, ConfigParams{{{"GPU.0", true}, {"CPU", true}}, 2, true, true, false, false}, // 3 devices @@ -217,11 +258,11 @@ const std::vector testConfigs = { ConfigParams{{{"GPU.0", true}, {"GPU.1", false}, {"OTHER", false}}, 2, true, false, false, false}, ConfigParams{{{"GPU.0", true}, {"GPU.1", true}, {"OTHER", false}}, 3, true, false, false, false}, ConfigParams{{{"GPU.0", true}, {"GPU.1", true}, {"OTHER", true}}, 3, 
true, true, false, false}, - //CPU_HELP does not throw + // CPU_HELP does not throw ConfigParams{{{"GPU.0", false}, {"GPU.1", false}, {"CPU", false}}, 2, true, false, false, false}, ConfigParams{{{"GPU.0", true}, {"GPU.1", false}, {"CPU", false}}, 2, true, false, false, false}, ConfigParams{{{"GPU.0", true}, {"GPU.1", true}, {"CPU", false}}, 2, true, false, false, false}, - //CPU_HELP throw + // CPU_HELP throw ConfigParams{{{"GPU.0", false}, {"GPU.1", false}, {"CPU", true}}, 2, true, false, false, false}, ConfigParams{{{"GPU.0", true}, {"GPU.1", false}, {"CPU", true}}, 3, true, false, false, false}, ConfigParams{{{"GPU.0", true}, {"GPU.1", true}, {"CPU", true}}, 3, true, true, false, false}, @@ -230,10 +271,10 @@ const std::vector testConfigs = { ConfigParams{{{"GPU.0", true}, {"GPU.1", false}}, 1, false, true, false, false}, ConfigParams{{{"GPU.0", false}, {"GPU.1", true}}, 1, false, false, false, false}, ConfigParams{{{"GPU.0", false}, {"GPU.1", false}}, 1, false, false, false, false}, - //CPU_HELP does not throw + // CPU_HELP does not throw ConfigParams{{{"GPU.0", false}, {"CPU", false}}, 2, false, false, false, false}, ConfigParams{{{"GPU.0", true}, {"CPU", false}}, 2, false, false, false, false}, - //CPU_HELP throw + // CPU_HELP throw ConfigParams{{{"GPU.0", false}, {"CPU", true}}, 2, false, true, false, false}, ConfigParams{{{"GPU.0", true}, {"CPU", true}}, 2, false, true, false, false}, // 3 devices @@ -241,11 +282,11 @@ const std::vector testConfigs = { ConfigParams{{{"GPU.0", true}, {"GPU.1", false}, {"OTHER", false}}, 1, false, true, false, false}, ConfigParams{{{"GPU.0", true}, {"GPU.1", true}, {"OTHER", false}}, 1, false, true, false, false}, ConfigParams{{{"GPU.0", true}, {"GPU.1", true}, {"OTHER", true}}, 1, false, true, false, false}, - //CPU_HELP does not throw + // CPU_HELP does not throw ConfigParams{{{"GPU.0", false}, {"GPU.1", false}, {"CPU", false}}, 2, false, false, false, false}, ConfigParams{{{"GPU.0", true}, {"GPU.1", false}, {"CPU", false}}, 2, false, false, false, false}, ConfigParams{{{"GPU.0", true}, {"GPU.1", true}, {"CPU", false}}, 2, false, false, false, false}, - //CPU_HELP throw + // CPU_HELP throw ConfigParams{{{"GPU.0", false}, {"GPU.1", false}, {"CPU", true}}, 2, false, true, false, false}, ConfigParams{{{"GPU.0", true}, {"GPU.1", false}, {"CPU", true}}, 2, false, true, false, false}, ConfigParams{{{"GPU.0", true}, {"GPU.1", true}, {"CPU", true}}, 2, false, true, false, false}, @@ -254,23 +295,27 @@ const std::vector testConfigs = { ConfigParams{{{"GPU.0", true}, {"GPU.1", false}, {"OTHER", false}}, 3, true, false, false, true}, }; -INSTANTIATE_TEST_SUITE_P(smoke_AutoRuntimeFallback, AutoRuntimeFallback, - ::testing::ValuesIn(testConfigs), - AutoRuntimeFallback::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_AutoRuntimeFallback, + AutoRuntimeFallback, + ::testing::ValuesIn(testConfigs), + AutoRuntimeFallback::getTestCaseName); TEST_P(AutoCTPUTRuntimeFallback, ctputDeviceInferFailTest) { std::string targetDev; - std::vector> targetDevices; //std::tuple + std::vector> targetDevices; // std::tuple int loadNetworkNum; bool enableRumtimeFallback; bool expectThrow; bool loadNetworkFail; bool generateWorkersFail; - std::tie(targetDevices, loadNetworkNum, enableRumtimeFallback, expectThrow, loadNetworkFail, generateWorkersFail) = this->GetParam(); + std::tie(targetDevices, loadNetworkNum, enableRumtimeFallback, expectThrow, loadNetworkFail, generateWorkersFail) = + this->GetParam(); if (loadNetworkFail) { - ON_CALL(*core, 
compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq("GPU.1")), - _)).WillByDefault(Throw(ov::Exception{"compile model error"})); + ON_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(StrEq("GPU.1")), + _)) + .WillByDefault(Throw(ov::Exception{"compile model error"})); } for (auto& deviceInfo : targetDevices) { std::string deviceName; @@ -279,26 +324,39 @@ TEST_P(AutoCTPUTRuntimeFallback, ctputDeviceInferFailTest) { targetDev += deviceName; targetDev += ((deviceInfo == targetDevices.back()) ? "" : ","); if (deviceName == "CPU") { - mockInferrequest = std::make_shared( - inferReqInternal, mockExecutor, nullptr, ifThrow); + mockInferrequest = std::make_shared(inferReqInternal, + mockExecutor, + nullptr, + ifThrow); ON_CALL(*mockIExeNet.get(), create_infer_request()).WillByDefault(Return(mockInferrequest)); } else if (deviceName == "GPU.0") { - mockInferrequestGPU_0 = std::make_shared( - inferReqInternalActual, mockExecutorGPU_0, nullptr, ifThrow); + mockInferrequestGPU_0 = + std::make_shared(inferReqInternalActual, + mockExecutorGPU_0, + nullptr, + ifThrow); ON_CALL(*mockIExeNetActual.get(), create_infer_request()).WillByDefault(InvokeWithoutArgs([this]() { - std::this_thread::sleep_for(std::chrono::milliseconds(0)); - return mockInferrequestGPU_0; })); + std::this_thread::sleep_for(std::chrono::milliseconds(0)); + return mockInferrequestGPU_0; + })); } else if (deviceName == "GPU.1") { if (generateWorkersFail) { - mockInferrequestGPU_1 = std::make_shared( - inferReqInternalGPU_1, mockExecutorGPU_1, nullptr, ifThrow); + mockInferrequestGPU_1 = + std::make_shared(inferReqInternalGPU_1, + mockExecutorGPU_1, + nullptr, + ifThrow); ON_CALL(*mockIExeNetGPU_1.get(), create_infer_request()).WillByDefault(Throw(ov::Exception{"error"})); } else { - mockInferrequestGPU_1 = std::make_shared( - inferReqInternalGPU_1, mockExecutorGPU_1, nullptr, ifThrow); + mockInferrequestGPU_1 = + std::make_shared(inferReqInternalGPU_1, + mockExecutorGPU_1, + nullptr, + ifThrow); ON_CALL(*mockIExeNetGPU_1.get(), create_infer_request()).WillByDefault(InvokeWithoutArgs([this]() { - std::this_thread::sleep_for(std::chrono::milliseconds(0)); - return mockInferrequestGPU_1; })); + std::this_thread::sleep_for(std::chrono::milliseconds(0)); + return mockInferrequestGPU_1; + })); } } } @@ -311,8 +369,8 @@ TEST_P(AutoCTPUTRuntimeFallback, ctputDeviceInferFailTest) { EXPECT_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(_), - ::testing::Matcher(_))) + ::testing::Matcher(_), + ::testing::Matcher(_))) .Times(loadNetworkNum); std::shared_ptr exeNetwork; diff --git a/src/plugins/auto/tests/unit/select_device_failed_test.cpp b/src/plugins/auto/tests/unit/select_device_failed_test.cpp index 92afffef4b6f82..b1a74a7113e61a 100644 --- a/src/plugins/auto/tests/unit/select_device_failed_test.cpp +++ b/src/plugins/auto/tests/unit/select_device_failed_test.cpp @@ -15,18 +15,16 @@ enum MODEL { THROUGHPUT = 2, }; -using ConfigParams = std::tuple< - bool, // if can continue to run - bool, // if select throw exception - MODEL, // config model general, latency, throughput - std::vector, // {device, loadSuccess} - unsigned int, // select count - unsigned int, // load count - unsigned int // load device success count - >; +using ConfigParams = std::tuple, // {device, loadSuccess} + unsigned int, // select count + unsigned int, // load count + unsigned int // load device success count + >; -class AutoLoadFailedTest : public tests::AutoTest, - public ::testing::TestWithParam { +class 
AutoLoadFailedTest : public tests::AutoTest, public ::testing::TestWithParam { public: static std::string getTestCaseName(testing::TestParamInfo obj) { unsigned int selectCount; @@ -36,8 +34,8 @@ class AutoLoadFailedTest : public tests::AutoTest, bool continueRun; bool thrExcWheSelect; MODEL configModel; - std::tie(continueRun, thrExcWheSelect, configModel, deviceConfigs, - selectCount, loadCount, loadSuccessCount) = obj.param; + std::tie(continueRun, thrExcWheSelect, configModel, deviceConfigs, selectCount, loadCount, loadSuccessCount) = + obj.param; std::ostringstream result; for (auto& item : deviceConfigs) { if (std::get<1>(item)) { @@ -53,22 +51,21 @@ class AutoLoadFailedTest : public tests::AutoTest, } switch (configModel) { - case GENERAL: - result << "GENERAL"; - break; - case LATENCY: - result << "LATENCY"; - break; - case THROUGHPUT: - result << "THROUGHPUT"; - break; - default: - LOG_ERROR("should not come here"); - break; + case GENERAL: + result << "GENERAL"; + break; + case LATENCY: + result << "LATENCY"; + break; + case THROUGHPUT: + result << "THROUGHPUT"; + break; + default: + LOG_ERROR("should not come here"); + break; } - result << "select_" << selectCount << "_loadCount_" - << loadCount << "_loadSuccessCount_" << loadSuccessCount; + result << "select_" << selectCount << "_loadCount_" << loadCount << "_loadSuccessCount_" << loadSuccessCount; return result.str(); } void SetUp() override { @@ -87,8 +84,8 @@ TEST_P(AutoLoadFailedTest, LoadCNNetWork) { bool continueRun; bool thrExcWheSelect; MODEL configModel; - std::tie(continueRun, thrExcWheSelect, configModel, deviceConfigs, selectCount, - loadCount, loadSuccessCount) = this->GetParam(); + std::tie(continueRun, thrExcWheSelect, configModel, deviceConfigs, selectCount, loadCount, loadSuccessCount) = + this->GetParam(); // test auto plugin plugin->set_device_name("AUTO"); @@ -99,30 +96,37 @@ TEST_P(AutoLoadFailedTest, LoadCNNetWork) { bool loadSuccess = std::get<1>(*iter); // accoding to device loading config, set if the loading will successful or throw exception. 
if (loadSuccess) { - ON_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq(deviceName)), - (_))).WillByDefault(Return(mockExeNetwork)); + ON_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(StrEq(deviceName)), + (_))) + .WillByDefault(Return(mockExeNetwork)); } else { - ON_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq(deviceName)), - (_))) + ON_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(StrEq(deviceName)), + (_))) .WillByDefault(Throw(ov::Exception{"compile error"})); } DeviceInformation devInfo; switch (configModel) { - case GENERAL: - devInfo = {deviceName, {}, 2, ""}; - break; - case LATENCY: - devInfo = {deviceName, {ov::hint::performance_mode("LATENCY"), ov::hint::allow_auto_batching(true), ov::auto_batch_timeout(1000)}, - 2, ""}; - break; - case THROUGHPUT: - devInfo = {deviceName, {ov::hint::performance_mode("THROUGHPUT")}, 2, ""}; - break; - default: - LOG_ERROR("should not come here"); - break; + case GENERAL: + devInfo = {deviceName, {}, 2, ""}; + break; + case LATENCY: + devInfo = {deviceName, + {ov::hint::performance_mode("LATENCY"), + ov::hint::allow_auto_batching(true), + ov::auto_batch_timeout(1000)}, + 2, + ""}; + break; + case THROUGHPUT: + devInfo = {deviceName, {ov::hint::performance_mode("THROUGHPUT")}, 2, ""}; + break; + default: + LOG_ERROR("should not come here"); + break; } metaDevices.push_back(std::move(devInfo)); @@ -156,9 +160,11 @@ TEST_P(AutoLoadFailedTest, LoadCNNetWork) { EXPECT_CALL(*plugin, parse_meta_devices(_, _)).Times(AtLeast(1)); EXPECT_CALL(*plugin, select_device(_, _, _)).Times(selectCount); - EXPECT_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(_), - ::testing::Matcher(_))).Times(loadCount); + EXPECT_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(_), + ::testing::Matcher(_))) + .Times(loadCount); // if loadSuccess will get the optimalNum requset of per device, in this test is 2; EXPECT_CALL(*mockIExeNet.get(), get_property(StrEq(ov::optimal_number_of_infer_requests.name()))) @@ -177,8 +183,8 @@ TEST_P(AutoLoadFailedTest, LoadCNNetWork) { // DeviceParams {ov::test::utils::DEVICE_CPU, true}}, 2, 3, 2}, // // every element for ConfigParams -// {continueRun, selectThrowException, config model, deviceLoadsuccessVector, selectCount, loadCount, loadSuccessCount} -// { true, false, GENERAL, 3 device, 2, 3, 2} +// {continueRun, selectThrowException, config model, deviceLoadsuccessVector, selectCount, loadCount, +// loadSuccessCount} { true, false, GENERAL, 3 device, 2, 3, 2} // // there are three devices for loading // CPU load for accelerator success, but GPU will load faild and then select NPU and load again @@ -353,7 +359,7 @@ const std::vector testConfigs = { 3, 2}}; -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, AutoLoadFailedTest, - ::testing::ValuesIn(testConfigs), - AutoLoadFailedTest::getTestCaseName); - +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, + AutoLoadFailedTest, + ::testing::ValuesIn(testConfigs), + AutoLoadFailedTest::getTestCaseName); diff --git a/src/plugins/auto/tests/unit/select_device_test.cpp b/src/plugins/auto/tests/unit/select_device_test.cpp index 81a61ab3b027e5..baef090b32459c 100644 --- a/src/plugins/auto/tests/unit/select_device_test.cpp +++ b/src/plugins/auto/tests/unit/select_device_test.cpp @@ -5,30 +5,28 @@ #include "include/auto_unit_test.hpp" using namespace ov::mock_auto_plugin; -using ConfigParams = std::tuple< - std::string, // netPrecision - 
std::vector, // metaDevices for select - DeviceInformation, // expect DeviceInformation - bool, // throw exception - bool, // enabledevice_priority - bool // reverse total device - >; +using ConfigParams = std::tuple, // metaDevices for select + DeviceInformation, // expect DeviceInformation + bool, // throw exception + bool, // enabledevice_priority + bool // reverse total device + >; const DeviceInformation CPU_INFO = {ov::test::utils::DEVICE_CPU, {}, 2, "01", "CPU_01"}; const DeviceInformation IGPU_INFO = {"GPU.0", {}, 2, "01", "iGPU_01"}; const DeviceInformation DGPU_INFO = {"GPU.1", {}, 2, "01", "dGPU_01"}; -const DeviceInformation OTHERS_INFO = {"OTHERS", {}, 2, "01", "OTHERS" }; +const DeviceInformation OTHERS_INFO = {"OTHERS", {}, 2, "01", "OTHERS"}; const std::vector fp32DeviceVector = {DGPU_INFO, IGPU_INFO, OTHERS_INFO, CPU_INFO}; const std::vector fp16DeviceVector = {DGPU_INFO, IGPU_INFO, OTHERS_INFO, CPU_INFO}; const std::vector int8DeviceVector = {DGPU_INFO, IGPU_INFO, CPU_INFO}; -const std::vector binDeviceVector = {DGPU_INFO, IGPU_INFO, CPU_INFO}; -const std::vector batchedblobDeviceVector = {DGPU_INFO, IGPU_INFO}; +const std::vector binDeviceVector = {DGPU_INFO, IGPU_INFO, CPU_INFO}; +const std::vector batchedblobDeviceVector = {DGPU_INFO, IGPU_INFO}; std::map> devicesMap = {{"FP32", fp32DeviceVector}, - {"FP16", fp16DeviceVector}, - {"INT8", int8DeviceVector}, - {"BIN", binDeviceVector}, - {"BATCHED_BLOB", batchedblobDeviceVector} - }; + {"FP16", fp16DeviceVector}, + {"INT8", int8DeviceVector}, + {"BIN", binDeviceVector}, + {"BATCHED_BLOB", batchedblobDeviceVector}}; const std::vector totalDevices = {DGPU_INFO, IGPU_INFO, OTHERS_INFO, CPU_INFO}; const std::vector reverseTotalDevices = {CPU_INFO, OTHERS_INFO, IGPU_INFO, DGPU_INFO}; const std::vector netPrecisions = {"FP32", "FP16", "INT8", "BIN", "BATCHED_BLOB"}; @@ -47,7 +45,7 @@ class SelectDeviceTest : public tests::AutoTest, public ::testing::TestWithParam std::ostringstream result; result << "_netPrecision_" << netPrecision; for (auto& item : devices) { - result << "_device_" << item.unique_name; + result << "_device_" << item.unique_name; } result << "_expect_" << expect.unique_name; if (throwExcept) { @@ -72,9 +70,14 @@ class SelectDeviceTest : public tests::AutoTest, public ::testing::TestWithParam } // combine select_num devices from devices and make them to ConfigParams // insert the ConfigParams into testConfigs - static void combine_device(const std::vector& devices, size_t start, - size_t* result, size_t result_index, const size_t select_num, std::string& netPrecision, - bool enabledevice_priority, bool reverse) { + static void combine_device(const std::vector& devices, + size_t start, + size_t* result, + size_t result_index, + const size_t select_num, + std::string& netPrecision, + bool enabledevice_priority, + bool reverse) { for (size_t i = start; i < devices.size() + 1 - result_index; i++) { result[result_index - 1] = i; if (result_index - 1 == 0) { @@ -100,8 +103,11 @@ class SelectDeviceTest : public tests::AutoTest, public ::testing::TestWithParam if (enabledevice_priority) { std::vector validDevices; for (auto& item : devicesInfo) { - auto device = std::find_if(metaDevices.begin(), metaDevices.end(), - [&item](const DeviceInformation& d)->bool{return d.unique_name == item.unique_name;}); + auto device = std::find_if(metaDevices.begin(), + metaDevices.end(), + [&item](const DeviceInformation& d) -> bool { + return d.unique_name == item.unique_name; + }); if (device != metaDevices.end()) { 
validDevices.push_back(*device); } @@ -118,8 +124,11 @@ class SelectDeviceTest : public tests::AutoTest, public ::testing::TestWithParam } } else { for (auto& item : devicesInfo) { - auto device = std::find_if(metaDevices.begin(), metaDevices.end(), - [&item](const DeviceInformation& d)->bool{return d.unique_name == item.unique_name;}); + auto device = std::find_if(metaDevices.begin(), + metaDevices.end(), + [&item](const DeviceInformation& d) -> bool { + return d.unique_name == item.unique_name; + }); if (device != metaDevices.end()) { find = true; expect = item; @@ -133,11 +142,17 @@ class SelectDeviceTest : public tests::AutoTest, public ::testing::TestWithParam } else { find = false; } - testConfigs.push_back(std::make_tuple(netPrecision, metaDevices, - expect, !find, enabledevice_priority, reverse)); + testConfigs.push_back( + std::make_tuple(netPrecision, metaDevices, expect, !find, enabledevice_priority, reverse)); } else { - combine_device(devices, i + 1, result, result_index - 1, - select_num, netPrecision, enabledevice_priority, reverse); + combine_device(devices, + i + 1, + result, + result_index - 1, + select_num, + netPrecision, + enabledevice_priority, + reverse); } } } @@ -178,7 +193,7 @@ class SelectDeviceTest : public tests::AutoTest, public ::testing::TestWithParam combine_device(reverseTotalDevices, 0, result, i, i, netPrecision, true, true); } } - delete []result; + delete[] result; return testConfigs; } @@ -189,14 +204,16 @@ class SelectDeviceTest : public tests::AutoTest, public ::testing::TestWithParam } void SetUp() override { - ON_CALL(*plugin, select_device).WillByDefault([this](const std::vector& metaDevices, - const std::string& netPrecision, unsigned int priority) { - return plugin->Plugin::select_device(metaDevices, netPrecision, priority); - }); - ON_CALL(*plugin, get_valid_device) - .WillByDefault([this](const std::vector& metaDevices, const std::string& netPrecision) { - return plugin->Plugin::get_valid_device(metaDevices, netPrecision); - }); + ON_CALL(*plugin, select_device) + .WillByDefault([this](const std::vector& metaDevices, + const std::string& netPrecision, + unsigned int priority) { + return plugin->Plugin::select_device(metaDevices, netPrecision, priority); + }); + ON_CALL(*plugin, get_valid_device) + .WillByDefault([this](const std::vector& metaDevices, const std::string& netPrecision) { + return plugin->Plugin::get_valid_device(metaDevices, netPrecision); + }); } }; @@ -220,13 +237,12 @@ TEST_P(SelectDeviceTest, SelectDevice) { if (throwExcept) { ASSERT_THROW(plugin->select_device(devices, netPrecision, 0), ov::Exception); } else { - auto result = plugin->select_device(devices, netPrecision, 0); + auto result = plugin->select_device(devices, netPrecision, 0); compare(result, expect); } } - - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, SelectDeviceTest, - ::testing::ValuesIn(SelectDeviceTest::CreateConfigs()), - SelectDeviceTest::getTestCaseName); \ No newline at end of file +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, + SelectDeviceTest, + ::testing::ValuesIn(SelectDeviceTest::CreateConfigs()), + SelectDeviceTest::getTestCaseName); \ No newline at end of file diff --git a/src/plugins/auto/tests/unit/set_log_level_test.cpp b/src/plugins/auto/tests/unit/set_log_level_test.cpp index bba4687ba331bd..cbc7e5235fe71f 100644 --- a/src/plugins/auto/tests/unit/set_log_level_test.cpp +++ b/src/plugins/auto/tests/unit/set_log_level_test.cpp @@ -4,14 +4,14 @@ #include "include/auto_unit_test.hpp" namespace { -void custom_unsetenv(const char 
*name) { +void custom_unsetenv(const char* name) { #ifdef _WIN32 _putenv((std::string(name) + "=").c_str()); #else ::unsetenv(name); #endif } -} // namespace +} // namespace using ConfigParams = std::tuple; using namespace ov::mock_auto_plugin; @@ -28,9 +28,10 @@ class AutoSetLogLevel : public tests::AutoTest, public ::testing::TestWithParam< } void SetUp() override { - ON_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(_), - ::testing::Matcher(_))) + ON_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(_), + ::testing::Matcher(_))) .WillByDefault(Return(mockExeNetwork)); metaDevices = {{ov::test::utils::DEVICE_CPU, {}, -1}, {ov::test::utils::DEVICE_GPU, {}, -1}}; @@ -57,8 +58,12 @@ TEST_P(AutoSetLogLevel, setLogLevelFromConfig) { plugin->set_device_name("AUTO"); plugin->compile_model(model, config); int a = 0; - DEBUG_RUN([&a](){a++;}); - INFO_RUN([&a](){a++;}); + DEBUG_RUN([&a]() { + a++; + }); + INFO_RUN([&a]() { + a++; + }); if (log_level == "LOG_DEBUG" || log_level == "LOG_TRACE") { EXPECT_EQ(a, 2); } else if (log_level == "LOG_INFO") { diff --git a/src/plugins/auto/tests/unit/startup_fallback_property_test.cpp b/src/plugins/auto/tests/unit/startup_fallback_property_test.cpp index afba53502e1b92..3618dcb27ee425 100644 --- a/src/plugins/auto/tests/unit/startup_fallback_property_test.cpp +++ b/src/plugins/auto/tests/unit/startup_fallback_property_test.cpp @@ -5,8 +5,7 @@ using namespace ov::mock_auto_plugin; -using ConfigParams = std::tuple; +using ConfigParams = std::tuple; // define a matcher if all the elements of subMap are contained in the map. MATCHER_P(MapContains, subMap, "Check if all the elements of the subMap are contained in the map.") { @@ -32,17 +31,19 @@ class AutoStartupFallback : public tests::AutoTest, public ::testing::TestWithPa public: void SetUp() override { plugin->set_device_name("AUTO"); - ON_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(_), _)) + ON_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(_), + _)) .WillByDefault(Return(mockExeNetwork)); metaDevices = {{ov::test::utils::DEVICE_CPU, {}, -1}, {ov::test::utils::DEVICE_GPU, {}, -1}}; ON_CALL(*plugin, parse_meta_devices(_, _)).WillByDefault(Return(metaDevices)); ON_CALL(*plugin, get_valid_device) - .WillByDefault([](const std::vector& metaDevices, const std::string& netPrecision) { - std::list devices(metaDevices.begin(), metaDevices.end()); - return devices; - }); - ON_CALL(*plugin, select_device(_, _, _)).WillByDefault(Return(metaDevices[1])); + .WillByDefault([](const std::vector& metaDevices, const std::string& netPrecision) { + std::list devices(metaDevices.begin(), metaDevices.end()); + return devices; + }); + ON_CALL(*plugin, select_device(_, _, _)).WillByDefault(Return(metaDevices[1])); } }; @@ -52,30 +53,24 @@ TEST_P(AutoStartupFallback, propertytest) { ov::AnyMap config; std::tie(startup_fallback, config) = this->GetParam(); - EXPECT_CALL( - *core, - compile_model(::testing::Matcher&>(_), - ::testing::Matcher(ov::test::utils::DEVICE_GPU), _)) + EXPECT_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(ov::test::utils::DEVICE_GPU), + _)) .Times(1); if (startup_fallback) { std::map test_map = {{"PERFORMANCE_HINT", "LATENCY"}}; - EXPECT_CALL( - *core, - compile_model(::testing::Matcher&>(_), - ::testing::Matcher(ov::test::utils::DEVICE_CPU), - ::testing::Matcher(MapContains(test_map)))) + EXPECT_CALL(*core, + compile_model(::testing::Matcher&>(_), + 
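// Editor's sketch (illustrative, not part of the patch): a self-contained version of the
// MapContains-style MATCHER_P used in this test -- it passes when every key/value pair of the
// expected sub-map is present in the actual map. The matcher and test names here are invented.
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <map>
#include <string>

MATCHER_P(ContainsSubMap, subMap, "contains every key/value pair of the given sub-map") {
    for (const auto& kv : subMap) {
        const auto it = arg.find(kv.first);
        if (it == arg.end() || it->second != kv.second)
            return false;
    }
    return true;
}

TEST(MapContainsSketch, MatchesWhenSubMapIsPresent) {
    const std::map<std::string, std::string> config = {{"PERFORMANCE_HINT", "LATENCY"},
                                                       {"NUM_STREAMS", "4"}};
    const std::map<std::string, std::string> expected = {{"PERFORMANCE_HINT", "LATENCY"}};
    EXPECT_THAT(config, ContainsSubMap(expected));
}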
::testing::Matcher(ov::test::utils::DEVICE_CPU), + ::testing::Matcher(MapContains(test_map)))) .Times(1); } ASSERT_NO_THROW(plugin->compile_model(model, config)); } -const std::vector testConfigs = {ConfigParams {true, {{"ENABLE_STARTUP_FALLBACK", "YES"}}}, - ConfigParams {false, {{"ENABLE_STARTUP_FALLBACK", "NO"}}} - }; - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_StartupFallback, - AutoStartupFallback, - ::testing::ValuesIn(testConfigs)); - +const std::vector testConfigs = {ConfigParams{true, {{"ENABLE_STARTUP_FALLBACK", "YES"}}}, + ConfigParams{false, {{"ENABLE_STARTUP_FALLBACK", "NO"}}}}; +INSTANTIATE_TEST_SUITE_P(smoke_Auto_StartupFallback, AutoStartupFallback, ::testing::ValuesIn(testConfigs)); diff --git a/src/plugins/auto_batch/src/sync_infer_request.cpp b/src/plugins/auto_batch/src/sync_infer_request.cpp index 4342ad6a55dfb9..c766c521cea27c 100644 --- a/src/plugins/auto_batch/src/sync_infer_request.cpp +++ b/src/plugins/auto_batch/src/sync_infer_request.cpp @@ -86,7 +86,9 @@ void SyncInferRequest::set_tensors_to_another_request(ov::SoPtrget_element_type(); - if (req->get_tensor(it)->data(type) != tensor->data(type)) { + bool is_remote = std::dynamic_pointer_cast(tensor._ptr) || + std::dynamic_pointer_cast(req->get_tensor(it)._ptr); + if (is_remote || req->get_tensor(it)->data(type) != tensor->data(type)) { req->set_tensor(it, tensor); } } @@ -95,7 +97,9 @@ void SyncInferRequest::set_tensors_to_another_request(ov::SoPtrget_element_type(); - if (req->get_tensor(it)->data(type) != tensor->data(type)) { + bool is_remote = std::dynamic_pointer_cast(tensor._ptr) || + std::dynamic_pointer_cast(req->get_tensor(it)._ptr); + if (is_remote || req->get_tensor(it)->data(type) != tensor->data(type)) { req->set_tensor(it, tensor); } } diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/executable_network/exec_network_base.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/executable_network/exec_network_base.cpp index 322a5b2914b547..52a4bee4fbc720 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/executable_network/exec_network_base.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/executable_network/exec_network_base.cpp @@ -11,9 +11,6 @@ namespace { const std::vector> configs = { {}, }; - const std::vector> multiConfigs = { - {{ InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}} - }; const std::vector> heteroConfigs = { {{"TARGET_FALLBACK", ov::test::utils::DEVICE_CPU}}}; @@ -24,18 +21,6 @@ namespace { ::testing::ValuesIn(configs)), ExecutableNetworkBaseTest::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, ExecutableNetworkBaseTest, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(multiConfigs)), - ExecutableNetworkBaseTest::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, ExecutableNetworkBaseTest, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(multiConfigs)), - ExecutableNetworkBaseTest::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests, ExecutableNetworkBaseTest, ::testing::Combine( ::testing::Values(ov::test::utils::DEVICE_HETERO), @@ -54,34 +39,10 @@ namespace { {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}} }; - const std::vector> AutoConfigsSetPrc = { - 
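// Editor's sketch (hypothetical types, not the OpenVINO classes): the is_remote guard added to
// sync_infer_request.cpp above exists because a remote (device-resident) tensor has no meaningful
// host data() pointer, so the "skip set_tensor when the buffers already alias" shortcut must only
// apply to host tensors; when either side is remote the tensor is always re-set on the request.
#include <cassert>
#include <memory>

struct Tensor {
    explicit Tensor(void* buffer = nullptr) : buffer_(buffer) {}
    virtual ~Tensor() = default;
    virtual void* data() const { return buffer_; }
private:
    void* buffer_;
};
struct RemoteTensor : Tensor {};  // device memory; host pointer comparison is meaningless

// Returns true when the destination request must be updated with src.
bool needs_set_tensor(const std::shared_ptr<Tensor>& dst, const std::shared_ptr<Tensor>& src) {
    const bool is_remote = std::dynamic_pointer_cast<RemoteTensor>(dst) != nullptr ||
                           std::dynamic_pointer_cast<RemoteTensor>(src) != nullptr;
    return is_remote || dst->data() != src->data();
}

int main() {
    int buf = 0;
    const auto host = std::make_shared<Tensor>(&buf);
    const auto same_host = std::make_shared<Tensor>(&buf);
    const auto remote = std::make_shared<RemoteTensor>();
    assert(!needs_set_tensor(host, same_host));  // same host buffer -> nothing to do
    assert(needs_set_tensor(host, remote));      // remote involved -> always set the tensor
    return 0;
}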
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}}, - }; - - const std::vector> MultiConfigsSetPrc = { - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}} - }; - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, ExecNetSetPrecision, ::testing::Combine( ::testing::ValuesIn(netPrecisions), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(configSetPrc)), ExecNetSetPrecision::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, ExecNetSetPrecision, - ::testing::Combine( - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(MultiConfigsSetPrc)), - ExecNetSetPrecision::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, ExecNetSetPrecision, - ::testing::Combine( - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(AutoConfigsSetPrc)), - ExecNetSetPrecision::getTestCaseName); } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/executable_network/get_metric.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/executable_network/get_metric.cpp index c821561cb798f3..1e5badc668ffb0 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/executable_network/get_metric.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/executable_network/get_metric.cpp @@ -19,23 +19,23 @@ INSTANTIATE_TEST_SUITE_P( INSTANTIATE_TEST_SUITE_P( smoke_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS, - ::testing::Values("CPU", "MULTI:CPU", "HETERO:CPU", "AUTO:CPU")); + ::testing::Values("CPU", "HETERO:CPU")); INSTANTIATE_TEST_SUITE_P( smoke_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_SUPPORTED_METRICS, - ::testing::Values("CPU", "MULTI:CPU", "HETERO:CPU", "AUTO:CPU")); + ::testing::Values("CPU", "HETERO:CPU")); INSTANTIATE_TEST_SUITE_P( smoke_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_NETWORK_NAME, - ::testing::Values("CPU", "MULTI:CPU", "HETERO:CPU", "AUTO:CPU")); + ::testing::Values("CPU", "HETERO:CPU")); INSTANTIATE_TEST_SUITE_P( smoke_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_OPTIMAL_NUMBER_OF_INFER_REQUESTS, - ::testing::Values("CPU", "MULTI:CPU", "HETERO:CPU", "AUTO:CPU")); + ::testing::Values("CPU", "HETERO:CPU")); INSTANTIATE_TEST_SUITE_P( smoke_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_ThrowsUnsupported, - ::testing::Values("CPU", "MULTI:CPU", "HETERO:CPU", "AUTO:CPU")); + ::testing::Values("CPU", "HETERO:CPU")); // // Executable Network GetConfig / SetConfig diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/infer_request/callback.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/infer_request/callback.cpp index af232aae5ba9bd..1819bd0cc02198 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/infer_request/callback.cpp +++ 
b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/infer_request/callback.cpp @@ -12,25 +12,9 @@ const std::vector> configs = { {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "0"}, {InferenceEngine::PluginConfigParams::KEY_CPU_THREADS_NUM, "1"}} }; -const std::vector> multiConfigs = { - {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , ov::test::utils::DEVICE_CPU}} -}; - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, InferRequestCallbackTests, ::testing::Combine( ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(configs)), InferRequestCallbackTests::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, InferRequestCallbackTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(multiConfigs)), - InferRequestCallbackTests::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, InferRequestCallbackTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(multiConfigs)), - InferRequestCallbackTests::getTestCaseName); } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/infer_request/config.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/infer_request/config.cpp index 59aba8a26f5e32..6d3dd1bfbd2e34 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/infer_request/config.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/infer_request/config.cpp @@ -10,10 +10,6 @@ namespace { {} }; - const std::vector> multiConfigs = { - {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , ov::test::utils::DEVICE_CPU}} - }; - const std::vector> InConfigs = { {}, {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}}, @@ -23,41 +19,10 @@ namespace { {{InferenceEngine::PluginConfigParams::KEY_CPU_BIND_THREAD, InferenceEngine::PluginConfigParams::YES}} }; - const std::vector> MultiInConfigs = { - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, - InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, - InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_NUMA}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "8"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_CPU_BIND_THREAD, InferenceEngine::PluginConfigParams::NO}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_CPU_BIND_THREAD, InferenceEngine::PluginConfigParams::YES}} - }; - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, InferRequestConfigTest, ::testing::Combine( ::testing::Values(1u), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(configs)), InferRequestConfigTest::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, InferRequestConfigTest, - ::testing::Combine( - ::testing::Values(1u), - 
::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(multiConfigs)), - InferRequestConfigTest::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests_, InferRequestConfigTest, - ::testing::Combine( - ::testing::Values(1u), - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(MultiInConfigs)), - InferRequestConfigTest::getTestCaseName); - - } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/infer_request/io_blob.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/infer_request/io_blob.cpp index 1c5fc6437eeb68..f93876de32ce21 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/infer_request/io_blob.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/infer_request/io_blob.cpp @@ -15,30 +15,9 @@ namespace { {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "0"}, {InferenceEngine::PluginConfigParams::KEY_CPU_THREADS_NUM, "1"}} }; - const std::vector> Multiconfigs = { - {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , ov::test::utils::DEVICE_CPU}} - }; - - const std::vector> Autoconfigs = { - {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , ov::test::utils::DEVICE_CPU}} - }; - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, InferRequestIOBBlobTest, ::testing::Combine( ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(configs)), InferRequestIOBBlobTest::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, InferRequestIOBBlobTest, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(Multiconfigs)), - InferRequestIOBBlobTest::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, InferRequestIOBBlobTest, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(Autoconfigs)), - InferRequestIOBBlobTest::getTestCaseName); - } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/infer_request/memory_states.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/infer_request/memory_states.cpp index e409ad7a866935..00bd57165b7ba3 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/infer_request/memory_states.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/infer_request/memory_states.cpp @@ -17,30 +17,8 @@ std::vector memoryStateTestCases = { ov::test::utils::DEVICE_HETERO, {{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_CPU}})}; -std::vector memoryStateAutoTestCases = { - memoryStateParams(InferRequestVariableStateTest::getNetwork(), - {"c_1-3", "r_1-3"}, - ov::test::utils::DEVICE_AUTO, - {{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_CPU}})}; - -std::vector memoryStateMultiTestCases = { - memoryStateParams(InferRequestVariableStateTest::getNetwork(), - {"c_1-3", "r_1-3"}, - ov::test::utils::DEVICE_MULTI, - {{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_CPU}})}; - INSTANTIATE_TEST_SUITE_P(smoke_VariableStateBasic, InferRequestVariableStateTest, ::testing::ValuesIn(memoryStateTestCases), InferRequestVariableStateTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, - InferRequestVariableStateTest, - ::testing::ValuesIn(memoryStateAutoTestCases), - InferRequestVariableStateTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, - InferRequestVariableStateTest, - 
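// Editor's sketch (invented fixture and suite names, not part of the patch): every
// INSTANTIATE_TEST_SUITE_P block kept or removed in these hunks follows the same
// value-parameterized pattern -- a TestWithParam fixture over a (device, config) tuple,
// a ::testing::Combine generator, and a getTestCaseName printer.
#include <gtest/gtest.h>
#include <map>
#include <string>
#include <tuple>
#include <vector>

using BehaviorParams = std::tuple<std::string, std::map<std::string, std::string>>;

class BehaviorSketchTest : public ::testing::TestWithParam<BehaviorParams> {
public:
    static std::string getTestCaseName(const ::testing::TestParamInfo<BehaviorParams>& info) {
        // the returned string becomes part of the generated test name, so keep it identifier-safe
        return "device_" + std::get<0>(info.param) + "_configSize_" +
               std::to_string(std::get<1>(info.param).size());
    }
};

TEST_P(BehaviorSketchTest, DeviceNameIsNotEmpty) {
    EXPECT_FALSE(std::get<0>(GetParam()).empty());
}

const std::vector<std::map<std::string, std::string>> sketchConfigs = {{}, {{"NUM_STREAMS", "4"}}};

INSTANTIATE_TEST_SUITE_P(smoke_Sketch,
                         BehaviorSketchTest,
                         ::testing::Combine(::testing::Values(std::string("CPU")),
                                            ::testing::ValuesIn(sketchConfigs)),
                         BehaviorSketchTest::getTestCaseName);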
::testing::ValuesIn(memoryStateMultiTestCases), - InferRequestVariableStateTest::getTestCaseName); } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/infer_request/multitheading.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/infer_request/multitheading.cpp index ada6236dd61dc4..3d51e3b53ebcb7 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/infer_request/multitheading.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/infer_request/multitheading.cpp @@ -15,26 +15,9 @@ namespace { {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "0"}, {InferenceEngine::PluginConfigParams::KEY_CPU_THREADS_NUM, "1"}} }; - const std::vector> Multiconfigs = { - {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , ov::test::utils::DEVICE_CPU}} - }; - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, InferRequestMultithreadingTests, ::testing::Combine( ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(configs)), InferRequestMultithreadingTests::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, InferRequestMultithreadingTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(Multiconfigs)), - InferRequestMultithreadingTests::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, InferRequestMultithreadingTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(Multiconfigs)), - InferRequestMultithreadingTests::getTestCaseName); - } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/infer_request/perf_counters.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/infer_request/perf_counters.cpp index 0a74955fc4ea18..f1290d3f6e2564 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/infer_request/perf_counters.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/infer_request/perf_counters.cpp @@ -35,30 +35,9 @@ const std::vector> configs = { {} }; -const std::vector> Multiconfigs = { - {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , ov::test::utils::DEVICE_CPU}} -}; - -const std::vector> Autoconfigs = { - {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , ov::test::utils::DEVICE_CPU}} -}; - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, InferRequestPerfCountersTest, ::testing::Combine( ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(configs)), InferRequestPerfCountersTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, InferRequestPerfCountersTest, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(Multiconfigs)), - InferRequestPerfCountersTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, InferRequestPerfCountersTest, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(Autoconfigs)), - InferRequestPerfCountersTest::getTestCaseName); - } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/infer_request/set_blob_by_type.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/infer_request/set_blob_by_type.cpp index dae5799d5a99d2..056ac921676719 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/infer_request/set_blob_by_type.cpp +++ 
b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/infer_request/set_blob_by_type.cpp @@ -15,8 +15,6 @@ const std::vector BlobTypes = { }; const std::map cpuConfig{}; //nothing special -const std::map autoConfig{}; -const std::map multiConfig{{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , ov::test::utils::DEVICE_CPU}}; const std::map heteroConfig{{ "TARGET_FALLBACK", ov::test::utils::DEVICE_CPU }}; INSTANTIATE_TEST_SUITE_P(smoke_Behavior, InferRequestSetBlobByType, @@ -25,19 +23,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_Behavior, InferRequestSetBlobByType, ::testing::Values(cpuConfig)), InferRequestSetBlobByType::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Behavior_Multi, InferRequestSetBlobByType, - ::testing::Combine(::testing::ValuesIn(BlobTypes), - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::Values(multiConfig)), - InferRequestSetBlobByType::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Behavior_Auto, InferRequestSetBlobByType, - ::testing::Combine(::testing::ValuesIn(BlobTypes), - ::testing::Values(ov::test::utils::DEVICE_AUTO + std::string(":") + ov::test::utils::DEVICE_CPU), - ::testing::Values(autoConfig)), - InferRequestSetBlobByType::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_Behavior_Hetero, InferRequestSetBlobByType, ::testing::Combine(::testing::ValuesIn(BlobTypes), ::testing::Values(ov::test::utils::DEVICE_HETERO), diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/infer_request/wait.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/infer_request/wait.cpp index 22c9edb1a14aa3..451bd8eb3b98ed 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/infer_request/wait.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/infer_request/wait.cpp @@ -15,30 +15,9 @@ namespace { {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "0"}, {InferenceEngine::PluginConfigParams::KEY_CPU_THREADS_NUM, "1"}} }; - const std::vector> Multiconfigs = { - {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , ov::test::utils::DEVICE_CPU}} - }; - - const std::vector> Autoconfigs = { - {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , ov::test::utils::DEVICE_CPU}} - }; - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, InferRequestWaitTests, ::testing::Combine( ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(configs)), InferRequestWaitTests::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, InferRequestWaitTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(Multiconfigs)), - InferRequestWaitTests::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, InferRequestWaitTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(Autoconfigs)), - InferRequestWaitTests::getTestCaseName); - } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/core_integration.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/core_integration.cpp index 52ea12486cbf58..0f71d3e80c30ad 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/core_integration.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/core_integration.cpp @@ -17,7 +17,7 @@ namespace { INSTANTIATE_TEST_SUITE_P( 
smoke_OVClassCompiledModelGetPropertyTest, OVClassCompiledModelGetPropertyTest, - ::testing::Values("CPU", "MULTI:CPU", "HETERO:CPU", "AUTO:CPU")); + ::testing::Values("CPU", "HETERO:CPU")); const std::vector>> GetMetricTest_ExecutionDevice_CPU = { {"CPU", std::make_pair(ov::AnyMap{}, "CPU")}}; @@ -32,7 +32,7 @@ INSTANTIATE_TEST_SUITE_P( INSTANTIATE_TEST_SUITE_P( smoke_OVClassCompiledModelGetIncorrectPropertyTest, OVClassCompiledModelGetIncorrectPropertyTest, - ::testing::Values("CPU", "MULTI:CPU", "HETERO:CPU", "AUTO:CPU")); + ::testing::Values("CPU", "HETERO:CPU")); INSTANTIATE_TEST_SUITE_P( smoke_OVClassCompiledModelGetConfigTest, OVClassCompiledModelGetConfigTest, diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/exec_network_base.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/exec_network_base.cpp index 8673f298bb639f..255a87b07229c9 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/exec_network_base.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/exec_network_base.cpp @@ -11,9 +11,6 @@ namespace { const std::vector configs = { {}, }; - const std::vector multiConfigs = { - {ov::device::priorities(ov::test::utils::DEVICE_CPU)} - }; const std::vector heteroConfigs = { {ov::device::priorities(ov::test::utils::DEVICE_CPU)}}; @@ -24,18 +21,6 @@ namespace { ::testing::ValuesIn(configs)), OVCompiledModelBaseTest::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, OVCompiledModelBaseTest, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(multiConfigs)), - OVCompiledModelBaseTest::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVCompiledModelBaseTest, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(multiConfigs)), - OVCompiledModelBaseTest::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests, OVCompiledModelBaseTest, ::testing::Combine( ::testing::Values(ov::test::utils::DEVICE_HETERO), @@ -48,18 +33,6 @@ namespace { ::testing::ValuesIn(configs)), OVCompiledModelBaseTestOptional::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, OVCompiledModelBaseTestOptional, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(multiConfigs)), - OVCompiledModelBaseTestOptional::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVCompiledModelBaseTestOptional, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(multiConfigs)), - OVCompiledModelBaseTestOptional::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests, OVCompiledModelBaseTestOptional, ::testing::Combine( ::testing::Values(ov::test::utils::DEVICE_HETERO), @@ -77,14 +50,4 @@ namespace { {}, {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}} }; - - const std::vector AutoConfigsSetPrc = { - {ov::device::priorities(ov::test::utils::DEVICE_CPU)}, - }; - - const std::vector MultiConfigsSetPrc = { - {ov::device::priorities(ov::test::utils::DEVICE_CPU)}, - {ov::device::priorities(ov::test::utils::DEVICE_CPU), - {InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}} - }; } // namespace diff --git 
a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/ov_exec_net_import_export.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/ov_exec_net_import_export.cpp index cb78e3c0a11fa8..16f4c82c74be24 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/ov_exec_net_import_export.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/ov_exec_net_import_export.cpp @@ -23,8 +23,6 @@ const std::vector netPrecisions = { const std::vector configs = { {}, }; -const std::vector multiConfigs = { - {ov::device::priorities(ov::test::utils::DEVICE_CPU)}}; const std::vector heteroConfigs = { {ov::device::priorities(ov::test::utils::DEVICE_CPU)}}; @@ -37,14 +35,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, ::testing::ValuesIn(configs)), OVCompiledGraphImportExportTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, - OVCompiledGraphImportExportTest, - ::testing::Combine( - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(multiConfigs)), - OVCompiledGraphImportExportTest::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests, OVCompiledGraphImportExportTest, ::testing::Combine(::testing::ValuesIn(netPrecisions), diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/properties.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/properties.cpp index 35a8001c0cc133..913315542db85b 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/properties.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/properties.cpp @@ -21,8 +21,7 @@ const std::vector auto_batch_inproperties = { INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVClassCompiledModelPropertiesIncorrectTests, ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_CPU, - ov::test::utils::DEVICE_HETERO, - ov::test::utils::DEVICE_MULTI, "AUTO:CPU"), + ov::test::utils::DEVICE_HETERO), ::testing::ValuesIn(inproperties)), OVClassCompiledModelPropertiesIncorrectTests::getTestCaseName); @@ -84,13 +83,6 @@ const std::vector hetero_properties = { InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}}, }; -const std::vector multi_properties = { - {ov::device::priorities(ov::test::utils::DEVICE_CPU), ov::num_streams(ov::streams::AUTO)}, - {ov::device::priorities(ov::test::utils::DEVICE_CPU), - {InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, - InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}}, -}; - const std::vector auto_batch_properties = { {{CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG), std::string(ov::test::utils::DEVICE_CPU) + "(4)"}}, {{CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG), std::string(ov::test::utils::DEVICE_CPU) + "(4)"}, @@ -111,12 +103,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests, ::testing::ValuesIn(hetero_properties)), OVClassCompiledModelPropertiesTests::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, - OVClassCompiledModelPropertiesTests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(multi_properties)), - OVClassCompiledModelPropertiesTests::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests, OVClassCompiledModelPropertiesTests, 
::testing::Combine(::testing::Values(ov::test::utils::DEVICE_BATCH), @@ -125,22 +111,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests, INSTANTIATE_TEST_SUITE_P(smoke_OVCompiledModelIncorrectDevice, OVCompiledModelIncorrectDevice, ::testing::Values("CPU")); - -const std::vector auto_multi_device_properties = { - {ov::device::priorities(ov::test::utils::DEVICE_CPU), ov::device::properties("CPU", ov::num_streams(4))}, - {ov::device::priorities(ov::test::utils::DEVICE_CPU), - ov::device::properties("CPU", ov::num_streams(4), ov::enable_profiling(true))}, - {ov::device::priorities(ov::test::utils::DEVICE_CPU), - ov::device::properties(ov::AnyMap{{"CPU", ov::AnyMap{{ov::num_streams(4), ov::enable_profiling(true)}}}})}}; - -INSTANTIATE_TEST_SUITE_P(smoke_AutoMultiSetAndCompileModelBehaviorTestsNoThrow, - OVClassCompiledModelPropertiesTests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO, - ov::test::utils::DEVICE_MULTI, - ov::test::utils::DEVICE_HETERO), - ::testing::ValuesIn(auto_multi_device_properties)), - OVClassCompiledModelPropertiesTests::getTestCaseName); - const std::vector configsWithSecondaryProperties = { {ov::device::properties("CPU", ov::num_streams(4))}, {ov::device::properties("CPU", @@ -151,48 +121,6 @@ const std::vector configsWithSecondaryProperties = { ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT)), ov::device::properties("GPU", ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY))}}; -const std::vector multiConfigsWithSecondaryProperties = { - {ov::device::priorities(ov::test::utils::DEVICE_CPU), - ov::device::properties("CPU", - ov::num_streams(4), - ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT))}, - {ov::device::priorities(ov::test::utils::DEVICE_CPU), - ov::device::properties("CPU", - ov::num_streams(4), - ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT)), - ov::device::properties("GPU", ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY))}}; - -const std::vector autoConfigsWithSecondaryProperties = { - {ov::device::priorities(ov::test::utils::DEVICE_CPU), - ov::device::properties("AUTO", - ov::enable_profiling(false), - ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT))}, - {ov::device::priorities(ov::test::utils::DEVICE_CPU), - ov::device::properties("CPU", - ov::num_streams(4), - ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT))}, - {ov::device::priorities(ov::test::utils::DEVICE_CPU), - ov::device::properties("CPU", - ov::num_streams(4), - ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT)), - ov::device::properties("GPU", ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY))}, - {ov::device::priorities(ov::test::utils::DEVICE_CPU), - ov::device::properties("AUTO", - ov::enable_profiling(false), - ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY)), - ov::device::properties("CPU", - ov::num_streams(4), - ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT))}, - {ov::device::priorities(ov::test::utils::DEVICE_GPU), - ov::device::properties("AUTO", - ov::enable_profiling(false), - ov::device::priorities(ov::test::utils::DEVICE_CPU), - ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY)), - ov::device::properties("CPU", - ov::num_streams(4), - ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT)), - ov::device::properties("GPU", ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY))}}; - const std::vector heteroConfigsWithSecondaryProperties = { 
{ov::device::priorities(ov::test::utils::DEVICE_CPU), ov::device::properties("HETERO", @@ -227,52 +155,11 @@ const std::vector heteroConfigsWithSecondaryProperties = { // IE Class Load network INSTANTIATE_TEST_SUITE_P(smoke_CPUOVClassCompileModelWithCorrectPropertiesTest, OVClassCompileModelWithCorrectPropertiesTest, - ::testing::Combine(::testing::Values("CPU", "AUTO:CPU", "MULTI:CPU", "HETERO:CPU"), + ::testing::Combine(::testing::Values("CPU", "HETERO:CPU"), ::testing::ValuesIn(configsWithSecondaryProperties))); -INSTANTIATE_TEST_SUITE_P(smoke_Multi_OVClassCompileModelWithCorrectPropertiesTest, - OVClassCompileModelWithCorrectPropertiesTest, - ::testing::Combine(::testing::Values("MULTI"), - ::testing::ValuesIn(multiConfigsWithSecondaryProperties))); - -INSTANTIATE_TEST_SUITE_P(smoke_AUTO_OVClassCompileModelWithCorrectPropertiesTest, - OVClassCompileModelWithCorrectPropertiesTest, - ::testing::Combine(::testing::Values("AUTO"), - ::testing::ValuesIn(autoConfigsWithSecondaryProperties))); - INSTANTIATE_TEST_SUITE_P(smoke_HETERO_OVClassCompileModelWithCorrectPropertiesTest, OVClassCompileModelWithCorrectPropertiesTest, ::testing::Combine(::testing::Values("HETERO"), ::testing::ValuesIn(heteroConfigsWithSecondaryProperties))); - -const std::vector> automultiExeDeviceConfigs = { - std::make_pair(ov::AnyMap{{ov::device::priorities(ov::test::utils::DEVICE_CPU)}}, "CPU")}; - -INSTANTIATE_TEST_SUITE_P(smoke_AutoMultiCompileModelBehaviorTests, - OVCompileModelGetExecutionDeviceTests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO, - ov::test::utils::DEVICE_MULTI, - ov::test::utils::DEVICE_HETERO), - ::testing::ValuesIn(automultiExeDeviceConfigs)), - OVCompileModelGetExecutionDeviceTests::getTestCaseName); - -const std::vector multiDevicePriorityConfigs = { - {ov::device::priorities(ov::test::utils::DEVICE_CPU)}}; - -INSTANTIATE_TEST_SUITE_P(smoke_OVClassCompiledModelGetPropertyTest, - OVClassCompiledModelGetPropertyTest_DEVICE_PRIORITY, - ::testing::Combine(::testing::Values("MULTI", "AUTO"), - ::testing::ValuesIn(multiDevicePriorityConfigs)), - OVClassCompiledModelGetPropertyTest_DEVICE_PRIORITY::getTestCaseName); - -const std::vector multiModelPriorityConfigs = { - {ov::hint::model_priority(ov::hint::Priority::HIGH)}, - {ov::hint::model_priority(ov::hint::Priority::MEDIUM)}, - {ov::hint::model_priority(ov::hint::Priority::LOW)}, - {ov::hint::model_priority(ov::hint::Priority::DEFAULT)}}; - -INSTANTIATE_TEST_SUITE_P(smoke_OVClassCompiledModelGetPropertyTest, - OVClassCompiledModelGetPropertyTest_MODEL_PRIORITY, - ::testing::Combine(::testing::Values("AUTO:CPU"), - ::testing::ValuesIn(multiModelPriorityConfigs))); } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/callback.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/callback.cpp index d39adcafc87da3..2051aab35e71ea 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/callback.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/callback.cpp @@ -15,25 +15,9 @@ const std::vector configs = { {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "0"}, {InferenceEngine::PluginConfigParams::KEY_CPU_THREADS_NUM, "1"}} }; -const std::vector multiConfigs = { - {ov::device::priorities(ov::test::utils::DEVICE_CPU)} -}; - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestCallbackTests, ::testing::Combine( 
::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(configs)), OVInferRequestCallbackTests::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, OVInferRequestCallbackTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(multiConfigs)), - OVInferRequestCallbackTests::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVInferRequestCallbackTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(multiConfigs)), - OVInferRequestCallbackTests::getTestCaseName); } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_consistency.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_consistency.cpp index a0d07511e31f0b..1423d023cbbac3 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_consistency.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_consistency.cpp @@ -20,41 +20,10 @@ std::vector configs = { {{ov::test::utils::DEVICE_CPU, {}}, {ov::test::utils::DEVICE_CPU, {}}} }; -std::vector AutoConfigs = { - { - { - ov::test::utils::DEVICE_AUTO + std::string(":") + ov::test::utils::DEVICE_CPU, - {ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY)} - }, - {ov::test::utils::DEVICE_CPU, {}} - }, - { - { - ov::test::utils::DEVICE_AUTO + std::string(":") + ov::test::utils::DEVICE_CPU, - {ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT)} - }, - {ov::test::utils::DEVICE_CPU, {}} - }, - { - { - ov::test::utils::DEVICE_AUTO + std::string(":") + ov::test::utils::DEVICE_CPU, - {ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)} - }, - {ov::test::utils::DEVICE_CPU, {}} - } -}; - INSTANTIATE_TEST_SUITE_P(BehaviorTests, OVInferConsistencyTest, ::testing::Combine( ::testing::Values(10),// inferRequest num ::testing::Values(10),// infer counts ::testing::ValuesIn(configs)), OVInferConsistencyTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(Auto_BehaviorTests, OVInferConsistencyTest, - ::testing::Combine( - ::testing::Values(10),// inferRequest num - ::testing::Values(10),// infer counts - ::testing::ValuesIn(AutoConfigs)), - OVInferConsistencyTest::getTestCaseName); } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp index 3b4049eaf05a3b..fa66f4a2c7801d 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp @@ -18,10 +18,6 @@ const std::vector HeteroConfigs = { {ov::device::priorities(ov::test::utils::DEVICE_CPU)} }; -const std::vector AutoConfigs = { - {ov::device::priorities(ov::test::utils::DEVICE_CPU)} -}; - std::shared_ptr getFunction1() { const std::vector inputShape = {1, 4, 20, 20}; const ngraph::element::Type_t ngPrc = ngraph::element::Type_t::f32; @@ -93,15 +89,4 @@ INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests, OVInferRequestDynamicTests, ::testing::Values(ov::test::utils::DEVICE_HETERO), ::testing::ValuesIn(HeteroConfigs)), OVInferRequestDynamicTests::getTestCaseName); - 
-INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVInferRequestDynamicTests, - ::testing::Combine( - ::testing::Values(getFunction2()), - ::testing::Values(std::vector, std::vector>>{ - {{1, 4, 20, 20}, {1, 2, 20, 40}}, - {{2, 4, 20, 20}, {2, 2, 20, 40}}}), - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(AutoConfigs)), - OVInferRequestDynamicTests::getTestCaseName); - } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/inference_chaining.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/inference_chaining.cpp index a134c54772f118..f8030e4332296f 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/inference_chaining.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/inference_chaining.cpp @@ -17,10 +17,6 @@ const std::vector HeteroConfigs = { {ov::device::priorities(ov::test::utils::DEVICE_CPU)} }; -const std::vector AutoConfigs = { - {ov::device::priorities(ov::test::utils::DEVICE_CPU)} -}; - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferenceChaining, ::testing::Combine( ::testing::Values(ov::test::utils::DEVICE_CPU), @@ -33,12 +29,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests, OVInferenceChaining, ::testing::ValuesIn(HeteroConfigs)), OVInferenceChaining::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVInferenceChaining, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(AutoConfigs)), - OVInferenceChaining::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferenceChainingStatic, ::testing::Combine( ::testing::Values(ov::test::utils::DEVICE_CPU), @@ -50,10 +40,4 @@ INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests, OVInferenceChainingStatic, ::testing::Values(ov::test::utils::DEVICE_HETERO), ::testing::ValuesIn(HeteroConfigs)), OVInferenceChainingStatic::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVInferenceChainingStatic, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(AutoConfigs)), - OVInferenceChainingStatic::getTestCaseName); } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp index 93b0bb59dfa0ea..07cd925b940595 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp @@ -15,14 +15,6 @@ const std::vector configs = { {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "0"}, {InferenceEngine::PluginConfigParams::KEY_CPU_THREADS_NUM, "1"}} }; -const std::vector Multiconfigs = { - {ov::device::priorities(ov::test::utils::DEVICE_CPU)} -}; - -const std::vector Autoconfigs = { - {ov::device::priorities(ov::test::utils::DEVICE_CPU)} -}; - const std::vector emptyConfigs = {{}}; INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestIOTensorTest, @@ -31,18 +23,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestIOTensorTest, ::testing::ValuesIn(configs)), OVInferRequestIOTensorTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, OVInferRequestIOTensorTest, - ::testing::Combine( - 
::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(Multiconfigs)), - OVInferRequestIOTensorTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVInferRequestIOTensorTest, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(Autoconfigs)), - OVInferRequestIOTensorTest::getTestCaseName); - std::vector prcs = { ov::element::boolean, ov::element::bf16, @@ -69,38 +49,10 @@ INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestIOTensorSetPrecision ::testing::ValuesIn(configs)), OVInferRequestIOTensorSetPrecisionTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, OVInferRequestIOTensorSetPrecisionTest, - ::testing::Combine( - ::testing::ValuesIn(prcs), - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(Multiconfigs)), - OVInferRequestIOTensorSetPrecisionTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVInferRequestIOTensorSetPrecisionTest, - ::testing::Combine( - ::testing::ValuesIn(prcs), - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(Autoconfigs)), - OVInferRequestIOTensorSetPrecisionTest::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestCheckTensorPrecision, ::testing::Combine( ::testing::ValuesIn(prcs), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(emptyConfigs)), OVInferRequestCheckTensorPrecision::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, OVInferRequestCheckTensorPrecision, - ::testing::Combine( - ::testing::ValuesIn(prcs), - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(Multiconfigs)), - OVInferRequestCheckTensorPrecision::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVInferRequestCheckTensorPrecision, - ::testing::Combine( - ::testing::ValuesIn(prcs), - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(Autoconfigs)), - OVInferRequestCheckTensorPrecision::getTestCaseName); } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/iteration_chaining.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/iteration_chaining.cpp index 6bae74745729c0..2c8678165426b3 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/iteration_chaining.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/iteration_chaining.cpp @@ -17,10 +17,6 @@ const std::vector HeteroConfigs = { {ov::device::priorities(ov::test::utils::DEVICE_CPU)} }; -const std::vector AutoConfigs = { - {ov::device::priorities(ov::test::utils::DEVICE_CPU)} -}; - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVIterationChaining, ::testing::Combine( ::testing::Values(ov::test::utils::DEVICE_CPU), @@ -32,11 +28,4 @@ INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests, OVIterationChaining, ::testing::Values(ov::test::utils::DEVICE_HETERO), ::testing::ValuesIn(HeteroConfigs)), OVIterationChaining::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVIterationChaining, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(AutoConfigs)), - OVIterationChaining::getTestCaseName); - } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/multithreading.cpp 
b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/multithreading.cpp index 4aa193dd0ff330..f341cb560def20 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/multithreading.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/multithreading.cpp @@ -16,26 +16,9 @@ const std::vector configs = { {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "0"}, {InferenceEngine::PluginConfigParams::KEY_CPU_THREADS_NUM, "1"}} }; -const std::vector Multiconfigs = { - {ov::device::priorities(ov::test::utils::DEVICE_CPU)} -}; - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestMultithreadingTests, ::testing::Combine( ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(configs)), OVInferRequestMultithreadingTests::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, OVInferRequestMultithreadingTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(Multiconfigs)), - OVInferRequestMultithreadingTests::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVInferRequestMultithreadingTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(Multiconfigs)), - OVInferRequestMultithreadingTests::getTestCaseName); - } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/perf_counters.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/perf_counters.cpp index f051edb92958f4..959bd3fe6cc9fe 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/perf_counters.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/perf_counters.cpp @@ -11,29 +11,9 @@ const std::vector configs = { {} }; -const std::vector Multiconfigs = { - {ov::device::priorities(ov::test::utils::DEVICE_CPU)} -}; - -const std::vector Autoconfigs = { - {ov::device::priorities(ov::test::utils::DEVICE_CPU)} -}; - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestPerfCountersTest, ::testing::Combine( ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(configs)), OVInferRequestPerfCountersTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, OVInferRequestPerfCountersTest, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(Multiconfigs)), - OVInferRequestPerfCountersTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVInferRequestPerfCountersTest, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(Autoconfigs)), - OVInferRequestPerfCountersTest::getTestCaseName); } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/wait.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/wait.cpp index 53ead79c66e1dc..a1102d17577e02 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/wait.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/wait.cpp @@ -16,30 +16,9 @@ const std::vector configs = { {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "0"}, {InferenceEngine::PluginConfigParams::KEY_CPU_THREADS_NUM, 
"1"}} }; -const std::vector Multiconfigs = { - {ov::device::priorities(ov::test::utils::DEVICE_CPU)} -}; - -const std::vector Autoconfigs = { - {ov::device::priorities(ov::test::utils::DEVICE_CPU)} -}; - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestWaitTests, ::testing::Combine( ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(configs)), OVInferRequestWaitTests::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, OVInferRequestWaitTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(Multiconfigs)), - OVInferRequestWaitTests::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVInferRequestWaitTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(Autoconfigs)), - OVInferRequestWaitTests::getTestCaseName); - } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/caching_tests.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/caching_tests.cpp index b41b969c24bc66..efd48c350c7e9a 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/caching_tests.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/caching_tests.cpp @@ -133,53 +133,6 @@ namespace { ::testing::ValuesIn(autoConfigs)), CompileModelCacheTestBase::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_Auto_CachingSupportCase_CPU, CompileModelCacheTestBase, - ::testing::Combine( - ::testing::ValuesIn(CompileModelCacheTestBase::getNumericAnyTypeFunctions()), - ::testing::ValuesIn(precisionsCPU), - ::testing::ValuesIn(batchSizesCPU), - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(autoConfigs)), - CompileModelCacheTestBase::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Auto_CachingSupportCase_CPU_Float, CompileModelCacheTestBase, - ::testing::Combine( - ::testing::ValuesIn(CompileModelCacheTestBase::getFloatingPointOnlyFunctions()), - ::testing::ValuesIn(floatPrecisionsCPU), - ::testing::ValuesIn(batchSizesCPU), - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(autoConfigs)), - CompileModelCacheTestBase::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Auto_CachingSupportCase_CPU_Internal, CompileModelCacheTestBase, - ::testing::Combine( - ::testing::ValuesIn(internal_functions_cpu()), - ::testing::ValuesIn(precisionsCPUInternal), - ::testing::ValuesIn(batchSizesCPUInternal), - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(autoConfigs)), - CompileModelCacheTestBase::getTestCaseName); - - const std::vector LoadFromFileConfigs = { - {ov::device::priorities(ov::test::utils::DEVICE_CPU)}, - }; - const std::vector TestTargets = - {ov::test::utils::DEVICE_AUTO, - ov::test::utils::DEVICE_MULTI, - }; - - INSTANTIATE_TEST_SUITE_P(smoke_Auto_CachingSupportCase_CPU, CompileModelLoadFromFileTestBase, - ::testing::Combine( - ::testing::ValuesIn(TestTargets), - ::testing::ValuesIn(LoadFromFileConfigs)), - CompileModelLoadFromFileTestBase::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Auto_CachingSupportCase_CPU, - CompileModelLoadFromMemoryTestBase, - ::testing::Combine(::testing::ValuesIn(TestTargets), - ::testing::ValuesIn(LoadFromFileConfigs)), - CompileModelLoadFromMemoryTestBase::getTestCaseName); - const std::vector CpuConfigs = { {ov::num_streams(2)}, }; diff --git 
a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/core_integration.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/core_integration.cpp index a9201b7674bdc3..fcc23fe1a8d906 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/core_integration.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/core_integration.cpp @@ -28,25 +28,4 @@ INSTANTIATE_TEST_SUITE_P( // IE Class Query model INSTANTIATE_TEST_SUITE_P(smoke_OVClassQueryModelTest, OVClassQueryModelTest, ::testing::Values("CPU")); - -const std::vector configsWithEmpty = {{}}; -const std::vector configsWithMetaPlugin = {{ov::device::priorities("AUTO")}, - {ov::device::priorities("MULTI")}, - {ov::device::priorities("AUTO", "MULTI")}, - {ov::device::priorities("AUTO", "CPU")}, - {ov::device::priorities("MULTI", "CPU")}}; - -INSTANTIATE_TEST_SUITE_P( - smoke_MULTI_AUTO_DoNotSupportMetaPluginLoadingItselfRepeatedlyWithEmptyConfigTest, - OVClassCompileModelWithCondidateDeviceListContainedMetaPluginTest, - ::testing::Combine(::testing::Values("MULTI:AUTO", "AUTO:MULTI", "MULTI:CPU,AUTO", "AUTO:CPU,MULTI"), - ::testing::ValuesIn(configsWithEmpty)), - ::testing::PrintToStringParamName()); - -INSTANTIATE_TEST_SUITE_P(smoke_MULTI_AUTO_DoNotSupportMetaPluginLoadingItselfRepeatedlyTest, - OVClassCompileModelWithCondidateDeviceListContainedMetaPluginTest, - ::testing::Combine(::testing::Values("MULTI", "AUTO"), - ::testing::ValuesIn(configsWithMetaPlugin)), - ::testing::PrintToStringParamName()); - } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/life_time.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/life_time.cpp index a46121e053bee5..cc6f8cf3217187 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/life_time.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/life_time.cpp @@ -11,9 +11,7 @@ namespace { OVHoldersTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_VirtualPlugin_BehaviorTests, OVHoldersTest, - ::testing::Values("AUTO:CPU", - "MULTI:CPU", - //ov::test::utils::DEVICE_BATCH, + ::testing::Values(//ov::test::utils::DEVICE_BATCH, "HETERO:CPU"), OVHoldersTest::getTestCaseName); diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp index 4e7dfdaeac914d..b736a5ce7b6be2 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp @@ -26,28 +26,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, ::testing::ValuesIn(cpu_properties)), OVPropertiesTests::getTestCaseName); -const std::vector multi_Auto_properties = { - {ov::device::priorities(ov::test::utils::DEVICE_CPU), - ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT)}, - {ov::device::priorities(ov::test::utils::DEVICE_CPU), - ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY)}, - {ov::device::priorities(ov::test::utils::DEVICE_CPU), - ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)}, - {ov::device::priorities(ov::test::utils::DEVICE_CPU), 
ov::hint::execution_mode(ov::hint::ExecutionMode::ACCURACY)}, - {ov::device::priorities(ov::test::utils::DEVICE_CPU), - ov::hint::execution_mode(ov::hint::ExecutionMode::PERFORMANCE)}, - {ov::device::priorities(ov::test::utils::DEVICE_CPU), ov::intel_auto::device_bind_buffer("YES")}, - {ov::device::priorities(ov::test::utils::DEVICE_CPU), ov::intel_auto::device_bind_buffer("NO")}, - {ov::device::priorities(ov::test::utils::DEVICE_CPU), ov::intel_auto::enable_startup_fallback("YES")}, - {ov::device::priorities(ov::test::utils::DEVICE_CPU), ov::intel_auto::enable_startup_fallback("NO")}}; - -INSTANTIATE_TEST_SUITE_P(smoke_AutoMultiBehaviorTests, - OVPropertiesTests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO, - ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(multi_Auto_properties)), - OVPropertiesTests::getTestCaseName); - const std::vector cpu_setcore_properties = { {ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT), ov::hint::num_requests(2), @@ -64,97 +42,24 @@ INSTANTIATE_TEST_SUITE_P(smoke_cpuCompileModelBehaviorTests, ::testing::ValuesIn(cpu_compileModel_properties)), OVSetPropComplieModleGetPropTests::getTestCaseName); -const std::vector multi_setcore_properties = { - {ov::device::priorities(ov::test::utils::DEVICE_CPU), - ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY), - ov::hint::model_priority(ov::hint::Priority::HIGH)}}; -const std::vector multi_compileModel_properties = { - {ov::device::priorities(ov::test::utils::DEVICE_CPU), - ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT), - ov::hint::model_priority(ov::hint::Priority::MEDIUM)}}; - -INSTANTIATE_TEST_SUITE_P(smoke_MultiCompileModelBehaviorTests, - OVSetPropComplieModleGetPropTests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(multi_setcore_properties), - ::testing::ValuesIn(multi_compileModel_properties)), - OVSetPropComplieModleGetPropTests::getTestCaseName); - -const std::vector auto_setcore_properties = { - {ov::device::priorities(ov::test::utils::DEVICE_CPU), - ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT), - ov::hint::model_priority(ov::hint::Priority::HIGH)}, - {ov::device::priorities(ov::test::utils::DEVICE_CPU), - ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY), - ov::hint::model_priority(ov::hint::Priority::HIGH)}, - {ov::device::priorities(ov::test::utils::DEVICE_CPU), - ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT), - ov::hint::model_priority(ov::hint::Priority::HIGH)}, -}; -const std::vector auto_compileModel_properties = { - {ov::device::priorities(ov::test::utils::DEVICE_CPU), - ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY), - ov::hint::model_priority(ov::hint::Priority::MEDIUM)}, - {ov::device::priorities(ov::test::utils::DEVICE_CPU), - ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT), - ov::hint::model_priority(ov::hint::Priority::MEDIUM)}, - {ov::device::priorities(ov::test::utils::DEVICE_CPU), - ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT), - ov::hint::model_priority(ov::hint::Priority::MEDIUM)}}; -INSTANTIATE_TEST_SUITE_P(smoke_AutoCompileModelBehaviorTests, - OVSetPropComplieModleGetPropTests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(auto_setcore_properties), - ::testing::ValuesIn(auto_compileModel_properties)), - OVSetPropComplieModleGetPropTests::getTestCaseName); - -const std::vector 
default_properties = {{ov::enable_profiling(false)}, - {ov::log::level("LOG_NONE")}, - {ov::hint::model_priority(ov::hint::Priority::MEDIUM)}, - {ov::hint::execution_mode(ov::hint::ExecutionMode::PERFORMANCE)}, - {ov::intel_auto::device_bind_buffer(false)}, - {ov::intel_auto::enable_startup_fallback(true)}, - {ov::device::priorities("")}}; -INSTANTIATE_TEST_SUITE_P(smoke_AutoBehaviorTests, - OVPropertiesDefaultTests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(default_properties)), - OVPropertiesDefaultTests::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVPropertiesDefaultSupportedTests, - ::testing::Values(ov::test::utils::DEVICE_CPU, ov::test::utils::DEVICE_AUTO)); - -const std::vector auto_multi_incorrect_device_properties = { - {ov::device::priorities(ov::test::utils::DEVICE_CPU), - ov::num_streams(4), - ov::device::properties("CPU", ov::num_streams(4))}, - {ov::device::priorities(ov::test::utils::DEVICE_CPU), - ov::num_streams(4), - ov::device::properties("CPU", ov::num_streams(4), ov::enable_profiling(true))}}; - -INSTANTIATE_TEST_SUITE_P(smoke_AutoMultiSetAndCompileModelBehaviorTestsThrow, - OVSetUnsupportPropCompileModelWithoutConfigTests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO, - ov::test::utils::DEVICE_MULTI, - ov::test::utils::DEVICE_HETERO), - ::testing::ValuesIn(auto_multi_incorrect_device_properties)), - OVSetUnsupportPropCompileModelWithoutConfigTests::getTestCaseName); + ::testing::Values(ov::test::utils::DEVICE_CPU)); // // IE Class GetMetric // -INSTANTIATE_TEST_SUITE_P(smoke_AutoMultiHeteroOVGetMetricPropsTest, +INSTANTIATE_TEST_SUITE_P(smoke_HeteroOVGetMetricPropsTest, OVGetMetricPropsTest, - ::testing::Values("MULTI", "HETERO", "AUTO")); + ::testing::Values("HETERO")); INSTANTIATE_TEST_SUITE_P(smoke_OVGetMetricPropsTest, OVGetMetricPropsTest, ::testing::Values("CPU")); INSTANTIATE_TEST_SUITE_P( - smoke_AutoMultiHeteroOVCheckGetSupportedROMetricsPropsTests, + smoke_HeteroOVCheckGetSupportedROMetricsPropsTests, OVCheckGetSupportedROMetricsPropsTests, - ::testing::Combine(::testing::Values("MULTI", "HETERO", "AUTO"), + ::testing::Combine(::testing::Values("HETERO"), ::testing::ValuesIn(OVCheckGetSupportedROMetricsPropsTests::configureProperties( {ov::device::full_name.name()}))), OVCheckGetSupportedROMetricsPropsTests::getTestCaseName); @@ -171,29 +76,11 @@ INSTANTIATE_TEST_SUITE_P(smoke_OVGetAvailableDevicesPropsTest, OVGetAvailableDevicesPropsTest, ::testing::Values("CPU")); -INSTANTIATE_TEST_SUITE_P( - OVCheckSetSupportedRWMandatoryMetricsPropsTests, - OVCheckSetSupportedRWMetricsPropsTests, - ::testing::Combine(::testing::Values("MULTI:CPU", "AUTO:CPU"), - ::testing::ValuesIn(OVCheckSetSupportedRWMetricsPropsTests::getRWMandatoryPropertiesValues( - {ov::hint::model_priority.name(), ov::log::level.name()}))), - OVCheckSetSupportedRWMetricsPropsTests::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P( - OVCheckSetSupportedRWOptionalMetricsPropsTests, - OVCheckSetSupportedRWMetricsPropsTests, - ::testing::Combine(::testing::Values("MULTI:CPU", "AUTO:CPU"), - ::testing::ValuesIn(OVCheckSetSupportedRWMetricsPropsTests::getRWOptionalPropertiesValues( - {ov::hint::enable_hyper_threading.name(), - ov::hint::enable_cpu_pinning.name(), - ov::hint::scheduling_core_type.name()}))), - OVCheckSetSupportedRWMetricsPropsTests::getTestCaseName); - const std::vector multiConfigs = {{ov::device::priorities(ov::test::utils::DEVICE_CPU)}}; 
INSTANTIATE_TEST_SUITE_P(smoke_OVClassSetDevicePriorityConfigPropsTest, OVClassSetDevicePriorityConfigPropsTest, - ::testing::Combine(::testing::Values("MULTI", "AUTO", "HETERO"), + ::testing::Combine(::testing::Values("HETERO"), ::testing::ValuesIn(multiConfigs))); const std::vector configsDeviceProperties = { diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/configuration_tests.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/configuration_tests.cpp index 3e270874ce9542..5cdbb8fbd7285b 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/configuration_tests.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/configuration_tests.cpp @@ -67,67 +67,12 @@ namespace { {InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "3"}}, }; - const std::vector> MultiConfigs = { - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, InferenceEngine::PluginConfigParams::YES}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, InferenceEngine::PluginConfigParams::NO}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "1"}} - }; - - const std::vector> AutoConfigs = { - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "1"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_LOG_LEVEL, InferenceEngine::PluginConfigParams::LOG_NONE}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_LOG_LEVEL, InferenceEngine::PluginConfigParams::LOG_ERROR}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_LOG_LEVEL, 
InferenceEngine::PluginConfigParams::LOG_WARNING}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_LOG_LEVEL, InferenceEngine::PluginConfigParams::LOG_INFO}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_LOG_LEVEL, InferenceEngine::PluginConfigParams::LOG_DEBUG}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_LOG_LEVEL, InferenceEngine::PluginConfigParams::LOG_TRACE}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_MODEL_PRIORITY, InferenceEngine::PluginConfigParams::MODEL_PRIORITY_HIGH}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_MODEL_PRIORITY, InferenceEngine::PluginConfigParams::MODEL_PRIORITY_MED}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_MODEL_PRIORITY, InferenceEngine::PluginConfigParams::MODEL_PRIORITY_LOW}} - }; - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, CorrectConfigTests, ::testing::Combine( ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(Configs)), CorrectConfigTests::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, CorrectConfigTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(MultiConfigs)), - CorrectConfigTests::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, CorrectConfigTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(AutoConfigs)), - CorrectConfigTests::getTestCaseName); - const std::vector> inconfigs = { {{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, "DOESN'T EXIST"}}, {{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}, @@ -138,89 +83,18 @@ namespace { {{InferenceEngine::PluginConfigParams::KEY_CPU_BIND_THREAD, "OFF"}}, }; - const std::vector> multiinconfigs = { - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, "DOESN'T EXIST"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "-1"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "should be int"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, "OFF"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_EXCLUSIVE_ASYNC_REQUESTS, "OFF"}}, - }; - - 
const std::vector> autoinconfigs = { - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, "DOESN'T EXIST"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "-1"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "should be int"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "OFF"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_CPU_BIND_THREAD, "OFF"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_MODEL_PRIORITY, "-1"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_MODEL_PRIORITY, "ABC"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_LOG_LEVEL, "NAN"}} - }; - - const std::vector> multiconf = { - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "1"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}} - }; - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, IncorrectConfigTests, ::testing::Combine( ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(inconfigs)), IncorrectConfigTests::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, IncorrectConfigTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(multiinconfigs)), - IncorrectConfigTests::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, IncorrectConfigTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(autoinconfigs)), - IncorrectConfigTests::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, IncorrectConfigAPITests, ::testing::Combine( ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(inconfigs)), IncorrectConfigAPITests::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, 
IncorrectConfigAPITests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(multiinconfigs)), - IncorrectConfigAPITests::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, IncorrectConfigAPITests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(autoinconfigs)), - IncorrectConfigAPITests::getTestCaseName); - const std::vector> ConfigsCheck = { {}, {{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT}}, @@ -256,34 +130,10 @@ namespace { {InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, InferenceEngine::PluginConfigParams::YES}, }}; - const std::vector> auto_multi_prop_config = { - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT}, - {InferenceEngine::PluginConfigParams::KEY_MODEL_PRIORITY, InferenceEngine::PluginConfigParams::MODEL_PRIORITY_MED}}}; - - const std::vector> auto_multi_loadNetWork_config = { - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT}, - {InferenceEngine::PluginConfigParams::KEY_MODEL_PRIORITY, InferenceEngine::PluginConfigParams::MODEL_PRIORITY_HIGH}}}; - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, SetPropLoadNetWorkGetPropTests, ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(cpu_prop_config), ::testing::ValuesIn(cpu_loadNetWork_config)), SetPropLoadNetWorkGetPropTests::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, - SetPropLoadNetWorkGetPropTests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(auto_multi_prop_config), - ::testing::ValuesIn(auto_multi_loadNetWork_config)), - SetPropLoadNetWorkGetPropTests::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, - SetPropLoadNetWorkGetPropTests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(auto_multi_prop_config), - ::testing::ValuesIn(auto_multi_loadNetWork_config)), - SetPropLoadNetWorkGetPropTests::getTestCaseName); } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/core_integration.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/core_integration.cpp index 7d8f93b2144e88..6934ffaa19f78c 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/core_integration.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/core_integration.cpp @@ -30,11 +30,11 @@ INSTANTIATE_TEST_SUITE_P( INSTANTIATE_TEST_SUITE_P( smoke_IEClassGetMetricTest, IEClassGetMetricTest_SUPPORTED_CONFIG_KEYS, - ::testing::Values("CPU", "MULTI", "HETERO", "AUTO")); + ::testing::Values("CPU", "HETERO")); INSTANTIATE_TEST_SUITE_P( smoke_IEClassGetMetricTest, IEClassGetMetricTest_SUPPORTED_METRICS, - ::testing::Values("CPU", "MULTI", "HETERO", "AUTO")); + ::testing::Values("CPU", "HETERO")); INSTANTIATE_TEST_SUITE_P( smoke_IEClassGetMetricTest, IEClassGetMetricTest_AVAILABLE_DEVICES, @@ -42,11 +42,11 @@ INSTANTIATE_TEST_SUITE_P( INSTANTIATE_TEST_SUITE_P( smoke_IEClassGetMetricTest, IEClassGetMetricTest_FULL_DEVICE_NAME, - 
::testing::Values("CPU", "MULTI", "HETERO", "AUTO")); + ::testing::Values("CPU", "HETERO")); INSTANTIATE_TEST_SUITE_P( smoke_IEClassGetMetricTest, IEClassGetMetricTest_OPTIMIZATION_CAPABILITIES, - ::testing::Values("CPU", "MULTI", "AUTO")); + ::testing::Values("CPU")); INSTANTIATE_TEST_SUITE_P( smoke_IEClassGetMetricTest, IEClassGetMetricTest_RANGE_FOR_ASYNC_INFER_REQUESTS, @@ -58,11 +58,11 @@ INSTANTIATE_TEST_SUITE_P( INSTANTIATE_TEST_SUITE_P( smoke_IEClassGetMetricTest, IEClassGetMetricTest_ThrowUnsupported, - ::testing::Values("CPU", "MULTI", "HETERO", "AUTO")); + ::testing::Values("CPU", "HETERO")); INSTANTIATE_TEST_SUITE_P( smoke_IEClassGetConfigTest, IEClassGetConfigTest_ThrowUnsupported, - ::testing::Values("CPU", "MULTI", "HETERO", "AUTO")); + ::testing::Values("CPU", "HETERO")); INSTANTIATE_TEST_SUITE_P( smoke_IEClassGetAvailableDevices, IEClassGetAvailableDevices, diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/core_threading_tests.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/core_threading_tests.cpp index 0495f2b4cbbb6a..5f1ada306d367a 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/core_threading_tests.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/core_threading_tests.cpp @@ -5,9 +5,6 @@ #include #ifdef __GLIBC__ #include -#if __GLIBC_MINOR__ >= 34 - #define ENABLETESTMULTI -#endif #endif namespace { @@ -15,10 +12,6 @@ namespace { const Params params[] = { std::tuple{ ov::test::utils::DEVICE_CPU, {{ CONFIG_KEY(PERF_COUNT), CONFIG_VALUE(YES) }}}, std::tuple{ ov::test::utils::DEVICE_HETERO, {{ "TARGET_FALLBACK", ov::test::utils::DEVICE_CPU }}}, -#ifdef ENABLETESTMULTI - std::tuple{ ov::test::utils::DEVICE_MULTI, {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , ov::test::utils::DEVICE_CPU }}}, - std::tuple{ ov::test::utils::DEVICE_AUTO, {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , ov::test::utils::DEVICE_CPU }}}, -#endif }; const Params paramsStreams[] = { diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/set_preprocess.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/set_preprocess.cpp index f3f3588d375617..49664de10b4c2d 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/set_preprocess.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/set_preprocess.cpp @@ -23,10 +23,6 @@ namespace { {{ "TARGET_FALLBACK" , ov::test::utils::DEVICE_CPU}} }; - const std::vector> multiConfigs = { - {{ InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}} - }; - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, InferRequestPreprocessTest, ::testing::Combine( ::testing::ValuesIn(netPrecisions), @@ -41,21 +37,6 @@ namespace { ::testing::ValuesIn(heteroConfigs)), InferRequestPreprocessTest::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, InferRequestPreprocessTest, - ::testing::Combine( - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(multiConfigs)), - InferRequestPreprocessTest::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, InferRequestPreprocessTest, - ::testing::Combine( - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(multiConfigs)), - InferRequestPreprocessTest::getTestCaseName); 
- - const std::vector ioPrecisions = { InferenceEngine::Precision::FP32, InferenceEngine::Precision::U8 @@ -125,63 +106,6 @@ namespace { ::testing::Values(ov::test::utils::DEVICE_HETERO), ::testing::ValuesIn(heteroConfigs)), InferRequestPreprocessDynamicallyInSetBlobTest::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, InferRequestPreprocessConversionTest, - ::testing::Combine( - ::testing::ValuesIn(netPrecisions), - ::testing::ValuesIn(ioPrecisions), - ::testing::ValuesIn(ioPrecisions), - ::testing::ValuesIn(netLayouts), - ::testing::ValuesIn(ioLayouts), - ::testing::ValuesIn(ioLayouts), - ::testing::Bool(), - ::testing::Bool(), - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(multiConfigs)), - InferRequestPreprocessConversionTest::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, InferRequestPreprocessDynamicallyInSetBlobTest, - ::testing::Combine( - ::testing::ValuesIn(netPrecisions), - ::testing::Bool(), - ::testing::Bool(), - ::testing::ValuesIn(netLayouts), - ::testing::Bool(), - ::testing::Bool(), - ::testing::Values(true), // only SetBlob - ::testing::Values(true), // only SetBlob - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(multiConfigs)), - InferRequestPreprocessDynamicallyInSetBlobTest::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, InferRequestPreprocessConversionTest, - ::testing::Combine( - ::testing::ValuesIn(netPrecisions), - ::testing::ValuesIn(ioPrecisions), - ::testing::ValuesIn(ioPrecisions), - ::testing::ValuesIn(netLayouts), - ::testing::ValuesIn(ioLayouts), - ::testing::ValuesIn(ioLayouts), - ::testing::Bool(), - ::testing::Bool(), - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(multiConfigs)), - InferRequestPreprocessConversionTest::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, InferRequestPreprocessDynamicallyInSetBlobTest, - ::testing::Combine( - ::testing::ValuesIn(netPrecisions), - ::testing::Bool(), - ::testing::Bool(), - ::testing::ValuesIn(netLayouts), - ::testing::Bool(), - ::testing::Bool(), - ::testing::Values(true), // only SetBlob - ::testing::Values(true), // only SetBlob - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(multiConfigs)), - InferRequestPreprocessDynamicallyInSetBlobTest::getTestCaseName); - } // namespace #endif // ENABLE_GAPI_PREPROCESSING diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/version.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/version.cpp index d0bcd50ef58e04..bcc0130ca8e8ab 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/version.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/version.cpp @@ -10,14 +10,6 @@ namespace { ::testing::Values(ov::test::utils::DEVICE_CPU), VersionTest::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, VersionTest, - ::testing::Values(ov::test::utils::DEVICE_MULTI), - VersionTest::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, VersionTest, - ::testing::Values(ov::test::utils::DEVICE_AUTO), - VersionTest::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests, VersionTest, ::testing::Values(ov::test::utils::DEVICE_HETERO), VersionTest::getTestCaseName); diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/multi/cpu_remote_blob_tests.cpp 
b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/multi/cpu_remote_blob_tests.cpp deleted file mode 100644 index e3bf419f1208b2..00000000000000 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/multi/cpu_remote_blob_tests.cpp +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include -#include "multi/multi_remote_blob_tests.hpp" -#include "common_test_utils/test_constants.hpp" - -const std::vector device_names_and_support_for_remote_blobs { - {{CPU}, false, {}}, // CPU via MULTI -}; - -INSTANTIATE_TEST_SUITE_P(smoke_Multi_RemoteBlobCPU, MultiDevice_SupportTest, - ::testing::ValuesIn(device_names_and_support_for_remote_blobs), MultiDevice_SupportTest::getTestCaseName); diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp index b9caace0239ab2..3daad41fd0fe5a 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp @@ -68,9 +68,6 @@ std::vector disabledTestPatterns() { R"(.*NonZeroLayerTest.*)", // Not expected behavior R"(.*Behavior.*InferRequestSetBlobByType.*Batched.*)", - R"(.*Auto.*Behavior.*ExecutableNetworkBaseTest.*canLoadCorrectNetworkToGetExecutableWithIncorrectConfig.*)", - R"(.*(Auto|Multi).*Behavior.*CorrectConfigAPITests.*CanSetExclusiveAsyncRequests.*)", - R"(.*(Auto|Multi).*Behavior.*IncorrectConfigTests.*CanNotLoadNetworkWithIncorrectConfig.*)", R"(.*OVCompiledModelBaseTest.*(CanGetInputsInfoAndCheck|canSetConfigToCompiledModel).*)", R"(.*Behavior.*CorrectConfigCheck.*(canSetConfigAndCheckGetConfig|canSetConfigTwiceAndCheckGetConfig).*CPU_BIND_THREAD=YES.*)", // Issue: 72021 Unreasonable abs_threshold for comparing bf16 results @@ -88,24 +85,18 @@ std::vector disabledTestPatterns() { R"(.*Hetero.*Behavior.*ExecutableNetworkBaseTest.*ExecGraphInfo.*)", R"(.*Hetero.*Behavior.*OVCompiledModelBaseTest.*ExecGraphInfo.*)", R"(.*Hetero.*Behavior.*ExecutableNetworkBaseTest.*CanCreateTwoExeNetworksAndCheckFunction.*)", - // TODO: 104942 - R"(.*(Auto|Multi).*Behavior.*ExecutableNetworkBaseTest.*canLoadCorrectNetworkToGetExecutableAndCheckConfig.*)", - R"(.*(Auto|Multi).*SetPropLoadNetWorkGetPropTests.*)", R"(.*Hetero.*Behavior.*OVCompiledModelBaseTest.*canCreateTwoCompiledModelAndCheckTheir.*)", // CPU does not support dynamic rank // Issue: 66778 R"(.*smoke_BehaviorTests.*InferFullyDynamicNetworkWith(S|G)etTensor.*)", R"(.*smoke_Hetero_BehaviorTests.*InferFullyDynamicNetworkWith(S|G)etTensor.*)", - R"(.*smoke_Auto_BehaviorTests.*InferFullyDynamicNetworkWith(S|G)etTensor.*)", R"(.*smoke_BehaviorTests.*DynamicOutputToDynamicInput.*)", R"(.*smoke_BehaviorTests.*DynamicInputToDynamicOutput.*)", R"(.*smoke_Hetero_BehaviorTests.*DynamicOutputToDynamicInput.*)", R"(.*smoke_Hetero_BehaviorTests.*DynamicInputToDynamicOutput.*)", - R"(.*smoke_Auto_BehaviorTests.*DynamicOutputToDynamicInput.*)", - R"(.*smoke_Auto_BehaviorTests.*DynamicInputToDynamicOutput.*)", // unsupported metrics R"(.*OVGetMetricPropsTest.*OVGetMetricPropsTest.*(MAX_BATCH_SIZE).*)", - R"(.*smoke_AutoMultiHeteroOVGetMetricPropsTest.*OVGetMetricPropsTest.*(AVAILABLE_DEVICES|OPTIMIZATION_CAPABILITIES|RANGE_FOR_ASYNC_INFER_REQUESTS|RANGE_FOR_STREAMS).*)", + 
R"(.*smoke_HeteroOVGetMetricPropsTest.*OVGetMetricPropsTest.*(AVAILABLE_DEVICES|OPTIMIZATION_CAPABILITIES|RANGE_FOR_ASYNC_INFER_REQUESTS|RANGE_FOR_STREAMS).*)", // supports only '' as device id R"(.*OVClassQueryModelTest.*QueryModelWithDeviceID.*)", @@ -154,8 +145,6 @@ std::vector disabledTestPatterns() { R"(.*CompileModelCacheTestBase.*CompareWithRefImpl.*KSOFunction.*)", R"(.*CompileModelCacheTestBase.*CompareWithRefImpl.*NonMaxSuppression.*)", R"(.*CompileModelCacheTestBase.*CompareWithRefImpl.*Nms.*)", - // Issue: 76980 - R"(.*smoke_Auto_BehaviorTests.*InferDynamicNetwork/.*)", // Issue: 105838 R"(smoke_NmsLayerTest.*)", // Issue: 95590 @@ -167,8 +156,6 @@ std::vector disabledTestPatterns() { // The kernel does not have such garbage. The diff 0.000000745 is taken into account in calculations and affects further type conversion. // Reorder->GridSample->Reorder also does not work here. Potential fix is to use nearest conversion instead of truncation. R"(.*GridSampleLayerTestCPU.*(BILINEAR|BICUBIC).*(i32|i8).*)", - // AUTO does not support import / export - R"(.*smoke_Auto_BehaviorTests/OVCompiledGraphImportExportTest.*(mportExport|readFromV10IR).*/targetDevice=(AUTO).*)", // AdaptiveAvgPool is converted into Reduce op for suitable parameters. CPU Reduce impl doesn't support non planar layout for 3D case R"(.*StaticAdaPoolAvg3DLayoutTest.*OS=\(1\).*_inFmts=(nwc|nCw16c|nCw8c).*)", // Issue: 111404 @@ -186,8 +173,8 @@ std::vector disabledTestPatterns() { // Issue: 106939 R"(.*ScatterNDUpdateLayerCPUTest.*-1.-1.-1.-2.-2.-2.*)", // New plugin API doesn't support changes of pre-processing - R"(.*(Auto|Multi|Hetero).*InferRequestPreprocessTest.*SetPreProcessToInputInfo.*)", - R"(.*(Auto|Multi|Hetero).*InferRequestPreprocessTest.*SetPreProcessToInferRequest.*)", + R"(.*(Hetero).*InferRequestPreprocessTest.*SetPreProcessToInputInfo.*)", + R"(.*(Hetero).*InferRequestPreprocessTest.*SetPreProcessToInferRequest.*)", // TODO: for 22.2 (Issue 68949) R"(.*smoke_AutoBatching_CPU/AutoBatching_Test_DetectionOutput.*)", // Issue: 117837 diff --git a/src/plugins/intel_gpu/tests/functional/CMakeLists.txt b/src/plugins/intel_gpu/tests/functional/CMakeLists.txt index 480717eaacb912..60afb801970b01 100644 --- a/src/plugins/intel_gpu/tests/functional/CMakeLists.txt +++ b/src/plugins/intel_gpu/tests/functional/CMakeLists.txt @@ -57,14 +57,3 @@ if(WIN32) target_compile_definitions(${TARGET_NAME} PRIVATE ENABLE_DX11) target_link_libraries(${TARGET_NAME} PRIVATE d3d11 dxgi) endif() - -if (ENABLE_INTEL_CPU) - set_source_files_properties( - "${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instances/behavior/ov_plugin/life_time.cpp" - "${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp" - "${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instances/behavior/ov_infer_request/perf_counters.cpp" - "${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instances/multi/gpu_remote_blob_tests.cpp" - "${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instances/behavior/infer_request/memory_states.cpp" - "${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instances/behavior/ov_executable_network/exec_net_base.cpp" - PROPERTIES COMPILE_DEFINITIONS ENABLE_INTEL_CPU=1) -endif() diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/executable_network/exec_net_base.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/executable_network/exec_net_base.cpp index 8e048d5d3b9283..ce1b6c2c7c02b3 100644 --- 
a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/executable_network/exec_net_base.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/executable_network/exec_net_base.cpp @@ -44,20 +44,6 @@ auto configsSetPrc = []() { InferenceEngine::PluginConfigParams::GPU_THROUGHPUT_AUTO}}}; }; -auto multiConfig = []() { - return std::vector>{ - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_GPU}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_GPU}, - {InferenceEngine::PluginConfigParams::KEY_GPU_THROUGHPUT_STREAMS, - InferenceEngine::PluginConfigParams::GPU_THROUGHPUT_AUTO}}}; -}; - -auto autoConfig = []() { - return std::vector>{ - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_GPU}}, - }; -}; - auto autoBatchConfig = []() { return std::vector>{ // explicit batch size 4 to avoid fallback to no auto-batching (i.e. plain GPU) @@ -73,20 +59,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, ExecNetSetPrecision, ::testing::ValuesIn(configsSetPrc())), ExecNetSetPrecision::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, ExecNetSetPrecision, - ::testing::Combine( - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(multiConfig())), - ExecNetSetPrecision::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, ExecNetSetPrecision, - ::testing::Combine( - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(autoConfig())), - ExecNetSetPrecision::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests, ExecNetSetPrecision, ::testing::Combine( ::testing::ValuesIn(netPrecisions), diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/executable_network/get_metric.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/executable_network/get_metric.cpp index 68920e41fddf64..c27224d0107011 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/executable_network/get_metric.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/executable_network/get_metric.cpp @@ -22,27 +22,27 @@ namespace { INSTANTIATE_TEST_SUITE_P( nightly_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_OPTIMAL_NUMBER_OF_INFER_REQUESTS, - ::testing::Values("GPU", "MULTI:GPU", "HETERO:GPU", "AUTO:GPU,CPU", "BATCH:GPU") + ::testing::Values("GPU", "HETERO:GPU", "BATCH:GPU") ); INSTANTIATE_TEST_SUITE_P( nightly_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS, - ::testing::Values("GPU", "MULTI:GPU", "HETERO:GPU", "AUTO:GPU,CPU", "BATCH:GPU") + ::testing::Values("GPU", "HETERO:GPU", "BATCH:GPU") ); INSTANTIATE_TEST_SUITE_P( nightly_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_SUPPORTED_METRICS, - ::testing::Values("GPU", "MULTI:GPU", "HETERO:GPU", "AUTO:GPU,CPU", "BATCH:GPU") + ::testing::Values("GPU", "HETERO:GPU", "BATCH:GPU") ); INSTANTIATE_TEST_SUITE_P( nightly_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_NETWORK_NAME, - ::testing::Values("GPU", "MULTI:GPU", "HETERO:GPU", "AUTO:GPU,CPU", "BATCH:GPU") + ::testing::Values("GPU", "HETERO:GPU", "BATCH:GPU") ); INSTANTIATE_TEST_SUITE_P( nightly_IEClassExecutableNetworkGetMetricTest, 
IEClassExecutableNetworkGetMetricTest_ThrowsUnsupported, - ::testing::Values("GPU", "MULTI:GPU", "HETERO:GPU", "AUTO:GPU,CPU", "BATCH:GPU") + ::testing::Values("GPU", "HETERO:GPU", "BATCH:GPU") ); // diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/callback.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/callback.cpp index f30aabd31f08d8..6cec708b6756ea 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/callback.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/callback.cpp @@ -12,18 +12,6 @@ auto configs = []() { }; }; -auto multiConfigs = []() { - return std::vector>{ - {{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_GPU}}}; -}; - -auto autoConfigs = []() { - return std::vector>{ - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_GPU}, - {InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, - ov::test::utils::DEVICE_GPU + std::string(",") + ov::test::utils::DEVICE_CPU}}}; -}; - auto autoBatchConfigs = []() { return std::vector>{ // explicit batch size 4 to avoid fallback to no auto-batching (i.e. plain GPU) @@ -38,18 +26,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, InferRequestCallbackTests, ::testing::ValuesIn(configs())), InferRequestCallbackTests::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, InferRequestCallbackTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(multiConfigs())), - InferRequestCallbackTests::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, InferRequestCallbackTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(autoConfigs())), - InferRequestCallbackTests::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests, InferRequestCallbackTests, ::testing::Combine( ::testing::Values(ov::test::utils::DEVICE_BATCH), diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/config.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/config.cpp index 88669d1bc716a1..62b0bd6fa64f9e 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/config.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/config.cpp @@ -10,22 +10,10 @@ auto configs = []() { return std::vector>{{}}; }; -auto multiConfigs = []() { - return std::vector>{ - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_GPU}}}; -}; - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, InferRequestConfigTest, ::testing::Combine( ::testing::Values(1u), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::ValuesIn(configs())), InferRequestConfigTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, InferRequestConfigTest, - ::testing::Combine( - ::testing::Values(1u), - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(multiConfigs())), - InferRequestConfigTest::getTestCaseName); } // namespace diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/io_blob.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/io_blob.cpp index 55345b782a37a4..ed0ae6944f9705 100644 --- 
a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/io_blob.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/io_blob.cpp @@ -9,34 +9,9 @@ using namespace BehaviorTestsDefinitions; namespace { -auto configs = []() { - return std::vector>{ - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_GPU}}}; -}; - -auto autoconfigs = []() { - return std::vector>{ - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_GPU}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, - std::string(ov::test::utils::DEVICE_CPU) + "," + ov::test::utils::DEVICE_GPU}}}; -}; - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, InferRequestIOBBlobTest, ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::Values(std::map({}))), InferRequestIOBBlobTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, - InferRequestIOBBlobTest, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(configs())), - InferRequestIOBBlobTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, - InferRequestIOBBlobTest, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(autoconfigs())), - InferRequestIOBBlobTest::getTestCaseName); - } // namespace diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/multithreading.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/multithreading.cpp index 6c409783d786a4..484fee15c99d44 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/multithreading.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/multithreading.cpp @@ -9,18 +9,6 @@ using namespace BehaviorTestsDefinitions; namespace { -auto configs = []() { - return std::vector>{ - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_GPU}}}; -}; - -auto autoconfigs = []() { - return std::vector>{ - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_GPU}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, - std::string(ov::test::utils::DEVICE_CPU) + "," + ov::test::utils::DEVICE_GPU}}}; -}; - auto auto_batch_configs = []() { return std::vector>{ // explicit batch size 4 to avoid fallback to no auto-batching (i.e. 
plain GPU) @@ -35,19 +23,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, InferRequestMultithreadingTests, ::testing::Values(std::map({}))), InferRequestMultithreadingTests::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, InferRequestMultithreadingTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(configs())), - InferRequestMultithreadingTests::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, InferRequestMultithreadingTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(autoconfigs())), - InferRequestMultithreadingTests::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests, InferRequestMultithreadingTests, ::testing::Combine( ::testing::Values(ov::test::utils::DEVICE_BATCH), diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/perf_counters.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/perf_counters.cpp index a76185499e0e13..3165b94647180e 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/perf_counters.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/perf_counters.cpp @@ -10,20 +10,6 @@ auto configs = []() { return std::vector>{{}}; }; -auto Multiconfigs = - []() { - return std::vector>{ - {{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_GPU}}}; - }; - -auto AutoConfigs = - []() { - return std::vector>{ - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_GPU}, - {InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, - ov::test::utils::DEVICE_GPU + std::string(",") + ov::test::utils::DEVICE_CPU}}}; - }; - auto AutoBatchConfigs = []() { return std::vector>{ @@ -39,18 +25,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, ::testing::ValuesIn(configs())), InferRequestPerfCountersTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, - InferRequestPerfCountersTest, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(Multiconfigs())), - InferRequestPerfCountersTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, - InferRequestPerfCountersTest, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(AutoConfigs())), - InferRequestPerfCountersTest::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests, InferRequestPerfCountersTest, ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_BATCH), diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/set_blob_by_type.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/set_blob_by_type.cpp index d6a1026f9d15b0..7ffee02692fbb4 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/set_blob_by_type.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/set_blob_by_type.cpp @@ -17,9 +17,7 @@ const std::vector BlobTypes = { auto gpuConfig = []() { return std::map{}; }; // nothing special -auto multiConfig = []() { - return std::map{{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_GPU}}; -}; + auto heteroConfig = []() { return std::map{{"TARGET_FALLBACK", ov::test::utils::DEVICE_GPU}}; }; @@ -30,18 +28,6 @@ 
INSTANTIATE_TEST_SUITE_P(smoke_Behavior, InferRequestSetBlobByType, ::testing::Values(gpuConfig())), InferRequestSetBlobByType::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_Behavior_Multi, InferRequestSetBlobByType, - ::testing::Combine(::testing::ValuesIn(BlobTypes), - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::Values(multiConfig())), - InferRequestSetBlobByType::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Behavior_Auto, InferRequestSetBlobByType, - ::testing::Combine(::testing::ValuesIn(BlobTypes), - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::Values(multiConfig())), - InferRequestSetBlobByType::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_Behavior_Hetero, InferRequestSetBlobByType, ::testing::Combine(::testing::ValuesIn(BlobTypes), ::testing::Values(ov::test::utils::DEVICE_HETERO), diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/wait.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/wait.cpp index 7a4e9988f73b67..703fbc677524e2 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/wait.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/wait.cpp @@ -9,18 +9,6 @@ using namespace BehaviorTestsDefinitions; namespace { -auto configs = []() { - return std::vector>{ - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_GPU}}}; -}; - -auto autoConfigs = []() { - return std::vector>{ - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_GPU}, - {InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, - ov::test::utils::DEVICE_GPU + std::string(",") + ov::test::utils::DEVICE_CPU}}}; -}; - auto autoBatchConfigs = []() { return std::vector>{ // explicit batch size 4 to avoid fallback to no auto-batching (i.e. 
plain GPU) @@ -35,18 +23,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, ::testing::Values(std::map({}))), InferRequestWaitTests::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, - InferRequestWaitTests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(configs())), - InferRequestWaitTests::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, - InferRequestWaitTests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(autoConfigs())), - InferRequestWaitTests::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests, InferRequestWaitTests, ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_BATCH), diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/exec_net_base.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/exec_net_base.cpp index a067a0ea08faf6..951108438ed190 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/exec_net_base.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/exec_net_base.cpp @@ -20,14 +20,6 @@ auto autoBatchConfigs = []() { {CONFIG_KEY(AUTO_BATCH_TIMEOUT), "0 "}}}; }; -const std::vector autoConfigs = { - {ov::device::priorities(ov::test::utils::DEVICE_GPU)}, -#ifdef ENABLE_INTEL_CPU - {ov::device::priorities(ov::test::utils::DEVICE_CPU, ov::test::utils::DEVICE_GPU)}, - {ov::device::priorities(ov::test::utils::DEVICE_GPU, ov::test::utils::DEVICE_CPU)}, -#endif -}; - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVCompiledModelBaseTest, ::testing::Combine( ::testing::Values(ov::test::utils::DEVICE_GPU), @@ -39,10 +31,4 @@ INSTANTIATE_TEST_SUITE_P(smoke_AutoBatchBehaviorTests, OVCompiledModelBaseTest, ::testing::Values(ov::test::utils::DEVICE_BATCH), ::testing::ValuesIn(autoBatchConfigs())), OVCompiledModelBaseTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, - OVAutoExecutableNetworkTest, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(autoConfigs)), - OVCompiledModelBaseTest::getTestCaseName); } // namespace diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/get_metric.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/get_metric.cpp index a51dc4156977f9..6a7176302f684f 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/get_metric.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/get_metric.cpp @@ -18,7 +18,7 @@ namespace { INSTANTIATE_TEST_SUITE_P(nightly_OVClassCompiledModelGetPropertyTest, OVClassCompiledModelGetPropertyTest, - ::testing::Values("GPU", "MULTI:GPU", "HETERO:GPU", "AUTO:GPU,CPU", "BATCH:GPU")); + ::testing::Values("GPU", "HETERO:GPU", "BATCH:GPU")); const std::vector>> GetMetricTest_ExecutionDevice_GPU = { @@ -29,37 +29,13 @@ const std::vector>> G INSTANTIATE_TEST_SUITE_P(nightly_OVClassCompiledModelGetPropertyTest, OVClassCompiledModelGetPropertyTest_EXEC_DEVICES, ::testing::ValuesIn(GetMetricTest_ExecutionDevice_GPU)); - -auto multiDevicePriorityConfigs = []() { - return std::vector{{ov::device::priorities(ov::test::utils::DEVICE_CPU)}, - {ov::device::priorities(ov::test::utils::DEVICE_GPU)}, - 
{ov::device::priorities(ov::test::utils::DEVICE_CPU, ov::test::utils::DEVICE_GPU)}}; -}; - -INSTANTIATE_TEST_SUITE_P(nightly_OVClassCompiledModelGetPropertyTest, - OVClassCompiledModelGetPropertyTest_DEVICE_PRIORITY, - ::testing::Combine(::testing::Values("MULTI", "AUTO"), - ::testing::ValuesIn(multiDevicePriorityConfigs()))); - -auto multiModelPriorityConfigs = []() { - return std::vector{{ov::hint::model_priority(ov::hint::Priority::HIGH)}, - {ov::hint::model_priority(ov::hint::Priority::MEDIUM)}, - {ov::hint::model_priority(ov::hint::Priority::LOW)}}; -}; - -INSTANTIATE_TEST_SUITE_P(nightly_OVClassCompiledModelGetPropertyTest, - OVClassCompiledModelGetPropertyTest_MODEL_PRIORITY, - ::testing::Combine(::testing::Values("AUTO"), - ::testing::ValuesIn(multiModelPriorityConfigs())), - OVClassCompiledModelGetPropertyTest_MODEL_PRIORITY::getTestCaseName); - // // Executable Network GetConfig / SetConfig // INSTANTIATE_TEST_SUITE_P(nightly_OVClassCompiledModelGetIncorrectPropertyTest, OVClassCompiledModelGetIncorrectPropertyTest, - ::testing::Values("GPU", "MULTI:GPU", "HETERO:GPU", "AUTO:GPU,CPU", "BATCH:GPU")); + ::testing::Values("GPU", "HETERO:GPU", "BATCH:GPU")); INSTANTIATE_TEST_SUITE_P(nightly_OVClassCompiledModelGetConfigTest, OVClassCompiledModelGetConfigTest, @@ -82,9 +58,7 @@ const std::vector incorrect_device_priorities_properties = {{ov::dev INSTANTIATE_TEST_SUITE_P(smoke_BehaviorIncorrectPropertiesTests, OVClassCompiledModelPropertiesIncorrectTests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO, - ov::test::utils::DEVICE_MULTI, - ov::test::utils::DEVICE_HETERO), + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_HETERO), ::testing::ValuesIn(incorrect_device_priorities_properties)), OVClassCompiledModelPropertiesIncorrectTests::getTestCaseName); @@ -119,115 +93,14 @@ INSTANTIATE_TEST_SUITE_P(smoke_OVClassCompileModelWithCorrectSecondaryProperties ::testing::ValuesIn(gpuCorrectConfigsWithSecondaryProperties())), ::testing::PrintToStringParamName()); -INSTANTIATE_TEST_SUITE_P(smoke_AUTO_OVClassCompileModelWithCorrectSecondaryPropertiesTest, +INSTANTIATE_TEST_SUITE_P(smoke_HETERO_OVClassCompileModelWithCorrectSecondaryPropertiesTest, OVClassCompileModelWithCorrectPropertiesTest, - ::testing::Combine(::testing::Values("AUTO:GPU", "MULTI:GPU", "HETERO:GPU"), + ::testing::Combine(::testing::Values("HETERO:GPU"), ::testing::ValuesIn(gpuCorrectConfigsWithSecondaryProperties()))); -auto autoCorrectConfigs = []() { - return std::vector{{ov::device::priorities(ov::test::utils::DEVICE_GPU), - ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT), - ov::hint::allow_auto_batching(false)}, - {ov::device::priorities(ov::test::utils::DEVICE_GPU), - ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT), - ov::hint::allow_auto_batching(true)}}; -}; - -auto autoCorrectConfigsWithSecondaryProperties = []() { - return std::vector{ - {ov::device::priorities(ov::test::utils::DEVICE_GPU), - ov::device::properties(ov::test::utils::DEVICE_AUTO, - ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT), - ov::hint::allow_auto_batching(false))}, - {ov::device::priorities(ov::test::utils::DEVICE_GPU), - ov::device::properties(ov::test::utils::DEVICE_GPU, - ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT), - ov::hint::allow_auto_batching(false))}, - {ov::device::priorities(ov::test::utils::DEVICE_GPU), - ov::device::properties(ov::test::utils::DEVICE_GPU, - ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT), - 
ov::hint::allow_auto_batching(false)), - ov::device::properties(ov::test::utils::DEVICE_CPU, - ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY), - ov::hint::allow_auto_batching(false))}, - {ov::device::priorities(ov::test::utils::DEVICE_GPU), - ov::device::properties("GPU.0", - ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT), - ov::hint::allow_auto_batching(false)), - ov::device::properties(ov::test::utils::DEVICE_CPU, - ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY), - ov::hint::allow_auto_batching(false))}}; -}; - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_OVClassCompileModelWithCorrectPropertiesAutoBatchingTest, - OVClassCompileModelWithCorrectPropertiesTest, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI, - ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(autoCorrectConfigs()))); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_OVClassCompileModelWithCorrectSecondaryPropertiesTest, - OVClassCompileModelWithCorrectPropertiesTest, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI, - ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(autoCorrectConfigsWithSecondaryProperties())), - ::testing::PrintToStringParamName()); - const std::vector batchCorrectConfigs = {{}}; INSTANTIATE_TEST_SUITE_P(smoke_Auto_Batch_OVClassCompileModelWithCorrectPropertiesAutoBatchingTest, OVClassCompileModelWithCorrectPropertiesTest, ::testing::Combine(::testing::Values("BATCH:GPU"), ::testing::ValuesIn(batchCorrectConfigs))); - -const std::vector> autoExeDeviceConfigs = { - std::make_pair(ov::AnyMap{{ov::device::priorities("GPU.0")}}, "GPU.0"), -#ifdef ENABLE_INTEL_CPU - std::make_pair(ov::AnyMap{{ov::device::priorities(ov::test::utils::DEVICE_GPU, ov::test::utils::DEVICE_CPU)}}, - "undefined"), - std::make_pair(ov::AnyMap{{ov::device::priorities(ov::test::utils::DEVICE_CPU, ov::test::utils::DEVICE_GPU)}}, - "CPU"), - std::make_pair(ov::AnyMap{{ov::device::priorities(ov::test::utils::DEVICE_CPU, ov::test::utils::DEVICE_GPU), - ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)}}, - "CPU,GPU"), - std::make_pair(ov::AnyMap{{ov::device::priorities(ov::test::utils::DEVICE_GPU, ov::test::utils::DEVICE_CPU), - ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)}}, - "GPU,CPU"), - std::make_pair(ov::AnyMap{{ov::device::priorities(ov::test::utils::DEVICE_GPU, ov::test::utils::DEVICE_CPU), - ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT), - ov::hint::allow_auto_batching(true)}}, - "GPU,CPU"), -#endif -}; - -const std::vector> multiExeDeviceConfigs = { - std::make_pair(ov::AnyMap{{ov::device::priorities("GPU.0")}}, "GPU.0"), -#ifdef ENABLE_INTEL_CPU - std::make_pair(ov::AnyMap{{ov::device::priorities(ov::test::utils::DEVICE_GPU, ov::test::utils::DEVICE_CPU)}}, - "GPU,CPU"), - std::make_pair(ov::AnyMap{{ov::device::priorities(ov::test::utils::DEVICE_CPU, ov::test::utils::DEVICE_GPU)}}, - "CPU,GPU"), - std::make_pair(ov::AnyMap{{ov::device::priorities(ov::test::utils::DEVICE_CPU, ov::test::utils::DEVICE_GPU), - ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)}}, - "CPU,GPU"), - std::make_pair(ov::AnyMap{{ov::device::priorities(ov::test::utils::DEVICE_GPU, ov::test::utils::DEVICE_CPU), - ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)}}, - "GPU,CPU"), - std::make_pair(ov::AnyMap{{ov::device::priorities(ov::test::utils::DEVICE_GPU, ov::test::utils::DEVICE_CPU), - 
ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT), - ov::hint::allow_auto_batching(true)}}, - "GPU,CPU"), -#endif -}; - -INSTANTIATE_TEST_SUITE_P(smoke_AutoMultiCompileModelBehaviorTests, - OVCompileModelGetExecutionDeviceTests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(autoExeDeviceConfigs)), - OVCompileModelGetExecutionDeviceTests::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_MultiCompileModelBehaviorTests, - OVCompileModelGetExecutionDeviceTests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(multiExeDeviceConfigs)), - OVCompileModelGetExecutionDeviceTests::getTestCaseName); - } // namespace diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/callback.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/callback.cpp index 6ce84fb09d28ea..fa63fdbea6ec9c 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/callback.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/callback.cpp @@ -17,10 +17,6 @@ auto configs = []() { }; }; -auto multiConfigs = []() { - return std::vector{{ov::device::priorities(ov::test::utils::DEVICE_GPU)}}; -}; - auto autoBatchConfigs = []() { return std::vector{ // explicit batch size 4 to avoid fallback to no auto-batching (i.e. plain GPU) @@ -35,18 +31,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestCallbackTests, ::testing::ValuesIn(configs())), OVInferRequestCallbackTests::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, OVInferRequestCallbackTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(multiConfigs())), - OVInferRequestCallbackTests::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVInferRequestCallbackTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(multiConfigs())), - OVInferRequestCallbackTests::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests, OVInferRequestCallbackTests, ::testing::Combine( ::testing::Values(ov::test::utils::DEVICE_BATCH), diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_consistency.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_consistency.cpp index 207d57b5b355f1..7d6a57ae2cc003 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_consistency.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_consistency.cpp @@ -20,71 +20,10 @@ auto configs = []() { return std::vector{{{ov::test::utils::DEVICE_GPU, {}}, {ov::test::utils::DEVICE_GPU, {}}}}; }; -auto AutoConfigs = []() { - return std::vector{{{ov::test::utils::DEVICE_AUTO + std::string(":") + ov::test::utils::DEVICE_GPU, - {ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY)}}, - {ov::test::utils::DEVICE_GPU, {}}}, - {{ov::test::utils::DEVICE_AUTO + std::string(":") + ov::test::utils::DEVICE_GPU, - {ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT)}}, - {ov::test::utils::DEVICE_GPU, {}}}, - {{ov::test::utils::DEVICE_AUTO + std::string(":") + ov::test::utils::DEVICE_GPU, - 
{ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)}}, - {ov::test::utils::DEVICE_GPU, {}}}, - {{ov::test::utils::DEVICE_AUTO + std::string(":") + ov::test::utils::DEVICE_GPU + "," + - ov::test::utils::DEVICE_CPU, - {ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY)}}, - {ov::test::utils::DEVICE_GPU, {}}, - {ov::test::utils::DEVICE_CPU, {}}}, - {{ov::test::utils::DEVICE_AUTO + std::string(":") + ov::test::utils::DEVICE_GPU + "," + - ov::test::utils::DEVICE_CPU, - {ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT)}}, - {ov::test::utils::DEVICE_GPU, {}}, - {ov::test::utils::DEVICE_CPU, {}}}, - {{ov::test::utils::DEVICE_AUTO + std::string(":") + ov::test::utils::DEVICE_GPU + "," + - ov::test::utils::DEVICE_CPU, - {ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)}}, - {ov::test::utils::DEVICE_GPU, {}}, - {ov::test::utils::DEVICE_CPU, {}}}, - {{ov::test::utils::DEVICE_AUTO + std::string(":") + ov::test::utils::DEVICE_CPU + "," + - ov::test::utils::DEVICE_GPU, - {ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)}}, - {ov::test::utils::DEVICE_CPU, {}}, - {ov::test::utils::DEVICE_GPU, {}}}}; -}; - -auto AutoBindConfigs = []() { - return std::vector{{{ov::test::utils::DEVICE_AUTO + std::string(":") + ov::test::utils::DEVICE_GPU + "," + - ov::test::utils::DEVICE_CPU, - {ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT), - ov::intel_auto::device_bind_buffer(true)}}, - {ov::test::utils::DEVICE_GPU, {}}, - {ov::test::utils::DEVICE_CPU, {}}}, - {{ov::test::utils::DEVICE_AUTO + std::string(":") + ov::test::utils::DEVICE_CPU + "," + - ov::test::utils::DEVICE_GPU, - {ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT), - ov::intel_auto::device_bind_buffer(true)}}, - {ov::test::utils::DEVICE_CPU, {}}, - {ov::test::utils::DEVICE_GPU, {}}}}; -}; - INSTANTIATE_TEST_SUITE_P(BehaviorTests, OVInferConsistencyTest, ::testing::Combine( ::testing::Values(10),// inferRequest num ::testing::Values(10),// infer counts ::testing::ValuesIn(configs())), OVInferConsistencyTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(Auto_BehaviorTests, OVInferConsistencyTest, - ::testing::Combine( - ::testing::Values(10),// inferRequest num - ::testing::Values(10),// infer counts - ::testing::ValuesIn(AutoConfigs())), - OVInferConsistencyTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(Auto_Bind_BehaviorTests, OVInferConsistencyTest, - ::testing::Combine( - ::testing::Values(0),// inferRequest num, will use optimal request number if set 0 - ::testing::Values(10),// infer counts - ::testing::ValuesIn(AutoBindConfigs())), - OVInferConsistencyTest::getTestCaseName); } // namespace \ No newline at end of file diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp index 9b652de3073ceb..7f5402bfa5cc8e 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp @@ -15,15 +15,6 @@ auto configs = []() { return std::vector{{}}; }; -auto AutoConfigs = []() { - return std::vector{{ov::device::priorities(ov::test::utils::DEVICE_GPU, ov::test::utils::DEVICE_CPU)}, - {}}; -}; - -auto AutoNotSupportConfigs = []() { - 
return std::vector{}; -}; - std::shared_ptr getFunction1() { const std::vector inputShape = {1, 4, 20, 20}; const ngraph::element::Type_t ngPrc = ngraph::element::Type_t::f32; @@ -38,29 +29,6 @@ std::shared_ptr getFunction1() { return std::make_shared(relu, params, "SimpleActivation"); } -std::shared_ptr getFunction2() { - const std::vector inputShape = {1, 4, 20, 20}; - const ngraph::element::Type_t ngPrc = ngraph::element::Type_t::f32; - - ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; - params.front()->set_friendly_name("Param_1"); - params.front()->get_output_tensor(0).set_names({"input_tensor"}); - auto split = ngraph::builder::makeSplit(params[0], ngPrc, 2, 1); - - auto in2add = ngraph::builder::makeConstant(ngPrc, {1, 2, 1, 1}, std::vector{}, true); - auto add = ngraph::builder::makeEltwise(split->output(0), in2add, ngraph::helpers::EltwiseTypes::ADD); - auto relu1 = std::make_shared(add); - - auto in2mult = ngraph::builder::makeConstant(ngPrc, {1, 2, 1, 1}, std::vector{}, true); - auto mult = ngraph::builder::makeEltwise(split->output(1), in2mult, ngraph::helpers::EltwiseTypes::MULTIPLY); - auto relu2 = std::make_shared(mult); - - auto concat = std::make_shared(ngraph::OutputVector{relu1->output(0), relu2->output(0)}, 3); - concat->get_output_tensor(0).set_names({"concat"}); - - return std::make_shared(concat, params, "SplitAddConcat"); -} - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests_1, OVInferRequestDynamicTests, ::testing::Combine( ::testing::Values(getFunction1()), @@ -70,36 +38,4 @@ INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests_1, OVInferRequestDynamicTests, ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::ValuesIn(configs())), OVInferRequestDynamicTests::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVInferRequestDynamicTests, - ::testing::Combine( - ::testing::Values(getFunction2()), - ::testing::Values(std::vector, std::vector>>{ - {{1, 4, 20, 20}, {1, 2, 20, 40}}, - {{2, 4, 20, 20}, {2, 2, 20, 40}}}), - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(AutoConfigs())), - OVInferRequestDynamicTests::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVInferenceChaining, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(AutoConfigs())), - OVInferenceChaining::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVInferenceChainingStatic, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(AutoConfigs())), - OVInferenceChainingStatic::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVNotSupportRequestDynamicTests, - ::testing::Combine( - ::testing::Values(getFunction2()), - ::testing::Values(std::vector, std::vector>>{ - {{1, 4, 20, 20}, {1, 2, 20, 40}}, - {{2, 4, 20, 20}, {2, 2, 20, 40}}}), - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(AutoNotSupportConfigs())), - OVInferRequestDynamicTests::getTestCaseName); } // namespace diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp index c8d46afb2d66d0..7c2322192dd6cd 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp @@ -22,14 +22,6 @@ 
auto configs = []() { }; }; -auto MultiConfigs = []() { - return std::vector{{ov::device::priorities(ov::test::utils::DEVICE_GPU)}}; -}; - -auto AutoConfigs = []() { - return std::vector{{ov::device::priorities(ov::test::utils::DEVICE_GPU)}}; -}; - auto AutoBatchConfigs = []() { return std::vector{ // explicit batch size 4 to avoid fallback to no auto-batching (i.e. plain GPU) @@ -44,18 +36,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestIOTensorTest, ::testing::ValuesIn(configs())), OVInferRequestIOTensorTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, OVInferRequestIOTensorTest, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(MultiConfigs())), - OVInferRequestIOTensorTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVInferRequestIOTensorTest, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(AutoConfigs())), - OVInferRequestIOTensorTest::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests, OVInferRequestIOTensorTest, ::testing::Combine( ::testing::Values(ov::test::utils::DEVICE_BATCH), @@ -103,20 +83,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestIOTensorSetPrecision ::testing::ValuesIn(configs())), OVInferRequestIOTensorSetPrecisionTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, OVInferRequestIOTensorSetPrecisionTest, - ::testing::Combine( - ::testing::ValuesIn(prcs), - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(MultiConfigs())), - OVInferRequestIOTensorSetPrecisionTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVInferRequestIOTensorSetPrecisionTest, - ::testing::Combine( - ::testing::ValuesIn(prcs), - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(AutoConfigs())), - OVInferRequestIOTensorSetPrecisionTest::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests, OVInferRequestIOTensorSetPrecisionTest, ::testing::Combine( ::testing::ValuesIn(prcs), diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/multithreading.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/multithreading.cpp index b33d1c535a30e9..7c17f4a296d21c 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/multithreading.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/multithreading.cpp @@ -18,10 +18,6 @@ auto configs = []() { }; }; -auto Multiconfigs = []() { - return std::vector{{ov::device::priorities(ov::test::utils::DEVICE_GPU)}}; -}; - auto AutoBatchConfigs = []() { return std::vector{ // explicit batch size 4 to avoid fallback to no auto-batching (i.e. 
plain GPU) @@ -36,18 +32,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestMultithreadingTests, ::testing::ValuesIn(configs())), OVInferRequestMultithreadingTests::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, OVInferRequestMultithreadingTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(Multiconfigs())), - OVInferRequestMultithreadingTests::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVInferRequestMultithreadingTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(Multiconfigs())), - OVInferRequestMultithreadingTests::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests, OVInferRequestMultithreadingTests, ::testing::Combine( ::testing::Values(ov::test::utils::DEVICE_BATCH), diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/perf_counters.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/perf_counters.cpp index c460819194ac9b..b10d622fb56138 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/perf_counters.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/perf_counters.cpp @@ -11,29 +11,6 @@ auto configs = []() { return std::vector{{}}; }; -auto Multiconfigs = []() { - return std::vector{ - {ov::device::priorities(ov::test::utils::DEVICE_GPU)}, -#ifdef ENABLE_INTEL_CPU - {ov::device::priorities(ov::test::utils::DEVICE_GPU, ov::test::utils::DEVICE_CPU), ov::enable_profiling(true)}, - {ov::device::priorities(ov::test::utils::DEVICE_GPU, ov::test::utils::DEVICE_CPU), - ov::intel_auto::device_bind_buffer(false)}, - {ov::device::priorities(ov::test::utils::DEVICE_GPU, ov::test::utils::DEVICE_CPU), - ov::intel_auto::device_bind_buffer(true)} -#endif - }; -}; - -auto Autoconfigs = []() { - return std::vector{{ov::device::priorities(ov::test::utils::DEVICE_GPU)}, -#ifdef ENABLE_INTEL_CPU - {ov::device::priorities(ov::test::utils::DEVICE_GPU, ov::test::utils::DEVICE_CPU), - ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT), - ov::intel_auto::device_bind_buffer(true)} -#endif - }; -}; - auto AutoBatchConfigs = []() { return std::vector{ // explicit batch size 4 to avoid fallback to no auto-batching (i.e. 
plain GPU) @@ -48,36 +25,9 @@ INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestPerfCountersTest, ::testing::ValuesIn(configs())), OVInferRequestPerfCountersTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, OVInferRequestPerfCountersTest, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(Multiconfigs())), - OVInferRequestPerfCountersTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVInferRequestPerfCountersTest, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(Autoconfigs())), - OVInferRequestPerfCountersTest::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests, OVInferRequestPerfCountersTest, ::testing::Combine( ::testing::Values(ov::test::utils::DEVICE_BATCH), ::testing::ValuesIn(AutoBatchConfigs())), OVInferRequestPerfCountersTest::getTestCaseName); - -auto MulticonfigsTest = []() { - return std::vector{ -#ifdef ENABLE_INTEL_CPU - {ov::device::priorities(ov::test::utils::DEVICE_GPU, ov::test::utils::DEVICE_CPU), - ov::device::priorities(ov::test::utils::DEVICE_CPU, ov::test::utils::DEVICE_GPU)} -#endif - }; -}; - -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, - OVInferRequestPerfCountersExceptionTest, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(MulticonfigsTest())), - OVInferRequestPerfCountersExceptionTest::getTestCaseName); } // namespace diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/wait.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/wait.cpp index bde1f8f53735d0..f5918a29cb877a 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/wait.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/wait.cpp @@ -18,14 +18,6 @@ auto configs = []() { }; }; -auto Multiconfigs = []() { - return std::vector{{ov::device::priorities(ov::test::utils::DEVICE_GPU)}}; -}; - -auto Autoconfigs = []() { - return std::vector{{ov::device::priorities(ov::test::utils::DEVICE_GPU)}}; -}; - auto AutoBatchConfigs = []() { return std::vector{ // explicit batch size 4 to avoid fallback to no auto-batching (i.e. 
plain GPU) @@ -40,18 +32,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestWaitTests, ::testing::ValuesIn(configs())), OVInferRequestWaitTests::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, OVInferRequestWaitTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(Multiconfigs())), - OVInferRequestWaitTests::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVInferRequestWaitTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(Autoconfigs())), - OVInferRequestWaitTests::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests, OVInferRequestWaitTests, ::testing::Combine( ::testing::Values(ov::test::utils::DEVICE_BATCH), diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/caching_tests.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/caching_tests.cpp index 5ee4e5a60c40d0..04869fee60ac93 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/caching_tests.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/caching_tests.cpp @@ -51,44 +51,6 @@ namespace { ::testing::Values(std::make_pair(ov::AnyMap{}, "blob"))), CompiledKernelsCacheTest::getTestCaseName); - auto autoConfigs = []() { - return std::vector>{ - std::make_pair(ov::AnyMap{{ov::device::priorities(ov::test::utils::DEVICE_GPU)}}, "blob"), - std::make_pair( - ov::AnyMap{{ov::device::priorities(ov::test::utils::DEVICE_GPU, ov::test::utils::DEVICE_CPU)}}, - "blob"), - std::make_pair( - ov::AnyMap{{ov::device::priorities(ov::test::utils::DEVICE_CPU, ov::test::utils::DEVICE_GPU)}}, - "blob")}; - }; - - INSTANTIATE_TEST_SUITE_P(smoke_Auto_KernelCachingSupportCase_GPU, CompiledKernelsCacheTest, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(autoConfigs())), - CompiledKernelsCacheTest::getTestCaseName); - - const std::vector LoadFromFileConfigs = { - {ov::device::priorities(ov::test::utils::DEVICE_GPU), ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT)}, - {ov::device::priorities(ov::test::utils::DEVICE_GPU), ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY)} - }; - const std::vector TestTargets = - {ov::test::utils::DEVICE_AUTO, - ov::test::utils::DEVICE_MULTI, - }; - - INSTANTIATE_TEST_SUITE_P(smoke_Auto_CachingSupportCase_GPU, CompileModelLoadFromFileTestBase, - ::testing::Combine( - ::testing::ValuesIn(TestTargets), - ::testing::ValuesIn(LoadFromFileConfigs)), - CompileModelLoadFromFileTestBase::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Auto_CachingSupportCase_GPU, - CompileModelLoadFromMemoryTestBase, - ::testing::Combine(::testing::ValuesIn(TestTargets), - ::testing::ValuesIn(LoadFromFileConfigs)), - CompileModelLoadFromMemoryTestBase::getTestCaseName); - const std::vector GPULoadFromFileConfigs = { {ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT)}, {ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY)}, diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/life_time.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/life_time.cpp index 20126c977df938..5eeacde0093d87 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/life_time.cpp +++ 
b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/life_time.cpp @@ -11,23 +11,7 @@ namespace { OVHoldersTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_VirtualPlugin_BehaviorTests, OVHoldersTest, - ::testing::Values("AUTO:GPU", - "MULTI:GPU", - //ov::test::utils::DEVICE_BATCH, + ::testing::Values(//ov::test::utils::DEVICE_BATCH, "HETERO:GPU"), OVHoldersTest::getTestCaseName); - -const std::vector device_names_and_priorities = { - "MULTI:GPU", // GPU via MULTI, - "AUTO:GPU", // GPU via AUTO, -#ifdef ENABLE_INTEL_CPU - "AUTO:GPU,CPU", // GPU+CPU - "AUTO:CPU,GPU", // CPU+GPU - "MULTI:GPU,CPU", // GPU+CPU - "MULTI:CPU,GPU", // CPU+GPU -#endif -}; - INSTANTIATE_TEST_SUITE_P(smoke_VirtualPlugin_BehaviorTests, OVHoldersTestWithConfig, - ::testing::ValuesIn(device_names_and_priorities), - OVHoldersTestWithConfig::getTestCaseName); } // namespace diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp index 34707221f92eda..31c4d4884f05d1 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp @@ -35,49 +35,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, ::testing::ValuesIn(gpu_properties)), OVPropertiesTests::getTestCaseName); -auto auto_multi_properties = []() { - return std::vector{ - {ov::device::priorities(ov::test::utils::DEVICE_GPU), - ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT)}, - {ov::device::priorities(ov::test::utils::DEVICE_GPU), - ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY)}, - {ov::device::priorities(ov::test::utils::DEVICE_GPU), - ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)}, - {ov::device::priorities(ov::test::utils::DEVICE_GPU), ov::intel_auto::device_bind_buffer("YES")}, - {ov::device::priorities(ov::test::utils::DEVICE_GPU), ov::intel_auto::device_bind_buffer("NO")}, - {ov::device::priorities(ov::test::utils::DEVICE_GPU), ov::intel_auto::enable_startup_fallback("YES")}, - {ov::device::priorities(ov::test::utils::DEVICE_GPU), ov::intel_auto::enable_startup_fallback("NO")}}; -}; - -const std::vector multi_properties = {{ov::device::priorities("CPU", "GPU")}, - {ov::device::priorities("CPU(1)", "GPU")}, - {ov::device::priorities("CPU(1)", "GPU(2)")}}; - -const std::vector auto_properties = {{ov::device::priorities("CPU", "GPU")}, - {ov::device::priorities("-CPU", "GPU")}, - {ov::device::priorities("CPU(1)", "GPU")}, - {ov::device::priorities("CPU(1)", "GPU(2)")}, - {ov::device::priorities("CPU", "-GPU")}}; - -INSTANTIATE_TEST_SUITE_P(smoke_AutoMultiBehaviorTests, - OVPropertiesTests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO, - ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(auto_multi_properties())), - OVPropertiesTests::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_AutoBehaviorTests, - OVPropertiesTests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(auto_properties)), - OVPropertiesTests::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_MultiBehaviorTests, - OVPropertiesTests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(multi_properties)), - OVPropertiesTests::getTestCaseName); - const std::vector 
gpu_setcore_properties = { {ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT), ov::hint::num_requests(2), @@ -94,50 +51,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_gpuCompileModelBehaviorTests, ::testing::ValuesIn(gpu_compileModel_properties)), OVSetPropComplieModleGetPropTests::getTestCaseName); -const std::vector multi_setcore_properties = { - {ov::device::priorities(ov::test::utils::DEVICE_GPU), - ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT), - ov::hint::model_priority(ov::hint::Priority::HIGH)}}; -const std::vector multi_compileModel_properties = { - {ov::device::priorities(ov::test::utils::DEVICE_GPU), - ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT), - ov::hint::model_priority(ov::hint::Priority::MEDIUM)}}; - -INSTANTIATE_TEST_SUITE_P(smoke_MultiCompileModelBehaviorTests, - OVSetPropComplieModleGetPropTests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(multi_setcore_properties), - ::testing::ValuesIn(multi_compileModel_properties)), - OVSetPropComplieModleGetPropTests::getTestCaseName); - -const std::vector auto_setcore_properties = { - {ov::device::priorities(ov::test::utils::DEVICE_GPU), - ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT), - ov::hint::model_priority(ov::hint::Priority::HIGH)}, - {ov::device::priorities(ov::test::utils::DEVICE_GPU), - ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY), - ov::hint::model_priority(ov::hint::Priority::HIGH)}, - {ov::device::priorities(ov::test::utils::DEVICE_GPU), - ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT), - ov::hint::model_priority(ov::hint::Priority::HIGH)}, -}; -const std::vector auto_compileModel_properties = { - {ov::device::priorities(ov::test::utils::DEVICE_GPU), - ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY), - ov::hint::model_priority(ov::hint::Priority::MEDIUM)}, - {ov::device::priorities(ov::test::utils::DEVICE_GPU), - ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT), - ov::hint::model_priority(ov::hint::Priority::MEDIUM)}, - {ov::device::priorities(ov::test::utils::DEVICE_GPU), - ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT), - ov::hint::model_priority(ov::hint::Priority::MEDIUM)}}; -INSTANTIATE_TEST_SUITE_P(smoke_AutoCompileModelBehaviorTests, - OVSetPropComplieModleGetPropTests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(auto_setcore_properties), - ::testing::ValuesIn(auto_compileModel_properties)), - OVSetPropComplieModleGetPropTests::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(nightly_OVClassCommon, OVBasicPropertiesTestsP, ::testing::Values(std::make_pair("openvino_intel_gpu_plugin", "GPU"))); @@ -146,9 +59,9 @@ INSTANTIATE_TEST_SUITE_P(nightly_OVClassCommon, // // IE Class GetMetric // // -INSTANTIATE_TEST_SUITE_P(nightly_MultiHeteroAutoBatchOVGetMetricPropsTest, +INSTANTIATE_TEST_SUITE_P(nightly_HeteroAutoBatchOVGetMetricPropsTest, OVGetMetricPropsTest, - ::testing::Values("MULTI", "HETERO", "AUTO", "BATCH")); + ::testing::Values("HETERO", "BATCH")); INSTANTIATE_TEST_SUITE_P(nightly_gpuOVGetMetricPropsTest, OVGetMetricPropsTest, ::testing::Values("GPU")); @@ -156,14 +69,6 @@ INSTANTIATE_TEST_SUITE_P(nightly_OVGetAvailableDevicesPropsTest, OVGetAvailableDevicesPropsTest, ::testing::Values("GPU")); -INSTANTIATE_TEST_SUITE_P( - smoke_MultiAutoOVCheckSetSupportedRWMetricsPropsTests, - OVCheckSetSupportedRWMetricsPropsTests, - 
::testing::Combine(::testing::Values("MULTI:GPU", "AUTO:GPU"), - ::testing::ValuesIn(OVCheckSetSupportedRWMetricsPropsTests::getRWMandatoryPropertiesValues( - {ov::hint::model_priority.name(), ov::log::level.name()}))), - OVCheckSetSupportedRWMetricsPropsTests::getTestCaseName); - INSTANTIATE_TEST_SUITE_P( smoke_OVCheckGetSupportedROMetricsPropsTests, OVCheckGetSupportedROMetricsPropsTests, @@ -172,9 +77,9 @@ INSTANTIATE_TEST_SUITE_P( { ov::device::uuid.name(), ov::device::luid.name(), ov::device::gops.name(), ov::device::type.name(), ov::device::full_name.name() }))), OVCheckGetSupportedROMetricsPropsTests::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(nightly_MultiHeteroAutoBatchOVCheckChangePropComplieModleGetPropTests_DEVICE_ID, +INSTANTIATE_TEST_SUITE_P(nightly_HeteroAutoBatchOVCheckChangePropComplieModleGetPropTests_DEVICE_ID, OVCheckChangePropComplieModleGetPropTests_DEVICE_ID, - ::testing::Combine(::testing::Values("MULTI", "HETERO", "AUTO", "BATCH"), + ::testing::Combine(::testing::Values("HETERO", "BATCH"), ::testing::Values(ov::AnyMap({}))), OVCheckChangePropComplieModleGetPropTests_DEVICE_ID::getTestCaseName); @@ -201,7 +106,7 @@ auto multiConfigs = []() { INSTANTIATE_TEST_SUITE_P(smoke_OVClassSetDevicePriorityConfigPropsTest, OVClassSetDevicePriorityConfigPropsTest, - ::testing::Combine(::testing::Values("MULTI", "AUTO", "HETERO"), + ::testing::Combine(::testing::Values("HETERO"), ::testing::ValuesIn(multiConfigs()))); // // GPU specific metrics diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/remote.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/remote.cpp index 274f501c7e1456..4ec0160690d4cb 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/remote.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/remote.cpp @@ -16,10 +16,6 @@ std::vector> generate_remote_params() { return {}; } -auto MultiConfigs = []() { - return std::vector{{ov::device::priorities(ov::test::utils::DEVICE_GPU)}}; -}; - auto AutoBatchConfigs = []() { return std::vector{ // explicit batch size 4 to avoid fallback to no auto-batching (i.e. 
plain GPU) @@ -36,14 +32,6 @@ INSTANTIATE_TEST_SUITE_P(DISABLED_smoke_BehaviorTests, OVRemoteTest, ::testing::ValuesIn(generate_remote_params())), OVRemoteTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(DISABLED_smoke_Multi_BehaviorTests, OVRemoteTest, - ::testing::Combine( - ::testing::Values(ngraph::element::f32), - ::testing::Values(::ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(MultiConfigs()), - ::testing::ValuesIn(generate_remote_params())), - OVRemoteTest::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(DISABLED_smoke_AutoBatch_BehaviorTests, OVRemoteTest, ::testing::Combine( ::testing::Values(ngraph::element::f32), diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/plugin/caching_tests.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/plugin/caching_tests.cpp index 7a0da8fb9e339c..5f3e4120047a82 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/plugin/caching_tests.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/plugin/caching_tests.cpp @@ -48,25 +48,4 @@ namespace { ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::Values(std::make_pair(std::map(), "blob"))), LoadNetworkCompiledKernelsCacheTest::getTestCaseName); - - typedef std::map conftype; - auto autoConfigs = []() { - return std::vector>{ - std::make_pair(conftype{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, - ov::test::utils::DEVICE_GPU}}, - "blob"), - std::make_pair(conftype{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, - (std::string(ov::test::utils::DEVICE_GPU) + "," + ov::test::utils::DEVICE_CPU)}}, - "blob"), - std::make_pair(conftype{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, - (std::string(ov::test::utils::DEVICE_CPU) + "," + ov::test::utils::DEVICE_GPU)}}, - "blob")}; - }; - - INSTANTIATE_TEST_SUITE_P(smoke_Auto_KernelCachingSupportCase_GPU, LoadNetworkCompiledKernelsCacheTest, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(autoConfigs())), - LoadNetworkCompiledKernelsCacheTest::getTestCaseName); - } // namespace diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/plugin/configuration_tests.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/plugin/configuration_tests.cpp index 221c1fde058519..623246fafe0b1d 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/plugin/configuration_tests.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/plugin/configuration_tests.cpp @@ -30,63 +30,6 @@ namespace { {{InferenceEngine::PluginConfigParams::KEY_DEVICE_ID, "DEVICE_UNKNOWN"}}}; }; - auto multiinconfigs = []() { - return std::vector>{ - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_GPU}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, "DOESN'T EXIST"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_GPU}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "-1"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_GPU}, - {InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, "ON"}}}; - }; - - auto autoinconfigs = []() { - return std::vector>{ - 
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_GPU}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, "DOESN'T EXIST"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_GPU}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "-1"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_GPU}, - {InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, "ON"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_GPU}, - {InferenceEngine::PluginConfigParams::KEY_CONFIG_FILE, "unknown_file"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_GPU}, - {InferenceEngine::PluginConfigParams::KEY_DEVICE_ID, "DEVICE_UNKNOWN"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_LOG_LEVEL, "NAN"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_GPU}, - {InferenceEngine::PluginConfigParams::KEY_MODEL_PRIORITY, "-1"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_GPU}, - {InferenceEngine::PluginConfigParams::KEY_MODEL_PRIORITY, "ABC"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, - ov::test::utils::DEVICE_GPU + std::string(",") + ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, "DOESN'T EXIST"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, - ov::test::utils::DEVICE_GPU + std::string(",") + ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "-1"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, - ov::test::utils::DEVICE_GPU + std::string(",") + ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, "ON"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, - ov::test::utils::DEVICE_GPU + std::string(",") + ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_CONFIG_FILE, "unknown_file"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, - ov::test::utils::DEVICE_GPU + std::string(",") + ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_DEVICE_ID, "DEVICE_UNKNOWN"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, - ov::test::utils::DEVICE_GPU + std::string(",") + ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_LOG_LEVEL, "NAN"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, - ov::test::utils::DEVICE_GPU + std::string(",") + ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_MODEL_PRIORITY, "-1"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, - ov::test::utils::DEVICE_GPU + std::string(",") + ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_MODEL_PRIORITY, "ABC"}}}; - }; - auto auto_batch_inconfigs = []() { return std::vector>{ 
{{CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG), ov::test::utils::DEVICE_GPU}, @@ -110,19 +53,6 @@ namespace { ::testing::ValuesIn(inconfigs())), IncorrectConfigTests::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, IncorrectConfigTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(multiinconfigs())), - IncorrectConfigTests::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, IncorrectConfigTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(autoinconfigs())), - IncorrectConfigTests::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests, IncorrectConfigTests, ::testing::Combine( ::testing::Values(ov::test::utils::DEVICE_BATCH), @@ -153,17 +83,6 @@ namespace { ::testing::ValuesIn(inconfigs())), IncorrectConfigAPITests::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, IncorrectConfigAPITests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(multiinconfigs())), - IncorrectConfigAPITests::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, IncorrectConfigAPITests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(autoinconfigs())), - IncorrectConfigAPITests::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests, IncorrectConfigAPITests, ::testing::Combine( ::testing::Values(ov::test::utils::DEVICE_BATCH), @@ -190,41 +109,10 @@ namespace { {InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, InferenceEngine::PluginConfigParams::YES}, }}; - auto auto_multi_prop_config = []() { - return std::vector>{ - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_GPU}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, - InferenceEngine::PluginConfigParams::THROUGHPUT}, - {InferenceEngine::PluginConfigParams::KEY_MODEL_PRIORITY, - InferenceEngine::PluginConfigParams::MODEL_PRIORITY_MED}}}; - }; - - auto auto_multi_loadNetWork_config = []() { - return std::vector>{ - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_GPU}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT}, - {InferenceEngine::PluginConfigParams::KEY_MODEL_PRIORITY, - InferenceEngine::PluginConfigParams::MODEL_PRIORITY_HIGH}}}; - }; - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, SetPropLoadNetWorkGetPropTests, ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::ValuesIn(gpu_prop_config), ::testing::ValuesIn(gpu_loadNetWork_config)), SetPropLoadNetWorkGetPropTests::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, - SetPropLoadNetWorkGetPropTests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(auto_multi_prop_config()), - ::testing::ValuesIn(auto_multi_loadNetWork_config())), - SetPropLoadNetWorkGetPropTests::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, - SetPropLoadNetWorkGetPropTests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(auto_multi_prop_config()), - ::testing::ValuesIn(auto_multi_loadNetWork_config())), - SetPropLoadNetWorkGetPropTests::getTestCaseName); } // namespace diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/plugin/core_integration.cpp 
b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/plugin/core_integration.cpp index 437df754176eab..ff85685d245db7 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/plugin/core_integration.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/plugin/core_integration.cpp @@ -35,12 +35,12 @@ INSTANTIATE_TEST_SUITE_P( INSTANTIATE_TEST_SUITE_P( nightly_IEClassGetMetricTest, IEClassGetMetricTest_SUPPORTED_CONFIG_KEYS, - ::testing::Values("GPU", "MULTI", "HETERO", "AUTO", "BATCH") + ::testing::Values("GPU", "HETERO", "BATCH") ); INSTANTIATE_TEST_SUITE_P( nightly_IEClassGetMetricTest, IEClassGetMetricTest_SUPPORTED_METRICS, - ::testing::Values("GPU", "MULTI", "HETERO", "AUTO", "BATCH") + ::testing::Values("GPU", "HETERO", "BATCH") ); INSTANTIATE_TEST_SUITE_P( @@ -50,7 +50,7 @@ INSTANTIATE_TEST_SUITE_P( INSTANTIATE_TEST_SUITE_P( nightly_IEClassGetMetricTest, IEClassGetMetricTest_FULL_DEVICE_NAME, - ::testing::Values("GPU", "MULTI", "HETERO", "AUTO", "BATCH") + ::testing::Values("GPU", "HETERO", "BATCH") ); INSTANTIATE_TEST_SUITE_P( @@ -80,12 +80,12 @@ INSTANTIATE_TEST_SUITE_P( INSTANTIATE_TEST_SUITE_P( nightly_IEClassGetMetricTest, IEClassGetMetricTest_ThrowUnsupported, - ::testing::Values("GPU", "MULTI", "HETERO", "AUTO", "BATCH") + ::testing::Values("GPU", "HETERO", "BATCH") ); INSTANTIATE_TEST_SUITE_P( nightly_IEClassGetConfigTest, IEClassGetConfigTest_ThrowUnsupported, - ::testing::Values("GPU", "MULTI", "HETERO", "AUTO", "BATCH") + ::testing::Values("GPU", "HETERO", "BATCH") ); INSTANTIATE_TEST_SUITE_P( diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/plugin/set_preprocess.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/plugin/set_preprocess.cpp index 8eba235a4731d4..2ffabe771b656f 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/plugin/set_preprocess.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/plugin/set_preprocess.cpp @@ -22,18 +22,6 @@ namespace { }; }; - auto multiConfigs = []() { - return std::vector>{ - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_GPU}}}; - }; - - auto autoConfigs = []() { - return std::vector>{ - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_GPU}, - {InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, - ov::test::utils::DEVICE_GPU + std::string(",") + ov::test::utils::DEVICE_CPU}}}; - }; - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, InferRequestPreprocessTest, ::testing::Combine( ::testing::ValuesIn(netPrecisions), @@ -41,20 +29,6 @@ namespace { ::testing::ValuesIn(configs())), InferRequestPreprocessTest::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, InferRequestPreprocessTest, - ::testing::Combine( - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(multiConfigs())), - InferRequestPreprocessTest::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, InferRequestPreprocessTest, - ::testing::Combine( - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(autoConfigs())), - InferRequestPreprocessTest::getTestCaseName); - const std::vector ioPrecisions = { InferenceEngine::Precision::FP32, InferenceEngine::Precision::U8 diff --git 
a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/plugin/version.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/plugin/version.cpp index 6f8755b10263f2..00fd89116bb52b 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/plugin/version.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/plugin/version.cpp @@ -10,14 +10,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, VersionTest, ::testing::Values(ov::test::utils::DEVICE_GPU), VersionTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, VersionTest, - ::testing::Values(ov::test::utils::DEVICE_MULTI), - VersionTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, VersionTest, - ::testing::Values(ov::test::utils::DEVICE_AUTO), - VersionTest::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests, VersionTest, ::testing::Values(ov::test::utils::DEVICE_HETERO), VersionTest::getTestCaseName); diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/multi/gpu_remote_blob_tests.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/multi/gpu_remote_blob_tests.cpp deleted file mode 100644 index 1b8fdb69dc9e72..00000000000000 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/multi/gpu_remote_blob_tests.cpp +++ /dev/null @@ -1,154 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include -#include "gpu/gpu_config.hpp" -#include "multi/multi_remote_blob_tests.hpp" -#include "multi/multi_remote_blob_multidevice_test.hpp" -#include "common_test_utils/test_constants.hpp" -#include - -using MultiDevice_Bind_oversubsciption_test = MultiDevice_Test; - -auto device_names_and_support_for_remote_blobs = []() { - return std::vector{ - {{GPU}, true, {}}, // GPU via MULTI, - {{"GPU.0"}, true, {}}, // GPU.0 via MULTI, - {{GPU}, true, {ov::intel_auto::device_bind_buffer(true)}}, // GPU via MULTI, - {{"GPU.0"}, true, {ov::intel_auto::device_bind_buffer(true)}}, // GPU.0 via MULTI, -#ifdef ENABLE_INTEL_CPU - {{GPU, CPU}, true, {}}, // GPU+CPU - {{CPU, GPU}, true, {}}, // CPU+GPU - {{GPU, CPU}, true, {ov::intel_auto::device_bind_buffer(true)}}, // GPU+CPU -#endif - }; -}; - -INSTANTIATE_TEST_SUITE_P(smoke_Multi_RemoteBlobGPU, - MultiDevice_SupportTest, - ::testing::ValuesIn(device_names_and_support_for_remote_blobs()), - MultiDevice_SupportTest::getTestCaseName); - -TEST_P(MultiDevice_Test, cannotInferRemoteBlobIfNotInitializedForDevice) { - InferenceEngine::CNNNetwork net(fn_ptr); - auto ie = PluginCache::get().ie(); - // load a network to the GPU to make sure we have a remote context - auto exec_net = ie->LoadNetwork(net, GPU); - auto ctx = exec_net.GetContext(); - - const InferenceEngine::ConstInputsDataMap inputInfo = exec_net.GetInputsInfo(); - auto& first_input_name = inputInfo.begin()->first; - auto& first_input = inputInfo.begin()->second; - auto rblob = InferenceEngine::make_shared_blob(first_input->getTensorDesc(), ctx); - rblob->allocate(); - - std::map configs; - for (auto&& value : _properties) { - configs.emplace(value.first, value.second.as()); - } - - InferenceEngine::ExecutableNetwork exec_net_multi; - try { - exec_net_multi = ie->LoadNetwork(net, device_names, configs); - } catch(...) { - // device is unavailable (e.g. for the "second GPU" test) or other (e.g. 
env) issues not related to the test - return; - } - InferenceEngine::InferRequest req = exec_net_multi.CreateInferRequest(); - ASSERT_TRUE(req); - ASSERT_NO_THROW(req.SetBlob(first_input_name, rblob)); - ASSERT_NO_THROW(req.StartAsync()); - // cpu can consume remote buffer - auto exe_device = exec_net_multi.GetConfig("EXECUTION_DEVICES").as>(); - if (exe_device.size() == 1 && exe_device[0] == "CPU") - ASSERT_NO_THROW(req.Wait(InferenceEngine::InferRequest::WaitMode::RESULT_READY)); - else - ASSERT_THROW(req.Wait(InferenceEngine::InferRequest::WaitMode::RESULT_READY), InferenceEngine::Exception); -} - -TEST_P(MultiDevice_Bind_oversubsciption_test, oversubsciptionOfInferRequest) { - InferenceEngine::CNNNetwork net(fn_ptr); - auto ie = PluginCache::get().ie(); - // load a network to the GPU to make sure we have a remote context - auto exec_net = ie->LoadNetwork(net, GPU); - auto ctx = exec_net.GetContext(); - - const InferenceEngine::ConstInputsDataMap inputInfo = exec_net.GetInputsInfo(); - auto& first_input = inputInfo.begin()->second; - auto rblob = InferenceEngine::make_shared_blob(first_input->getTensorDesc(), ctx); - rblob->allocate(); - - std::map configs; - for (auto&& value : _properties) { - configs.emplace(value.first, value.second.as()); - } - - InferenceEngine::ExecutableNetwork exec_net_multi; - try { - exec_net_multi = ie->LoadNetwork(net, device_names, configs); - } catch(...) { - // device is unavailable (e.g. for the "second GPU" test) or other (e.g. env) issues not related to the test - return; - } - - unsigned int optimalNum = 0; - try { - optimalNum = exec_net_multi.GetMetric(METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)).as(); - } catch (...) { - std::cout << "ExecutableNetwork getMetric failed" << std::endl; - return; - } - - // test binder mode to throw exception when oversubsciption of infer requests - InferenceEngine::InferRequest req; - for (size_t i = 0; i < optimalNum; i++) { - req = exec_net_multi.CreateInferRequest(); - } - ASSERT_ANY_THROW(req = exec_net_multi.CreateInferRequest()); -} - -auto device_names_and_support_for_remote_blobs2 = []() { - return std::vector{ - // another GPU (the test will test its presence), different OCL contexts - // use GPU.0 as reference, expect auto to throw exception on other hardware contexts -#ifdef ENABLE_INTEL_CPU - //{{CPU}, {}}, // stand-alone CPU via MULTI (no GPU), no OCL context - {{"GPU.1", CPU}, - {ov::intel_auto::device_bind_buffer(true)}}, - {{"GPU.1", CPU}, - {ov::intel_auto::device_bind_buffer(false)}}, -#endif - {{"GPU.1"}, {}}, - {{"GPU.1"}, {ov::intel_auto::device_bind_buffer(true)}}, - }; -}; - -INSTANTIATE_TEST_SUITE_P(smoke_Multi_RemoteBlobInitializedWithoutGPU, - MultiDevice_Test, - ::testing::ValuesIn(device_names_and_support_for_remote_blobs2()), - MultiDevice_Test::getTestCaseName); - -auto multi_bind_oversubsciption_test = []() { - return std::vector{{{GPU}, {ov::intel_auto::device_bind_buffer(true)}}}; -}; - -INSTANTIATE_TEST_SUITE_P(smoke_Multi_RemoteBlobOversubsciptionInferRequest, - MultiDevice_Bind_oversubsciption_test, - ::testing::ValuesIn(multi_bind_oversubsciption_test()), - MultiDevice_Test::getTestCaseName); - -auto multi_device_names_and_support_for_remote_blobs = []() { - return std::vector{ -#ifdef ENABLE_INTEL_CPU - {"GPU.0", CPU}, - {"GPU.0", "GPU.1", CPU}, // another GPU (the test will test its presence), different OCL contexts -#endif - {"GPU.0", "GPU.1"}}; -}; - -INSTANTIATE_TEST_SUITE_P(smoke_Multi_RemoteBlobInitializedWithoutGPU, - MultiDeviceMultipleGPU_Test, - 
::testing::ValuesIn(multi_device_names_and_support_for_remote_blobs()), - MultiDeviceMultipleGPU_Test::getTestCaseName); diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/skip_tests_config.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/skip_tests_config.cpp index 677cc9a2c217e4..798282680dbccd 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/skip_tests_config.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/skip_tests_config.cpp @@ -37,13 +37,9 @@ std::vector disabledTestPatterns() { // Not allowed dynamic loop tests on GPU R"(.*smoke_StaticShapeLoop_dynamic_exit.*)", - // Not expected behavior - R"(.*Behavior.*(Multi|Auto).*InferRequestSetBlobByType.*Batched.*)", - R"(.*(Multi|Auto).*Behavior.*InferRequestIOBBlobTest.*canProcessDeallocatedOutputBlobAfterGetAndSetBlob.*)", // TODO Issue 100145 R"(.*Behavior.*OVInferRequestIOTensorTest.*canInferAfterIOBlobReallocation.*)", R"(.*Behavior.*OVInferRequestDynamicTests.*InferUpperBoundNetworkAfterIOTensorsReshaping.*)", - R"(.*(Auto|Multi).*Behavior.*IncorrectConfigTests.*CanNotLoadNetworkWithIncorrectConfig.*)", // Not implemented yet: R"(.*Behavior.*ExecutableNetworkBaseTest.*canSetConfigToExecNet.*)", // TODO: Issue 67408 @@ -68,8 +64,6 @@ std::vector disabledTestPatterns() { R"(.*smoke.*BehaviorTests.*DynamicInputToDynamicOutput.*)", // Issue: 76197 R"(.*registerPluginsXMLUnicodePath.*)", - // Issue: CVS-76980 - R"(.*smoke_Auto_BehaviorTests.*InferDynamicNetwork/.*)", // Issue: CVS-88667 - Need to verify hetero interoperability R"(.*nightly_OVClassHeteroExecutableNetworlGetMetricTest.*SUPPORTED_(CONFIG_KEYS|METRICS).*)", // TODO: Issue: 89555 @@ -89,19 +83,13 @@ std::vector disabledTestPatterns() { R"(.*smoke_ConvolutionLayerGPUTest_dynamic1DSymPad.*)", // Looks like the test is targeting CPU plugin and doesn't respect that execution graph may vary from plugin to plugin R"(.*ExecGraphSerializationTest.*)", - // TODO: support getconfig in auto/multi CVS-104942 - // TODO: move auto/multi cases to dedicated unit tests - R"(.*(Auto|Multi).*SetPropLoadNetWorkGetPropTests.*)", // unsupported metrics - R"(.*nightly_MultiHeteroAutoBatchOVGetMetricPropsTest.*OVGetMetricPropsTest.*(FULL_DEVICE_NAME_with_DEVICE_ID|AVAILABLE_DEVICES|DEVICE_UUID|OPTIMIZATION_CAPABILITIES|MAX_BATCH_SIZE|DEVICE_GOPS|DEVICE_TYPE|RANGE_FOR_ASYNC_INFER_REQUESTS|RANGE_FOR_STREAMS).*)", + R"(.*nightly_HeteroAutoBatchOVGetMetricPropsTest.*OVGetMetricPropsTest.*(FULL_DEVICE_NAME_with_DEVICE_ID|AVAILABLE_DEVICES|DEVICE_UUID|OPTIMIZATION_CAPABILITIES|MAX_BATCH_SIZE|DEVICE_GOPS|DEVICE_TYPE|RANGE_FOR_ASYNC_INFER_REQUESTS|RANGE_FOR_STREAMS).*)", // Issue: 111437 R"(.*smoke_Deconv_2D_Dynamic_.*FP32/DeconvolutionLayerGPUTest.CompareWithRefs.*)", R"(.*smoke_GroupDeconv_2D_Dynamic_.*FP32/GroupDeconvolutionLayerGPUTest.CompareWithRefs.*)", // Issue: 111440 R"(.*smoke_set1/GatherElementsGPUTest.CompareWithRefs.*)", - // New plugin API doesn't support changes of pre-processing - R"(.*(Auto|Multi).*InferRequestPreprocessTest.*SetPreProcessToInputInfo.*)", - R"(.*(Auto|Multi).*InferRequestPreprocessTest.*SetPreProcessToInferRequest.*)", // New plugin work with tensors, so it means that blob in old API can have different pointers R"(.*InferRequestIOBBlobTest.*secondCallGetInputDoNotReAllocateData.*)", R"(.*InferRequestIOBBlobTest.*secondCallGetOutputDoNotReAllocateData.*)", From 51afe489aabfa41ab43b2a419c4caf61e8ac4d27 Mon Sep 17 00:00:00 2001 From: Ilya Churaev Date: Sun, 8 Oct 2023 17:16:24 +0400 
Subject: [PATCH 095/257] Moved migrated classes to ov::test namespace (#20226) * Moved migrated classes to ov::test namespace * Revert redundant changes * Fixed build --- .../subgraph_tests/conv_eltwise_fusion.cpp | 356 +++++++++--------- .../subgraph_tests/parameter_result.cpp | 29 +- .../subgraph_tests/preprocess.cpp | 17 +- .../subgraph_tests/simple_if.cpp | 198 ++++------ .../subgraph_tests/split_concat_memory.cpp | 32 +- .../src/param_result_custom_blob.cpp | 37 +- .../subgraph_tests/parameter_result.cpp | 26 +- .../subgraph_tests/preprocess.cpp | 16 +- .../subgraph_tests/conv_eltwise_fusion.hpp | 3 +- .../subgraph_tests/parameter_result.hpp | 8 +- .../subgraph_tests/split_concat_memory.hpp | 8 +- .../subgraph/conv_eltwise_fusion.hpp | 44 ++- .../subgraph/parameter_result.hpp | 33 +- .../subgraph/preprocess.hpp | 24 +- .../subgraph/simple_if.hpp | 34 +- .../subgraph/split_concat_memory.hpp | 13 +- .../src/subgraph/conv_eltwise_fusion.cpp | 115 ++++-- .../src/subgraph/parameter_result.cpp | 15 +- .../src/subgraph/preprocess.cpp | 15 +- .../src/subgraph/simple_if.cpp | 58 +-- .../src/subgraph/split_concat_memory.cpp | 29 +- 21 files changed, 586 insertions(+), 524 deletions(-) diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/conv_eltwise_fusion.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/conv_eltwise_fusion.cpp index 490d6dc70fa130..2e23919cfd6c4c 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/conv_eltwise_fusion.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/conv_eltwise_fusion.cpp @@ -2,183 +2,195 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "subgraph_tests/conv_eltwise_fusion.hpp" + #include -#include "subgraph_tests/conv_eltwise_fusion.hpp" #include "common_test_utils/test_constants.hpp" using namespace SubgraphTestsDefinitions; namespace { - const std::vector types{ngraph::element::f32, ngraph::element::f16}; - const std::vector eltwise_types{ngraph::opset4::Multiply::get_type_info_static(), - /* ngraph::opset4::Add::get_type_info_static() */}; - - INSTANTIATE_TEST_SUITE_P(smoke_Convolution_1D, ConvEltwiseFusion, - ::testing::Combine( - ::testing::Values(std::tuple{ngraph::opset4::Convolution::get_type_info_static(), 2}), - ::testing::ValuesIn(eltwise_types), - ::testing::Values(false), - ::testing::Values(ngraph::Shape{1, 8, 64}), - ::testing::Values(ngraph::Shape{64, 8, 1}), - ::testing::Values(ngraph::Shape{64, 1}), - ::testing::ValuesIn(types), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ConvEltwiseFusion::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_GroupConvolution_1D, ConvEltwiseFusion, - ::testing::Combine( - ::testing::Values(std::tuple{ngraph::opset4::GroupConvolution::get_type_info_static(), 2}), - ::testing::ValuesIn(eltwise_types), - ::testing::Values(false), - ::testing::Values(ngraph::Shape{1, 12, 5}), - ::testing::Values(ngraph::Shape{4, 5, 3, 2}), - ::testing::Values(ngraph::Shape{20, 1}), - ::testing::ValuesIn(types), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ConvEltwiseFusion::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_ConvolutionBackpropData_1D, ConvEltwiseFusion, - ::testing::Combine( - ::testing::Combine( - ::testing::Values(ngraph::opset4::ConvolutionBackpropData::get_type_info_static()), - ::testing::ValuesIn(std::vector{2, 3})), - ::testing::ValuesIn(eltwise_types), - ::testing::Values(false), - ::testing::Values(ngraph::Shape{1, 12, 64}), - 
::testing::Values(ngraph::Shape{12, 20, 1}), - ::testing::Values(ngraph::Shape{20, 1}), - ::testing::ValuesIn(types), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ConvEltwiseFusion::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_GroupConvolutionBackpropData_1D, ConvEltwiseFusion, - ::testing::Combine( - ::testing::Combine( - ::testing::Values(ngraph::opset4::GroupConvolutionBackpropData::get_type_info_static()), - ::testing::ValuesIn(std::vector{2, 3})), - ::testing::ValuesIn(eltwise_types), - ::testing::Values(false), - ::testing::Values(ngraph::Shape{1, 12, 64}), - ::testing::Values(ngraph::Shape{4, 3, 5, 1}), - ::testing::Values(ngraph::Shape{1, 20, 1}), - ::testing::ValuesIn(types), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ConvEltwiseFusion::getTestCaseName); - - const std::vector const_shapes_2d{ - {}, - {1, 1}, - {1, 1, 1}, - {20, 1, 1}, - {1, 1, 1, 1} - }; - - INSTANTIATE_TEST_SUITE_P(smoke_Convolution_2D, ConvEltwiseFusion, - ::testing::Combine( - ::testing::Values(std::tuple{ngraph::opset4::Convolution::get_type_info_static(), 2}), - ::testing::ValuesIn(eltwise_types), - ::testing::Values(false), - ::testing::Values(ngraph::Shape{1, 3, 64, 64}), - ::testing::Values(ngraph::Shape{20, 3, 1, 1}), - ::testing::ValuesIn(const_shapes_2d), - ::testing::ValuesIn(types), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ConvEltwiseFusion::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_GroupConvolution_2D, ConvEltwiseFusion, - ::testing::Combine( - ::testing::Values(std::tuple{ngraph::opset4::GroupConvolution::get_type_info_static(), 2}), - ::testing::ValuesIn(eltwise_types), - ::testing::Values(false), - ::testing::Values(ngraph::Shape{1, 12, 64, 64}), - ::testing::Values(ngraph::Shape{4, 5, 3, 1, 2}), - ::testing::ValuesIn(const_shapes_2d), - ::testing::ValuesIn(types), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ConvEltwiseFusion::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_ConvolutionBackpropData_2D, ConvEltwiseFusion, - ::testing::Combine( - ::testing::Combine( - ::testing::Values(ngraph::opset4::ConvolutionBackpropData::get_type_info_static()), - ::testing::ValuesIn(std::vector{2, 3})), - ::testing::ValuesIn(eltwise_types), - ::testing::Values(false), - ::testing::Values(ngraph::Shape{1, 3, 64, 64}), - ::testing::Values(ngraph::Shape{3, 20, 3, 3}), - ::testing::ValuesIn(const_shapes_2d), - ::testing::ValuesIn(types), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ConvEltwiseFusion::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_GroupConvolutionBackpropData_2D, ConvEltwiseFusion, - ::testing::Combine( - ::testing::Combine( - ::testing::Values(ngraph::opset4::GroupConvolutionBackpropData::get_type_info_static()), - ::testing::ValuesIn(std::vector{2, 3})), - ::testing::ValuesIn(eltwise_types), - ::testing::Values(false), - ::testing::Values(ngraph::Shape{1, 12, 64, 64}), - ::testing::Values(ngraph::Shape{4, 3, 5, 1, 1}), - ::testing::ValuesIn(const_shapes_2d), - ::testing::ValuesIn(types), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ConvEltwiseFusion::getTestCaseName); - - const std::vector neg_const_shapes_2d{ - {1, 1, 1, 1, 1}, /* Broadcast output */ - {3}, {3, 1}, {3, 1, 1, 1} - }; - - INSTANTIATE_TEST_SUITE_P(smoke_Convolution_2D_Negative, ConvEltwiseFusion, - ::testing::Combine( - ::testing::Values(std::tuple{ngraph::opset4::Convolution::get_type_info_static(), 2}), - ::testing::ValuesIn(eltwise_types), - ::testing::Values(true), - ::testing::Values(ngraph::Shape{1, 3, 3, 3}), - 
::testing::Values(ngraph::Shape{3, 3, 1, 1}), - ::testing::ValuesIn(neg_const_shapes_2d), - ::testing::ValuesIn(types), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ConvEltwiseFusion::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_GroupConvolution_2D_Negative, ConvEltwiseFusion, - ::testing::Combine( - ::testing::Values( - std::tuple{ngraph::opset4::GroupConvolution::get_type_info_static(), 2}), - ::testing::ValuesIn(eltwise_types), - ::testing::Values(true), - ::testing::Values(ngraph::Shape{1, 12, 3, 3}), - ::testing::Values(ngraph::Shape{4, 5, 3, 1, 1}), - ::testing::ValuesIn(neg_const_shapes_2d), - ::testing::ValuesIn(types), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ConvEltwiseFusion::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_ConvolutionBackpropData_2D_Negative, ConvEltwiseFusion, - ::testing::Combine( - ::testing::Values( - std::tuple{ngraph::opset4::ConvolutionBackpropData::get_type_info_static(), 2}), - ::testing::ValuesIn(eltwise_types), - ::testing::Values(true), - ::testing::Values(ngraph::Shape{1, 12, 3, 3}), - ::testing::Values(ngraph::Shape{12, 3, 1, 1}), - ::testing::ValuesIn(neg_const_shapes_2d), - ::testing::ValuesIn(types), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ConvEltwiseFusion::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_GroupConvolutionBackpropData_2D_Negative, ConvEltwiseFusion, - ::testing::Combine( - ::testing::Values( - std::tuple{ngraph::opset4::GroupConvolutionBackpropData::get_type_info_static(), 2}), - ::testing::ValuesIn(eltwise_types), - ::testing::Values(true), - ::testing::Values(ngraph::Shape{1, 12, 3, 3}), - ::testing::Values(ngraph::Shape{4, 3, 5, 1, 1}), - ::testing::ValuesIn(neg_const_shapes_2d), - ::testing::ValuesIn(types), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ConvEltwiseFusion::getTestCaseName); +const std::vector types{ov::element::f32, ov::element::f16}; +const std::vector eltwise_types{ov::op::v1::Multiply::get_type_info_static(), + /* ov::opset4::Add::get_type_info_static() */}; + +INSTANTIATE_TEST_SUITE_P(smoke_Convolution_1D, + ConvEltwiseFusion, + ::testing::Combine(::testing::Values(std::tuple{ + ov::op::v1::Convolution::get_type_info_static(), + 2}), + ::testing::ValuesIn(eltwise_types), + ::testing::Values(false), + ::testing::Values(ov::Shape{1, 8, 64}), + ::testing::Values(ov::Shape{64, 8, 1}), + ::testing::Values(ov::Shape{64, 1}), + ::testing::ValuesIn(types), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ConvEltwiseFusion::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_GroupConvolution_1D, + ConvEltwiseFusion, + ::testing::Combine(::testing::Values(std::tuple{ + ov::op::v1::GroupConvolution::get_type_info_static(), + 2}), + ::testing::ValuesIn(eltwise_types), + ::testing::Values(false), + ::testing::Values(ov::Shape{1, 12, 5}), + ::testing::Values(ov::Shape{4, 5, 3, 2}), + ::testing::Values(ov::Shape{20, 1}), + ::testing::ValuesIn(types), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ConvEltwiseFusion::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P( + smoke_ConvolutionBackpropData_1D, + ConvEltwiseFusion, + ::testing::Combine( + ::testing::Combine(::testing::Values(ov::op::v1::ConvolutionBackpropData::get_type_info_static()), + ::testing::ValuesIn(std::vector{2, 3})), + ::testing::ValuesIn(eltwise_types), + ::testing::Values(false), + ::testing::Values(ov::Shape{1, 12, 64}), + ::testing::Values(ov::Shape{12, 20, 1}), + ::testing::Values(ov::Shape{20, 1}), + ::testing::ValuesIn(types), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + 
ConvEltwiseFusion::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P( + smoke_GroupConvolutionBackpropData_1D, + ConvEltwiseFusion, + ::testing::Combine( + ::testing::Combine(::testing::Values(ov::op::v1::GroupConvolutionBackpropData::get_type_info_static()), + ::testing::ValuesIn(std::vector{2, 3})), + ::testing::ValuesIn(eltwise_types), + ::testing::Values(false), + ::testing::Values(ov::Shape{1, 12, 64}), + ::testing::Values(ov::Shape{4, 3, 5, 1}), + ::testing::Values(ov::Shape{1, 20, 1}), + ::testing::ValuesIn(types), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ConvEltwiseFusion::getTestCaseName); + +const std::vector const_shapes_2d{{}, {1, 1}, {1, 1, 1}, {20, 1, 1}, {1, 1, 1, 1}}; + +INSTANTIATE_TEST_SUITE_P(smoke_Convolution_2D, + ConvEltwiseFusion, + ::testing::Combine(::testing::Values(std::tuple{ + ov::op::v1::Convolution::get_type_info_static(), + 2}), + ::testing::ValuesIn(eltwise_types), + ::testing::Values(false), + ::testing::Values(ov::Shape{1, 3, 64, 64}), + ::testing::Values(ov::Shape{20, 3, 1, 1}), + ::testing::ValuesIn(const_shapes_2d), + ::testing::ValuesIn(types), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ConvEltwiseFusion::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_GroupConvolution_2D, + ConvEltwiseFusion, + ::testing::Combine(::testing::Values(std::tuple{ + ov::op::v1::GroupConvolution::get_type_info_static(), + 2}), + ::testing::ValuesIn(eltwise_types), + ::testing::Values(false), + ::testing::Values(ov::Shape{1, 12, 64, 64}), + ::testing::Values(ov::Shape{4, 5, 3, 1, 2}), + ::testing::ValuesIn(const_shapes_2d), + ::testing::ValuesIn(types), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ConvEltwiseFusion::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P( + smoke_ConvolutionBackpropData_2D, + ConvEltwiseFusion, + ::testing::Combine( + ::testing::Combine(::testing::Values(ov::op::v1::ConvolutionBackpropData::get_type_info_static()), + ::testing::ValuesIn(std::vector{2, 3})), + ::testing::ValuesIn(eltwise_types), + ::testing::Values(false), + ::testing::Values(ov::Shape{1, 3, 64, 64}), + ::testing::Values(ov::Shape{3, 20, 3, 3}), + ::testing::ValuesIn(const_shapes_2d), + ::testing::ValuesIn(types), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ConvEltwiseFusion::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P( + smoke_GroupConvolutionBackpropData_2D, + ConvEltwiseFusion, + ::testing::Combine( + ::testing::Combine(::testing::Values(ov::op::v1::GroupConvolutionBackpropData::get_type_info_static()), + ::testing::ValuesIn(std::vector{2, 3})), + ::testing::ValuesIn(eltwise_types), + ::testing::Values(false), + ::testing::Values(ov::Shape{1, 12, 64, 64}), + ::testing::Values(ov::Shape{4, 3, 5, 1, 1}), + ::testing::ValuesIn(const_shapes_2d), + ::testing::ValuesIn(types), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ConvEltwiseFusion::getTestCaseName); + +const std::vector neg_const_shapes_2d{{1, 1, 1, 1, 1}, /* Broadcast output */ + {3}, + {3, 1}, + {3, 1, 1, 1}}; + +INSTANTIATE_TEST_SUITE_P(smoke_Convolution_2D_Negative, + ConvEltwiseFusion, + ::testing::Combine(::testing::Values(std::tuple{ + ov::op::v1::Convolution::get_type_info_static(), + 2}), + ::testing::ValuesIn(eltwise_types), + ::testing::Values(true), + ::testing::Values(ov::Shape{1, 3, 3, 3}), + ::testing::Values(ov::Shape{3, 3, 1, 1}), + ::testing::ValuesIn(neg_const_shapes_2d), + ::testing::ValuesIn(types), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ConvEltwiseFusion::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_GroupConvolution_2D_Negative, + 
ConvEltwiseFusion, + ::testing::Combine(::testing::Values(std::tuple{ + ov::op::v1::GroupConvolution::get_type_info_static(), + 2}), + ::testing::ValuesIn(eltwise_types), + ::testing::Values(true), + ::testing::Values(ov::Shape{1, 12, 3, 3}), + ::testing::Values(ov::Shape{4, 5, 3, 1, 1}), + ::testing::ValuesIn(neg_const_shapes_2d), + ::testing::ValuesIn(types), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ConvEltwiseFusion::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_ConvolutionBackpropData_2D_Negative, + ConvEltwiseFusion, + ::testing::Combine(::testing::Values(std::tuple{ + ov::op::v1::ConvolutionBackpropData::get_type_info_static(), + 2}), + ::testing::ValuesIn(eltwise_types), + ::testing::Values(true), + ::testing::Values(ov::Shape{1, 12, 3, 3}), + ::testing::Values(ov::Shape{12, 3, 1, 1}), + ::testing::ValuesIn(neg_const_shapes_2d), + ::testing::ValuesIn(types), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ConvEltwiseFusion::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_GroupConvolutionBackpropData_2D_Negative, + ConvEltwiseFusion, + ::testing::Combine(::testing::Values(std::tuple{ + ov::op::v1::GroupConvolutionBackpropData::get_type_info_static(), + 2}), + ::testing::ValuesIn(eltwise_types), + ::testing::Values(true), + ::testing::Values(ov::Shape{1, 12, 3, 3}), + ::testing::Values(ov::Shape{4, 3, 5, 1, 1}), + ::testing::ValuesIn(neg_const_shapes_2d), + ::testing::ValuesIn(types), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ConvEltwiseFusion::getTestCaseName); } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/parameter_result.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/parameter_result.cpp index 300ab2016d0e37..7f25b2ef54ef44 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/parameter_result.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/parameter_result.cpp @@ -3,28 +3,31 @@ // #include "subgraph_tests/parameter_result.hpp" + #include "common_test_utils/test_constants.hpp" using namespace SubgraphTestsDefinitions; +using namespace ov::test; namespace { -INSTANTIATE_TEST_SUITE_P(smoke_Check, ParameterResultSubgraphTestLegacyApi, - ::testing::Combine( - ::testing::Values(ov::test::InputShape{{1, 3, 10, 10}, {}}), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ParameterResultSubgraphTestBase::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_Check, + ParameterResultSubgraphTestLegacyApi, + ::testing::Combine(::testing::Values(ov::test::InputShape{{1, 3, 10, 10}, {}}), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ParameterResultSubgraphTestBase::getTestCaseName); const std::vector inputShapes = { - ov::test::InputShape{{1, 3, 10, 10}, {{ 1, 3, 10, 10}, { 1, 3, 10, 10}}}, - ov::test::InputShape{{-1, -1, -1, -1}, {{ 1, 3, 10, 10}, { 2, 5, 3, 10}, { 1, 3, 10, 10}, { 1, 3, 10, 10}}}, - ov::test::InputShape{{{1, 10}, {1, 10}, {1, 10}, {1, 10}}, {{ 1, 3, 10, 10}, { 2, 5, 3, 10}, { 1, 3, 10, 10}, { 1, 3, 10, 10}}}, + ov::test::InputShape{{1, 3, 10, 10}, {{1, 3, 10, 10}, {1, 3, 10, 10}}}, + ov::test::InputShape{{-1, -1, -1, -1}, {{1, 3, 10, 10}, {2, 5, 3, 10}, {1, 3, 10, 10}, {1, 3, 10, 10}}}, + ov::test::InputShape{{{1, 10}, {1, 10}, {1, 10}, {1, 10}}, + {{1, 3, 10, 10}, {2, 5, 3, 10}, {1, 3, 10, 10}, {1, 3, 10, 10}}}, }; -INSTANTIATE_TEST_SUITE_P(smoke_Check, ParameterResultSubgraphTest, - ::testing::Combine( - ::testing::ValuesIn(inputShapes), - 
::testing::Values(ov::test::utils::DEVICE_CPU)), - ParameterResultSubgraphTestBase::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_Check, + ParameterResultSubgraphTest, + ::testing::Combine(::testing::ValuesIn(inputShapes), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ParameterResultSubgraphTestBase::getTestCaseName); } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/preprocess.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/preprocess.cpp index f9a0cedbfdceeb..b8379e24fbf093 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/preprocess.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/preprocess.cpp @@ -2,14 +2,15 @@ // SPDX-License-Identifier: Apache-2.0 // -#include - #include "shared_test_classes/subgraph/preprocess.hpp" -using namespace SubgraphTestsDefinitions; +#include + +using namespace ov::test; -INSTANTIATE_TEST_SUITE_P(smoke_PrePostProcess, PrePostProcessTest, - ::testing::Combine( - ::testing::ValuesIn(ov::builder::preprocess::generic_preprocess_functions()), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - PrePostProcessTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P( + smoke_PrePostProcess, + PrePostProcessTest, + ::testing::Combine(::testing::ValuesIn(ov::builder::preprocess::generic_preprocess_functions()), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + PrePostProcessTest::getTestCaseName); diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/simple_if.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/simple_if.cpp index b9fbc6435e28ac..1abec579d595c9 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/simple_if.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/simple_if.cpp @@ -4,121 +4,81 @@ #include "shared_test_classes/subgraph/simple_if.hpp" -using namespace SubgraphTestsDefinitions; +using namespace ov::test; namespace { std::vector> inputShapes = { - { - {{}, {{5, 7}}}, - {{}, {{5, 7}}}, - }, - { - {{}, {{30, 20, 10}}}, - {{}, {{30, 20, 10}}} - }, - { - { - {-1, -1, -1}, - {{10, 20, 5}, {10, 0, 5}, {1, 5, 5}} - }, - { - {-1, -1, -1}, - {{10, 20, 5}, {10, 0, 5}, {1, 1, 5}} - } - }, - { - { - {-1, 5, -1}, - {{10, 5, 10}, {2, 5, 5}, {1, 5, 5}} - }, - { - {-1, 5, -1}, - {{1, 5, 1}, {2, 5, 5}, {5, 5, 5}} - } - }, - { - { - {{1, 10}, {1, 10}, {1, 10}}, - {{2, 5, 10}, {2, 5, 1}, {1, 5, 5}} - }, - { - {{1, 10}, {1, 10}, {1, 10}}, - {{2, 5, 10}, {2, 1, 5}, {5, 5, 5}} - } - }, + { + {{}, {{5, 7}}}, + {{}, {{5, 7}}}, + }, + {{{}, {{30, 20, 10}}}, {{}, {{30, 20, 10}}}}, + {{{-1, -1, -1}, {{10, 20, 5}, {10, 0, 5}, {1, 5, 5}}}, {{-1, -1, -1}, {{10, 20, 5}, {10, 0, 5}, {1, 1, 5}}}}, + {{{-1, 5, -1}, {{10, 5, 10}, {2, 5, 5}, {1, 5, 5}}}, {{-1, 5, -1}, {{1, 5, 1}, {2, 5, 5}, {5, 5, 5}}}}, + {{{{1, 10}, {1, 10}, {1, 10}}, {{2, 5, 10}, {2, 5, 1}, {1, 5, 5}}}, + {{{1, 10}, {1, 10}, {1, 10}}, {{2, 5, 10}, {2, 1, 5}, {5, 5, 5}}}}, }; -const std::vector inTypes = { - ov::test::ElementType::f32, - ov::test::ElementType::bf16, - ov::test::ElementType::i8 -}; +const std::vector inTypes = {ov::test::ElementType::f32, + ov::test::ElementType::bf16, + ov::test::ElementType::i8}; std::vector conditions = {true, false}; -INSTANTIATE_TEST_SUITE_P(smoke_If, SimpleIfTest, - ::testing::Combine( - ::testing::ValuesIn(inputShapes), - ::testing::ValuesIn(inTypes), - 
::testing::ValuesIn(conditions), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - SimpleIfTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_If, + SimpleIfTest, + ::testing::Combine(::testing::ValuesIn(inputShapes), + ::testing::ValuesIn(inTypes), + ::testing::ValuesIn(conditions), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + SimpleIfTest::getTestCaseName); TEST_P(SimpleIfTest, CompareWithRefs) { run(); }; -INSTANTIATE_TEST_SUITE_P(smoke_If, SimpleIf2OutTest, - ::testing::Combine( - ::testing::ValuesIn(inputShapes), - ::testing::ValuesIn(inTypes), - ::testing::ValuesIn(conditions), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - SimpleIf2OutTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_If, + SimpleIf2OutTest, + ::testing::Combine(::testing::ValuesIn(inputShapes), + ::testing::ValuesIn(inTypes), + ::testing::ValuesIn(conditions), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + SimpleIf2OutTest::getTestCaseName); TEST_P(SimpleIf2OutTest, CompareWithRefs) { run(); }; -INSTANTIATE_TEST_SUITE_P(smoke_If, SimpleIfNotConstConditionTest, - ::testing::Combine( - ::testing::ValuesIn(inputShapes), - ::testing::ValuesIn(inTypes), - ::testing::ValuesIn(conditions), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - SimpleIfNotConstConditionTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_If, + SimpleIfNotConstConditionTest, + ::testing::Combine(::testing::ValuesIn(inputShapes), + ::testing::ValuesIn(inTypes), + ::testing::ValuesIn(conditions), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + SimpleIfNotConstConditionTest::getTestCaseName); TEST_P(SimpleIfNotConstConditionTest, CompareWithRefs) { run(); }; std::vector> inputShapes_2 = { - { - { - {-1, -1, -1}, - {{10, 20, 5}, {10, 20, 5}, {1, 5, 5}} - }, - }, - { - { - {-1, 5, -1}, - {{10, 5, 10}, {2, 5, 5}, {1, 5, 5}} - }, - }, - { - { - {{0, 10}, {0, 10}, {0, 10}}, - {{2, 5, 10}, {2, 5, 1}, {2, 5, 0}, {1, 5, 5}} - }, - }, + { + {{-1, -1, -1}, {{10, 20, 5}, {10, 20, 5}, {1, 5, 5}}}, + }, + { + {{-1, 5, -1}, {{10, 5, 10}, {2, 5, 5}, {1, 5, 5}}}, + }, + { + {{{0, 10}, {0, 10}, {0, 10}}, {{2, 5, 10}, {2, 5, 1}, {2, 5, 0}, {1, 5, 5}}}, + }, }; -INSTANTIATE_TEST_SUITE_P(smoke_If, SimpleIfNotConstConditionAndInternalDynamismTest, - ::testing::Combine( - ::testing::ValuesIn(inputShapes_2), - ::testing::ValuesIn(inTypes), - ::testing::ValuesIn(conditions), - ::testing::Values(ov::test::utils::DEVICE_CPU)), +INSTANTIATE_TEST_SUITE_P(smoke_If, + SimpleIfNotConstConditionAndInternalDynamismTest, + ::testing::Combine(::testing::ValuesIn(inputShapes_2), + ::testing::ValuesIn(inTypes), + ::testing::ValuesIn(conditions), + ::testing::Values(ov::test::utils::DEVICE_CPU)), SimpleIfNotConstConditionTest::getTestCaseName); TEST_P(SimpleIfNotConstConditionAndInternalDynamismTest, CompareWithRefs) { @@ -126,51 +86,43 @@ TEST_P(SimpleIfNotConstConditionAndInternalDynamismTest, CompareWithRefs) { }; std::vector> inputShapes_3 = { - { - { - {-1, 2, -1}, - {{1, 2, 0}, {2, 2, 5}} - }, - }, - { - { - {{0, 10}, {0, 10}, {0, 10}}, - {{2, 5, 10}, {2, 0, 0}} - }, - }, + { + {{-1, 2, -1}, {{1, 2, 0}, {2, 2, 5}}}, + }, + { + {{{0, 10}, {0, 10}, {0, 10}}, {{2, 5, 10}, {2, 0, 0}}}, + }, }; -INSTANTIATE_TEST_SUITE_P(smoke_If, SimpleIfNotConstConditionAndDimsIncreaseTest, - ::testing::Combine( - ::testing::ValuesIn(inputShapes_3), - ::testing::ValuesIn(inTypes), - ::testing::ValuesIn(conditions), - ::testing::Values(ov::test::utils::DEVICE_CPU)), +INSTANTIATE_TEST_SUITE_P(smoke_If, + SimpleIfNotConstConditionAndDimsIncreaseTest, + 
::testing::Combine(::testing::ValuesIn(inputShapes_3), + ::testing::ValuesIn(inTypes), + ::testing::ValuesIn(conditions), + ::testing::Values(ov::test::utils::DEVICE_CPU)), SimpleIfNotConstConditionTest::getTestCaseName); TEST_P(SimpleIfNotConstConditionAndDimsIncreaseTest, CompareWithRefs) { run(); }; -// the axis of split in test suit "SimpleIfNotConstConditionUnusedOutputPortsTest" is hardcoded as 1, so shape[axis] should be static +// the axis of split in test suit "SimpleIfNotConstConditionUnusedOutputPortsTest" is hardcoded as 1, so shape[axis] +// should be static std::vector> inputShapes_4 = { - { - {{}, {{5, 7}}}, - }, - { - { - {-1, 5, -1}, - {{10, 5, 10}, {2, 5, 5}, {1, 5, 5}} - }, - }, + { + {{}, {{5, 7}}}, + }, + { + {{-1, 5, -1}, {{10, 5, 10}, {2, 5, 5}, {1, 5, 5}}}, + }, }; -INSTANTIATE_TEST_SUITE_P(smoke_If, SimpleIfNotConstConditionUnusedOutputPortsTest, - ::testing::Combine( - ::testing::ValuesIn(inputShapes_4), - ::testing::ValuesIn(inTypes), - ::testing::ValuesIn(conditions), - ::testing::Values(ov::test::utils::DEVICE_CPU)), +INSTANTIATE_TEST_SUITE_P(smoke_If, + SimpleIfNotConstConditionUnusedOutputPortsTest, + ::testing::Combine(::testing::ValuesIn(inputShapes_4), + ::testing::ValuesIn(inTypes), + ::testing::ValuesIn(conditions), + ::testing::Values(ov::test::utils::DEVICE_CPU)), SimpleIfNotConstConditionUnusedOutputPortsTest::getTestCaseName); TEST_P(SimpleIfNotConstConditionUnusedOutputPortsTest, CompareWithRefs) { diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/split_concat_memory.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/split_concat_memory.cpp index 4949f983169ab9..11c8faab0902ff 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/split_concat_memory.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/split_concat_memory.cpp @@ -2,21 +2,21 @@ // SPDX-License-Identifier: Apache-2.0 // -#include - #include "subgraph_tests/split_concat_memory.hpp" -using namespace ov::test::subgraph; +#include + +using namespace ov::test; namespace { const std::vector netPrecisions = { - ov::element::f32, - ov::element::i32, - ov::element::f16, - ov::element::i16, - ov::element::u8, - ov::element::i8, + ov::element::f32, + ov::element::i32, + ov::element::f16, + ov::element::i16, + ov::element::u8, + ov::element::i8, }; const std::vector shapes = { @@ -26,11 +26,11 @@ const std::vector shapes = { {3, 8}, }; -INSTANTIATE_TEST_SUITE_P(smoke_CPU, SplitConcatMemory, - ::testing::Combine( - ::testing::ValuesIn(shapes), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(1), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - SplitConcatMemory::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_CPU, + SplitConcatMemory, + ::testing::Combine(::testing::ValuesIn(shapes), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(1), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + SplitConcatMemory::getTestCaseName); } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/param_result_custom_blob.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/param_result_custom_blob.cpp index 11eaffd501ee1c..b762671a66d610 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/param_result_custom_blob.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/param_result_custom_blob.cpp @@ -2,16 +2,17 @@ // SPDX-License-Identifier: Apache-2.0 // -#include 
"shared_test_classes/subgraph/parameter_result.hpp" #include "common_test_utils/test_constants.hpp" +#include "shared_test_classes/subgraph/parameter_result.hpp" using namespace SubgraphTestsDefinitions; +using namespace ov::test; using namespace InferenceEngine; namespace CPULayerTestsDefinitions { class ParameterResultCustomBlobTest : public ParameterResultSubgraphTestLegacyApi { - protected: +protected: void Infer() override { constexpr size_t inferIterations = 10lu; @@ -25,7 +26,7 @@ class ParameterResultCustomBlobTest : public ParameterResultSubgraphTestLegacyAp std::string inputName = cnnNetwork.getInputsInfo().begin()->first; std::vector customInpData(elementsCount); - auto inpBlobData = inputBlob->buffer().as(); + auto inpBlobData = inputBlob->buffer().as(); std::copy(inpBlobData, inpBlobData + elementsCount, customInpData.begin()); auto& tensorDesc = inputsInfo->getTensorDesc(); @@ -38,7 +39,7 @@ class ParameterResultCustomBlobTest : public ParameterResultSubgraphTestLegacyAp } } void Validate() override { - //Do nothing. We call Validate() in the Infer() method + // Do nothing. We call Validate() in the Infer() method } }; @@ -51,12 +52,12 @@ TEST_P(ParameterResultCustomBlobTest, CompareWithRefs) { Run(); } namespace { - INSTANTIATE_TEST_SUITE_P(smoke_Check_Custom_Blob, ParameterResultCustomBlobTest, - ::testing::Combine( - ::testing::Values(ov::test::InputShape{{1, 3, 10, 10}, {{}}}), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ParameterResultSubgraphTestBase::getTestCaseName); -} // namespace +INSTANTIATE_TEST_SUITE_P(smoke_Check_Custom_Blob, + ParameterResultCustomBlobTest, + ::testing::Combine(::testing::Values(ov::test::InputShape{{1, 3, 10, 10}, {{}}}), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ParameterResultSubgraphTestBase::getTestCaseName); +} // namespace class ParameterResultSameBlobTest : public ParameterResultSubgraphTestLegacyApi { protected: @@ -69,7 +70,7 @@ class ParameterResultSameBlobTest : public ParameterResultSubgraphTestLegacyApi } } void Validate() override { - //Do nothing. We call Validate() in the Infer() method + // Do nothing. 
We call Validate() in the Infer() method } }; @@ -77,10 +78,10 @@ TEST_P(ParameterResultSameBlobTest, CompareWithRefs) { Run(); } namespace { - INSTANTIATE_TEST_SUITE_P(smoke_Check_Same_Blob, ParameterResultSameBlobTest, - ::testing::Combine( - ::testing::Values(ov::test::InputShape{{1, 3, 10, 10}, {{}}}), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ParameterResultSubgraphTestBase::getTestCaseName); -} // namespace -} // namespace CPULayerTestsDefinitions +INSTANTIATE_TEST_SUITE_P(smoke_Check_Same_Blob, + ParameterResultSameBlobTest, + ::testing::Combine(::testing::Values(ov::test::InputShape{{1, 3, 10, 10}, {{}}}), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ParameterResultSubgraphTestBase::getTestCaseName); +} // namespace +} // namespace CPULayerTestsDefinitions diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/parameter_result.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/parameter_result.cpp index a93153fba88752..c417dc6ce04a2c 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/parameter_result.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/parameter_result.cpp @@ -2,25 +2,27 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "subgraph_tests/parameter_result.hpp" + #include -#include "subgraph_tests/parameter_result.hpp" #include "common_test_utils/test_constants.hpp" using namespace SubgraphTestsDefinitions; +using namespace ov::test; namespace { -INSTANTIATE_TEST_SUITE_P(smoke_Check, ParameterResultSubgraphTestLegacyApi, - ::testing::Combine( - ::testing::Values(ov::test::InputShape{{1, 3, 10, 10}, {}}), - ::testing::Values(ov::test::utils::DEVICE_GPU)), - ParameterResultSubgraphTestBase::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Check, ParameterResultSubgraphTest, - ::testing::Combine( - ::testing::Values(ov::test::InputShape{{1, 3, 10, 10}, {{1, 3, 10, 10}}}), - ::testing::Values(ov::test::utils::DEVICE_GPU)), - ParameterResultSubgraphTestBase::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_Check, + ParameterResultSubgraphTestLegacyApi, + ::testing::Combine(::testing::Values(ov::test::InputShape{{1, 3, 10, 10}, {}}), + ::testing::Values(ov::test::utils::DEVICE_GPU)), + ParameterResultSubgraphTestBase::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Check, + ParameterResultSubgraphTest, + ::testing::Combine(::testing::Values(ov::test::InputShape{{1, 3, 10, 10}, {{1, 3, 10, 10}}}), + ::testing::Values(ov::test::utils::DEVICE_GPU)), + ParameterResultSubgraphTestBase::getTestCaseName); } // namespace diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/preprocess.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/preprocess.cpp index ec5545e57c7523..32ecadf49663b4 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/preprocess.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/preprocess.cpp @@ -2,11 +2,11 @@ // SPDX-License-Identifier: Apache-2.0 // -#include - #include "shared_test_classes/subgraph/preprocess.hpp" -using namespace SubgraphTestsDefinitions; +#include + +using namespace ov::test; namespace { @@ -28,7 +28,7 @@ inline std::vector GPU_smoke_preprocess_functions() { preprocess_func(resize_nearest, "resize_nearest", 0.01f), preprocess_func(resize_linear_nhwc, "resize_linear_nhwc", 0.01f), preprocess_func(resize_cubic, "resize_cubic", 0.01f), 
- preprocess_func(resize_dynamic, "resize_dynamic", 0.01f, { ov::Shape {1, 3, 123, 123} }), + preprocess_func(resize_dynamic, "resize_dynamic", 0.01f, {ov::Shape{1, 3, 123, 123}}), preprocess_func(crop_basic, "crop_basic", 0.000001f), preprocess_func(crop_negative, "crop_negative", 0.000001f), preprocess_func(convert_layout_by_dims, "convert_layout_by_dims", 0.01f), @@ -43,10 +43,10 @@ inline std::vector GPU_smoke_preprocess_functions() { }; } -INSTANTIATE_TEST_SUITE_P(smoke_PrePostProcess_GPU, PrePostProcessTest, - ::testing::Combine( - ::testing::ValuesIn(GPU_smoke_preprocess_functions()), - ::testing::Values(ov::test::utils::DEVICE_GPU)), +INSTANTIATE_TEST_SUITE_P(smoke_PrePostProcess_GPU, + PrePostProcessTest, + ::testing::Combine(::testing::ValuesIn(GPU_smoke_preprocess_functions()), + ::testing::Values(ov::test::utils::DEVICE_GPU)), PrePostProcessTest::getTestCaseName); } // namespace diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/conv_eltwise_fusion.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/conv_eltwise_fusion.hpp index 9d14fb3ab7149d..f41991680ab4f6 100644 --- a/src/tests/functional/plugin/shared/include/subgraph_tests/conv_eltwise_fusion.hpp +++ b/src/tests/functional/plugin/shared/include/subgraph_tests/conv_eltwise_fusion.hpp @@ -11,4 +11,5 @@ namespace SubgraphTestsDefinitions { TEST_P(ConvEltwiseFusion, CompareWithRefs) { Run(); } -} // namespace SubgraphTestsDefinitions + +} // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/parameter_result.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/parameter_result.hpp index a4f5baedd820c0..40123974846ea3 100644 --- a/src/tests/functional/plugin/shared/include/subgraph_tests/parameter_result.hpp +++ b/src/tests/functional/plugin/shared/include/subgraph_tests/parameter_result.hpp @@ -12,8 +12,14 @@ TEST_P(ParameterResultSubgraphTestLegacyApi, CompareWithRefs) { Run(); } +} // namespace SubgraphTestsDefinitions + +namespace ov { +namespace test { + TEST_P(ParameterResultSubgraphTest, CompareWithRefs) { run(); } -} // namespace SubgraphTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/split_concat_memory.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/split_concat_memory.hpp index c703bb4d139c5b..4545efdc75f683 100644 --- a/src/tests/functional/plugin/shared/include/subgraph_tests/split_concat_memory.hpp +++ b/src/tests/functional/plugin/shared/include/subgraph_tests/split_concat_memory.hpp @@ -4,12 +4,11 @@ #pragma once -#include "shared_test_classes/subgraph/split_concat_memory.hpp" #include "common_test_utils/data_utils.hpp" +#include "shared_test_classes/subgraph/split_concat_memory.hpp" namespace ov { namespace test { -namespace subgraph { TEST_P(SplitConcatMemory, cyclicBufferCorrectness) { /* @@ -28,7 +27,7 @@ TEST_P(SplitConcatMemory, cyclicBufferCorrectness) { auto o_tensor = inferRequest.get_tensor(*function->outputs().begin()); auto output_tensor_ref = ov::Tensor(o_tensor.get_element_type(), o_tensor.get_shape()); - auto fill_by_quarter = [this] (ov::Tensor& tensor, std::vector vals) { + auto fill_by_quarter = [this](ov::Tensor& tensor, std::vector vals) { OPENVINO_ASSERT(vals.size() == 4); auto quarter_blocked_shape = tensor.get_shape(); @@ -38,7 +37,7 @@ TEST_P(SplitConcatMemory, cyclicBufferCorrectness) { quarter_blocked_shape.insert(quarter_blocked_shape.begin() + axis, vals.size()); 
OPENVINO_ASSERT(ov::shape_size(quarter_blocked_shape) == tensor.get_size()); - auto quarter_blocked_view = ov::Tensor(tensor.get_element_type(), quarter_blocked_shape, tensor.data()); + auto quarter_blocked_view = ov::Tensor(tensor.get_element_type(), quarter_blocked_shape, tensor.data()); ov::test::utils::fill_data_with_broadcast(quarter_blocked_view, axis, vals); }; @@ -63,7 +62,6 @@ TEST_P(SplitConcatMemory, cyclicBufferCorrectness) { compare({output_tensor_ref}, {o_tensor}); } -} // namespace subgraph } // namespace test } // namespace ov diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/conv_eltwise_fusion.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/conv_eltwise_fusion.hpp index 2e96d162821a39..d3dea8eb01b080 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/conv_eltwise_fusion.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/conv_eltwise_fusion.hpp @@ -4,37 +4,35 @@ #pragma once -#include #include +#include #include -#include "shared_test_classes/base/layer_test_utils.hpp" + #include "ov_models/builders.hpp" -#include -#include +#include "shared_test_classes/base/layer_test_utils.hpp" namespace SubgraphTestsDefinitions { -typedef std::tuple< - std::tuple< - ngraph::NodeTypeInfo, // Convolution type - size_t // Number of inputs - >, - ngraph::NodeTypeInfo, // Eltwise type - bool, // Is the test negative or not - ngraph::Shape, // Input shape - ngraph::Shape, // Weights shape - ngraph::Shape, // Const shape - ngraph::element::Type, // Network precision - std::string // Device name - > ConvEltwiseFusionParams; - -class ConvEltwiseFusion - : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { +typedef std::tuple, + ov::NodeTypeInfo, // Eltwise type + bool, // Is the test negative or not + ov::Shape, // Input shape + ov::Shape, // Weights shape + ov::Shape, // Const shape + ov::element::Type, // Network precision + std::string // Device name + > + ConvEltwiseFusionParams; + +class ConvEltwiseFusion : public testing::WithParamInterface, + virtual public LayerTestsUtils::LayerTestsCommon { public: - static std::string getTestCaseName(const testing::TestParamInfo &obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); protected: void SetUp() override; }; -} // namespace SubgraphTestsDefinitions + +} // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/parameter_result.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/parameter_result.hpp index 7156036c6c05a1..5384d369b7b725 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/parameter_result.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/parameter_result.hpp @@ -4,35 +4,40 @@ #pragma once -#include +#include #include +#include #include -#include #include "shared_test_classes/base/layer_test_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ov_models/builders.hpp" -namespace SubgraphTestsDefinitions { +namespace ov { +namespace test { -using parameterResultParams = std::tuple; // Device name +using parameterResultParams = std::tuple; // Device name class ParameterResultSubgraphTestBase : public testing::WithParamInterface { - public: - static std::string getTestCaseName(const testing::TestParamInfo& obj); - 
protected: - std::shared_ptr createModel(const ov::PartialShape& shape); +public: + static std::string getTestCaseName(const testing::TestParamInfo& obj); + +protected: + std::shared_ptr createModel(const ov::PartialShape& shape); }; -class ParameterResultSubgraphTestLegacyApi : public ParameterResultSubgraphTestBase, - virtual public LayerTestsUtils::LayerTestsCommon { +class ParameterResultSubgraphTest : public ParameterResultSubgraphTestBase, virtual public ov::test::SubgraphBaseTest { protected: void SetUp() override; }; -class ParameterResultSubgraphTest : public ParameterResultSubgraphTestBase, - virtual public ov::test::SubgraphBaseTest { +} // namespace test +} // namespace ov + +namespace SubgraphTestsDefinitions { + +class ParameterResultSubgraphTestLegacyApi : public ov::test::ParameterResultSubgraphTestBase, + virtual public LayerTestsUtils::LayerTestsCommon { protected: void SetUp() override; }; diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/preprocess.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/preprocess.hpp index dcc713ff27be8e..59a242990c5e2c 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/preprocess.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/preprocess.hpp @@ -4,29 +4,37 @@ #pragma once -#include +#include #include +#include #include -#include -#include "shared_test_classes/base/ov_subgraph.hpp" #include "ov_models/builders.hpp" #include "ov_models/preprocess/preprocess_builders.hpp" #include "ov_models/utils/ov_helpers.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" -namespace SubgraphTestsDefinitions { +namespace ov { +namespace test { -using preprocessParamsTuple = std::tuple< - ov::builder::preprocess::preprocess_func, // Function with preprocessing - std::string>; // Device name +using preprocessParamsTuple = std::tuple; // Device name class PrePostProcessTest : public testing::WithParamInterface, virtual public ov::test::SubgraphBaseTest { public: - static std::string getTestCaseName(const testing::TestParamInfo &obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); protected: void SetUp() override; }; +} // namespace test +} // namespace ov + +namespace SubgraphTestsDefinitions { + +using ov::test::PrePostProcessTest; +using ov::test::preprocessParamsTuple; + } // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/simple_if.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/simple_if.hpp index d44a84941488a1..89c5ab59c34536 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/simple_if.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/simple_if.hpp @@ -4,30 +4,30 @@ #pragma once -#include #include +#include #include -#include "shared_test_classes/base/ov_subgraph.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" -namespace SubgraphTestsDefinitions { +namespace ov { +namespace test { -using SimpleIfParamsTuple = typename std::tuple< - std::vector, // Input shapes - ov::test::ElementType, // Network precision - bool, // If condition - std::string // Device name ->; +using SimpleIfParamsTuple = typename std::tuple, // Input shapes + ov::test::ElementType, // Network precision + bool, // If condition + std::string // 
Device name + >; -class SimpleIfTest: - public testing::WithParamInterface, - virtual public ov::test::SubgraphBaseTest { +class SimpleIfTest : public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseTest { public: - static std::string getTestCaseName(const testing::TestParamInfo &obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); + protected: void SetUp() override; - void compare(const std::vector &expected, const std::vector &actual) override; + void compare(const std::vector& expected, const std::vector& actual) override; size_t inferNum = 0; }; @@ -55,12 +55,12 @@ class SimpleIfNotConstConditionAndInternalDynamismTest : public SimpleIfNotConst class SimpleIfNotConstConditionAndDimsIncreaseTest : public SimpleIfNotConstConditionTest { protected: void SetUp() override; - void compare(const std::vector &expected, const std::vector &actual) override; + void compare(const std::vector& expected, const std::vector& actual) override; }; class SimpleIfNotConstConditionUnusedOutputPortsTest : public SimpleIfNotConstConditionTest { protected: void SetUp() override; }; - -} // namespace SubgraphTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/split_concat_memory.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/split_concat_memory.hpp index 32c25ab6d3975e..68d99304bf2ffb 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/split_concat_memory.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/split_concat_memory.hpp @@ -10,14 +10,12 @@ namespace ov { namespace test { -namespace subgraph { -using SplitConcatMemoryParamsTuple = typename std::tuple< - ov::Shape, // input shapes - ov::element::Type, // precision - int, // axis of split - std::string // device name ->; +using SplitConcatMemoryParamsTuple = typename std::tuple; class SplitConcatMemory : public testing::WithParamInterface, virtual public ov::test::SubgraphBaseTest { @@ -29,6 +27,5 @@ class SplitConcatMemory : public testing::WithParamInterface -// #include - namespace SubgraphTestsDefinitions { -std::string ConvEltwiseFusion::getTestCaseName(const testing::TestParamInfo &obj) { +std::string ConvEltwiseFusion::getTestCaseName(const testing::TestParamInfo& obj) { std::tuple conv_params; NodeTypeInfo conv_type, eltwise_type; bool negative; Shape input_shape, weights_shape, const_shape; element::Type precision; std::string targetName; - std::tie(conv_params, eltwise_type, negative, input_shape, weights_shape, const_shape, precision, targetName) = obj.param; + std::tie(conv_params, eltwise_type, negative, input_shape, weights_shape, const_shape, precision, targetName) = + obj.param; size_t num_inputs; std::tie(conv_type, num_inputs) = conv_params; std::ostringstream results; @@ -46,7 +46,8 @@ void ConvEltwiseFusion::SetUp() { Shape input_shape, weights_shape, const_shape; element::Type precision; size_t num_inputs; - std::tie(conv_params, eltwise_type, negative, input_shape, weights_shape, const_shape, precision, targetDevice) = this->GetParam(); + std::tie(conv_params, eltwise_type, negative, input_shape, weights_shape, const_shape, precision, targetDevice) = + this->GetParam(); std::tie(conv_type, num_inputs) = conv_params; pass::Manager manager; @@ -56,8 +57,12 @@ void ConvEltwiseFusion::SetUp() { Shape strides(spatial_dims, 1); std::vector pad_begin(spatial_dims, 0), pad_end(spatial_dims, 
0); - auto weights = ngraph::builder::makeConstant(precision, weights_shape, std::vector(shape_size(weights_shape), 2)); - auto eltwise_const = ngraph::builder::makeConstant(precision, const_shape, std::vector(shape_size(const_shape), 3)); + auto weights = ngraph::builder::makeConstant(precision, + weights_shape, + std::vector(shape_size(weights_shape), 2)); + auto eltwise_const = ngraph::builder::makeConstant(precision, + const_shape, + std::vector(shape_size(const_shape), 3)); std::shared_ptr conv; if (conv_type == opset11::Convolution::get_type_info_static()) { conv = std::make_shared(param, weights, strides, pad_begin, pad_end, strides); @@ -65,19 +70,45 @@ void ConvEltwiseFusion::SetUp() { conv = std::make_shared(param, weights, strides, pad_begin, pad_end, strides); } else if (conv_type == opset11::ConvolutionBackpropData::get_type_info_static()) { if (num_inputs == 3) { - auto output_shape = std::make_shared(element::u64, Shape{spatial_dims}, - std::vector(input_shape.begin() + 2, input_shape.end())); - conv = std::make_shared(param, weights, output_shape, strides, pad_begin, pad_end, strides); + auto output_shape = std::make_shared( + element::u64, + Shape{spatial_dims}, + std::vector(input_shape.begin() + 2, input_shape.end())); + conv = std::make_shared(param, + weights, + output_shape, + strides, + pad_begin, + pad_end, + strides); } else { - conv = std::make_shared(param, weights, strides, pad_begin, pad_end, strides); + conv = std::make_shared(param, + weights, + strides, + pad_begin, + pad_end, + strides); } } else if (conv_type == opset11::GroupConvolutionBackpropData::get_type_info_static()) { if (num_inputs == 3) { - auto output_shape = std::make_shared(element::u64, Shape{spatial_dims}, - std::vector(input_shape.begin() + 2, input_shape.end())); - conv = std::make_shared(param, weights, output_shape, strides, pad_begin, pad_end, strides); + auto output_shape = std::make_shared( + element::u64, + Shape{spatial_dims}, + std::vector(input_shape.begin() + 2, input_shape.end())); + conv = std::make_shared(param, + weights, + output_shape, + strides, + pad_begin, + pad_end, + strides); } else { - conv = std::make_shared(param, weights, strides, pad_begin, pad_end, strides); + conv = std::make_shared(param, + weights, + strides, + pad_begin, + pad_end, + strides); } } else { OPENVINO_THROW("Unsupported type"); @@ -111,7 +142,9 @@ void ConvEltwiseFusion::SetUp() { Shape strides(spatial_dims, 1); std::vector pad_begin(spatial_dims, 0), pad_end(spatial_dims, 0); - auto weights = ngraph::builder::makeConstant(precision, weights_shape, std::vector(shape_size(weights_shape), 6)); + auto weights = ngraph::builder::makeConstant(precision, + weights_shape, + std::vector(shape_size(weights_shape), 6)); std::shared_ptr conv; if (conv_type == opset11::Convolution::get_type_info_static()) { conv = std::make_shared(param, weights, strides, pad_begin, pad_end, strides); @@ -119,19 +152,45 @@ void ConvEltwiseFusion::SetUp() { conv = std::make_shared(param, weights, strides, pad_begin, pad_end, strides); } else if (conv_type == opset11::ConvolutionBackpropData::get_type_info_static()) { if (num_inputs == 3) { - auto output_shape = std::make_shared(element::u64, Shape{spatial_dims}, - std::vector(input_shape.begin() + 2, input_shape.end())); - conv = std::make_shared(param, weights, output_shape, strides, pad_begin, pad_end, strides); + auto output_shape = std::make_shared( + element::u64, + Shape{spatial_dims}, + std::vector(input_shape.begin() + 2, input_shape.end())); + conv = 
std::make_shared(param, + weights, + output_shape, + strides, + pad_begin, + pad_end, + strides); } else { - conv = std::make_shared(param, weights, strides, pad_begin, pad_end, strides); + conv = std::make_shared(param, + weights, + strides, + pad_begin, + pad_end, + strides); } } else if (conv_type == opset11::GroupConvolutionBackpropData::get_type_info_static()) { if (num_inputs == 3) { - auto output_shape = std::make_shared(element::u64, Shape{spatial_dims}, - std::vector(input_shape.begin() + 2, input_shape.end())); - conv = std::make_shared(param, weights, output_shape, strides, pad_begin, pad_end, strides); + auto output_shape = std::make_shared( + element::u64, + Shape{spatial_dims}, + std::vector(input_shape.begin() + 2, input_shape.end())); + conv = std::make_shared(param, + weights, + output_shape, + strides, + pad_begin, + pad_end, + strides); } else { - conv = std::make_shared(param, weights, strides, pad_begin, pad_end, strides); + conv = std::make_shared(param, + weights, + strides, + pad_begin, + pad_end, + strides); } } @@ -146,4 +205,4 @@ void ConvEltwiseFusion::SetUp() { auto res = compare_functions(cloned_function, function_ref); ASSERT_TRUE(res.first) << res.second; } -} // namespace SubgraphTestsDefinitions +} // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/subgraph/parameter_result.cpp b/src/tests/functional/shared_test_classes/src/subgraph/parameter_result.cpp index 26dd81d5e61b9d..e11bc877cd4605 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/parameter_result.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/parameter_result.cpp @@ -4,7 +4,8 @@ #include "shared_test_classes/subgraph/parameter_result.hpp" -namespace SubgraphTestsDefinitions { +namespace ov { +namespace test { std::string ParameterResultSubgraphTestBase::getTestCaseName(const testing::TestParamInfo& obj) { ov::test::InputShape inShape; @@ -29,20 +30,24 @@ std::shared_ptr ParameterResultSubgraphTestBase::createModel(const ov return model; } -void ParameterResultSubgraphTestLegacyApi::SetUp() { +void ParameterResultSubgraphTest::SetUp() { ov::test::InputShape inShape; std::tie(inShape, targetDevice) = this->GetParam(); - IE_ASSERT(inShape.first.is_static()); + init_input_shapes({inShape}); function = createModel(inShape.first); } -void ParameterResultSubgraphTest::SetUp() { +} // namespace test +} // namespace ov + +namespace SubgraphTestsDefinitions { +void ParameterResultSubgraphTestLegacyApi::SetUp() { ov::test::InputShape inShape; std::tie(inShape, targetDevice) = this->GetParam(); - init_input_shapes({inShape}); + OPENVINO_ASSERT(inShape.first.is_static()); function = createModel(inShape.first); } diff --git a/src/tests/functional/shared_test_classes/src/subgraph/preprocess.cpp b/src/tests/functional/shared_test_classes/src/subgraph/preprocess.cpp index a340c349584070..df1b09d999b48e 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/preprocess.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/preprocess.cpp @@ -3,16 +3,18 @@ // #include "shared_test_classes/subgraph/preprocess.hpp" -#include "ov_models/preprocess/preprocess_builders.hpp" + #include "openvino/core/preprocess/pre_post_process.hpp" +#include "ov_models/preprocess/preprocess_builders.hpp" using namespace ov; using namespace ov::preprocess; using namespace ov::builder::preprocess; -namespace SubgraphTestsDefinitions { -std::string PrePostProcessTest::getTestCaseName( - const testing::TestParamInfo &obj) { +namespace ov { 
+namespace test { + +std::string PrePostProcessTest::getTestCaseName(const testing::TestParamInfo& obj) { std::string targetName; preprocess_func func; @@ -29,7 +31,7 @@ void PrePostProcessTest::SetUp() { std::tie(func, targetDevice) = GetParam(); function = func.m_function(); rel_threshold = func.m_accuracy; - functionRefs = ngraph::clone_function(*function); + functionRefs = function->clone(); abs_threshold = func.m_accuracy; if (func.m_shapes.empty()) { for (const auto& input : function->inputs()) { @@ -43,4 +45,5 @@ TEST_P(PrePostProcessTest, CompareWithRefs) { run(); } -} // namespace SubgraphTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/src/subgraph/simple_if.cpp b/src/tests/functional/shared_test_classes/src/subgraph/simple_if.cpp index 1f81363b54b7c0..3c408c0bff048f 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/simple_if.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/simple_if.cpp @@ -3,11 +3,14 @@ // #include "shared_test_classes/subgraph/simple_if.hpp" + +#include "common_test_utils/ov_tensor_utils.hpp" #include "ov_models/builders.hpp" -#include -namespace SubgraphTestsDefinitions { -std::string SimpleIfTest::getTestCaseName(const testing::TestParamInfo &obj) { +namespace ov { +namespace test { + +std::string SimpleIfTest::getTestCaseName(const testing::TestParamInfo& obj) { std::vector shapes; ov::test::ElementType inType; bool condition; @@ -19,7 +22,7 @@ std::string SimpleIfTest::getTestCaseName(const testing::TestParamInfo &expected, const std::vector &actual) { +void SimpleIfTest::compare(const std::vector& expected, const std::vector& actual) { // in bodies there aren't nodes that work with dimension 0. So we shouldn't call SubgraphBaseTest::compare bool hasZero = false; for (auto shape : targetStaticShapes[inferNum]) { - hasZero = hasZero || std::any_of(shape.begin(), shape.end(), [](size_t dim) { return dim == 0; }); + hasZero = hasZero || std::any_of(shape.begin(), shape.end(), [](size_t dim) { + return dim == 0; + }); } if (!hasZero) { SubgraphBaseTest::compare(expected, actual); @@ -112,7 +117,8 @@ void SimpleIf2OutTest::SetUp() { auto ifRes1 = ifOp->set_output(res1, res3); auto ifRes2 = ifOp->set_output(res2, res4); - ov::ResultVector results{std::make_shared(ifRes1), std::make_shared(ifRes2)}; + ov::ResultVector results{std::make_shared(ifRes1), + std::make_shared(ifRes2)}; function = std::make_shared(results, params, "simpleIf2Out"); } @@ -122,7 +128,7 @@ void SimpleIfNotConstConditionTest::SetUp() { std::tie(shapes, inType, condition, targetDevice) = this->GetParam(); init_input_shapes(shapes); - for (auto &target : targetStaticShapes) + for (auto& target : targetStaticShapes) target.emplace_back(ov::Shape{}); ov::ParameterVector params; for (auto&& shape : inputDynamicShapes) { @@ -152,11 +158,12 @@ void SimpleIfNotConstConditionTest::SetUp() { auto ifRes1 = ifOp->set_output(res1, res3); auto ifRes2 = ifOp->set_output(res2, res4); - ov::ResultVector results{std::make_shared(ifRes1), std::make_shared(ifRes2)}; + ov::ResultVector results{std::make_shared(ifRes1), + std::make_shared(ifRes2)}; function = std::make_shared(results, params, "SimpleIfNotConstConditionTest"); } -void SimpleIfNotConstConditionTest::generate_inputs(const std::vector& targetInputStaticShapes) { +void SimpleIfNotConstConditionTest::generate_inputs(const std::vector& targetInputStaticShapes) { inputs.clear(); const auto& funcInputs = function->inputs(); for (size_t i = 0; i < 
funcInputs.size(); ++i) { @@ -165,10 +172,13 @@ void SimpleIfNotConstConditionTest::generate_inputs(const std::vector(); + auto* dataPtr = tensor.data(); dataPtr[0] = condition; } else { - tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i], 10, -5); + tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), + targetInputStaticShapes[i], + 10, + -5); } inputs.insert({funcInput.get_node_shared_ptr(), tensor}); @@ -181,7 +191,7 @@ void SimpleIfNotConstConditionAndInternalDynamismTest::SetUp() { std::tie(shapes, inType, condition, targetDevice) = this->GetParam(); init_input_shapes(shapes); - for (auto &target : targetStaticShapes) + for (auto& target : targetStaticShapes) target.emplace_back(ov::Shape{}); ov::ParameterVector params; for (auto&& shape : inputDynamicShapes) { @@ -200,7 +210,7 @@ void SimpleIfNotConstConditionAndInternalDynamismTest::SetUp() { auto thenBody = std::make_shared(ov::OutputVector{thenRes_0, thenRes_1}, ov::ParameterVector{p1}); // else body - auto add_const = std::make_shared(inType, ov::Shape{}, std::vector{ 2 }); + auto add_const = std::make_shared(inType, ov::Shape{}, std::vector{2}); auto elseOp_0 = std::make_shared(p2, add_const); auto elseOp_1 = std::make_shared(elseOp_0, ov::element::i32); auto elseOp_2 = std::make_shared(elseOp_1, inType); @@ -215,7 +225,8 @@ void SimpleIfNotConstConditionAndInternalDynamismTest::SetUp() { auto ifRes_0 = ifOp->set_output(thenRes_0, elseRes_0); auto ifRes_1 = ifOp->set_output(thenRes_1, elseRes_1); - ov::ResultVector results{std::make_shared(ifRes_0), std::make_shared(ifRes_1)}; + ov::ResultVector results{std::make_shared(ifRes_0), + std::make_shared(ifRes_1)}; function = std::make_shared(results, params, "SimpleIfNotConstConditionAndInternalDynamismTest"); } @@ -225,7 +236,7 @@ void SimpleIfNotConstConditionAndDimsIncreaseTest::SetUp() { std::tie(shapes, inType, condition, targetDevice) = this->GetParam(); init_input_shapes(shapes); - for (auto &target : targetStaticShapes) + for (auto& target : targetStaticShapes) target.emplace_back(ov::Shape{}); ov::ParameterVector params; for (auto&& shape : inputDynamicShapes) { @@ -253,12 +264,16 @@ void SimpleIfNotConstConditionAndDimsIncreaseTest::SetUp() { auto ifRes = ifOp->set_output(thenRes, elseRes); function = std::make_shared(ov::ResultVector{std::make_shared(ifOp)}, - params, "SimpleIfNotConstConditionAndDimsIncreaseTest"); + params, + "SimpleIfNotConstConditionAndDimsIncreaseTest"); } -void SimpleIfNotConstConditionAndDimsIncreaseTest::compare(const std::vector &expected, const std::vector &actual) { +void SimpleIfNotConstConditionAndDimsIncreaseTest::compare(const std::vector& expected, + const std::vector& actual) { const auto shape = targetStaticShapes[inferNum++].front(); - if (!condition && std::any_of(shape.begin(), shape.end(), [](size_t dim) { return dim == 0; })) { + if (!condition && std::any_of(shape.begin(), shape.end(), [](size_t dim) { + return dim == 0; + })) { return; } @@ -271,7 +286,7 @@ void SimpleIfNotConstConditionUnusedOutputPortsTest::SetUp() { std::tie(shapes, inType, condition, targetDevice) = this->GetParam(); init_input_shapes(shapes); - for (auto &target : targetStaticShapes) + for (auto& target : targetStaticShapes) target.emplace_back(ov::Shape{}); ov::ParameterVector params; for (auto&& shape : inputDynamicShapes) { @@ -303,4 +318,5 @@ void SimpleIfNotConstConditionUnusedOutputPortsTest::SetUp() { function = std::make_shared(results, params, 
"SimpleIfNotConstConditionUnusedOutputPortsTest"); } -} // namespace SubgraphTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/src/subgraph/split_concat_memory.cpp b/src/tests/functional/shared_test_classes/src/subgraph/split_concat_memory.cpp index 75e59fec9ac73a..44eebdc55da21b 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/split_concat_memory.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/split_concat_memory.cpp @@ -6,7 +6,6 @@ namespace ov { namespace test { -namespace subgraph { std::string SplitConcatMemory::getTestCaseName(const testing::TestParamInfo& obj) { ov::element::Type netPrecision; @@ -47,46 +46,42 @@ void SplitConcatMemory::SetUp() { * __|___ __|___ * [_out1_] [_mem2_] */ - ngraph::Shape ng_share_14(shape_14); - ngraph::Shape ng_share_34(shape_34); + ov::Shape ng_share_14(shape_14); + ov::Shape ng_share_34(shape_34); auto input = std::make_shared(inType, ng_share_14); input->set_friendly_name("input"); auto& tensor = input->get_output_tensor(0); tensor.set_names({"input_t"}); - //input->output(0).set_names({"input"}); + // input->output(0).set_names({"input"}); auto mem_c = std::make_shared(inType, ng_share_34, 0); auto mem_r = std::make_shared(mem_c, "id"); - auto cnc = std::make_shared(ngraph::NodeVector{mem_r, input}, axis); + auto cnc = std::make_shared(ov::NodeVector{mem_r, input}, axis); - std::vector chunks_val {static_cast(ng_share_14[axis]), static_cast(ng_share_34[axis])}; - auto chunk_c = std::make_shared(::ngraph::element::i64, ngraph::Shape{chunks_val.size()}, chunks_val); - auto axis_c = std::make_shared(::ngraph::element::i64, ngraph::Shape{}, axis); + std::vector chunks_val{static_cast(ng_share_14[axis]), static_cast(ng_share_34[axis])}; + auto chunk_c = std::make_shared(::ov::element::i64, ov::Shape{chunks_val.size()}, chunks_val); + auto axis_c = std::make_shared(::ov::element::i64, ov::Shape{}, axis); auto spl = std::make_shared(cnc, axis_c, chunk_c); - auto one = std::make_shared(inType, ngraph::Shape{}, 1); - auto plus = std::make_shared(cnc, one, ngraph::op::AutoBroadcastType::NUMPY); + auto one = std::make_shared(inType, ov::Shape{}, 1); + auto plus = std::make_shared(cnc, one, ov::op::AutoBroadcastType::NUMPY); plus->set_friendly_name("plus_one"); auto& o_tensor = plus->get_output_tensor(0); o_tensor.set_names({"plus_one_t"}); - //input->output(0).set_names({"plus_one"}); + // input->output(0).set_names({"plus_one"}); auto mem_w = std::make_shared(spl->output(1), "id"); - // WA. Ngraph limitations. Assign should have control dependencies on read. + // WA. OpenVINO limitations. Assign should have control dependencies on read. // And someone should hold assign node. 
mem_w->add_control_dependency(mem_r); plus->add_control_dependency(mem_w); - function = std::make_shared( - ngraph::NodeVector {plus}, - ngraph::ParameterVector {input}, - "CyclicBuffer4"); + function = std::make_shared(ov::NodeVector{plus}, ov::ParameterVector{input}, "CyclicBuffer4"); } -} // namespace subgraph } // namespace test } // namespace ov From ffbffed749c2aedfd70a8eeb39344982d541e379 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Sun, 8 Oct 2023 23:23:54 +0400 Subject: [PATCH 096/257] Enabled warnings as errors for static Ubuntu 22.04 (#20302) --- .github/workflows/linux_conditional_compilation.yml | 2 +- .github/workflows/linux_riscv.yml | 3 --- cmake/features.cmake | 9 ++++++++- src/plugins/intel_gna/src/frontend/quantization.cpp | 2 -- 4 files changed, 9 insertions(+), 7 deletions(-) diff --git a/.github/workflows/linux_conditional_compilation.yml b/.github/workflows/linux_conditional_compilation.yml index dc63843d5ce452..b8567d57a6a6cd 100644 --- a/.github/workflows/linux_conditional_compilation.yml +++ b/.github/workflows/linux_conditional_compilation.yml @@ -124,7 +124,7 @@ jobs: -DENABLE_CPPLINT=OFF \ -DENABLE_NCC_STYLE=OFF \ -DENABLE_INTEL_GNA=OFF \ - -DCMAKE_COMPILE_WARNING_AS_ERROR=OFF \ + -DCMAKE_COMPILE_WARNING_AS_ERROR=ON \ -DENABLE_PROFILING_ITT=ON \ -DSELECTIVE_BUILD=COLLECT \ -DCMAKE_C_COMPILER_LAUNCHER=${{ env.CMAKE_C_COMPILER_LAUNCHER }} \ diff --git a/.github/workflows/linux_riscv.yml b/.github/workflows/linux_riscv.yml index 3899b454054464..927b10ea80120c 100644 --- a/.github/workflows/linux_riscv.yml +++ b/.github/workflows/linux_riscv.yml @@ -85,7 +85,6 @@ jobs: python3 -m venv ${OPENVINO_BUILD_DIR}/env source ${OPENVINO_BUILD_DIR}/env/bin/activate - python3 -m pip install -r ${OPENVINO_REPO}/src/bindings/python/requirements.txt python3 -m pip install -r ${OPENVINO_REPO}/src/bindings/python/wheel/requirements-dev.txt python3 -m pip install -r ${OPENVINO_REPO}/src/bindings/python/src/compatibility/openvino/requirements-dev.txt python3 -m pip install conan @@ -151,9 +150,7 @@ jobs: -DPYTHON_MODULE_EXTENSION=$(riscv64-linux-gnu-python3-config --extension-suffix) \ -DPYBIND11_PYTHON_EXECUTABLE_LAST=${OPENVINO_BUILD_DIR}/env/bin/python3.10 \ -DENABLE_TESTS=ON \ - -DTHREADING=SEQ \ -DENABLE_PYTHON_PACKAGING=ON \ - -DENABLE_SYSTEM_TBB=ON \ -DENABLE_SYSTEM_PROTOBUF=ON \ -DENABLE_SYSTEM_SNAPPY=ON \ -DENABLE_SYSTEM_PUGIXML=ON \ diff --git a/cmake/features.cmake b/cmake/features.cmake index 7b11a8a968f1a7..01a219e0aaf6a4 100644 --- a/cmake/features.cmake +++ b/cmake/features.cmake @@ -73,7 +73,14 @@ ie_dependent_option (ENABLE_PKGCONFIG_GEN "Enable openvino.pc pkg-config file ge # # "OneDNN library based on OMP or TBB or Sequential implementation: TBB|OMP|SEQ" -set(THREADING "TBB" CACHE STRING "Threading") +if(RISCV64) + # oneDNN does not support non-SEQ for RISC-V architecture + set(THREADING_DEFAULT "SEQ") +else() + set(THREADING_DEFAULT "TBB") +endif() + +set(THREADING "${THREADING_DEFAULT}" CACHE STRING "Threading") set_property(CACHE THREADING PROPERTY STRINGS "TBB" "TBB_AUTO" "OMP" "SEQ") list (APPEND IE_OPTIONS THREADING) if (NOT THREADING STREQUAL "TBB" AND diff --git a/src/plugins/intel_gna/src/frontend/quantization.cpp b/src/plugins/intel_gna/src/frontend/quantization.cpp index 4bcadfec5f7eb4..deb8663801a651 100644 --- a/src/plugins/intel_gna/src/frontend/quantization.cpp +++ b/src/plugins/intel_gna/src/frontend/quantization.cpp @@ -93,7 +93,6 @@ void QuantizeWeights(const QuantizationData& data, auto output_low = 0.0f; auto output_high = 0.0f; uint32_t 
levels = 1; - float valueAcc = 0.0f; const auto min_values_size = data.weights_quant_params.GetMinValues().size(); if (min_values_size > 0) { @@ -121,7 +120,6 @@ void QuantizeWeights(const QuantizationData& data, float scaled_row_max = 0; for (size_t col = 0; col < data.num_columns; col++) { float value = ptr_float_weights[row * data.num_columns + col] * data.scale_factor; - valueAcc += value; if (fabs(value) > scaled_row_max) { scaled_row_max = fabs(value); } From 142c6a4b6716936311953f262572e981819bd245 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Sun, 8 Oct 2023 23:24:28 +0400 Subject: [PATCH 097/257] Ability to compile samples without API 1.0 headers (#20299) --- samples/cpp/CMakeLists.txt | 4 +++- samples/cpp/benchmark_app/main.cpp | 16 ++++++---------- .../cpp/benchmark_app/remote_tensors_filling.hpp | 2 +- 3 files changed, 10 insertions(+), 12 deletions(-) diff --git a/samples/cpp/CMakeLists.txt b/samples/cpp/CMakeLists.txt index ea3448522dea90..038895b1c07429 100644 --- a/samples/cpp/CMakeLists.txt +++ b/samples/cpp/CMakeLists.txt @@ -214,7 +214,9 @@ macro(ie_add_sample) find_package(Threads REQUIRED) find_package(OpenVINO REQUIRED COMPONENTS Runtime) - if(c_sample) + + # Conan does not generate openvino::runtime::c target + if(c_sample AND TARGET openvino::runtime::c) set(ov_link_libraries openvino::runtime::c) else() set(ov_link_libraries openvino::runtime) diff --git a/samples/cpp/benchmark_app/main.cpp b/samples/cpp/benchmark_app/main.cpp index 2bda41ca8b880c..d8b5f14bfa419a 100644 --- a/samples/cpp/benchmark_app/main.cpp +++ b/samples/cpp/benchmark_app/main.cpp @@ -19,9 +19,6 @@ # define WAS_OV_LIBRARY_DEFINED #endif -#include "gna/gna_config.hpp" -#include "gpu/gpu_config.hpp" - #ifdef WAS_OV_LIBRARY_DEFINED # undef IN_OV_COMPONENT # undef WAS_OV_LIBRARY_DEFINED @@ -314,11 +311,11 @@ int main(int argc, char* argv[]) { // Override config if command line parameter is specified if (!config.count("GPU")) config["GPU"] = {}; - config["GPU"][CONFIG_KEY(CONFIG_FILE)] = FLAGS_c; + config["GPU"]["CONFIG_FILE"] = FLAGS_c; } - if (config.count("GPU") && config.at("GPU").count(CONFIG_KEY(CONFIG_FILE))) { - auto ext = config.at("GPU").at(CONFIG_KEY(CONFIG_FILE)).as(); - core.set_property("GPU", {{CONFIG_KEY(CONFIG_FILE), ext}}); + if (config.count("GPU") && config.at("GPU").count("CONFIG_FILE")) { + auto ext = config.at("GPU").at("CONFIG_FILE").as(); + core.set_property("GPU", {{"CONFIG_FILE", ext}}); slog::info << "GPU extensions are loaded: " << ext << slog::endl; } OPENVINO_SUPPRESS_DEPRECATED_END @@ -846,9 +843,8 @@ int main(int argc, char* argv[]) { slog::info << " " << item.first << ": " << slog::endl; for (auto& item2 : item.second.as()) { OPENVINO_SUPPRESS_DEPRECATED_START - if (item2.first == ov::supported_properties || - item2.first == METRIC_KEY(SUPPORTED_CONFIG_KEYS) || - item2.first == METRIC_KEY(SUPPORTED_METRICS)) + if (item2.first == ov::supported_properties || item2.first == "SUPPORTED_CONFIG_KEYS)" || + item2.first == "SUPPORTED_METRICS") continue; OPENVINO_SUPPRESS_DEPRECATED_END slog::info << " " << item2.first << ": " << item2.second.as() << slog::endl; diff --git a/samples/cpp/benchmark_app/remote_tensors_filling.hpp b/samples/cpp/benchmark_app/remote_tensors_filling.hpp index b495b0a7704e9c..dbfee53905dd0d 100644 --- a/samples/cpp/benchmark_app/remote_tensors_filling.hpp +++ b/samples/cpp/benchmark_app/remote_tensors_filling.hpp @@ -6,7 +6,7 @@ #if defined(HAVE_GPU_DEVICE_MEM_SUPPORT) # define HAVE_DEVICE_MEM_SUPPORT -# include +# include 
"openvino/runtime/intel_gpu/ocl/ocl_wrapper.hpp" #endif #include "utils.hpp" From e1faf3ddd0630cc2a4bb0d0cf2c4d2b403e98c4b Mon Sep 17 00:00:00 2001 From: Mikhail Ryzhov Date: Mon, 9 Oct 2023 10:23:57 +0200 Subject: [PATCH 098/257] [GHA] Parallel test script code style (#20119) * fixed code style * extend logs * fixed comments * comments --- .github/workflows/linux.yml | 1 + .../layer_tests_summary/run_parallel.py | 607 ++++++++++++------ .../layer_tests_summary/utils/constants.py | 2 + 3 files changed, 415 insertions(+), 195 deletions(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 16affeb9bfa01c..d110306684a851 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -1089,6 +1089,7 @@ jobs: ${{ env.INSTALL_TEST_DIR }}/logs/hanged/*.log ${{ env.INSTALL_TEST_DIR }}/logs/interapted/*.log ${{ env.INSTALL_TEST_DIR }}/logs/disabled_tests.log + ${{ env.INSTALL_TEST_DIR }}/logs/hash_table.csv if-no-files-found: 'error' TensorFlow_Hub_Models_Tests: diff --git a/src/tests/test_utils/functional_test_utils/layer_tests_summary/run_parallel.py b/src/tests/test_utils/functional_test_utils/layer_tests_summary/run_parallel.py index c50e0b5f180af9..581a51105a5703 100644 --- a/src/tests/test_utils/functional_test_utils/layer_tests_summary/run_parallel.py +++ b/src/tests/test_utils/functional_test_utils/layer_tests_summary/run_parallel.py @@ -1,40 +1,42 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +import os +import sys +import threading +import csv +import datetime +import shlex +import heapq -from utils.conformance_utils import get_logger, progressbar -from utils import constants -from utils import file_utils from argparse import ArgumentParser -from subprocess import Popen, STDOUT, TimeoutExpired, run, call +from subprocess import Popen, TimeoutExpired, run, call from hashlib import sha256 from pathlib import Path from shutil import rmtree, copyfile from tarfile import open as tar_open +from utils.conformance_utils import get_logger, progressbar +from utils import constants +from utils import file_utils + import defusedxml.ElementTree as ET if not constants.IS_WIN: from signal import SIGKILL -import os -import sys -import threading -import csv -import datetime -import shlex -import heapq - if sys.version_info.major >= 3: import _thread as thread else: import thread -has_python_api = True -logger = get_logger('test_parallel_runner') +HAS_PYTHON_API = True +logger = get_logger("test_parallel_runner") try: from utils.get_available_devices import get_available_devices except: - logger.warning("Please set the above env variable to get the same conformance ir names run by run!") - has_python_api = False + logger.warning( + "Please set the above env variable to get the same conformance ir names run by run!" + ) + HAS_PYTHON_API = False FILENAME_LENGTH = 255 LOG_NAME_REPLACE_STR = "##NAME##" @@ -43,6 +45,7 @@ DEFAULT_TEST_TIMEOUT = 900 MAX_LENGHT = 4096 if not constants.IS_WIN else 8191 + def parse_arguments(): parser = ArgumentParser() exec_file_path_help = "Path to the test executable file" @@ -50,30 +53,89 @@ def parse_arguments(): worker_num_help = "Worker number. Default value is `cpu_count` " working_dir_num_help = "Working dir" process_timeout_help = "Process timeout in s" - parallel_help = "Parallel over HW devices. For example run tests over GPU.0, GPU.1 and etc" + parallel_help = ( + "Parallel over HW devices. 
For example run tests over GPU.0, GPU.1 and etc" + ) split_unit_help = "Split by test or suite" repeat_help = "Number of times to repeat failed and interrupted tests" - parser.add_argument("-e", "--exec_file", help=exec_file_path_help, type=str, required=True) - parser.add_argument("-c", "--cache_path", help=cache_path_help, type=str, required=False, default="") - parser.add_argument("-j", "--workers", help=worker_num_help, type=int, required=False, default=os.cpu_count()) - parser.add_argument("-p", "--parallel_devices", help=parallel_help, type=int, required=False, default=0) - parser.add_argument("-w", "--working_dir", help=working_dir_num_help, type=str, required=False, default=".") - parser.add_argument("-t", "--process_timeout", help=process_timeout_help, type=int, required=False, default=DEFAULT_PROCESS_TIMEOUT) - parser.add_argument("-s", "--split_unit", help=split_unit_help, type=str, required=False, default=constants.TEST_UNIT_NAME) - parser.add_argument("-rf", "--repeat_failed", help=repeat_help, type=int, required=False, default=1) + parser.add_argument( + "-e", + "--exec_file", + help=exec_file_path_help, + type=str, + required=True + ) + parser.add_argument( + "-c", + "--cache_path", + help=cache_path_help, + type=str, + required=False, + default="" + ) + parser.add_argument( + "-j", + "--workers", + help=worker_num_help, + type=int, + required=False, + default=os.cpu_count(), + ) + parser.add_argument( + "-p", + "--parallel_devices", + help=parallel_help, + type=int, + required=False, + default=0, + ) + parser.add_argument( + "-w", + "--working_dir", + help=working_dir_num_help, + type=str, + required=False, + default=".", + ) + parser.add_argument( + "-t", + "--process_timeout", + help=process_timeout_help, + type=int, + required=False, + default=DEFAULT_PROCESS_TIMEOUT, + ) + parser.add_argument( + "-s", + "--split_unit", + help=split_unit_help, + type=str, + required=False, + default=constants.TEST_UNIT_NAME, + ) + parser.add_argument( + "-rf", + "--repeat_failed", + help=repeat_help, + type=int, + required=False, + default=1 + ) return parser.parse_args() + def get_test_command_line_args(): - command_line_args = list() + command_line_args = [] for i in range(len(sys.argv)): - if sys.argv[i] == '--': - command_line_args = sys.argv[i+1:] + if sys.argv[i] == "--": + command_line_args = sys.argv[i + 1 :] sys.argv = sys.argv[:i] break return command_line_args + def get_device_by_args(args: list): device = constants.NOT_EXIST_DEVICE is_device = False @@ -82,14 +144,17 @@ def get_device_by_args(args: list): is_device = True if argument.find("=") == -1: continue - device = argument[argument.find("=")+1:] + device = argument[argument.find("=") + 1 :] break if is_device and argument[0] != "-": device = argument break return device + # Class to read test cache + + class TestStructure: _name = "" _time = 0 @@ -98,15 +163,25 @@ def __init__(self, name, time): self._name = name self._time = int(time) + class TaskManager: process_timeout = -1 - def __init__(self, command_list:list, working_dir: os.path, prev_run_cmd_length=0, device=constants.NOT_EXIST_DEVICE, available_devices=list()): + def __init__( + self, + command_list: list, + working_dir: os.path, + prev_run_cmd_length=0, + device=constants.NOT_EXIST_DEVICE, + available_devices=[], + ): self._command_list = command_list - self._process_list = list() - self._workers = list() - self._timers = list() - self._log_filename = os.path.join(working_dir, f"log_{LOG_NAME_REPLACE_STR}.log") + self._process_list = [] + self._workers = [] + 
self._timers = [] + self._log_filename = os.path.join( + working_dir, f"log_{LOG_NAME_REPLACE_STR}.log" + ) self._prev_run_cmd_length = prev_run_cmd_length self._idx = 0 self._device = device @@ -123,19 +198,33 @@ def __create_thread(self, func): def init_worker(self): if len(self._command_list) <= self._idx: - logger.warning(f"Skip worker initialiazation. Command list lenght <= worker index") + logger.warning( + "Skip worker initialiazation. Command list lenght <= worker index" + ) return if self._device_cnt == 0: logger.error(f"Empty available devices! Check your device!") - exit(-1) + sys.exit(-1) for target_device in self._available_devices: - log_file_name = self._log_filename.replace(LOG_NAME_REPLACE_STR, str(self._idx + self._prev_run_cmd_length)) - with open(log_file_name, "w") as log_file: - args = self._command_list[self._idx].replace(self._device, target_device) + log_file_name = self._log_filename.replace( + LOG_NAME_REPLACE_STR, str(self._idx + self._prev_run_cmd_length) + ) + with open(log_file_name, "w", encoding=constants.ENCODING) as log_file: + args = self._command_list[self._idx].replace( + self._device, target_device + ) if not constants.IS_WIN: args = shlex.split(args) worker = self.__create_thread( - self._process_list.append(Popen(args, shell=constants.IS_WIN, stdout=log_file, stderr=log_file))) + self._process_list.append( + Popen( + args, + shell=constants.IS_WIN, + stdout=log_file, + stderr=log_file, + ) + ) + ) self._workers.append(worker) worker.join() self._timers.append(datetime.datetime.now()) @@ -149,18 +238,20 @@ def kill_process_tree(pid): if not constants.IS_WIN: os.killpg(pid, SIGKILL) else: - call(['taskkill', '/F', '/T', '/PID', str(pid)]) + call(["taskkill", "/F", "/T", "/PID", str(pid)]) except OSError as err: # logger.warning(f"Impossible to kill process {pid} with error: {err}") pass - def __find_free_process(self): while True: for pid in range(len(self._process_list)): try: - if float((datetime.datetime.now() - self._timers[pid]).total_seconds()) > self.process_timeout: - logger.warning(f"Process {pid} exceed time limetattion per process") + p_time = float((datetime.datetime.now() - self._timers[pid]).total_seconds()) + if p_time > self.process_timeout: + logger.warning( + f"Process {pid} exceed time limitation per process" + ) self.kill_process_tree(self._process_list[pid].pid) self._process_list[pid].kill() self._process_list[pid].wait(timeout=1) @@ -174,19 +265,25 @@ def __find_free_process(self): except TimeoutExpired: continue - def __update_process(self, pid:int, log_file, device): + def __update_process(self, pid: int, log_file, device): args = self._command_list[self._idx].replace(self._device, device) if not constants.IS_WIN: args = shlex.split(args) - self._process_list[pid] = Popen(args, shell=constants.IS_WIN, stdout=log_file, stderr=log_file) + self._process_list[pid] = Popen( + args, shell=constants.IS_WIN, stdout=log_file, stderr=log_file + ) def update_worker(self): if self._idx >= len(self._command_list): return False pid, device = self.__find_free_process() - log_file_name = self._log_filename.replace(LOG_NAME_REPLACE_STR, str(self._idx + self._prev_run_cmd_length)) - with open(log_file_name, "w") as log_file: - self._workers[pid] = self.__create_thread(self.__update_process(pid, log_file, device)) + log_file_name = self._log_filename.replace( + LOG_NAME_REPLACE_STR, str(self._idx + self._prev_run_cmd_length) + ) + with open(log_file_name, "w", encoding=constants.ENCODING) as log_file: + self._workers[pid] = self.__create_thread( 
+ self.__update_process(pid, log_file, device) + ) self._workers[pid].join() self._timers[pid] = datetime.datetime.now() self._idx += 1 @@ -196,24 +293,39 @@ def compelete_all_processes(self): while len(self._process_list) > 0: for pid in range(len(self._process_list)): try: - if float((datetime.datetime.now() - self._timers[pid]).total_seconds()) > self.process_timeout: - logger.warning(f"Process {pid} exceed time limetation per process. The process will be killed") + p_time = float((datetime.datetime.now() - self._timers[pid]).total_seconds()) + if p_time > self.process_timeout: + logger.warning( + f"Process {pid} exceed time limetation per process. The process will be killed" + ) self.kill_process_tree(self._process_list[pid].pid) self._process_list[pid].kill() self._process_list[pid].wait(timeout=1) self._process_list[pid].wait(timeout=0) # logger.info(f"Process {pid} takes {float((datetime.datetime.now() - self._timers[pid]).total_seconds())}") self._process_list.pop(pid) - logger.info(f"Compeleting processes: Active process counter: {len(self._process_list)}...") + logger.info( + f"Compeleting processes: Active process counter: {len(self._process_list)}..." + ) break except TimeoutExpired: continue return self._idx + class TestParallelRunner: - def __init__(self, exec_file_path: os.path, test_command_line: list, - worker_num: int, working_dir: os.path, cache_path: os.path, - split_unit: str, repeat_failed: int, is_parallel_devices=False, excluded_tests=set()): + def __init__( + self, + exec_file_path: os.path, + test_command_line: list, + worker_num: int, + working_dir: os.path, + cache_path: os.path, + split_unit: str, + repeat_failed: int, + is_parallel_devices=False, + excluded_tests=set(), + ): self._exec_file_path = exec_file_path self._working_dir = working_dir self._conformance_ir_filelists = list() @@ -232,54 +344,60 @@ def __init__(self, exec_file_path: os.path, test_command_line: list, if split_unit in constants.UNIT_NAMES: self._split_unit = split_unit else: - logger.error(f"Incorrect split_unit argument: {split_unit}. Please use the following values: {','.join(constants.UNIT_NAMES)}") + logger.error( + f"Incorrect split_unit argument: {split_unit}. 
Please use the following values: {','.join(constants.UNIT_NAMES)}" + ) sys.exit(-1) self._repeat_failed = repeat_failed - self._disabled_tests = list() + self._disabled_tests = [] self._total_test_cnt = 0 self._device = get_device_by_args(self._command.split()) self._available_devices = [self._device] if not self._device is None else [] - if has_python_api and is_parallel_devices: + if HAS_PYTHON_API and is_parallel_devices: self._available_devices = get_available_devices(self._device) self._excluded_tests = excluded_tests def __init_basic_command_line_for_exec_file(self, test_command_line: list): - command = f'{self._exec_file_path}' + command = f"{self._exec_file_path}" is_input_folder = False for argument in test_command_line: if "--input_folders" in argument: is_input_folder = True - command += f" --input_folders=" - argument = argument[argument.find("=")+1:] + command += " --input_folders=" + argument = argument[argument.find("=") + 1 :] elif "--gtest_filter" in argument: - self._gtest_filter = argument[argument.find("=")+1:] + self._gtest_filter = argument[argument.find("=") + 1 :] if is_input_folder and argument[0] != "-": buf = "" - for _ in argument.split(','): - input_path = argument.replace('"', '') - if os.path.isfile(input_path) and file_utils.is_archieve(input_path): - input_path = file_utils.unzip_archieve(input_path, self._working_dir) + for _ in argument.split(","): + input_path = argument.replace('"', "") + if os.path.isfile(input_path) and file_utils.is_archieve( + input_path + ): + input_path = file_utils.unzip_archieve( + input_path, self._working_dir + ) buf = file_utils.prepare_filelist(input_path, ["*.xml"]) self._conformance_ir_filelists.append(buf) buf += "," argument = buf else: is_input_folder = False - command += f" " + command += " " command += f"{argument}" return command @staticmethod def __get_suite_filter(test_filter: str, suite_filter: str): - filters = test_filter.split(':') - suite_filter_mixed = '' + filters = test_filter.split(":") + suite_filter_mixed = "" for filter in filters: - patterns = filter.strip('\"').split('*') - suite_filter = f'{suite_filter}*' + patterns = filter.strip('"').split("*") + suite_filter = f"{suite_filter}*" suite_filter_part = suite_filter for pattern in patterns: if pattern and suite_filter.find(pattern) == -1: - suite_filter_part += f'{pattern}*' + suite_filter_part += f"{pattern}*" if suite_filter_part == suite_filter: suite_filter_mixed = f'"{suite_filter_part}"' break @@ -290,13 +408,13 @@ def __get_suite_filter(test_filter: str, suite_filter: str): return suite_filter_mixed @staticmethod - def __replace_restricted_symbols(input_string:str): - restricted_symbols = "!@$%^&-+`~:;\",<>?" + def __replace_restricted_symbols(input_string: str): + restricted_symbols = '!@$%^&-+`~:;",<>?' 
for symbol in restricted_symbols: - input_string = input_string.replace(symbol, '*') + input_string = input_string.replace(symbol, "*") return input_string - def __get_test_list_by_runtime(self, test_unit = constants.TEST_UNIT_NAME): + def __get_test_list_by_runtime(self, test_unit=constants.TEST_UNIT_NAME): self._total_test_cnt = 0 self._disabled_tests.clear() test_list_file_name = os.path.join(self._working_dir, "test_list.lst") @@ -308,26 +426,32 @@ def __get_test_list_by_runtime(self, test_unit = constants.TEST_UNIT_NAME): command_to_get_test_list = self._command + f' --gtest_list_tests > {test_list_file_name}' logger.info(f"Get test list using command: {command_to_get_test_list}") run_res = run(command_to_get_test_list, check=True, shell=True) - if run_res.stderr != "" and run_res.stderr != None: + if run_res.stderr not in ('', None): logger.error(f"Ooops! Something is going wrong... {run_res.stderr}") - exit(-1) + sys.exit(-1) if not os.path.isfile(test_list_file_name): - logger.error(f"The test list file does not exists! Please check the process output!") - exit(-1) + logger.error( + "The test list file does not exists! Please check the process output!" + ) + sys.exit(-1) - tests_dict = dict() - with open(test_list_file_name) as test_list_file: + tests_dict = {} + with open(test_list_file_name, encoding=constants.ENCODING) as test_list_file: test_suite = "" - for test_name in test_list_file.read().split('\n'): + for test_name in test_list_file.read().split("\n"): if "Running main() from" in test_name: continue - if not ' ' in test_name: + if not " " in test_name: test_suite = test_name.replace(".", "") continue - pos = test_name.find(' # ') + pos = test_name.find(" # ") if pos > 0 or test_suite != "": - real_test_name = test_suite + "." + (test_name[2:pos-1] if pos > 0 else test_name[2:]) + real_test_name = ( + test_suite + + "." + + (test_name[2 : pos - 1] if pos > 0 else test_name[2:]) + ) if constants.DISABLED_PREFIX in real_test_name: self._disabled_tests.append(real_test_name) elif test_unit == constants.TEST_UNIT_NAME: @@ -338,40 +462,50 @@ def __get_test_list_by_runtime(self, test_unit = constants.TEST_UNIT_NAME): self._total_test_cnt += 1 test_list_file.close() os.remove(test_list_file_name) - logger.info(f"Len test_list_runtime (without disabled tests): {len(tests_dict)}") + logger.info( + f"Len test_list_runtime (without disabled tests): {len(tests_dict)}" + ) if len(tests_dict) == 0: - logger.warning(f"Look like there are not tests to run! Please check the filters!") - exit(0) + logger.warning( + "Look like there are not tests to run! Please check the filters!" 
+ ) + sys.exit(0) return tests_dict def __get_test_list_by_cache(self): - tests_dict_cache = dict() + tests_dict_cache = {} if os.path.isfile(self._cache_path): logger.info(f"Get test list from cache file: {self._cache_path}") - with open(self._cache_path, "r") as cache_file: + with open(self._cache_path, "r", encoding=constants.ENCODING) as cache_file: for line in cache_file.readlines(): pos = line.find(":") time = int(line[:pos]) - test_name = line[pos+1:].replace("\n", "") - test_suite = test_name[:test_name.find(".")] + test_name = line[pos + 1 :].replace("\n", "") + test_suite = test_name[: test_name.find(".")] if self._split_unit == constants.TEST_UNIT_NAME: if constants.DISABLED_PREFIX not in test_name: - if (time != -1): - tests_dict_cache[test_name] = tests_dict_cache.get(test_name, 0) + time + if time != -1: + tests_dict_cache[test_name] = ( + tests_dict_cache.get(test_name, 0) + time + ) elif self._split_unit == constants.SUITE_UNIT_NAME: if constants.DISABLED_PREFIX not in test_suite: - if (time == -1): - tests_dict_cache[test_suite] = tests_dict_cache.get(test_suite, -1) + if time == -1: + tests_dict_cache[test_suite] = tests_dict_cache.get( + test_suite, -1 + ) else: - tests_dict_cache[test_suite] = tests_dict_cache.get(test_suite, 0) + time + tests_dict_cache[test_suite] = ( + tests_dict_cache.get(test_suite, 0) + time + ) logger.info(f"Len tests_dict_cache: {len(tests_dict_cache)}") return tests_dict_cache def __generate_test_lists(self, test_dict_cache: dict, test_dict_runtime: dict): - cached_test_dict = dict() - runtime_test_dict = dict() + cached_test_dict = {} + runtime_test_dict = {} for test in test_dict_cache: if test in test_dict_runtime and test not in self._excluded_tests: @@ -382,9 +516,15 @@ def __generate_test_lists(self, test_dict_cache: dict, test_dict_runtime: dict): runtime_test_dict[test] = test_dict_runtime[test] if len(runtime_test_dict) > 0: - logger.warning(f'Cache file is not relevant the run. The will works in hybrid mode.') - logger.info(f'{self._split_unit.title()} count from cache: {len(cached_test_dict)}') - logger.info(f'{self._split_unit.title()} count from runtime: {len(runtime_test_dict)}') + logger.warning( + "Cache file is not relevant the run. The will works in hybrid mode." 
+ ) + logger.info( + f"{self._split_unit.title()} count from cache: {len(cached_test_dict)}" + ) + logger.info( + f"{self._split_unit.title()} count from runtime: {len(runtime_test_dict)}" + ) return cached_test_dict, runtime_test_dict def __prepare_smart_filters(self, proved_test_dict: dict): @@ -403,13 +543,17 @@ def __prepare_smart_filters(self, proved_test_dict: dict): tasks_crashed = [] tasks_full = [] tasks_not_full = [] - tests_sorted = sorted(proved_test_dict.items(), key=lambda i: i[1], reverse=True) + tests_sorted = sorted( + proved_test_dict.items(), key=lambda i: i[1], reverse=True + ) for test_pattern, test_time in tests_sorted: - test_pattern = f'{self.__replace_restricted_symbols(test_pattern)}' + test_pattern = f"{self.__replace_restricted_symbols(test_pattern)}" if self._split_unit == constants.SUITE_UNIT_NAME: # fix the suite filters to execute the right amount of the tests - test_pattern = f'{self.__get_suite_filter(self._gtest_filter, test_pattern)}:' + test_pattern = ( + f"{self.__get_suite_filter(self._gtest_filter, test_pattern)}:" + ) else: # add quotes and pattern splitter test_pattern = f'"{test_pattern}":' @@ -419,7 +563,11 @@ def __prepare_smart_filters(self, proved_test_dict: dict): else: while len(tasks_not_full) > 0: t_time, t_pattern = tasks_not_full[0] - length = len(t_pattern) + def_length + len(test_pattern.replace(self._device, longest_device)) + length = ( + len(t_pattern) + + def_length + + len(test_pattern.replace(self._device, longest_device)) + ) if length < MAX_LENGHT: break else: @@ -428,7 +576,9 @@ def __prepare_smart_filters(self, proved_test_dict: dict): if len(tasks_not_full) < real_worker_num: heapq.heappush(tasks_not_full, (test_time, test_pattern)) else: - heapq.heapreplace(tasks_not_full, (t_time + test_time, t_pattern + test_pattern)) + heapq.heapreplace( + tasks_not_full, (t_time + test_time, t_pattern + test_pattern) + ) test_filters = tasks_full + tasks_not_full + tasks_crashed test_filters.sort(reverse=True) @@ -444,48 +594,60 @@ def __get_filters(self): test_dict_runtime = self.__get_test_list_by_runtime(self._split_unit) test_dict_cache = self.__get_test_list_by_cache() - cached_test_dict, runtime_test_dist = self.__generate_test_lists(test_dict_cache, test_dict_runtime) + cached_test_dict, runtime_test_dist = self.__generate_test_lists( + test_dict_cache, test_dict_runtime + ) - cached_test_list = list() + cached_test_list = [] if len(cached_test_dict) > 0: self._is_save_cache = False cached_test_list = self.__prepare_smart_filters(cached_test_dict) - runtime_test_list = list() + runtime_test_list = [] if len(runtime_test_dist) > 0: self._is_save_cache = True runtime_test_list = self.__prepare_smart_filters(runtime_test_dist) logger.info(f"Total test counter is {self._total_test_cnt}") return cached_test_list, runtime_test_list - def __execute_tests(self, filters: list(), prev_worker_cnt = 0): - commands = [f'{self._command} --gtest_filter={filter}' for filter in filters] + def __execute_tests(self, filters: [], prev_worker_cnt=0): + commands = [f"{self._command} --gtest_filter={filter}" for filter in filters] tmp_log_dir = os.path.join(self._working_dir, "temp") if not os.path.isdir(tmp_log_dir): os.mkdir(tmp_log_dir) - task_manager = TaskManager(commands, tmp_log_dir, prev_worker_cnt, self._device, self._available_devices) + task_manager = TaskManager( + commands, + tmp_log_dir, + prev_worker_cnt, + self._device, + self._available_devices, + ) for _ in progressbar(range(self._worker_num), "Worker initialization: ", 40): 
task_manager.init_worker() - for _ in progressbar(range(len(commands) - self._worker_num), "Worker execution: ", 40): + for _ in progressbar( + range(len(commands) - self._worker_num), "Worker execution: ", 40 + ): if not task_manager.update_worker(): break return task_manager.compelete_all_processes() def __find_not_runned_tests(self): test_names = set() - interapted_tests = list() + interapted_tests = [] for log in Path(os.path.join(self._working_dir, "temp")).rglob("log_*.log"): log_filename = os.path.join(self._working_dir, log) - with open(log_filename, "r") as log_file: + with open(log_filename, "r", encoding=constants.ENCODING) as log_file: has_status = False test_name = None try: lines = log_file.readlines() except: - lines = log.read_text(encoding='ascii', errors='ignore').split('\n') + lines = log.read_text(encoding="ascii", errors="ignore").split("\n") for line in lines: if constants.RUN in line: - test_name = line[line.find(constants.RUN) + len(constants.RUN) + 1:-1:] + test_name = line[ + line.find(constants.RUN) + len(constants.RUN) + 1 : -1 : + ] has_status = False if test_name is not None: test_names.add(test_name) @@ -500,7 +662,9 @@ def __find_not_runned_tests(self): interapted_tests.append(test_name) log_file.close() test_list_runtime = set(self.__get_test_list_by_runtime()) - not_runned_tests = test_list_runtime.difference(test_names).difference(self._excluded_tests) + not_runned_tests = test_list_runtime.difference(test_names).difference( + self._excluded_tests + ) interapted_tests = set(interapted_tests).difference(self._excluded_tests) return list(not_runned_tests), list(interapted_tests) @@ -509,7 +673,9 @@ def run(self): TaskManager.process_timeout = DEFAULT_PROCESS_TIMEOUT logger.info(f"Run test parallel is started. Worker num is {self._worker_num}") if len(self._available_devices) > 1: - logger.info(f"Tests will be run over devices: {self._available_devices} instead of {self._device}") + logger.info( + f"Tests will be run over devices: {self._available_devices} instead of {self._device}" + ) t_start = datetime.datetime.now() filters_cache, filters_runtime = self.__get_filters() @@ -517,22 +683,32 @@ def run(self): # it is better to reuse workes for both cached and runtime tasks test_filters = filters_cache + filters_runtime worker_cnt = 0 - if len(test_filters): - logger.info(f"Execute jobs taken from cache and runtime") + if test_filters: + logger.info("Execute jobs taken from cache and runtime") worker_cnt += self.__execute_tests(test_filters, worker_cnt) # 15m for one test in one process - if TaskManager.process_timeout == -1 or TaskManager.process_timeout == DEFAULT_PROCESS_TIMEOUT: - TaskManager.process_timeout = DEFAULT_SUITE_TIMEOUT if self._split_unit == constants.SUITE_UNIT_NAME else DEFAULT_TEST_TIMEOUT + if TaskManager.process_timeout in (-1, DEFAULT_PROCESS_TIMEOUT): + TaskManager.process_timeout = ( + DEFAULT_SUITE_TIMEOUT + if self._split_unit == constants.SUITE_UNIT_NAME + else DEFAULT_TEST_TIMEOUT + ) not_runned_tests, interapted_tests = self.__find_not_runned_tests() - if (self._repeat_failed > 0): + if self._repeat_failed > 0: if len(not_runned_tests) > 0: logger.info(f"Execute not runned {len(not_runned_tests)} tests") - not_runned_test_filters = [f'"{self.__replace_restricted_symbols(test)}"' for test in not_runned_tests] + not_runned_test_filters = [ + f'"{self.__replace_restricted_symbols(test)}"' + for test in not_runned_tests + ] worker_cnt += self.__execute_tests(not_runned_test_filters, worker_cnt) if len(interapted_tests) > 0: 
logger.info(f"Execute interapted {len(interapted_tests)} tests") - interapted_tests_filters = [f'"{self.__replace_restricted_symbols(test)}"' for test in interapted_tests] + interapted_tests_filters = [ + f'"{self.__replace_restricted_symbols(test)}"' + for test in interapted_tests + ] worker_cnt += self.__execute_tests(interapted_tests_filters, worker_cnt) t_end = datetime.datetime.now() @@ -540,17 +716,22 @@ def run(self): sec = round(total_seconds % 60, 2) min = int(total_seconds / 60) % 60 h = int(total_seconds / 3600) % 60 - logger.info(f"Run test parallel is finished successfully. Total time is {h}h:{min}m:{sec}s") + logger.info( + f"Run test parallel is finished successfully. Total time is {h}h:{min}m:{sec}s" + ) def postprocess_logs(self): - test_results = dict() - logger.info(f"Log analize is started") - saved_tests = list() + test_results = {} + logger.info("Log analize is started") + saved_tests = [] interapted_tests = set() INTERAPTED_DIR = "interapted" + def __save_log(logs_dir, dir, test_name): - test_log_filename = os.path.join(logs_dir, dir, f"{test_name}.txt".replace('/', '_')) - hash_str = str(sha256(test_name.encode('utf-8')).hexdigest()) + test_log_filename = os.path.join( + logs_dir, dir, f"{test_name}.txt".replace("/", "_") + ) + hash_str = str(sha256(test_name.encode(constants.ENCODING)).hexdigest()) if hash_str in hash_map.keys(): (dir_hash, _) = hash_map[hash_str] if dir_hash != INTERAPTED_DIR: @@ -561,18 +742,22 @@ def __save_log(logs_dir, dir, test_name): if test_name in interapted_tests: if dir == INTERAPTED_DIR: return False - interapted_log_path = os.path.join(logs_dir, INTERAPTED_DIR, f'{hash_str}.log') + interapted_log_path = os.path.join( + logs_dir, INTERAPTED_DIR, f"{hash_str}.log" + ) if os.path.isfile(interapted_log_path): os.remove(interapted_log_path) - logger.info(f"LOGS: Interapted {interapted_log_path} will be replaced") + logger.info( + f"LOGS: Interapted {interapted_log_path} will be replaced" + ) interapted_tests.remove(test_name) hash_map.pop(hash_str) hash_map.update({hash_str: (dir, test_name)}) - test_log_filename = os.path.join(logs_dir, dir, f'{hash_str}.log') + test_log_filename = os.path.join(logs_dir, dir, f"{hash_str}.log") if os.path.isfile(test_log_filename): # logger.warning(f"Log file {test_log_filename} is exist!") return False - with open(test_log_filename, "w") as log: + with open(test_log_filename, "w", encoding=constants.ENCODING) as log: log.writelines(test_log) log.close() saved_tests.append(test_name) @@ -586,14 +771,14 @@ def __save_log(logs_dir, dir, test_name): for test_st, _ in constants.TEST_STATUS.items(): if not os.path.exists(os.path.join(logs_dir, test_st)): os.mkdir(os.path.join(logs_dir, test_st)) - hash_map = dict() - test_times = list() - fix_priority = list() + hash_map = {} + test_times = [] + fix_priority = [] for log in Path(self._working_dir).rglob("log_*.log"): log_filename = os.path.join(self._working_dir, log) - with open(log_filename, "r") as log_file: + with open(log_filename, "r", encoding=constants.ENCODING) as log_file: test_name = None - test_log = list() + test_log = [] test_suites = set() dir = None test_cnt_expected = test_cnt_real_saved_now = 0 @@ -601,22 +786,26 @@ def __save_log(logs_dir, dir, test_name): try: lines = log_file.readlines() except: - lines = log.read_text(encoding='ascii', errors='ignore').split('\n') + lines = log.read_text(encoding="ascii", errors="ignore").split("\n") for line in lines: if constants.GTEST_FILTER in line: - line = line[line.find(constants.GTEST_FILTER):] - 
test_cnt_expected = line.count(':') + line = line[line.find(constants.GTEST_FILTER) :] + test_cnt_expected = line.count(":") if constants.RUN in line: - test_name = line[line.find(constants.RUN) + len(constants.RUN) + 1:-1:] + test_name = line[ + line.find(constants.RUN) + len(constants.RUN) + 1 : -1 : + ] dir = None - if self._device != None and self._available_devices != None: + if self._device is not None and self._available_devices is not None: for device_name in self._available_devices: if device_name in test_name: - test_name = test_name.replace(device_name, self._device) + test_name = test_name.replace( + device_name, self._device + ) break if constants.REF_COEF in line: - ref_k = float(line[line.rfind(' ') + 1:]) + ref_k = float(line[line.rfind(" ") + 1 :]) if dir is None: for test_st, mes_list in constants.TEST_STATUS.items(): for mes in mes_list: @@ -629,7 +818,7 @@ def __save_log(logs_dir, dir, test_name): if (constants.PG_ERR in line) or (constants.PG_WARN in line): test_log.append(line) if test_name is not None: - test_suite = test_name[:test_name.find(".")] + test_suite = test_name[: test_name.find(".")] test_suites.add(test_suite) test_log.append(line) if dir: @@ -637,7 +826,9 @@ def __save_log(logs_dir, dir, test_name): # update test_cache with tests. If tests is crashed use -1 as unknown time time = -1 if "ms)" in line: - time = line[line.rfind("(") + 1:line.rfind("ms)") - 1] + time = line[ + line.rfind("(") + 1 : line.rfind("ms)") - 1 + ] test_times.append((int(time), test_name)) if dir in test_results.keys(): test_results[dir] += 1 @@ -648,25 +839,28 @@ def __save_log(logs_dir, dir, test_name): ref_k = None test_cnt_real_saved_now += 1 test_name = None - test_log = list() + test_log = [] log_file.close() - if test_name != None: + if test_name is not None: dir = INTERAPTED_DIR if __save_log(logs_dir, dir, test_name): interapted_tests.add(test_name) - if (self._split_unit == constants.SUITE_UNIT_NAME): + if self._split_unit == constants.SUITE_UNIT_NAME: test_cnt_real = len(test_suites) else: test_cnt_real = test_cnt_real_saved_now if test_cnt_real < test_cnt_expected: - logger.error(f"Number of {self._split_unit}s in {log}: {test_cnt_real}. Expected is {test_cnt_expected} {self._split_unit}") + logger.error( + f"Number of {self._split_unit}s in {log}: {test_cnt_real}. Expected is {test_cnt_expected} {self._split_unit}" + ) else: os.remove(log_filename) - if len(list(Path(os.path.join(self._working_dir, "temp")).rglob("log_*.log"))) == 0: + if not list(Path(os.path.join(self._working_dir, "temp")).rglob("log_*.log")): rmtree(os.path.join(self._working_dir, "temp")) + for test_name in interapted_tests: # update test_cache with tests. 
If tests is crashed use -1 as unknown time time = -1 @@ -675,19 +869,23 @@ def __save_log(logs_dir, dir, test_name): test_results[INTERAPTED_DIR] += 1 else: test_results[INTERAPTED_DIR] = 1 - hash_str = str(sha256(test_name.encode('utf-8')).hexdigest()) - interapted_log_path = os.path.join(logs_dir, INTERAPTED_DIR, f'{hash_str}.log') + hash_str = str(sha256(test_name.encode(constants.ENCODING)).hexdigest()) + interapted_log_path = os.path.join( + logs_dir, INTERAPTED_DIR, f"{hash_str}.log" + ) if os.path.isfile(interapted_log_path): test_cnt_real_saved_now += 1 if self._is_save_cache: test_times.sort(reverse=True) - with open(self._cache_path, "w") as cache_file: - cache_file.writelines([f"{time}:{test_name}\n" for time, test_name in test_times]) + with open(self._cache_path, "w", encoding=constants.ENCODING) as cache_file: + cache_file.writelines( + [f"{time}:{test_name}\n" for time, test_name in test_times] + ) cache_file.close() logger.info(f"Test cache test is saved to: {self._cache_path}") hash_table_path = os.path.join(logs_dir, "hash_table.csv") - with open(hash_table_path, "w") as csv_file: - csv_writer = csv.writer(csv_file, dialect='excel') + with open(hash_table_path, "w", encoding=constants.ENCODING) as csv_file: + csv_writer = csv.writer(csv_file, dialect="excel") csv_writer.writerow(["Dir", "Hash", "Test Name"]) for hash, st in hash_map.items(): dir, name = st @@ -695,15 +893,15 @@ def __save_log(logs_dir, dir, test_name): logger.info(f"Hashed test list is saved to: {hash_table_path}") if len(fix_priority) > 0: fix_priority_path = os.path.join(logs_dir, "fix_priority.csv") - with open(fix_priority_path, "w") as csv_file: + with open(fix_priority_path, "w", encoding=constants.ENCODING) as csv_file: fix_priority.sort(reverse=True) - csv_writer = csv.writer(csv_file, dialect='excel') + csv_writer = csv.writer(csv_file, dialect="excel") csv_writer.writerow(["Test Name", "Fix Priority"]) - ir_hashes = list() + ir_hashes = [] for priority, name in fix_priority: csv_writer.writerow([name, priority]) if "IR=" in name: - ir_hash = name[name.find('IR=')+3:name.find('_Device=')] + ir_hash = name[name.find("IR=") + 3 : name.find("_Device=")] if os.path.isfile(ir_hash): _, tail = os.path.split(ir_hash) ir_hash, _ = os.path.splitext(tail) @@ -711,13 +909,17 @@ def __save_log(logs_dir, dir, test_name): logger.info(f"Fix priorities list is saved to: {fix_priority_path}") # Find all irs for failed tests - failed_ir_dir = os.path.join(self._working_dir, f'{self._device}_failed_ir') - failed_models_file_path = os.path.join(self._working_dir, f'failed_models.lst') + failed_ir_dir = os.path.join( + self._working_dir, f"{self._device}_failed_ir" + ) + failed_models_file_path = os.path.join( + self._working_dir, "failed_models.lst" + ) failed_models = set() for conformance_ir_filelist in self._conformance_ir_filelists: - with open(conformance_ir_filelist, 'r') as file: + with open(conformance_ir_filelist, "r", encoding=constants.ENCODING) as file: for conformance_ir in file.readlines(): - correct_ir = conformance_ir.replace('\n', '') + correct_ir = conformance_ir.replace("\n", "") _, tail = os.path.split(correct_ir) ir_hash, _ = os.path.splitext(tail) if ir_hash in ir_hashes: @@ -742,23 +944,27 @@ def __save_log(logs_dir, dir, test_name): for unique_model in meta_root.find("models"): for path in unique_model: for unique_path in path: - failed_models.add(unique_path.attrib["path"]) + failed_models.add( + unique_path.attrib["path"] + ) # api conformance has no failed irs if 
os.path.exists(failed_ir_dir): - output_file_name = failed_ir_dir + '.tar' + output_file_name = failed_ir_dir + ".tar" with tar_open(output_file_name, "w:gz") as tar: tar.add(failed_ir_dir, arcname=os.path.basename(failed_ir_dir)) - logger.info(f"All Conformance IRs for failed tests are saved to: {output_file_name}") + logger.info( + f"All Conformance IRs for failed tests are saved to: {output_file_name}" + ) rmtree(failed_ir_dir) if len(failed_models) > 0: - with open(failed_models_file_path, "w") as failed_models_file: - failed_models_list = list() + with open(failed_models_file_path, "w", encoding=constants.ENCODING) as failed_models_file: + failed_models_list = [] for item in failed_models: failed_models_list.append(f"{item}\n") failed_models_file.writelines(failed_models_list) failed_models_file.close() disabled_tests_path = os.path.join(logs_dir, "disabled_tests.log") - with open(disabled_tests_path, "w") as disabled_tests_file: + with open(disabled_tests_path, "w", encoding=constants.ENCODING) as disabled_tests_file: for i in range(len(self._disabled_tests)): self._disabled_tests[i] += "\n" disabled_tests_file.writelines(self._disabled_tests) @@ -766,16 +972,20 @@ def __save_log(logs_dir, dir, test_name): logger.info(f"Disabled test list is saved to: {disabled_tests_path}") not_run_tests_path = os.path.join(logs_dir, "not_run_tests.log") - with open(not_run_tests_path, "w") as not_run_tests_path_file: + with open(not_run_tests_path, "w", encoding=constants.ENCODING) as not_run_tests_path_file: test_list_runtime = self.__get_test_list_by_runtime() - diff_set = set(saved_tests).intersection(test_list_runtime).difference(set(saved_tests)).difference(self._excluded_tests) - diff_list = list() + diff_set = ( + set(saved_tests) + .intersection(test_list_runtime) + .difference(set(saved_tests)) + .difference(self._excluded_tests) + ) + diff_list = [] for item in diff_set: diff_list.append(f"{item}\n") not_run_tests_path_file.writelines(diff_list) not_run_tests_path_file.close() - l = len(diff_list) - if l > 0: + if diff_list: logger.warning(f"Not run test test counter is: {len(diff_list)}") logger.info(f"Not run test list is saved to: {not_run_tests_path}") @@ -784,19 +994,24 @@ def __save_log(logs_dir, dir, test_name): for test_st, test_res in test_results.items(): logger.info(f"{test_st} test counter is: {test_res}") test_cnt += test_res - if (test_st != "passed" and test_st != "skipped") and test_res > 0: + if (test_st not in ('passed', 'skipped')) and test_res > 0: is_successfull_run = False - if len(self._disabled_tests): + if self._disabled_tests: logger.info(f"disabled test counter is: {len(self._disabled_tests)}") diff_set = set(saved_tests).difference(set(test_list_runtime)) if diff_set: - logger.error(f"Total test count is {test_cnt} is different with expected {self._total_test_cnt} tests") - [logger.error(f'Missed test: {test}') for test in diff_set] + logger.error( + f"Total test count is {test_cnt} is different with expected {self._total_test_cnt} tests" + ) + [logger.error(f"Missed test: {test}") for test in diff_set] is_successfull_run = False - logger.info(f"Total test count with disabled tests is {test_cnt + len(self._disabled_tests)}. All logs is saved to {logs_dir}") + logger.info( + f"Total test count with disabled tests is {test_cnt + len(self._disabled_tests)}. 
All logs is saved to {logs_dir}" + ) return is_successfull_run + if __name__ == "__main__": exec_file_args = get_test_command_line_args() args = parse_arguments() @@ -810,14 +1025,16 @@ def __save_log(logs_dir, dir, test_name): logger.info(f"[ARGUMENTS] --repeat_failed={args.repeat_failed}") logger.info(f"[ARGUMENTS] Executable file arguments = {exec_file_args}") TaskManager.process_timeout = args.process_timeout - test_runner = TestParallelRunner(exec_file_path = args.exec_file, - test_command_line = exec_file_args, - worker_num = args.workers, - working_dir = args.working_dir, - cache_path = args.cache_path, - split_unit = args.split_unit, - repeat_failed = args.repeat_failed, - is_parallel_devices = args.parallel_devices) + test_runner = TestParallelRunner( + exec_file_path=args.exec_file, + test_command_line=exec_file_args, + worker_num=args.workers, + working_dir=args.working_dir, + cache_path=args.cache_path, + split_unit=args.split_unit, + repeat_failed=args.repeat_failed, + is_parallel_devices=args.parallel_devices, + ) test_runner.run() if not test_runner.postprocess_logs(): logger.error("Run is not successful") diff --git a/src/tests/test_utils/functional_test_utils/layer_tests_summary/utils/constants.py b/src/tests/test_utils/functional_test_utils/layer_tests_summary/utils/constants.py index 46386519c46f8a..05018f1cbfda21 100644 --- a/src/tests/test_utils/functional_test_utils/layer_tests_summary/utils/constants.py +++ b/src/tests/test_utils/functional_test_utils/layer_tests_summary/utils/constants.py @@ -51,6 +51,8 @@ MEM_USAGE = "MEM_USAGE=" +ENCODING = 'UTF-8' + META_EXTENSION = ".meta" XML_EXTENSION = ".xml" BIN_EXTENSION = ".bin" From 20a3a599daae1b42e65cf15f953b532f9f13d746 Mon Sep 17 00:00:00 2001 From: River Li Date: Mon, 9 Oct 2023 16:36:52 +0800 Subject: [PATCH 099/257] [C API] add ov_shutdown API (#20305) --- src/bindings/c/include/openvino/c/ov_core.h | 11 +++++++++++ src/bindings/c/src/ov_core.cpp | 4 ++++ 2 files changed, 15 insertions(+) diff --git a/src/bindings/c/include/openvino/c/ov_core.h b/src/bindings/c/include/openvino/c/ov_core.h index d0c1fcf30533ad..45bbeb35da662d 100644 --- a/src/bindings/c/include/openvino/c/ov_core.h +++ b/src/bindings/c/include/openvino/c/ov_core.h @@ -413,3 +413,14 @@ ov_core_compile_model_with_context(const ov_core_t* core, */ OPENVINO_C_API(ov_status_e) ov_core_get_default_context(const ov_core_t* core, const char* device_name, ov_remote_context_t** context); + +/** + * @brief Shut down the OpenVINO by deleting all static-duration objects allocated by the library and releasing + * dependent resources + * @ingroup ov_c_api + * @note This function should be used by advanced user to control unload the resources. + * + * You might want to use this function if you are developing a dynamically-loaded library which should clean up all + * resources after itself when the library is unloaded. 
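+ *
+ * A minimal usage sketch (illustrative only, assuming the usual ov_core_create / ov_core_free
+ * lifecycle already declared in this header; not a prescribed calling sequence):
+ * @code
+ * ov_core_t* core = NULL;
+ * ov_core_create(&core);
+ * // ... compile models and run inference ...
+ * ov_core_free(core);   // release all OpenVINO objects first
+ * ov_shutdown();        // then drop the library's static-duration objects before it is unloaded
+ * @endcode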
+ */ +OPENVINO_C_API(void) ov_shutdown(); diff --git a/src/bindings/c/src/ov_core.cpp b/src/bindings/c/src/ov_core.cpp index a97d6a51a4a5d0..6e292dc7abf331 100644 --- a/src/bindings/c/src/ov_core.cpp +++ b/src/bindings/c/src/ov_core.cpp @@ -448,3 +448,7 @@ ov_status_e ov_core_get_default_context(const ov_core_t* core, const char* devic CATCH_OV_EXCEPTIONS return ov_status_e::OK; } + +void ov_shutdown() { + ov::shutdown(); +} From 904f992e0b13b5dc2305bcca7614eba7f97a7198 Mon Sep 17 00:00:00 2001 From: Wang Xin Date: Mon, 9 Oct 2023 16:44:42 +0800 Subject: [PATCH 100/257] fix a wrong comment (#20307) --- samples/cpp/classification_sample_async/main.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/samples/cpp/classification_sample_async/main.cpp b/samples/cpp/classification_sample_async/main.cpp index a92e1a9e48a40e..36d8edb8f19d7d 100644 --- a/samples/cpp/classification_sample_async/main.cpp +++ b/samples/cpp/classification_sample_async/main.cpp @@ -136,8 +136,7 @@ int main(int argc, char* argv[]) { if (images_data.empty() || valid_image_names.empty()) throw std::logic_error("Valid input images were not found!"); - // -------- Step 5. Loading model to the device -------- - // Setting batch size using image count + // -------- Step 5. Setting batch size using image count -------- const size_t batchSize = images_data.size(); slog::info << "Set batch size " << std::to_string(batchSize) << slog::endl; ov::set_batch(model, batchSize); From ba6a6764841f475e2e8324ce901f19be1630da29 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Mon, 9 Oct 2023 12:55:26 +0400 Subject: [PATCH 101/257] Aligned tests with azure linux.yml (#20304) * Aligned tests with azure linux.yml * Apply suggestions from code review fixed comments Co-authored-by: Andrey Kashchikhin --------- Co-authored-by: Andrey Kashchikhin --- .github/workflows/linux.yml | 84 ++++++++++++++++++++++------------- .github/workflows/windows.yml | 6 +-- install_build_dependencies.sh | 2 + 3 files changed, 59 insertions(+), 33 deletions(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index d110306684a851..f7d225d075833a 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -221,6 +221,7 @@ jobs: if-no-files-found: 'error' Debian_Packages: + name: Debian Packages needs: Build defaults: run: @@ -468,6 +469,7 @@ jobs: if-no-files-found: 'error' ONNX_Runtime: + name: ONNX Runtime Integration needs: Build defaults: run: @@ -595,6 +597,7 @@ jobs: working-directory: ${{ env.ONNX_RUNTIME_BUILD_DIR }}/RelWithDebInfo/RelWithDebInfo CXX_Unit_Tests: + name: C++ unit tests needs: Build defaults: run: @@ -709,6 +712,18 @@ jobs: ${INSTALL_TEST_DIR}/ov_transformations_tests --gtest_print_time=1 \ --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-Transformations.xml + - name: Legacy Transformations func tests + run: | + source ${INSTALL_DIR}/setupvars.sh + ${INSTALL_TEST_DIR}/ov_legacy_transformations_tests --gtest_print_time=1 \ + --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-LegacyTransformations.xml + + - name: Inference Engine 1.0 unit tests + run: | + source ${INSTALL_DIR}/setupvars.sh + ${INSTALL_TEST_DIR}/InferenceEngineUnitTests --gtest_print_time=1 \ + --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-InferenceEngineUnitTests.xml + - name: Common test utils tests run: | source ${INSTALL_DIR}/setupvars.sh @@ -758,6 +773,11 @@ jobs: ${INSTALL_TEST_DIR}/ov_capi_test --gtest_print_time=1 \ --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-OpenVINOCAPITests.xml + - name: AutoBatch unit tests + run: | + source 
${INSTALL_DIR}/setupvars.sh + ${INSTALL_TEST_DIR}/ov_auto_batch_unit_tests --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-ov_auto_batch_unit_tests.xml + - name: AutoBatch func tests run: | source ${INSTALL_DIR}/setupvars.sh @@ -787,6 +807,7 @@ jobs: if-no-files-found: 'error' Python_Unit_Tests: + name: Python unit tests needs: Build defaults: run: @@ -864,25 +885,21 @@ jobs: # Tests # - - name: nGraph and IE Python Bindings Tests + - name: Python API 1.0 Tests run: | python3 -m pytest -s ${INSTALL_TEST_DIR}/pyngraph \ --junitxml=${INSTALL_TEST_DIR}/TEST-Pyngraph.xml \ - --ignore=${INSTALL_TEST_DIR}/pyngraph/tests/test_onnx/test_zoo_models.py \ - --ignore=${INSTALL_TEST_DIR}/pyngraph/tests/test_onnx/test_backend.py + --ignore=${INSTALL_TEST_DIR}/pyngraph/tests_compatibility/test_onnx/test_zoo_models.py \ + --ignore=${INSTALL_TEST_DIR}/pyngraph/tests_compatibility/test_onnx/test_backend.py - name: Python API 2.0 Tests run: | - # For python imports to import 'pybind_mock_frontend' - export PYTHONPATH=${INSTALL_TEST_DIR}:$PYTHONPATH # for 'template' extension export LD_LIBRARY_PATH=${INSTALL_TEST_DIR}:$LD_LIBRARY_PATH python3 -m pytest -sv ${INSTALL_TEST_DIR}/pyopenvino \ --junitxml=${INSTALL_TEST_DIR}/TEST-Pyngraph.xml \ - --ignore=${INSTALL_TEST_DIR}/pyopenvino/tests/test_utils/test_utils.py \ - --ignore=${INSTALL_TEST_DIR}/pyopenvino/tests/test_onnx/test_zoo_models.py \ - --ignore=${INSTALL_TEST_DIR}/pyopenvino/tests/test_onnx/test_backend.py + --ignore=${INSTALL_TEST_DIR}/pyopenvino/tests/test_utils/test_utils.py - name: Docs Python snippets run: | @@ -894,7 +911,6 @@ jobs: - name: Model Optimizer unit tests run: | - export PYTHONPATH=${INSTALL_TEST_DIR}:$PYTHONPATH # required for MxNet apt-get install -y libgomp1 libquadmath0 @@ -904,8 +920,6 @@ jobs: - name: PyTorch Layer Tests run: | python3 -m pip install -r ${LAYER_TESTS_INSTALL_DIR}/requirements.txt - export PYTHONPATH=${LAYER_TESTS_INSTALL_DIR}:$PYTHONPATH - python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/pytorch_tests -m precommit --junitxml=${INSTALL_TEST_DIR}/TEST-pytorch.xml env: TEST_DEVICE: CPU @@ -914,8 +928,8 @@ jobs: - name: TensorFlow 1 Layer Tests - TF FE run: | python3 -m pip install -r ${LAYER_TESTS_INSTALL_DIR}/requirements.txt - export PYTHONPATH=${OPENVINO_REPO}/tools/mo/:${LAYER_TESTS_INSTALL_DIR}:$PYTHONPATH - + # requires 'unit_tests' from 'tools/mo' + export PYTHONPATH=${OPENVINO_REPO}/tools/mo/:$PYTHONPATH python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/tensorflow_tests/ --use_new_frontend -m precommit_tf_fe --junitxml=${INSTALL_TEST_DIR}/TEST-tf_fe.xml env: TEST_DEVICE: CPU @@ -924,8 +938,8 @@ jobs: - name: TensorFlow 2 Layer Tests - TF FE run: | python3 -m pip install -r ${LAYER_TESTS_INSTALL_DIR}/requirements.txt - export PYTHONPATH=${OPENVINO_REPO}/tools/mo/:${LAYER_TESTS_INSTALL_DIR}:$PYTHONPATH - + # requires 'unit_tests' from 'tools/mo' + export PYTHONPATH=${OPENVINO_REPO}/tools/mo/:$PYTHONPATH python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/tensorflow2_keras_tests/ --use_new_frontend -m precommit_tf_fe --junitxml=${INSTALL_TEST_DIR}/TEST-tf2_fe.xml env: TEST_DEVICE: CPU @@ -934,8 +948,6 @@ jobs: - name: JAX Layer Tests - TF FE run: | python3 -m pip install -r ${LAYER_TESTS_INSTALL_DIR}/requirements.txt - export PYTHONPATH=${LAYER_TESTS_INSTALL_DIR}:$PYTHONPATH - python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/jax_tests/ -m precommit --junitxml=${INSTALL_TEST_DIR}/TEST-jax.xml env: TEST_DEVICE: CPU @@ -943,15 +955,11 @@ jobs: - name: TensorFlow 1 Layer Tests - Legacy FE run: | python3 -m pip install -r 
${LAYER_TESTS_INSTALL_DIR}/requirements.txt - export PYTHONPATH=${OPENVINO_REPO}/tools/mo/:${LAYER_TESTS_INSTALL_DIR}:$PYTHONPATH - python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/tensorflow_tests/test_tf_Roll.py --ir_version=10 --junitxml=${INSTALL_TEST_DIR}/TEST-tf_Roll.xml - name: TensorFlow 2 Layer Tests - Legacy FE run: | python3 -m pip install -r ${LAYER_TESTS_INSTALL_DIR}/requirements.txt - export PYTHONPATH=${OPENVINO_REPO}/tools/mo/:${LAYER_TESTS_INSTALL_DIR}:$PYTHONPATH - python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/tensorflow2_keras_tests/test_tf2_keras_activation.py \ --ir_version=11 --junitxml=${INSTALL_TEST_DIR}/TEST-tf2_Activation.xml -k "sigmoid" env: @@ -961,17 +969,22 @@ jobs: - name: TensorFlow Lite Layer Tests - TFL FE run: | python3 -m pip install -r ${LAYER_TESTS_INSTALL_DIR}/requirements.txt - export PYTHONPATH=${OPENVINO_REPO}/tools/mo/:${LAYER_TESTS_INSTALL_DIR}:$PYTHONPATH - python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/tensorflow_lite_tests/ --junitxml=${INSTALL_TEST_DIR}/TEST-tfl_fe.xml env: TEST_DEVICE: CPU TEST_PRECISION: FP16 + - name: Python ONNX operators tests + run: | + # Skip test_onnx/test_zoo_models and test_onnx/test_backend due to long execution time - ONNX Model Zoo tests are run separately + python3 -m pytest -sv ${OPENVINO_REPO}/src/frontends/onnx/tests -k 'not cuda' \ + --junitxml=${INSTALL_TEST_DIR}/TEST-onnx_frontend.xml \ + --ignore=${OPENVINO_REPO}/src/frontends/onnx/tests/test_python/test_zoo_models.py \ + --ignore=${OPENVINO_REPO}/src/frontends/onnx/tests/test_python/test_backend.py + - name: MO Python API Tests run: | python3 -m pip install -r ${LAYER_TESTS_INSTALL_DIR}/requirements.txt - export PYTHONPATH=${LAYER_TESTS_INSTALL_DIR}:$PYTHONPATH # TODO: remove setupvars.sh from here; currently, it's used for 'test_utils' installed in '/python/openvino' source ${INSTALL_DIR}/setupvars.sh bash ${INSTALL_DIR}/install_dependencies/install_openvino_dependencies.sh -c=core -y @@ -981,21 +994,29 @@ jobs: TEST_DEVICE: CPU TEST_PRECISION: FP16 + - name: OVC Python API Tests + run: | + python3 -m pip install -r ${LAYER_TESTS_INSTALL_DIR}/requirements.txt + # TODO: remove setupvars.sh from here; currently, it's used for 'test_utils' installed in '/python/openvino' + source ${INSTALL_DIR}/setupvars.sh + bash ${INSTALL_DIR}/install_dependencies/install_openvino_dependencies.sh -c=core -y + + python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/ovc_python_api_tests --junitxml=${INSTALL_TEST_DIR}/TEST-test_ovc_convert.xml + env: + TEST_DEVICE: CPU + TEST_PRECISION: FP16 + - name: Python Frontend tests run: | python3 -m pip install -r ${LAYER_TESTS_INSTALL_DIR}/requirements.txt - export PYTHONPATH=${OPENVINO_REPO}/tools/mo/:${LAYER_TESTS_INSTALL_DIR}:$PYTHONPATH # to allow 'libtest_builtin_extensions.so' to find 'libopenvino_onnx_frontend.so' source ${INSTALL_DIR}/setupvars.sh python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/py_frontend_tests --junitxml=${INSTALL_TEST_DIR}/TEST-test_py_fontend.xml + # TODO: install to 'tests' component via cpack - name: OVC unit tests - run: | - # For python imports to import 'pybind_mock_frontend' - export PYTHONPATH=${INSTALL_TEST_DIR}:$PYTHONPATH - - python3 -m pytest -s ${OPENVINO_REPO}/tools/ovc/unit_tests --junitxml=${INSTALL_TEST_DIR}/TEST-OpenVinoConversion.xml + run: python3 -m pytest -s ${OPENVINO_REPO}/tools/ovc/unit_tests --junitxml=${INSTALL_TEST_DIR}/TEST-OpenVinoConversion.xml - name: Upload Test Results uses: actions/upload-artifact@v3 @@ -1008,6 +1029,7 @@ jobs: if-no-files-found: 'error' CPU_Functional_Tests: + name: CPU 
functional tests needs: Build defaults: run: @@ -1093,6 +1115,7 @@ jobs: if-no-files-found: 'error' TensorFlow_Hub_Models_Tests: + name: TensorFlow Hub Models tests needs: Build defaults: run: @@ -1172,6 +1195,7 @@ jobs: if-no-files-found: 'error' PyTorch_Models_Tests: + name: PyTorch Models tests needs: Build defaults: run: diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index e8b539c7a1d49d..2dd4b218a1e861 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -332,17 +332,17 @@ jobs: run: | python3 -m pip install openvino-dev --force-reinstall --find-links=${{ env.INSTALL_DIR }}\tools - - name: nGraph and IE Python Bindings Tests + - name: Python API 1.0 Tests shell: cmd run: | set PYTHONPATH=${{ env.OPENVINO_REPO }}\tools\mo;${{ env.LAYER_TESTS_INSTALL_DIR }};%PYTHONPATH% - call "${{ env.INSTALL_DIR }}\\setupvars.bat" && python3 -m pytest -s ${{ env.INSTALL_TEST_DIR }}/pyngraph ${{ env.PYTHON_STATIC_ARGS }} --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-Pyngraph.xml --ignore=${{ env.INSTALL_TEST_DIR }}/pyngraph/tests/test_onnx/test_zoo_models.py --ignore=${{ env.INSTALL_TEST_DIR }}/pyngraph/tests/test_onnx/test_backend.py + call "${{ env.INSTALL_DIR }}\\setupvars.bat" && python3 -m pytest -s ${{ env.INSTALL_TEST_DIR }}/pyngraph ${{ env.PYTHON_STATIC_ARGS }} --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-Pyngraph.xml --ignore=${{ env.INSTALL_TEST_DIR }}/pyngraph/tests_compatibility/test_onnx/test_zoo_models.py --ignore=${{ env.INSTALL_TEST_DIR }}/pyngraph/tests_compatibility/test_onnx/test_backend.py - name: Python API 2.0 Tests shell: cmd run: | set PYTHONPATH=${{ env.OPENVINO_REPO }}\tools\mo;${{ env.LAYER_TESTS_INSTALL_DIR }};%PYTHONPATH% - call "${{ env.INSTALL_DIR }}\\setupvars.bat" && python3 -m pytest -sv ${{ env.INSTALL_TEST_DIR }}/pyopenvino ${{ env.PYTHON_STATIC_ARGS }} --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-Pyngraph.xml --ignore=${{ env.INSTALL_TEST_DIR }}/pyopenvino/tests/test_utils/test_utils.py --ignore=${{ env.INSTALL_TEST_DIR }}/pyopenvino/tests/test_onnx/test_zoo_models.py --ignore=${{ env.INSTALL_TEST_DIR }}/pyopenvino/tests/test_onnx/test_backend.py + call "${{ env.INSTALL_DIR }}\\setupvars.bat" && python3 -m pytest -sv ${{ env.INSTALL_TEST_DIR }}/pyopenvino ${{ env.PYTHON_STATIC_ARGS }} --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-Pyngraph.xml --ignore=${{ env.INSTALL_TEST_DIR }}/pyopenvino/tests/test_utils/test_utils.py - name: Model Optimizer UT shell: cmd diff --git a/install_build_dependencies.sh b/install_build_dependencies.sh index 680e2027a49b1a..9b49c9eb8e292f 100755 --- a/install_build_dependencies.sh +++ b/install_build_dependencies.sh @@ -26,6 +26,8 @@ if [ -f /etc/lsb-release ] || [ -f /etc/debian_version ] ; then apt update apt-get install -y --no-install-recommends \ + `# for python3-pip` \ + ca-certificates \ file \ `# build tools` \ build-essential \ From 99de7818beb82d7307875715c8e9d390a26a6df0 Mon Sep 17 00:00:00 2001 From: Roman Lyamin Date: Mon, 9 Oct 2023 14:28:37 +0400 Subject: [PATCH 102/257] [GPU] Extended SupportedFusedOps for Concatenation (#20096) --- .../cl_kernels/concatenation_gpu_ref.cl | 18 +- .../concatenation_gpu_simple_ref.cl | 15 +- .../concatenation_kernel_ref.cpp | 25 ++- .../concatenation/concatenation_kernel_ref.h | 5 +- .../concatenation_kernel_simple_ref.cpp | 20 +++ .../concatenation_kernel_simple_ref.h | 6 +- .../unit/fusions/concatenate_fusion_test.cpp | 160 +++++++++++++++++- 7 files changed, 230 insertions(+), 19 deletions(-) diff --git 
a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/concatenation_gpu_ref.cl b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/concatenation_gpu_ref.cl index 6c2fef9cf485ea..2cd0ed8dc30850 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/concatenation_gpu_ref.cl +++ b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/concatenation_gpu_ref.cl @@ -6,7 +6,13 @@ #define GET_INDEX(prefix, ORDER) CAT(prefix, _GET_INDEX)(ORDER) -KERNEL (concatenation_gpu_ref)(__global INPUT0_TYPE* input, __global OUTPUT_TYPE* output, uint output_offset_in_concat_axis) +KERNEL(concatenation_gpu_ref)(__global INPUT0_TYPE* input, + __global OUTPUT_TYPE* output, + uint output_offset_in_concat_axis +#if HAS_FUSED_OPS_DECLS + , FUSED_OPS_DECLS +#endif +) { const uint d1 = (uint)get_global_id(0); // Y const uint d2 = (uint)get_global_id(1); // F @@ -20,6 +26,14 @@ KERNEL (concatenation_gpu_ref)(__global INPUT0_TYPE* input, __global OUTPUT_TYPE { uint input_offset = GET_INDEX(INPUT0, INPUT_DIMS_ORDER); uint output_offset = GET_INDEX(OUTPUT, OUTPUT_DIMS_ORDER); - output[output_offset] = ACTIVATION(TO_OUTPUT_TYPE(input[input_offset]), ACTIVATION_PARAMS); + + INPUT0_TYPE result = input[input_offset]; + +#if HAS_FUSED_OPS + FUSED_OPS; + output[output_offset] = TO_OUTPUT_TYPE(FUSED_OPS_RESULT); +#else + output[output_offset] = TO_OUTPUT_TYPE(ACTIVATION(result, ACTIVATION_PARAMS)); +#endif } } diff --git a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/concatenation_gpu_simple_ref.cl b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/concatenation_gpu_simple_ref.cl index 20185a1161aebc..92278887faf56e 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/concatenation_gpu_simple_ref.cl +++ b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/concatenation_gpu_simple_ref.cl @@ -8,7 +8,11 @@ KERNEL (concatenation_gpu_ref)( OPTIONAL_SHAPE_INFO_ARG __global INPUT0_TYPE* input, __global OUTPUT_TYPE* output, - uint output_offset_in_concat_axis) + uint output_offset_in_concat_axis +#if HAS_FUSED_OPS_DECLS + , FUSED_OPS_DECLS +#endif +) { const uint x = (uint)get_global_id(0) % INPUT0_SIZE_X; const uint y = (uint)get_global_id(0) / INPUT0_SIZE_X; @@ -43,5 +47,12 @@ KERNEL (concatenation_gpu_ref)( uint input_offset = FUNC_CALL(get_input_index)(OPTIONAL_SHAPE_INFO_TENSOR b, f, w, z, y, x); uint output_offset = FUNC_CALL(get_output_index)(OPTIONAL_SHAPE_INFO_TENSOR out_b, out_f, out_w, out_z, out_y, out_x); - output[output_offset] = TO_OUTPUT_TYPE(ACTIVATION(input[input_offset], ACTIVATION_PARAMS)); + INPUT0_TYPE result = input[input_offset]; + +#if HAS_FUSED_OPS + FUSED_OPS; + output[output_offset] = TO_OUTPUT_TYPE(FUSED_OPS_RESULT); +#else + output[output_offset] = TO_OUTPUT_TYPE(ACTIVATION(result, ACTIVATION_PARAMS)); +#endif } diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/concatenation/concatenation_kernel_ref.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/concatenation/concatenation_kernel_ref.cpp index 662e91d55e103a..592f2282624202 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/concatenation/concatenation_kernel_ref.cpp +++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/concatenation/concatenation_kernel_ref.cpp @@ -83,13 +83,16 @@ JitConstants ConcatenationKernelRef::GetJitConstants(const concatenation_params& std::string input_dims_order = ""; std::string output_dims_order = ""; - for (size_t i = 0; i < dims_id.size(); i++) { - input_dims_order += dims_id[i] + (i == dims_id.size() - 1 ? 
"" : ","); - if (axis_order[i] == axis) - output_dims_order += "(" + dims_id[i] + " + output_offset_in_concat_axis)" + - (i == dims_id.size() - 1 ? "" : ","); - else - output_dims_order += dims_id[i] + (i == dims_id.size() - 1 ? "" : ","); + + for (size_t i = 0; i < dims_id.size(); ++i) { + std::string separator = i == dims_id.size() - 1 ? "" : ","; + input_dims_order += dims_id[i] + separator; + + if (axis_order[i] == axis) { + output_dims_order += "(" + dims_id[i] + " + output_offset_in_concat_axis)" + separator; + } else { + output_dims_order += dims_id[i] + separator; + } } cldnnJit.AddConstant(MakeJitConstant("INPUT_DIMS_ORDER", input_dims_order)); @@ -97,6 +100,14 @@ JitConstants ConcatenationKernelRef::GetJitConstants(const concatenation_params& cldnnJit.AddConstant(MakeJitConstant("INPUT_DIM_0", DataTensor::Channelndex(input_format, Tensor::DataChannelName::X))); + if (!params.fused_ops.empty()) { + auto idx_order = dims_id; + size_t axis_idx = std::distance(axis_order.begin(), std::find(axis_order.begin(), axis_order.end(), axis)); + idx_order[axis_idx] = "(" + idx_order[axis_idx] + " + output_offset_in_concat_axis)"; + + auto conf = FusedOpsConfiguration("", idx_order, "result", params.inputs[0].GetDType()); + cldnnJit.Merge(MakeFusedOpsJitConstants(params, { conf })); + } return cldnnJit; } diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/concatenation/concatenation_kernel_ref.h b/src/plugins/intel_gpu/src/kernel_selector/kernels/concatenation/concatenation_kernel_ref.h index 14ac815a5fc817..16be926e2e35d8 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/concatenation/concatenation_kernel_ref.h +++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/concatenation/concatenation_kernel_ref.h @@ -21,7 +21,10 @@ class ConcatenationKernelRef : public ConcatenationKernelBase { JitConstants GetJitConstants(const concatenation_params& params) const override; std::vector GetSupportedFusedOps() const override { return { - FusedOpType::REORDER + FusedOpType::REORDER, + FusedOpType::ACTIVATION, + FusedOpType::ELTWISE, + FusedOpType::QUANTIZE }; } }; diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/concatenation/concatenation_kernel_simple_ref.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/concatenation/concatenation_kernel_simple_ref.cpp index 9b6151fe4b30b1..d5a690ea05713e 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/concatenation/concatenation_kernel_simple_ref.cpp +++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/concatenation/concatenation_kernel_simple_ref.cpp @@ -99,6 +99,26 @@ ConcatenationKernelBase::DispatchData ConcatenationKernel_simple_Ref::SetDefault return dispatchData; } +JitConstants ConcatenationKernel_simple_Ref::GetJitConstants(const concatenation_params& params) const { + auto jit = ConcatenationKernelBase::GetJitConstants(params); + + if (!params.fused_ops.empty()) { + const auto& output = params.outputs[0]; + std::vector idx_order; + + if (output.Dimentions() == 6) { + idx_order = { "out_b", "out_f", "out_w", "out_z", "out_y", "out_x" }; + } else if (output.Dimentions() == 5) { + idx_order = { "out_b", "out_f", "out_z", "out_y", "out_x" }; + } else { + idx_order = { "out_b", "out_f", "out_y", "out_x" }; + } + auto conf = FusedOpsConfiguration("", idx_order, "result", params.inputs[0].GetDType()); + jit.Merge(MakeFusedOpsJitConstants(params, { conf })); + } + return jit; +} + KernelsData ConcatenationKernel_simple_Ref::GetKernelsData(const Params& params, const optional_params& optParams) const { 
KernelsData kd = GetCommonKernelsData(params, optParams); return kd; diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/concatenation/concatenation_kernel_simple_ref.h b/src/plugins/intel_gpu/src/kernel_selector/kernels/concatenation/concatenation_kernel_simple_ref.h index 33e626660a5b28..a49876ba459e54 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/concatenation/concatenation_kernel_simple_ref.h +++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/concatenation/concatenation_kernel_simple_ref.h @@ -15,6 +15,7 @@ class ConcatenationKernel_simple_Ref : public ConcatenationKernelBase { KernelsData GetKernelsData(const Params& params, const optional_params& options) const override; KernelsPriority GetKernelsPriority(const Params& params, const optional_params& options) const override; + JitConstants GetJitConstants(const concatenation_params& params) const override; DispatchData SetDefault(const concatenation_params& params) const override; bool Validate(const Params& p, const optional_params& o) const override; @@ -22,7 +23,10 @@ class ConcatenationKernel_simple_Ref : public ConcatenationKernelBase { ParamsKey GetSupportedKey() const override; std::vector GetSupportedFusedOps() const override { return { - FusedOpType::REORDER + FusedOpType::REORDER, + FusedOpType::ACTIVATION, + FusedOpType::ELTWISE, + FusedOpType::QUANTIZE }; } }; diff --git a/src/plugins/intel_gpu/tests/unit/fusions/concatenate_fusion_test.cpp b/src/plugins/intel_gpu/tests/unit/fusions/concatenate_fusion_test.cpp index c4dcbdee66d7c1..a462f87d4dbbad 100644 --- a/src/plugins/intel_gpu/tests/unit/fusions/concatenate_fusion_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/fusions/concatenate_fusion_test.cpp @@ -15,10 +15,8 @@ using namespace cldnn; using namespace ::tests; -#ifdef ENABLE_ONEDNN_FOR_GPU -namespace { struct concat_test_params { - tensor in_shape; + ov::PartialShape in_shape; data_types data_type; format input_format; data_types default_type; @@ -28,6 +26,8 @@ struct concat_test_params { std::string kernel_name; }; +#ifdef ENABLE_ONEDNN_FOR_GPU +namespace { class ConcatOneDNNFusingTest : public ::BaseFusingTest { public: void execute(concat_test_params& p) { @@ -79,11 +79,11 @@ class ConcatOneDNNFusingTest : public ::BaseFusingTest { } layout get_input_layout(concat_test_params& p) { - return layout{ p.data_type, p.input_format, p.in_shape }; + return layout{ p.in_shape, p.data_type, p.input_format }; } layout get_per_channel_layout(concat_test_params& p) { - return layout{ p.default_type, p.default_format, tensor{ 1, p.in_shape.feature[0], 1, 1 } }; + return layout{ { 1, p.in_shape[1] }, p.default_type, p.default_format }; } }; } // namespace @@ -116,7 +116,7 @@ TEST_P(concat_onednn_activation, along_f) { class concat_onednn_eltwise : public ConcatOneDNNFusingTest {}; TEST_P(concat_onednn_eltwise, along_f) { auto p = GetParam(); - layout data_layout(p.default_type, p.default_format, tensor{ 1, p.in_shape.feature[0]*2, 1, 1 }); + layout data_layout({ 1, p.in_shape[1] * 2 }, p.default_type, p.default_format); create_topologies( input_layout("input0", get_input_layout(p)), @@ -144,3 +144,151 @@ INSTANTIATE_TEST_SUITE_P(fusings_gpu, concat_onednn_eltwise, ::testing::ValuesIn concat_test_params{ CASE_CONCAT_F16_1, 4, 4, "" }, })); #endif + +namespace { +class ConcatFusingTest : public ::BaseFusingTest { +public: + void execute(concat_test_params& p) { + auto input0_prim = get_mem(get_input_layout(p)); + auto input1_prim = get_mem(get_input_layout(p)); + + network 
network_not_fused(this->engine, this->topology_non_fused, cfg_not_fused); + network network_fused(this->engine, this->topology_fused, cfg_fused); + + network_not_fused.set_input_data("input0", input0_prim); + network_not_fused.set_input_data("input1", input1_prim); + + network_fused.set_input_data("input0", input0_prim); + network_fused.set_input_data("input1", input1_prim); + + ASSERT_FALSE(network_not_fused.get_primitives_info().empty()); + ASSERT_FALSE(network_fused.get_primitives_info().empty()); + + auto find_and_check = [&](primitive_info& p) -> bool { + return p.original_id == "concat" || p.original_id == "reorder_bfyx"; + }; + + auto pi_fused = network_fused.get_primitives_info(); + auto pi_not_fused = network_not_fused.get_primitives_info(); + + auto info_fused = std::find_if(pi_fused.begin(), pi_fused.end(), find_and_check); + auto info_not_fused = std::find_if(pi_not_fused.begin(), pi_not_fused.end(), find_and_check); + + ASSERT_TRUE(info_fused != pi_fused.end()); + ASSERT_TRUE(info_not_fused != pi_not_fused.end()); + + compare(network_not_fused, network_fused, p); + } + + layout get_input_layout(concat_test_params& p) { + return layout{ p.in_shape, p.data_type, p.input_format }; + } + + layout get_per_channel_layout(concat_test_params& p) { + return layout{ { 1, p.in_shape[1] }, p.default_type, p.default_format }; + } +}; +} // namespace + + +/* ----------------------------------------------------------------------------------------------------- */ +/* --------------------------------------- Concat cases ------------------------------------------------ */ +/* ----------------------------------------------------------------------------------------------------- */ +#define CASE_CONCAT_F32_1 { 1, 8, 4, 4 }, data_types::f32, format::bfyx, data_types::f32, format::bfyx +#define CASE_CONCAT_F16_1 { 1, 8, 4, 4 }, data_types::f16, format::bfyx, data_types::f16, format::bfyx + +class concat_activation : public ConcatFusingTest {}; +TEST_P(concat_activation, along_f) { + auto p = GetParam(); + + if (engine.get_device_info().supports_immad) + p.expected_fused_primitives++; + + create_topologies( + input_layout("input0", get_input_layout(p)), + input_layout("input1", get_input_layout(p)), + concatenation("concat", { input_info("input0"), input_info("input1") }, 1, p.data_type), + activation("act1", input_info("concat"), activation_func::round_half_to_even), + activation("act2", input_info("act1"), activation_func::clamp, { -0.5f, 0.5f }), + reorder("reorder_bfyx", input_info("act2"), cldnn::format::bfyx, p.default_type) + ); + + tolerance = default_tolerance(p.data_type); + execute(p); +} + +class concat_eltwise_with_broadcast : public ConcatFusingTest {}; +TEST_P(concat_eltwise_with_broadcast, along_f) { + auto p = GetParam(); + layout data_layout({ 1, p.in_shape[1] * 2 }, p.default_type, p.default_format); + create_topologies( + input_layout("input0", get_input_layout(p)), + input_layout("input1", get_input_layout(p)), + data("scale_data", get_mem(data_layout, 1.0f / tensor{ 1, 1, 4, 4 }.count())), + concatenation("concat", { input_info("input0"), input_info("input1") }, 1, p.data_type), + eltwise("scale", { input_info("concat"), input_info("scale_data") }, eltwise_mode::prod, p.default_type), + reorder("reorder_bfyx", input_info("scale"), cldnn::format::bfyx, p.default_type) + ); + + tolerance = default_tolerance(p.data_type); + execute(p); +} + +class concat_eltwise_wo_broadcast : public ConcatFusingTest {}; +TEST_P(concat_eltwise_wo_broadcast, along_f) { + auto p = GetParam(); + 
ov::PartialShape concatenated_shape = p.in_shape; + concatenated_shape[1] *= 2; + layout data_layout(concatenated_shape, p.default_type, p.default_format); + create_topologies( + input_layout("input0", get_input_layout(p)), + input_layout("input1", get_input_layout(p)), + data("scale_data", get_mem(data_layout, 1.0f / tensor{ 1, 1, 4, 4 }.count())), + concatenation("concat", { input_info("input0"), input_info("input1") }, 1, p.data_type), + eltwise("scale", { input_info("concat"), input_info("scale_data") }, eltwise_mode::prod, p.default_type), + reorder("reorder_bfyx", input_info("scale"), cldnn::format::bfyx, p.default_type) + ); + + tolerance = default_tolerance(p.data_type); + execute(p); +} + +class concat_quantize : public ConcatFusingTest {}; +TEST_P(concat_quantize, along_f) { + auto p = GetParam(); + create_topologies( + input_layout("input0", get_input_layout(p)), + input_layout("input1", get_input_layout(p)), + data("in_lo", get_mem(get_per_channel_layout(p), min_random, 0)), + data("in_hi", get_mem(get_per_channel_layout(p), 1, max_random)), + data("out_lo", get_mem(get_single_element_layout(p), 0)), + data("out_hi", get_mem(get_single_element_layout(p), 255)), + concatenation("concat", { input_info("input0"), input_info("input1") }, 1, p.data_type), + quantize("quantize", input_info("concat"), input_info("in_lo"), input_info("in_hi"), + input_info("out_lo"), input_info("out_hi"), 256, data_types::u8), + reorder("reorder_bfyx", input_info("quantize"), cldnn::format::bfyx, p.default_type) + ); + + tolerance = 1.f; + execute(p); +} + +INSTANTIATE_TEST_SUITE_P(fusings_gpu, concat_activation, ::testing::ValuesIn(std::vector{ + concat_test_params{ CASE_CONCAT_F32_1, 3, 5, "" }, + concat_test_params{ CASE_CONCAT_F16_1, 3, 5, "" }, +})); + +INSTANTIATE_TEST_SUITE_P(fusings_gpu, concat_eltwise_with_broadcast, ::testing::ValuesIn(std::vector{ + concat_test_params{ CASE_CONCAT_F32_1, 4, 4, "" }, + concat_test_params{ CASE_CONCAT_F16_1, 4, 4, "" }, +})); + +INSTANTIATE_TEST_SUITE_P(fusings_gpu, concat_eltwise_wo_broadcast, ::testing::ValuesIn(std::vector{ + concat_test_params{ CASE_CONCAT_F32_1, 4, 4, "" }, + concat_test_params{ CASE_CONCAT_F16_1, 4, 4, "" }, +})); + +INSTANTIATE_TEST_SUITE_P(fusings_gpu, concat_quantize, ::testing::ValuesIn(std::vector{ + concat_test_params{ CASE_CONCAT_F32_1, 4, 4, "" }, + concat_test_params{ CASE_CONCAT_F16_1, 4, 4, "" }, +})); From cba4721cf63200b11032786ba7bd786c0a9321b4 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Mon, 9 Oct 2023 15:11:15 +0400 Subject: [PATCH 103/257] Fixed wrong target usage in auto func tests (#20314) --- src/plugins/auto/tests/functional/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/plugins/auto/tests/functional/CMakeLists.txt b/src/plugins/auto/tests/functional/CMakeLists.txt index 44bef91f8fa1d9..cd239db8806120 100644 --- a/src/plugins/auto/tests/functional/CMakeLists.txt +++ b/src/plugins/auto/tests/functional/CMakeLists.txt @@ -20,7 +20,7 @@ ov_add_test_target( openvino::runtime::dev gtest gtest_main - openvino::funcSharedTests + funcSharedTests INCLUDES ${CMAKE_CURRENT_SOURCE_DIR} ${TEST_COMMON_INCLUDE_DIR} From ead4b8a0ec37797fa69a780603df2080c1cfa490 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Mon, 9 Oct 2023 22:30:32 +0400 Subject: [PATCH 104/257] Moved cmake functions, variables to API 2.0 naming style (#20281) * Merge Linux CC + static build + clang compiler * Improvements * Removed ie prefixes from cmake scripts * Fixes for NPU --- .github/workflows/code_style.yml | 
4 +- CMakeLists.txt | 4 +- ...e => OpenVINODeveloperScriptsConfig.cmake} | 60 ++++++----- ..._target.cmake => add_target_helpers.cmake} | 19 ++-- .../api_validator/api_validator.cmake | 32 +++--- .../clang_format/clang_format.cmake | 8 +- .../compile_flags/os_flags.cmake | 72 ++++++------- .../developer_package/coverage/coverage.cmake | 12 ++- cmake/developer_package/cpplint/cpplint.cmake | 8 +- .../cross_compile/cross_compiled_func.cmake | 6 +- cmake/developer_package/faster_build.cmake | 2 + cmake/developer_package/features.cmake | 48 ++++----- .../frontends/frontends.cmake | 6 +- .../ncc_naming_style/ncc_naming_style.cmake | 2 +- cmake/developer_package/options.cmake | 50 ++++++--- .../developer_package/packaging/archive.cmake | 5 - .../packaging/common-libraries.cmake | 5 - .../packaging/debian/debian.cmake | 9 +- cmake/developer_package/packaging/nsis.cmake | 5 - .../packaging/packaging.cmake | 19 ++-- .../developer_package/packaging/rpm/rpm.cmake | 7 +- cmake/developer_package/plugins/plugins.cmake | 16 +-- .../plugins/unregister_plugin_cmake.cmake | 4 +- .../python_requirements.cmake | 2 +- .../shellcheck/shellcheck.cmake | 24 ++--- .../shellcheck/shellcheck_process.cmake | 20 ++-- cmake/developer_package/tbb/TBBConfig.cmake | 12 ++- cmake/developer_package/version.cmake | 2 + .../vs_version/vs_version.cmake | 2 +- cmake/developer_package/whole_archive.cmake | 16 ++- cmake/features.cmake | 100 +++++++++--------- .../templates/InferenceEngineConfig.cmake.in | 4 +- ...renceEngineDeveloperPackageConfig.cmake.in | 30 +++--- cmake/templates/OpenVINOConfig.cmake.in | 4 +- .../OpenVINODeveloperPackageConfig.cmake.in | 6 +- cmake/templates/ngraphConfig.cmake.in | 2 + .../c/common/opencv_c_wrapper/CMakeLists.txt | 2 +- samples/c/hello_classification/CMakeLists.txt | 2 +- .../CMakeLists.txt | 2 +- samples/cpp/CMakeLists.txt | 74 +++++++------ .../benchmark/sync_benchmark/CMakeLists.txt | 2 +- .../throughput_benchmark/CMakeLists.txt | 2 +- samples/cpp/benchmark_app/CMakeLists.txt | 2 +- .../CMakeLists.txt | 2 +- .../cpp/hello_classification/CMakeLists.txt | 2 +- .../CMakeLists.txt | 2 +- samples/cpp/hello_query_device/CMakeLists.txt | 2 +- samples/cpp/hello_reshape_ssd/CMakeLists.txt | 2 +- .../cpp/model_creation_sample/CMakeLists.txt | 2 +- samples/cpp/speech_sample/CMakeLists.txt | 2 +- scripts/CMakeLists.txt | 2 +- src/bindings/c/tests/test_model_repo.cpp | 2 +- src/bindings/python/CMakeLists.txt | 6 +- .../openvino/inference_engine/CMakeLists.txt | 2 +- src/cmake/install_tbb.cmake | 40 +++---- src/cmake/openvino.cmake | 12 +-- src/cmake/ov_parallel.cmake | 22 ++-- src/common/preprocessing/src/CMakeLists.txt | 6 +- .../preprocessing/src/ie_preprocess_data.hpp | 2 +- src/core/tests/CMakeLists.txt | 4 +- src/core/tests/extension.cpp | 2 +- src/frontends/common/CMakeLists.txt | 2 +- src/frontends/onnx/tests/CMakeLists.txt | 2 +- .../tests/frontend/shared/CMakeLists.txt | 2 +- .../tests/frontend/shared/src/conversion.cpp | 2 +- .../frontend/shared/src/library_extension.cpp | 2 +- src/inference/CMakeLists.txt | 10 +- src/inference/src/ie_network_reader.cpp | 2 +- .../tests/functional/caching_test.cpp | 4 +- .../tests/functional/core_threading.cpp | 6 +- .../get_supported_property_test.cpp | 2 +- .../tests/functional/ov_core_threading.cpp | 6 +- .../tests/functional/ov_extension_test.cpp | 8 +- .../functional/ov_register_plugin_test.cpp | 12 +-- .../functional/ov_shared_object_test.cpp | 2 +- .../tests/unit/ie_extension_test.cpp | 2 +- .../functional/behavior/auto_func_test.cpp | 4 +- 
.../hetero/tests/functional/hetero_tests.cpp | 2 +- src/plugins/intel_cpu/CMakeLists.txt | 2 +- .../tests/functional/extension/extension.cpp | 2 +- src/plugins/intel_gna/CMakeLists.txt | 2 +- .../tests/deprecated/helpers/tests_common.hpp | 2 +- .../intel_gpu/src/graph/CMakeLists.txt | 2 +- .../intel_gpu/tests/unit/CMakeLists.txt | 2 +- src/plugins/proxy/tests/proxy_tests.cpp | 4 +- src/plugins/template/CMakeLists.txt | 2 +- src/plugins/template/backend/CMakeLists.txt | 2 +- src/tests/CMakeLists.txt | 4 +- .../include/base/behavior_test_utils.hpp | 2 +- .../include/base/ov_behavior_test_utils.hpp | 2 +- .../behavior/ov_plugin/core_integration.hpp | 6 +- .../behavior/ov_plugin/properties_tests.hpp | 4 +- .../behavior/plugin/core_integration.hpp | 6 +- .../src/behavior/plugin/hetero_synthetic.cpp | 4 +- .../common_test_utils/CMakeLists.txt | 2 +- .../include/common_test_utils/file_utils.hpp | 2 +- .../common_test_utils/src/test_case.cpp | 2 +- .../src/ov_plugin_cache.cpp | 2 +- .../src/plugin_cache.cpp | 2 +- tests/fuzz/CMakeLists.txt | 2 +- thirdparty/ocl/CMakeLists.txt | 4 +- tools/benchmark_tool/CMakeLists.txt | 6 +- tools/openvino_dev/CMakeLists.txt | 4 +- tools/ovc/CMakeLists.txt | 6 +- 104 files changed, 509 insertions(+), 468 deletions(-) rename cmake/developer_package/{IEDevScriptsConfig.cmake => OpenVINODeveloperScriptsConfig.cmake} (85%) rename cmake/developer_package/{add_ie_target.cmake => add_target_helpers.cmake} (97%) diff --git a/.github/workflows/code_style.yml b/.github/workflows/code_style.yml index 0500ba86b12412..dc584a9799079c 100644 --- a/.github/workflows/code_style.yml +++ b/.github/workflows/code_style.yml @@ -60,9 +60,9 @@ jobs: run: cmake -B build - name: Shellcheck cmake target - run: cmake --build build --target ie_shellcheck -j8 + run: cmake --build build --target ov_shellcheck -j8 - # always provide suggestions even for skipped scripts in ie_shellcheck tagret + # always provide suggestions even for skipped scripts in ov_shellcheck tagret - name: ShellCheck action if: always() uses: reviewdog/action-shellcheck@v1 diff --git a/CMakeLists.txt b/CMakeLists.txt index b0aceaa39db057..ea3de7994f722e 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -30,7 +30,7 @@ endif() project(OpenVINO DESCRIPTION "OpenVINO toolkit") -find_package(IEDevScripts REQUIRED +find_package(OpenVINODeveloperScripts REQUIRED PATHS "${OpenVINO_SOURCE_DIR}/cmake/developer_package" NO_CMAKE_FIND_ROOT_PATH NO_DEFAULT_PATH) @@ -162,4 +162,4 @@ endif() # provides a callback function to describe each component in repo include(cmake/packaging/packaging.cmake) -ie_cpack(${IE_CPACK_COMPONENTS_ALL}) +ov_cpack(${OV_CPACK_COMPONENTS_ALL}) diff --git a/cmake/developer_package/IEDevScriptsConfig.cmake b/cmake/developer_package/OpenVINODeveloperScriptsConfig.cmake similarity index 85% rename from cmake/developer_package/IEDevScriptsConfig.cmake rename to cmake/developer_package/OpenVINODeveloperScriptsConfig.cmake index 95e3c9eea3629f..1dbe8952925f51 100644 --- a/cmake/developer_package/IEDevScriptsConfig.cmake +++ b/cmake/developer_package/OpenVINODeveloperScriptsConfig.cmake @@ -4,10 +4,12 @@ cmake_minimum_required(VERSION 3.13) -if(NOT DEFINED IEDevScripts_DIR) - message(FATAL_ERROR "IEDevScripts_DIR is not defined") +if(NOT DEFINED OpenVINODeveloperScripts_DIR ) + message(FATAL_ERROR "OpenVINODeveloperScripts_DIR is not defined") endif() +set(IEDevScripts_DIR "${OpenVINODeveloperScripts_DIR}") # for BW compatibility + # disable FindPkgConfig.cmake for Android if(ANDROID) # Android toolchain does not 
provide pkg-config file. So, cmake mistakenly uses @@ -23,7 +25,7 @@ macro(ov_set_if_not_defined var value) endmacro() set(OLD_CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH}) -set(CMAKE_MODULE_PATH "${IEDevScripts_DIR}") +set(CMAKE_MODULE_PATH "${OpenVINODeveloperScripts_DIR}") function(set_ci_build_number) set(repo_root "${CMAKE_SOURCE_DIR}") @@ -67,7 +69,7 @@ endif() # Prepare temporary folder # -function(set_temp_directory temp_variable source_tree_dir) +function(ov_set_temp_directory temp_variable source_tree_dir) if(DEFINED OV_TEMP) message(STATUS "OV_TEMP cmake variable is set : ${OV_TEMP}") file(TO_CMAKE_PATH ${OV_TEMP} temp) @@ -84,6 +86,11 @@ function(set_temp_directory temp_variable source_tree_dir) endif() endfunction() +macro(set_temp_directory) + message(WARNING "'set_temp_directory' is deprecated. Please, use 'ov_set_temp_directory'") + ov_set_temp_directory(${ARGV}) +endmacro() + # # For cross-compilation # @@ -139,37 +146,41 @@ if(NOT DEFINED OUTPUT_ROOT) endif() # Enable postfixes for Debug/Release builds -set(IE_DEBUG_POSTFIX_WIN "d") -set(IE_RELEASE_POSTFIX_WIN "") -set(IE_DEBUG_POSTFIX_LIN "") -set(IE_RELEASE_POSTFIX_LIN "") -set(IE_DEBUG_POSTFIX_MAC "d") -set(IE_RELEASE_POSTFIX_MAC "") +set(OV_DEBUG_POSTFIX_WIN "d") +set(OV_RELEASE_POSTFIX_WIN "") +set(OV_DEBUG_POSTFIX_LIN "") +set(OV_RELEASE_POSTFIX_LIN "") +set(OV_DEBUG_POSTFIX_MAC "d") +set(OV_RELEASE_POSTFIX_MAC "") if(WIN32) - set(IE_DEBUG_POSTFIX ${IE_DEBUG_POSTFIX_WIN}) - set(IE_RELEASE_POSTFIX ${IE_RELEASE_POSTFIX_WIN}) + set(OV_DEBUG_POSTFIX ${OV_DEBUG_POSTFIX_WIN}) + set(OV_RELEASE_POSTFIX ${OV_RELEASE_POSTFIX_WIN}) elseif(APPLE) - set(IE_DEBUG_POSTFIX ${IE_DEBUG_POSTFIX_MAC}) - set(IE_RELEASE_POSTFIX ${IE_RELEASE_POSTFIX_MAC}) + set(OV_DEBUG_POSTFIX ${OV_DEBUG_POSTFIX_MAC}) + set(OV_RELEASE_POSTFIX ${OV_RELEASE_POSTFIX_MAC}) else() - set(IE_DEBUG_POSTFIX ${IE_DEBUG_POSTFIX_LIN}) - set(IE_RELEASE_POSTFIX ${IE_RELEASE_POSTFIX_LIN}) + set(OV_DEBUG_POSTFIX ${OV_DEBUG_POSTFIX_LIN}) + set(OV_RELEASE_POSTFIX ${OV_RELEASE_POSTFIX_LIN}) endif() -set(CMAKE_DEBUG_POSTFIX ${IE_DEBUG_POSTFIX}) -set(CMAKE_RELEASE_POSTFIX ${IE_RELEASE_POSTFIX}) +set(CMAKE_DEBUG_POSTFIX ${OV_DEBUG_POSTFIX}) +set(CMAKE_RELEASE_POSTFIX ${OV_RELEASE_POSTFIX}) # Support CMake multi-configuration for Visual Studio / Ninja or Xcode build if(OV_GENERATOR_MULTI_CONFIG) - set(IE_BUILD_POSTFIX $<$:${IE_DEBUG_POSTFIX}>$<$:${IE_RELEASE_POSTFIX}>) + set(OV_BUILD_POSTFIX $<$:${OV_DEBUG_POSTFIX}>$<$:${OV_RELEASE_POSTFIX}>) else() if(CMAKE_BUILD_TYPE STREQUAL "Debug") - set(IE_BUILD_POSTFIX ${IE_DEBUG_POSTFIX}) + set(OV_BUILD_POSTFIX ${OV_DEBUG_POSTFIX}) else() - set(IE_BUILD_POSTFIX ${IE_RELEASE_POSTFIX}) + set(OV_BUILD_POSTFIX ${OV_RELEASE_POSTFIX}) endif() endif() +add_definitions(-DOV_BUILD_POSTFIX=\"${OV_BUILD_POSTFIX}\") + +# for BW compatibility; removed before 2024.0 +set(IE_BUILD_POSTFIX ${OV_BUILD_POSTFIX}) add_definitions(-DIE_BUILD_POSTFIX=\"${IE_BUILD_POSTFIX}\") ov_set_if_not_defined(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FOLDER}) @@ -178,11 +189,6 @@ ov_set_if_not_defined(CMAKE_COMPILE_PDB_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FO ov_set_if_not_defined(CMAKE_PDB_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FOLDER}) ov_set_if_not_defined(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FOLDER}) -if(CPACK_GENERATOR MATCHES "^(DEB|RPM)$") - # to make sure that lib/ is created on Debian - set(CMAKE_INSTALL_PREFIX "/usr" CACHE PATH "Cmake install prefix" FORCE) -endif() - include(packaging/packaging) if(APPLE) @@ -261,7 +267,7 @@ 
include(api_validator/api_validator) include(vs_version/vs_version) include(plugins/plugins) include(frontends/frontends) -include(add_ie_target) +include(add_target_helpers) include(CMakePackageConfigHelpers) if(ENABLE_FUZZING) diff --git a/cmake/developer_package/add_ie_target.cmake b/cmake/developer_package/add_target_helpers.cmake similarity index 97% rename from cmake/developer_package/add_ie_target.cmake rename to cmake/developer_package/add_target_helpers.cmake index 2452312d82026a..c52b393d7bbe74 100644 --- a/cmake/developer_package/add_ie_target.cmake +++ b/cmake/developer_package/add_target_helpers.cmake @@ -23,7 +23,7 @@ ov_add_target( link_dependencies DEPENDENCIES dependencies - ie::important_plugin + openvino::important_plugin OBJECT_FILES object libraries DEFINES @@ -90,8 +90,7 @@ function(ov_add_target) source_group("include" FILES ${includes}) source_group("src" FILES ${sources}) - set(all_sources) - list(APPEND all_sources ${sources} ${includes} ${ARG_OBJECT_FILES}) + set(all_sources ${sources} ${includes} ${ARG_OBJECT_FILES}) # defining a target if (ARG_TYPE STREQUAL EXECUTABLE) @@ -102,7 +101,7 @@ function(ov_add_target) message(SEND_ERROR "Invalid target type ${ARG_TYPE} specified for target name ${ARG_NAME}") endif() - ieTargetLinkWholeArchive(${ARG_NAME} ${ARG_LINK_LIBRARIES_WHOLE_ARCHIVE}) + ov_target_link_whole_archive(${ARG_NAME} ${ARG_LINK_LIBRARIES_WHOLE_ARCHIVE}) if (ARG_DEFINES) target_compile_definitions(${ARG_NAME} PRIVATE ${ARG_DEFINES}) @@ -140,11 +139,6 @@ function(ov_add_target) endif() endfunction() -function(addIeTarget) - message(WARNING "'addIeTarget' is deprecated, please, use 'ov_add_target' instead") - ov_add_target(${ARGV}) -endfunction() - #[[ Wrapper function over addIeTarget, that also adds a test with the same name. 
You could use @@ -195,6 +189,13 @@ function(ov_add_test_target) EXCLUDE_FROM_ALL) endfunction() +# deprecated + +function(addIeTarget) + message(WARNING "'addIeTarget' is deprecated, please, use 'ov_add_target' instead") + ov_add_target(${ARGV}) +endfunction() + function(addIeTargetTest) message(WARNING "'addIeTargetTest' is deprecated, please, use 'ov_add_test_target' instead") ov_add_test_target(${ARGV}) diff --git a/cmake/developer_package/api_validator/api_validator.cmake b/cmake/developer_package/api_validator/api_validator.cmake index a92c766bca25a7..6749366a64db05 100644 --- a/cmake/developer_package/api_validator/api_validator.cmake +++ b/cmake/developer_package/api_validator/api_validator.cmake @@ -29,7 +29,7 @@ if(WIN32) endif() endif() -function(_ie_add_api_validator_post_build_step_recursive) +function(_ov_add_api_validator_post_build_step_recursive) cmake_parse_arguments(API_VALIDATOR "" "TARGET" "" ${ARGN}) get_target_property(LIBRARY_TYPE ${API_VALIDATOR_TARGET} TYPE) @@ -55,9 +55,9 @@ function(_ie_add_api_validator_post_build_step_recursive) continue() endif() if(TARGET "${orig_library}") - _ie_add_api_validator_post_build_step_recursive(TARGET ${orig_library}) + _ov_add_api_validator_post_build_step_recursive(TARGET ${orig_library}) else() - _ie_add_api_validator_post_build_step_recursive(TARGET ${library}) + _ov_add_api_validator_post_build_step_recursive(TARGET ${library}) endif() endif() endforeach() @@ -113,10 +113,10 @@ function(_ov_add_api_validator_post_build_step) endif() # collect targets - _ie_add_api_validator_post_build_step_recursive(TARGET ${API_VALIDATOR_TARGET}) + _ov_add_api_validator_post_build_step_recursive(TARGET ${API_VALIDATOR_TARGET}) if (API_VALIDATOR_EXTRA) foreach(target IN LISTS API_VALIDATOR_EXTRA) - _ie_add_api_validator_post_build_step_recursive(TARGET ${target}) + _ov_add_api_validator_post_build_step_recursive(TARGET ${target}) endforeach() endif() @@ -171,7 +171,7 @@ function(_ov_add_api_validator_post_build_step) -D ONECORE_API_VALIDATOR_EXCLUSION=${ONECORE_API_VALIDATOR_EXCLUSION} -D ONECORE_API_VALIDATOR_OUTPUT=${output_file} -D CMAKE_TOOLCHAIN_FILE=${CMAKE_TOOLCHAIN_FILE} - -P "${IEDevScripts_DIR}/api_validator/api_validator_run.cmake") + -P "${OpenVINODeveloperScripts_DIR}/api_validator/api_validator_run.cmake") list(APPEND byproducts_files ${output_file}) unset(target_name) @@ -191,13 +191,15 @@ function(_ov_add_api_validator_post_build_step) endfunction() # -# ie_add_api_validator_post_build_step(TARGET ) +# ov_add_api_validator_post_build_step(TARGET ) # -macro(ov_add_api_validator_post_build_step) - _ov_add_api_validator_post_build_step(${ARGV}) -endmacro() - -macro(ie_add_api_validator_post_build_step) - message(WARNING "ie_add_api_validator_post_build_step is deprecated, use ov_add_api_validator_post_build_step instead") - _ov_add_api_validator_post_build_step(${ARGV}) -endmacro() +function(ov_add_api_validator_post_build_step) + _ov_add_api_validator_post_build_step(${ARGN}) +endfunction() + +# deprecated + +function(ie_add_api_validator_post_build_step) + message(WARNING "'ie_add_api_validator_post_build_step' is deprecated, use 'ov_add_api_validator_post_build_step' instead") + _ov_add_api_validator_post_build_step(${ARGN}) +endfunction() diff --git a/cmake/developer_package/clang_format/clang_format.cmake b/cmake/developer_package/clang_format/clang_format.cmake index e8286ed2e9cd20..57319e48006938 100644 --- a/cmake/developer_package/clang_format/clang_format.cmake +++ 
b/cmake/developer_package/clang_format/clang_format.cmake @@ -88,10 +88,10 @@ function(ov_add_clang_format_target TARGET_NAME) -D "CLANG_FORMAT=${CLANG_FORMAT}" -D "INPUT_FILE=${source_file}" -D "OUTPUT_FILE=${output_file}" - -P "${IEDevScripts_DIR}/clang_format/clang_format_check.cmake" + -P "${OpenVINODeveloperScripts_DIR}/clang_format/clang_format_check.cmake" DEPENDS "${source_file}" - "${IEDevScripts_DIR}/clang_format/clang_format_check.cmake" + "${OpenVINODeveloperScripts_DIR}/clang_format/clang_format_check.cmake" COMMENT "[clang-format] ${source_file}" VERBATIM) @@ -110,10 +110,10 @@ function(ov_add_clang_format_target TARGET_NAME) -D "CLANG_FORMAT=${CLANG_FORMAT}" -D "INPUT_FILES=${all_input_sources}" -D "EXCLUDE_PATTERNS=${CLANG_FORMAT_EXCLUDE_PATTERNS}" - -P "${IEDevScripts_DIR}/clang_format/clang_format_fix.cmake" + -P "${OpenVINODeveloperScripts_DIR}/clang_format/clang_format_fix.cmake" DEPENDS "${all_input_sources}" - "${IEDevScripts_DIR}/clang_format/clang_format_fix.cmake" + "${OpenVINODeveloperScripts_DIR}/clang_format/clang_format_fix.cmake" COMMENT "[clang-format] ${TARGET_NAME}_fix" VERBATIM) diff --git a/cmake/developer_package/compile_flags/os_flags.cmake b/cmake/developer_package/compile_flags/os_flags.cmake index 0ffdd903dcb46f..7d98b40c3ce81d 100644 --- a/cmake/developer_package/compile_flags/os_flags.cmake +++ b/cmake/developer_package/compile_flags/os_flags.cmake @@ -9,31 +9,31 @@ include(CheckCXXCompilerFlag) # ov_disable_deprecated_warnings() # # Disables deprecated warnings generation in current scope (directory, function) -# Defines ie_c_cxx_deprecated varaible which contains C / C++ compiler flags +# Defines ov_c_cxx_deprecated varaible which contains C / C++ compiler flags # macro(ov_disable_deprecated_warnings) if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") - set(ie_c_cxx_deprecated "/wd4996") + set(ov_c_cxx_deprecated "/wd4996") elseif(CMAKE_CXX_COMPILER_ID STREQUAL "Intel") if(WIN32) - set(ie_c_cxx_deprecated "/Qdiag-disable:1478,1786") + set(ov_c_cxx_deprecated "/Qdiag-disable:1478,1786") else() - set(ie_c_cxx_deprecated "-diag-disable=1478,1786") + set(ov_c_cxx_deprecated "-diag-disable=1478,1786") endif() elseif(OV_COMPILER_IS_CLANG OR CMAKE_COMPILER_IS_GNUCXX) - set(ie_c_cxx_deprecated "-Wno-deprecated-declarations") + set(ov_c_cxx_deprecated "-Wno-deprecated-declarations") else() message(WARNING "Unsupported CXX compiler ${CMAKE_CXX_COMPILER_ID}") endif() - set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} ${ie_c_cxx_deprecated}") - set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} ${ie_c_cxx_deprecated}") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${ie_c_cxx_deprecated}") - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${ie_c_cxx_deprecated}") + set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} ${ov_c_cxx_deprecated}") + set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} ${ov_c_cxx_deprecated}") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${ov_c_cxx_deprecated}") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${ov_c_cxx_deprecated}") endmacro() macro(disable_deprecated_warnings) - message(WARNING "disable_deprecated_warnings is deprecated, use ov_disable_deprecated_warnings instead") + message(WARNING "'disable_deprecated_warnings' is deprecated, use 'ov_disable_deprecated_warnings' instead") ov_disable_deprecated_warnings() endmacro() @@ -41,30 +41,30 @@ endmacro() # ov_deprecated_no_errors() # # Don't threat deprecated warnings as errors in current scope (directory, function) -# Defines ie_c_cxx_deprecated_no_errors varaible which contains C / C++ compiler 
flags +# Defines ov_c_cxx_deprecated_no_errors varaible which contains C / C++ compiler flags # macro(ov_deprecated_no_errors) if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") # show 4996 only for /w4 - set(ie_c_cxx_deprecated_no_errors "/wd4996") + set(ov_c_cxx_deprecated_no_errors "/wd4996") elseif(CMAKE_CXX_COMPILER_ID STREQUAL "Intel") if(WIN32) - set(ie_c_cxx_deprecated_no_errors "/Qdiag-warning:1478,1786") + set(ov_c_cxx_deprecated_no_errors "/Qdiag-warning:1478,1786") else() - set(ie_c_cxx_deprecated_no_errors "-diag-warning=1478,1786") + set(ov_c_cxx_deprecated_no_errors "-diag-warning=1478,1786") endif() elseif(OV_COMPILER_IS_CLANG OR CMAKE_COMPILER_IS_GNUCXX) - set(ie_c_cxx_deprecated_no_errors "-Wno-error=deprecated-declarations") + set(ov_c_cxx_deprecated_no_errors "-Wno-error=deprecated-declarations") # Suppress #warning messages - set(ie_c_cxx_deprecated_no_errors "${ie_c_cxx_deprecated_no_errors} -Wno-cpp") + set(ov_c_cxx_deprecated_no_errors "${ov_c_cxx_deprecated_no_errors} -Wno-cpp") else() message(WARNING "Unsupported CXX compiler ${CMAKE_CXX_COMPILER_ID}") endif() - set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} ${ie_c_cxx_deprecated_no_errors}") - set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} ${ie_c_cxx_deprecated_no_errors}") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${ie_c_cxx_deprecated_no_errors}") - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${ie_c_cxx_deprecated_no_errors}") + set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} ${ov_c_cxx_deprecated_no_errors}") + set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} ${ov_c_cxx_deprecated_no_errors}") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${ov_c_cxx_deprecated_no_errors}") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${ov_c_cxx_deprecated_no_errors}") endmacro() # @@ -74,24 +74,24 @@ endmacro() # macro(ov_dev_package_no_errors) if(OV_COMPILER_IS_CLANG OR CMAKE_COMPILER_IS_GNUCXX) - set(ie_c_cxx_dev_no_errors "-Wno-all") + set(ov_c_cxx_dev_no_errors "-Wno-all") if(SUGGEST_OVERRIDE_SUPPORTED) - set(ie_cxx_dev_no_errors "-Wno-error=suggest-override") + set(ov_cxx_dev_no_errors "-Wno-error=suggest-override") endif() endif() - set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} ${ie_c_cxx_dev_no_errors} ${ie_cxx_dev_no_errors}") - set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} ${ie_c_cxx_dev_no_errors}") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${ie_c_cxx_dev_no_errors} ${ie_cxx_dev_no_errors}") - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${ie_c_cxx_dev_no_errors}") + set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} ${ov_c_cxx_dev_no_errors} ${ov_cxx_dev_no_errors}") + set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} ${ov_c_cxx_dev_no_errors}") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${ov_c_cxx_dev_no_errors} ${ov_cxx_dev_no_errors}") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${ov_c_cxx_dev_no_errors}") endmacro() # -# ie_sse42_optimization_flags() +# ov_sse42_optimization_flags() # # Provides SSE4.2 compilation flags depending on an OS and a compiler # -macro(ie_sse42_optimization_flags flags) +macro(ov_sse42_optimization_flags flags) if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") # No such option for MSVC 2019 elseif(CMAKE_CXX_COMPILER_ID STREQUAL "Intel") @@ -111,11 +111,11 @@ macro(ie_sse42_optimization_flags flags) endmacro() # -# ie_avx2_optimization_flags() +# ov_avx2_optimization_flags() # # Provides AVX2 compilation flags depending on an OS and a compiler # -macro(ie_avx2_optimization_flags flags) +macro(ov_avx2_optimization_flags flags) if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") set(${flags} 
/arch:AVX2) elseif(CMAKE_CXX_COMPILER_ID STREQUAL "Intel") @@ -132,12 +132,12 @@ macro(ie_avx2_optimization_flags flags) endmacro() # -# ie_avx512_optimization_flags() +# ov_avx512_optimization_flags() # # Provides common AVX512 compilation flags for AVX512F instruction set support # depending on an OS and a compiler # -macro(ie_avx512_optimization_flags flags) +macro(ov_avx512_optimization_flags flags) if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") set(${flags} /arch:AVX512) elseif(CMAKE_CXX_COMPILER_ID STREQUAL "Intel") @@ -154,9 +154,9 @@ macro(ie_avx512_optimization_flags flags) endmacro() # -# ie_arm_neon_optimization_flags() +# ov_arm_neon_optimization_flags() # -macro(ie_arm_neon_optimization_flags flags) +macro(ov_arm_neon_optimization_flags flags) if(CMAKE_CXX_COMPILER_ID STREQUAL "Intel") message(WARNING "Unsupported CXX compiler ${CMAKE_CXX_COMPILER_ID}") elseif(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") @@ -219,7 +219,7 @@ endfunction() # Enables Link Time Optimization compilation # macro(ie_enable_lto) - message(WARNING "ie_add_compiler_flags is deprecated, set INTERPROCEDURAL_OPTIMIZATION_RELEASE target property instead") + message(WARNING "'ie_enable_lto' is deprecated, set 'INTERPROCEDURAL_OPTIMIZATION_RELEASE' target property instead") set(CMAKE_INTERPROCEDURAL_OPTIMIZATION_RELEASE ON) endmacro() @@ -236,7 +236,7 @@ macro(ov_add_compiler_flags) endmacro() macro(ie_add_compiler_flags) - message(WARNING "ie_add_compiler_flags is deprecated, use ov_add_compiler_flags instead") + message(WARNING "'ie_add_compiler_flags' is deprecated, use 'ov_add_compiler_flags' instead") ov_add_compiler_flags(${ARGN}) endmacro() diff --git a/cmake/developer_package/coverage/coverage.cmake b/cmake/developer_package/coverage/coverage.cmake index 0a2f6a398967c6..f7573f10a66532 100644 --- a/cmake/developer_package/coverage/coverage.cmake +++ b/cmake/developer_package/coverage/coverage.cmake @@ -16,7 +16,7 @@ endif() set(OV_COVERAGE_REPORTS "${CMAKE_BINARY_DIR}/coverage") -set(OV_COVERAGE_SCRIPT_DIR "${IEDevScripts_DIR}/coverage") +set(OV_COVERAGE_SCRIPT_DIR "${OpenVINODeveloperScripts_DIR}/coverage") include(CMakeParseArguments) @@ -171,9 +171,9 @@ function(ov_coverage_genhtml) endfunction() # -# ie_coverage_remove(INPUT OUTPUT PATTERNS ) +# ov_coverage_remove(INPUT OUTPUT PATTERNS ) # -function(ie_coverage_remove) +function(ov_coverage_remove) cmake_parse_arguments(OV_COVERAGE "" "INPUT;OUTPUT" "PATTERNS" ${ARGN}) set(input_file "${OV_COVERAGE_REPORTS}/${OV_COVERAGE_INPUT}.info") @@ -199,9 +199,9 @@ function(ie_coverage_remove) endfunction() # -# ie_coverage_merge(OUTPUT INPUTS ) +# ov_coverage_merge(OUTPUT INPUTS ) # -function(ie_coverage_merge) +function(ov_coverage_merge) cmake_parse_arguments(OV_COVERAGE "" "OUTPUT" "INPUTS" ${ARGN}) set(output_file "${OV_COVERAGE_REPORTS}/${OV_COVERAGE_OUTPUT}.info") @@ -227,6 +227,8 @@ function(ie_coverage_merge) add_dependencies(ov_coverage_${OV_COVERAGE_OUTPUT}_info ${dependencies}) endfunction() +# deprecated + if(NOT TARGET ie_coverage) add_custom_target(ie_coverage) set_target_properties(ie_coverage PROPERTIES FOLDER coverage) diff --git a/cmake/developer_package/cpplint/cpplint.cmake b/cmake/developer_package/cpplint/cpplint.cmake index e22c8f2e034b96..aa2b4147e0e685 100644 --- a/cmake/developer_package/cpplint/cpplint.cmake +++ b/cmake/developer_package/cpplint/cpplint.cmake @@ -69,17 +69,17 @@ function(add_cpplint_target TARGET_NAME) COMMAND "${CMAKE_COMMAND}" -D "Python3_EXECUTABLE=${Python3_EXECUTABLE}" - -D 
"CPPLINT_SCRIPT=${IEDevScripts_DIR}/cpplint/cpplint.py" + -D "CPPLINT_SCRIPT=${OpenVINODeveloperScripts_DIR}/cpplint/cpplint.py" -D "INPUT_FILE=${source_file}" -D "OUTPUT_FILE=${output_file}" -D "WORKING_DIRECTORY=${CMAKE_CURRENT_SOURCE_DIR}" -D "SKIP_RETURN_CODE=${ENABLE_CPPLINT_REPORT}" -D "CUSTOM_FILTER=${custom_filter}" - -P "${IEDevScripts_DIR}/cpplint/cpplint_run.cmake" + -P "${OpenVINODeveloperScripts_DIR}/cpplint/cpplint_run.cmake" DEPENDS "${source_file}" - "${IEDevScripts_DIR}/cpplint/cpplint.py" - "${IEDevScripts_DIR}/cpplint/cpplint_run.cmake" + "${OpenVINODeveloperScripts_DIR}/cpplint/cpplint.py" + "${OpenVINODeveloperScripts_DIR}/cpplint/cpplint_run.cmake" COMMENT "[cpplint] ${source_file_relative_root}" VERBATIM) diff --git a/cmake/developer_package/cross_compile/cross_compiled_func.cmake b/cmake/developer_package/cross_compile/cross_compiled_func.cmake index ff68280a687bd0..d82d6a73098b6c 100644 --- a/cmake/developer_package/cross_compile/cross_compiled_func.cmake +++ b/cmake/developer_package/cross_compile/cross_compiled_func.cmake @@ -19,9 +19,9 @@ set(_DEFINE_AVX2 "HAVE_AVX2" ${_DEFINE_AVX}) set(_DEFINE_AVX512F "HAVE_AVX512F" ${_DEFINE_AVX2}) ## Arch specific compile options -ie_avx512_optimization_flags(_FLAGS_AVX512F) -ie_avx2_optimization_flags (_FLAGS_AVX2) -ie_sse42_optimization_flags (_FLAGS_SSE42) +ov_avx512_optimization_flags(_FLAGS_AVX512F) +ov_avx2_optimization_flags (_FLAGS_AVX2) +ov_sse42_optimization_flags (_FLAGS_SSE42) set(_FLAGS_AVX "") ## TBD is not defined for IE project yet set(_FLAGS_ANY "") ## diff --git a/cmake/developer_package/faster_build.cmake b/cmake/developer_package/faster_build.cmake index b0f6a6c8b7b3ef..f70274f465070c 100644 --- a/cmake/developer_package/faster_build.cmake +++ b/cmake/developer_package/faster_build.cmake @@ -20,6 +20,8 @@ function(ov_build_target_faster TARGET_NAME) endif() endfunction() +# deprecated + function(ie_faster_build) message(WARNING "ie_faster_build is deprecated, use ov_build_target_faster instead") ov_build_target_faster(${ARGV}) diff --git a/cmake/developer_package/features.cmake b/cmake/developer_package/features.cmake index bda6eb353bb022..96e927187ccf70 100644 --- a/cmake/developer_package/features.cmake +++ b/cmake/developer_package/features.cmake @@ -6,19 +6,19 @@ include(options) include(target_flags) set (CPACK_GENERATOR "TGZ" CACHE STRING "Cpack generator for OpenVINO") -list (APPEND IE_OPTIONS CPACK_GENERATOR) +list (APPEND OV_OPTIONS CPACK_GENERATOR) # FIXME: there are compiler failures with LTO and Cross-Compile toolchains. 
Disabling for now, but # this must be addressed in a proper way -ie_dependent_option (ENABLE_LTO "Enable Link Time Optimization" OFF +ov_dependent_option (ENABLE_LTO "Enable Link Time Optimization" OFF "LINUX;EMSCRIPTEN OR NOT CMAKE_CROSSCOMPILING;CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 4.9" OFF) -ie_option (OS_FOLDER "create OS dedicated folder in output" OFF) +ov_option (OS_FOLDER "create OS dedicated folder in output" OFF) if(OV_GENERATOR_MULTI_CONFIG) - ie_option(USE_BUILD_TYPE_SUBFOLDER "Create dedicated sub-folder per build type for output binaries" OFF) + ov_option(USE_BUILD_TYPE_SUBFOLDER "Create dedicated sub-folder per build type for output binaries" OFF) else() - ie_option(USE_BUILD_TYPE_SUBFOLDER "Create dedicated sub-folder per build type for output binaries" ON) + ov_option(USE_BUILD_TYPE_SUBFOLDER "Create dedicated sub-folder per build type for output binaries" ON) endif() if(DEFINED ENV{CI_BUILD_NUMBER} AND NOT (WIN32 OR CMAKE_CROSSCOMPILING)) @@ -27,37 +27,37 @@ else() set(CMAKE_COMPILE_WARNING_AS_ERROR_DEFAULT OFF) endif() -ie_option (CMAKE_COMPILE_WARNING_AS_ERROR "Enable warnings as errors" ${CMAKE_COMPILE_WARNING_AS_ERROR_DEFAULT}) +ov_option (CMAKE_COMPILE_WARNING_AS_ERROR "Enable warnings as errors" ${CMAKE_COMPILE_WARNING_AS_ERROR_DEFAULT}) -ie_dependent_option (ENABLE_QSPECTRE "Enable Qspectre mitigation" OFF "CMAKE_CXX_COMPILER_ID STREQUAL MSVC" OFF) +ov_dependent_option (ENABLE_QSPECTRE "Enable Qspectre mitigation" OFF "CMAKE_CXX_COMPILER_ID STREQUAL MSVC" OFF) -ie_dependent_option (ENABLE_INTEGRITYCHECK "build DLLs with /INTEGRITYCHECK flag" OFF "CMAKE_CXX_COMPILER_ID STREQUAL MSVC" OFF) +ov_dependent_option (ENABLE_INTEGRITYCHECK "build DLLs with /INTEGRITYCHECK flag" OFF "CMAKE_CXX_COMPILER_ID STREQUAL MSVC" OFF) -ie_option (ENABLE_SANITIZER "enable checking memory errors via AddressSanitizer" OFF) +ov_option (ENABLE_SANITIZER "enable checking memory errors via AddressSanitizer" OFF) -ie_option (ENABLE_UB_SANITIZER "enable UndefinedBahavior sanitizer" OFF) +ov_option (ENABLE_UB_SANITIZER "enable UndefinedBahavior sanitizer" OFF) -ie_option (ENABLE_THREAD_SANITIZER "enable checking data races via ThreadSanitizer" OFF) +ov_option (ENABLE_THREAD_SANITIZER "enable checking data races via ThreadSanitizer" OFF) -ie_dependent_option (ENABLE_COVERAGE "enable code coverage" OFF "CMAKE_COMPILER_IS_GNUCXX OR OV_COMPILER_IS_CLANG" OFF) +ov_dependent_option (ENABLE_COVERAGE "enable code coverage" OFF "CMAKE_COMPILER_IS_GNUCXX OR OV_COMPILER_IS_CLANG" OFF) # Defines CPU capabilities -ie_dependent_option (ENABLE_SSE42 "Enable SSE4.2 optimizations" ON "X86_64 OR (X86 AND NOT EMSCRIPTEN)" OFF) +ov_dependent_option (ENABLE_SSE42 "Enable SSE4.2 optimizations" ON "X86_64 OR (X86 AND NOT EMSCRIPTEN)" OFF) -ie_dependent_option (ENABLE_AVX2 "Enable AVX2 optimizations" ON "X86_64 OR (X86 AND NOT EMSCRIPTEN)" OFF) +ov_dependent_option (ENABLE_AVX2 "Enable AVX2 optimizations" ON "X86_64 OR (X86 AND NOT EMSCRIPTEN)" OFF) -ie_dependent_option (ENABLE_AVX512F "Enable AVX512 optimizations" ON "X86_64 OR (X86 AND NOT EMSCRIPTEN)" OFF) +ov_dependent_option (ENABLE_AVX512F "Enable AVX512 optimizations" ON "X86_64 OR (X86 AND NOT EMSCRIPTEN)" OFF) # Type of build, we add this as an explicit option to default it to ON get_property(BUILD_SHARED_LIBS_DEFAULT GLOBAL PROPERTY TARGET_SUPPORTS_SHARED_LIBS) -ie_option (BUILD_SHARED_LIBS "Build as a shared library" ${BUILD_SHARED_LIBS_DEFAULT}) +ov_option (BUILD_SHARED_LIBS "Build as a shared library" ${BUILD_SHARED_LIBS_DEFAULT}) # Android does not 
support SOVERSION # see https://www.opengis.ch/2011/11/23/creating-non-versioned-shared-libraries-for-android/ -ie_dependent_option (ENABLE_LIBRARY_VERSIONING "Enable libraries versioning" ON "NOT WIN32;NOT ANDROID;BUILD_SHARED_LIBS" OFF) +ov_dependent_option (ENABLE_LIBRARY_VERSIONING "Enable libraries versioning" ON "NOT WIN32;NOT ANDROID;BUILD_SHARED_LIBS" OFF) -ie_dependent_option (ENABLE_FASTER_BUILD "Enable build features (PCH, UNITY) to speed up build time" OFF "CMAKE_VERSION VERSION_GREATER_EQUAL 3.16" OFF) +ov_dependent_option (ENABLE_FASTER_BUILD "Enable build features (PCH, UNITY) to speed up build time" OFF "CMAKE_VERSION VERSION_GREATER_EQUAL 3.16" OFF) if(CMAKE_CROSSCOMPILING OR WIN32) set(STYLE_CHECKS_DEFAULT OFF) @@ -65,22 +65,22 @@ else() set(STYLE_CHECKS_DEFAULT ON) endif() -ie_option (ENABLE_CPPLINT "Enable cpplint checks during the build" ${STYLE_CHECKS_DEFAULT}) +ov_option (ENABLE_CPPLINT "Enable cpplint checks during the build" ${STYLE_CHECKS_DEFAULT}) -ie_dependent_option (ENABLE_CPPLINT_REPORT "Build cpplint report instead of failing the build" OFF "ENABLE_CPPLINT" OFF) +ov_dependent_option (ENABLE_CPPLINT_REPORT "Build cpplint report instead of failing the build" OFF "ENABLE_CPPLINT" OFF) -ie_option (ENABLE_CLANG_FORMAT "Enable clang-format checks during the build" ${STYLE_CHECKS_DEFAULT}) +ov_option (ENABLE_CLANG_FORMAT "Enable clang-format checks during the build" ${STYLE_CHECKS_DEFAULT}) -ie_option (ENABLE_NCC_STYLE "Enable ncc style check" ${STYLE_CHECKS_DEFAULT}) +ov_option (ENABLE_NCC_STYLE "Enable ncc style check" ${STYLE_CHECKS_DEFAULT}) -ie_option (ENABLE_UNSAFE_LOCATIONS "skip check for MD5 for dependency" OFF) +ov_option (ENABLE_UNSAFE_LOCATIONS "skip check for MD5 for dependency" OFF) if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC" AND MSVC_VERSION GREATER_EQUAL 1930) # Visual Studio 2022: 1930-1939 = VS 17.0 (v143 toolset) set(_msvc_version_2022 ON) endif() -ie_dependent_option (ENABLE_FUZZING "instrument build for fuzzing" OFF "OV_COMPILER_IS_CLANG OR _msvc_version_2022" OFF) +ov_dependent_option (ENABLE_FUZZING "instrument build for fuzzing" OFF "OV_COMPILER_IS_CLANG OR _msvc_version_2022" OFF) # # Check features diff --git a/cmake/developer_package/frontends/frontends.cmake b/cmake/developer_package/frontends/frontends.cmake index 78e62101670425..a86c57c6c87845 100644 --- a/cmake/developer_package/frontends/frontends.cmake +++ b/cmake/developer_package/frontends/frontends.cmake @@ -38,7 +38,7 @@ function(ov_generate_frontends_hpp) ov_target_link_frontends(openvino) set(ov_frontends_hpp "${CMAKE_BINARY_DIR}/src/frontends/common/src/ov_frontends.hpp") - set(frontends_hpp_in "${IEDevScripts_DIR}/frontends/ov_frontends.hpp.in") + set(frontends_hpp_in "${OpenVINODeveloperScripts_DIR}/frontends/ov_frontends.hpp.in") add_custom_command(OUTPUT "${ov_frontends_hpp}" COMMAND @@ -46,10 +46,10 @@ function(ov_generate_frontends_hpp) -D "OV_FRONTENDS_HPP_HEADER_IN=${frontends_hpp_in}" -D "OV_FRONTENDS_HPP_HEADER=${ov_frontends_hpp}" -D "FRONTEND_NAMES=${FRONTEND_NAMES}" - -P "${IEDevScripts_DIR}/frontends/create_frontends_hpp.cmake" + -P "${OpenVINODeveloperScripts_DIR}/frontends/create_frontends_hpp.cmake" DEPENDS "${frontends_hpp_in}" - "${IEDevScripts_DIR}/frontends/create_frontends_hpp.cmake" + "${OpenVINODeveloperScripts_DIR}/frontends/create_frontends_hpp.cmake" COMMENT "Generate ov_frontends.hpp for static build" VERBATIM) diff --git a/cmake/developer_package/ncc_naming_style/ncc_naming_style.cmake b/cmake/developer_package/ncc_naming_style/ncc_naming_style.cmake 
index cf2f447565ae43..ab6d49552b3156 100644 --- a/cmake/developer_package/ncc_naming_style/ncc_naming_style.cmake +++ b/cmake/developer_package/ncc_naming_style/ncc_naming_style.cmake @@ -6,7 +6,7 @@ if(NOT COMMAND ov_check_pip_packages) message(FATAL_ERROR "Internal error: ncc_naming_style.cmake must be included after ov_check_pip_packages") endif() -set(ncc_style_dir "${IEDevScripts_DIR}/ncc_naming_style") +set(ncc_style_dir "${OpenVINODeveloperScripts_DIR}/ncc_naming_style") set(ncc_style_bin_dir "${CMAKE_CURRENT_BINARY_DIR}/ncc_naming_style") # find python3 diff --git a/cmake/developer_package/options.cmake b/cmake/developer_package/options.cmake index 08593603589ae2..75b9c886894631 100644 --- a/cmake/developer_package/options.cmake +++ b/cmake/developer_package/options.cmake @@ -2,46 +2,47 @@ # SPDX-License-Identifier: Apache-2.0 # -# Usage: ie_option( "description" [IF ]) - include (CMakeDependentOption) if(POLICY CMP0127) cmake_policy(SET CMP0127 NEW) endif() -macro (ie_option variable description value) +macro(ov_option variable description value) option(${variable} "${description}" ${value}) + list(APPEND OV_OPTIONS ${variable}) list(APPEND IE_OPTIONS ${variable}) endmacro() -# Usage: ov_option( "description" [IF ]) -macro (ov_option variable description value) - ie_option(${variable} "${description}" ${value}) -endmacro() - -macro (ie_dependent_option variable description def_value condition fallback_value) +macro(ov_dependent_option variable description def_value condition fallback_value) cmake_dependent_option(${variable} "${description}" ${def_value} "${condition}" ${fallback_value}) + list(APPEND OV_OPTIONS ${variable}) list(APPEND IE_OPTIONS ${variable}) endmacro() -macro (ie_option_enum variable description value) +macro(ov_option_enum variable description value) set(OPTIONS) set(ONE_VALUE_ARGS) set(MULTI_VALUE_ARGS ALLOWED_VALUES) - cmake_parse_arguments(IE_OPTION_ENUM "${OPTIONS}" "${ONE_VALUE_ARGS}" "${MULTI_VALUE_ARGS}" ${ARGN}) + cmake_parse_arguments(OPTION_ENUM "${OPTIONS}" "${ONE_VALUE_ARGS}" "${MULTI_VALUE_ARGS}" ${ARGN}) - if(NOT ${value} IN_LIST IE_OPTION_ENUM_ALLOWED_VALUES) - message(FATAL_ERROR "variable must be one of ${IE_OPTION_ENUM_ALLOWED_VALUES}") + if(NOT ${value} IN_LIST OPTION_ENUM_ALLOWED_VALUES) + message(FATAL_ERROR "Internal error: variable must be one of ${OPTION_ENUM_ALLOWED_VALUES}") endif() + list(APPEND OV_OPTIONS ${variable}) list(APPEND IE_OPTIONS ${variable}) set(${variable} ${value} CACHE STRING "${description}") - set_property(CACHE ${variable} PROPERTY STRINGS ${IE_OPTION_ENUM_ALLOWED_VALUES}) + set_property(CACHE ${variable} PROPERTY STRINGS ${OPTION_ENUM_ALLOWED_VALUES}) + + unset(OPTIONS) + unset(ONE_VALUE_ARGS) + unset(MULTI_VALUE_ARGS) + unset(OPTION_ENUM_ALLOWED_VALUES) endmacro() -function (print_enabled_features) +function (ov_print_enabled_features) if(NOT COMMAND set_ci_build_number) message(FATAL_ERROR "CI_BUILD_NUMBER is not set yet") endif() @@ -49,8 +50,25 @@ function (print_enabled_features) message(STATUS "OpenVINO Runtime enabled features: ") message(STATUS "") message(STATUS " CI_BUILD_NUMBER: ${CI_BUILD_NUMBER}") - foreach(_var ${IE_OPTIONS}) + foreach(_var IN LISTS OV_OPTIONS) message(STATUS " ${_var} = ${${_var}}") endforeach() message(STATUS "") endfunction() + +# deprecated + +macro (ie_option variable description value) + message(WARNING "'ie_option' is deprecated, please, use 'ov_option' instead") + ov_option(${variable} "${description}" ${value}) +endmacro() + +macro(ie_dependent_option variable description 
def_value condition fallback_value) + message(WARNING "'ie_dependent_option' is deprecated, please, use 'ov_dependent_option' instead") + ov_dependent_option(${variable} "${description}" ${def_value} "${condition}" ${fallback_value}) +endmacro() + +function(print_enabled_features) + message(WARNING "'print_enabled_features' is deprecated, please, use 'ov_print_enabled_features' instead") + ov_print_enabled_features() +endfunction() diff --git a/cmake/developer_package/packaging/archive.cmake b/cmake/developer_package/packaging/archive.cmake index 07f29826e4c491..a3cc7db096754e 100644 --- a/cmake/developer_package/packaging/archive.cmake +++ b/cmake/developer_package/packaging/archive.cmake @@ -44,11 +44,6 @@ macro(ov_archive_cpack_set_dirs) set(OV_CPACK_ARCHIVEDIR runtime/lib/${ARCH_FOLDER}) endif() set(OV_CPACK_PLUGINSDIR ${OV_CPACK_RUNTIMEDIR}) - - # for BW compatibility - set(IE_CPACK_LIBRARY_PATH ${OV_CPACK_LIBRARYDIR}) - set(IE_CPACK_RUNTIME_PATH ${OV_CPACK_RUNTIMEDIR}) - set(IE_CPACK_ARCHIVE_PATH ${OV_CPACK_ARCHIVEDIR}) endmacro() ov_archive_cpack_set_dirs() diff --git a/cmake/developer_package/packaging/common-libraries.cmake b/cmake/developer_package/packaging/common-libraries.cmake index 85d148db9f08f0..fea1bac83dc7d6 100644 --- a/cmake/developer_package/packaging/common-libraries.cmake +++ b/cmake/developer_package/packaging/common-libraries.cmake @@ -47,11 +47,6 @@ macro(ov_common_libraries_cpack_set_dirs) # skipped during common libraries packaging set(OV_CPACK_WHEELSDIR "tools") - - # for BW compatibility - set(IE_CPACK_LIBRARY_PATH ${OV_CPACK_LIBRARYDIR}) - set(IE_CPACK_RUNTIME_PATH ${OV_CPACK_RUNTIMEDIR}) - set(IE_CPACK_ARCHIVE_PATH ${OV_CPACK_ARCHIVEDIR}) endmacro() ov_common_libraries_cpack_set_dirs() diff --git a/cmake/developer_package/packaging/debian/debian.cmake b/cmake/developer_package/packaging/debian/debian.cmake index c77d680d9409fe..38cd649ad41cc3 100644 --- a/cmake/developer_package/packaging/debian/debian.cmake +++ b/cmake/developer_package/packaging/debian/debian.cmake @@ -45,11 +45,6 @@ macro(ov_debian_cpack_set_dirs) # skipped during debian packaging set(OV_CPACK_WHEELSDIR "tools") - - # for BW compatibility - set(IE_CPACK_LIBRARY_PATH ${OV_CPACK_RUNTIMEDIR}) - set(IE_CPACK_RUNTIME_PATH ${OV_CPACK_RUNTIMEDIR}) - set(IE_CPACK_ARCHIVE_PATH ${OV_CPACK_ARCHIVEDIR}) endmacro() ov_debian_cpack_set_dirs() @@ -134,7 +129,9 @@ macro(ov_debian_specific_settings) # homepage set(CPACK_DEBIAN_PACKAGE_HOMEPAGE "https://docs.openvino.ai/") # use lintian to check packages in post-build step - set(CPACK_POST_BUILD_SCRIPTS "${IEDevScripts_DIR}/packaging/debian/post_build.cmake") + set(CPACK_POST_BUILD_SCRIPTS "${OpenVINODeveloperScripts_DIR}/packaging/debian/post_build.cmake") + # to make sure that lib/ is created on Debian + set(CMAKE_INSTALL_PREFIX "/usr" CACHE PATH "Cmake install prefix" FORCE) # enable for debug cpack run if(NOT DEFINED CPACK_DEBIAN_PACKAGE_DEBUG) set(CPACK_DEBIAN_PACKAGE_DEBUG OFF) diff --git a/cmake/developer_package/packaging/nsis.cmake b/cmake/developer_package/packaging/nsis.cmake index ac0aeb1f592349..901e34f97820bb 100644 --- a/cmake/developer_package/packaging/nsis.cmake +++ b/cmake/developer_package/packaging/nsis.cmake @@ -83,11 +83,6 @@ macro(ov_archive_cpack_set_dirs) set(OV_CPACK_ARCHIVEDIR runtime/lib/${ARCH_FOLDER}) endif() set(OV_CPACK_PLUGINSDIR ${OV_CPACK_RUNTIMEDIR}) - - # for BW compatibility - set(IE_CPACK_LIBRARY_PATH ${OV_CPACK_LIBRARYDIR}) - set(IE_CPACK_RUNTIME_PATH ${OV_CPACK_RUNTIMEDIR}) - set(IE_CPACK_ARCHIVE_PATH 
${OV_CPACK_ARCHIVEDIR}) endmacro() ov_nsis_cpack_set_dirs() diff --git a/cmake/developer_package/packaging/packaging.cmake b/cmake/developer_package/packaging/packaging.cmake index ae2d4d643211d6..2b6a54473233a8 100644 --- a/cmake/developer_package/packaging/packaging.cmake +++ b/cmake/developer_package/packaging/packaging.cmake @@ -49,22 +49,22 @@ endfunction() # Wraps original `cpack_add_component` and adds component to internal IE list # function(ov_cpack_add_component name) - if(NOT ${name} IN_LIST IE_CPACK_COMPONENTS_ALL) + if(NOT ${name} IN_LIST OV_CPACK_COMPONENTS_ALL) cpack_add_component(${name} ${ARGN}) # need to store informarion about cpack_add_component arguments in CMakeCache.txt # to restore it later set(_${name}_cpack_component_args "${ARGN}" CACHE INTERNAL "Argument for cpack_add_component for ${name} cpack component" FORCE) - list(APPEND IE_CPACK_COMPONENTS_ALL ${name}) - set(IE_CPACK_COMPONENTS_ALL "${IE_CPACK_COMPONENTS_ALL}" CACHE INTERNAL "" FORCE) + list(APPEND OV_CPACK_COMPONENTS_ALL ${name}) + set(OV_CPACK_COMPONENTS_ALL "${OV_CPACK_COMPONENTS_ALL}" CACHE INTERNAL "" FORCE) endif() endfunction() -foreach(comp IN LISTS IE_CPACK_COMPONENTS_ALL) +foreach(comp IN LISTS OV_CPACK_COMPONENTS_ALL) unset(_${comp}_cpack_component_args) endforeach() -unset(IE_CPACK_COMPONENTS_ALL CACHE) +unset(OV_CPACK_COMPONENTS_ALL CACHE) # create `tests` component if(ENABLE_TESTS) @@ -164,7 +164,7 @@ elseif(CPACK_GENERATOR MATCHES "^(7Z|TBZ2|TGZ|TXZ|TZ|TZST|ZIP)$") include(packaging/archive) endif() -macro(ie_cpack) +macro(ov_cpack) set(CPACK_SOURCE_GENERATOR "") # not used set(CPACK_PACKAGE_DESCRIPTION_SUMMARY "OpenVINO™ Toolkit") set(CPACK_COMPONENT_UNSPECIFIED_REQUIRED OFF) @@ -223,3 +223,10 @@ macro(ie_cpack) include(CPack) endmacro() + +# deprecated + +macro(ie_cpack) + message(WARNING "'ie_cpack' is deprecated. 
Please, use 'ov_cpack'") + ov_cpack(${ARGV}) +endmacro() diff --git a/cmake/developer_package/packaging/rpm/rpm.cmake b/cmake/developer_package/packaging/rpm/rpm.cmake index 94efa5955c9e0e..9660226e696582 100644 --- a/cmake/developer_package/packaging/rpm/rpm.cmake +++ b/cmake/developer_package/packaging/rpm/rpm.cmake @@ -36,11 +36,6 @@ macro(ov_rpm_cpack_set_dirs) # skipped during rpm packaging set(OV_CPACK_WHEELSDIR "tools") - - # for BW compatibility - set(IE_CPACK_LIBRARY_PATH ${OV_CPACK_LIBRARYDIR}) - set(IE_CPACK_RUNTIME_PATH ${OV_CPACK_RUNTIMEDIR}) - set(IE_CPACK_ARCHIVE_PATH ${OV_CPACK_ARCHIVEDIR}) endmacro() ov_rpm_cpack_set_dirs() @@ -128,7 +123,7 @@ macro(ov_rpm_specific_settings) # TODO: fix "error: bad date in %changelog" # set(CPACK_RPM_CHANGELOG_FILE "${OpenVINO_SOURCE_DIR}/cmake/developer_package/packaging/rpm/changelog") # use rpmlint to check packages in post-build step - set(CPACK_POST_BUILD_SCRIPTS "${IEDevScripts_DIR}/packaging/rpm/post_build.cmake") + set(CPACK_POST_BUILD_SCRIPTS "${OpenVINODeveloperScripts_DIR}/packaging/rpm/post_build.cmake") # enable for debug cpack run ov_set_if_not_defined(CPACK_RPM_PACKAGE_DEBUG OFF) diff --git a/cmake/developer_package/plugins/plugins.cmake b/cmake/developer_package/plugins/plugins.cmake index 4ab78f5cfabd00..a8ba97ad9fa27d 100644 --- a/cmake/developer_package/plugins/plugins.cmake +++ b/cmake/developer_package/plugins/plugins.cmake @@ -8,7 +8,7 @@ set(PLUGIN_FILES "" CACHE INTERNAL "") function(ov_plugin_get_file_name target_name library_name) set(LIB_PREFIX "${CMAKE_SHARED_MODULE_PREFIX}") - set(LIB_SUFFIX "${IE_BUILD_POSTFIX}${CMAKE_SHARED_MODULE_SUFFIX}") + set(LIB_SUFFIX "${OV_BUILD_POSTFIX}${CMAKE_SHARED_MODULE_SUFFIX}") get_target_property(LIB_NAME ${target_name} OUTPUT_NAME) if (LIB_NAME STREQUAL "LIB_NAME-NOTFOUND") @@ -168,6 +168,7 @@ function(ov_add_plugin) endfunction() function(ie_add_plugin) + message(WARNING "'ie_add_plugin' is deprecated. Please, use 'ov_add_plugin'") ov_add_plugin(${ARGN}) endfunction() @@ -203,7 +204,7 @@ macro(ov_register_in_plugins_xml) -D "OV_CONFIG_OUTPUT_FILE=${config_output_file}" -D "OV_PLUGIN_NAME=${device_name}" -D "OV_CONFIGS_DIR=${CMAKE_BINARY_DIR}/plugins" - -P "${IEDevScripts_DIR}/plugins/unregister_plugin_cmake.cmake" + -P "${OpenVINODeveloperScripts_DIR}/plugins/unregister_plugin_cmake.cmake" COMMENT "Remove ${device_name} from the plugins.xml file" VERBATIM) @@ -232,7 +233,7 @@ macro(ov_register_in_plugins_xml) -D "OV_DEVICE_NAME=${device_name}" -D "OV_PLUGIN_PROPERTIES=${${device_name}_CONFIG}" -D "OV_PLUGIN_LIBRARY_NAME=${library_name}" - -P "${IEDevScripts_DIR}/plugins/create_plugin_file.cmake" + -P "${OpenVINODeveloperScripts_DIR}/plugins/create_plugin_file.cmake" COMMENT "Register ${device_name} device as ${library_name}" VERBATIM) @@ -247,7 +248,7 @@ macro(ov_register_in_plugins_xml) -D "CMAKE_SHARED_MODULE_PREFIX=${CMAKE_SHARED_MODULE_PREFIX}" -D "OV_CONFIG_OUTPUT_FILE=${config_output_file}" -D "OV_CONFIGS_DIR=${CMAKE_BINARY_DIR}/plugins" - -P "${IEDevScripts_DIR}/plugins/register_plugin_cmake.cmake" + -P "${OpenVINODeveloperScripts_DIR}/plugins/register_plugin_cmake.cmake" COMMENT "Registering plugins to plugins.xml config file" VERBATIM) @@ -266,6 +267,7 @@ endmacro() # ie_register_plugins() # macro(ie_register_plugins) + message(WARNING "'ie_register_plugins' is deprecated. 
Please, use 'ov_register_plugins'") ov_register_plugins(${ARGN}) endmacro() @@ -346,7 +348,7 @@ function(ov_generate_plugins_hpp) else() set(ov_plugins_hpp "${CMAKE_BINARY_DIR}/src/inference/ov_plugins.hpp") endif() - set(plugins_hpp_in "${IEDevScripts_DIR}/plugins/plugins.hpp.in") + set(plugins_hpp_in "${OpenVINODeveloperScripts_DIR}/plugins/plugins.hpp.in") add_custom_command(OUTPUT "${ov_plugins_hpp}" COMMAND @@ -357,10 +359,10 @@ function(ov_generate_plugins_hpp) -D "OV_PLUGINS_HPP_HEADER=${ov_plugins_hpp}" ${device_configs} ${as_extension} - -P "${IEDevScripts_DIR}/plugins/create_plugins_hpp.cmake" + -P "${OpenVINODeveloperScripts_DIR}/plugins/create_plugins_hpp.cmake" DEPENDS "${plugins_hpp_in}" - "${IEDevScripts_DIR}/plugins/create_plugins_hpp.cmake" + "${OpenVINODeveloperScripts_DIR}/plugins/create_plugins_hpp.cmake" COMMENT "Generate ov_plugins.hpp" VERBATIM) diff --git a/cmake/developer_package/plugins/unregister_plugin_cmake.cmake b/cmake/developer_package/plugins/unregister_plugin_cmake.cmake index 11bb3b3822f6be..16543ad39b3925 100644 --- a/cmake/developer_package/plugins/unregister_plugin_cmake.cmake +++ b/cmake/developer_package/plugins/unregister_plugin_cmake.cmake @@ -7,7 +7,7 @@ if(NOT EXISTS "${OV_CONFIG_OUTPUT_FILE}") endif() # remove plugin file -file(REMOVE "${OV_CONFIGS_DIR}/${IE_PLUGIN_NAME}.xml") +file(REMOVE "${OV_CONFIGS_DIR}/${OV_PLUGIN_NAME}.xml") # remove plugin set(newContent "") @@ -15,7 +15,7 @@ file(STRINGS "${OV_CONFIG_OUTPUT_FILE}" content) set(skip_plugin OFF) foreach(line IN LISTS content) - if("${line}" MATCHES "name=\"${IE_PLUGIN_NAME}\"") + if("${line}" MATCHES "name=\"${OV_PLUGIN_NAME}\"") set(skip_plugin ON) endif() diff --git a/cmake/developer_package/python_requirements.cmake b/cmake/developer_package/python_requirements.cmake index 4d031f22c8a4a6..767f130668fea1 100644 --- a/cmake/developer_package/python_requirements.cmake +++ b/cmake/developer_package/python_requirements.cmake @@ -103,7 +103,7 @@ function(ov_check_pip_packages) from check_python_requirements import check_python_requirements ; check_python_requirements('${ARG_REQUIREMENTS_FILE}') ; " - WORKING_DIRECTORY "${IEDevScripts_DIR}" + WORKING_DIRECTORY "${OpenVINODeveloperScripts_DIR}" RESULT_VARIABLE EXIT_CODE OUTPUT_VARIABLE OUTPUT_TEXT ERROR_VARIABLE ERROR_TEXT) diff --git a/cmake/developer_package/shellcheck/shellcheck.cmake b/cmake/developer_package/shellcheck/shellcheck.cmake index 3b18ff02abf359..469a751e04df01 100644 --- a/cmake/developer_package/shellcheck/shellcheck.cmake +++ b/cmake/developer_package/shellcheck/shellcheck.cmake @@ -17,20 +17,20 @@ if(shellcheck_PROGRAM) endif() endif() -function(ie_shellcheck_process) +function(ov_shellcheck_process) if(NOT shellcheck_PROGRAM) message(WARNING "shellcheck tool is not found") return() endif() - cmake_parse_arguments(IE_SHELLCHECK "" "DIRECTORY" "SKIP" ${ARGN}) + cmake_parse_arguments(SHELLCHECK "" "DIRECTORY" "SKIP" ${ARGN}) - set(IE_SHELLCHECK_SCRIPT "${IEDevScripts_DIR}/shellcheck/shellcheck_process.cmake") - file(GLOB_RECURSE scripts "${IE_SHELLCHECK_DIRECTORY}/*.sh") + set(SHELLCHECK_SCRIPT "${OpenVINODeveloperScripts_DIR}/shellcheck/shellcheck_process.cmake") + file(GLOB_RECURSE scripts "${SHELLCHECK_DIRECTORY}/*.sh") foreach(script IN LISTS scripts) # check if we need to skip scripts unset(skip_script) - foreach(skip_directory IN LISTS IE_SHELLCHECK_SKIP) + foreach(skip_directory IN LISTS SHELLCHECK_SKIP) if(script MATCHES "${skip_directory}/*") set(skip_script ON) endif() @@ -39,21 +39,21 @@ 
function(ie_shellcheck_process) continue() endif() - string(REPLACE "${IE_SHELLCHECK_DIRECTORY}" "${CMAKE_BINARY_DIR}/shellcheck" output_file ${script}) + string(REPLACE "${SHELLCHECK_DIRECTORY}" "${CMAKE_BINARY_DIR}/shellcheck" output_file ${script}) set(output_file "${output_file}.txt") get_filename_component(script_name "${script}" NAME) add_custom_command(OUTPUT ${output_file} COMMAND ${CMAKE_COMMAND} - -D IE_SHELLCHECK_PROGRAM=${shellcheck_PROGRAM} - -D IE_SHELL_SCRIPT=${script} - -D IE_SHELLCHECK_OUTPUT=${output_file} - -P ${IE_SHELLCHECK_SCRIPT} - DEPENDS ${script} ${IE_SHELLCHECK_SCRIPT} + -D SHELLCHECK_PROGRAM=${shellcheck_PROGRAM} + -D SHELL_SCRIPT=${script} + -D SHELLCHECK_OUTPUT=${output_file} + -P ${SHELLCHECK_SCRIPT} + DEPENDS ${script} ${SHELLCHECK_SCRIPT} COMMENT "Check script ${script_name}" VERBATIM) list(APPEND outputs ${output_file}) endforeach() - add_custom_target(ie_shellcheck DEPENDS ${outputs}) + add_custom_target(ov_shellcheck DEPENDS ${outputs}) endfunction() diff --git a/cmake/developer_package/shellcheck/shellcheck_process.cmake b/cmake/developer_package/shellcheck/shellcheck_process.cmake index adc53f9ab51093..f7fe1a299b0c3b 100644 --- a/cmake/developer_package/shellcheck/shellcheck_process.cmake +++ b/cmake/developer_package/shellcheck/shellcheck_process.cmake @@ -2,25 +2,19 @@ # SPDX-License-Identifier: Apache-2.0 # -if(NOT DEFINED IE_SHELLCHECK_PROGRAM) - message(FATAL_ERROR "IE_SHELLCHECK_PROGRAM is not defined") -endif() - -if(NOT DEFINED IE_SHELL_SCRIPT) - message(FATAL_ERROR "IE_SHELL_SCRIPT is not defined") -endif() - -if(NOT DEFINED IE_SHELLCHECK_OUTPUT) - message(FATAL_ERROR "IE_SHELLCHECK_OUTPUT is not defined") -endif() +foreach(var SHELLCHECK_PROGRAM SHELL_SCRIPT SHELLCHECK_OUTPUT) + if(NOT DEFINED ${var}) + message(FATAL_ERROR "${var} is not defined") + endif() +endforeach() set(rules "SC1091,SC2164,SC2162,SC1090") -execute_process(COMMAND ${IE_SHELLCHECK_PROGRAM} --exclude=${rules} ${IE_SHELL_SCRIPT} +execute_process(COMMAND ${SHELLCHECK_PROGRAM} --exclude=${rules} ${SHELL_SCRIPT} OUTPUT_VARIABLE error_message RESULT_VARIABLE exit_code OUTPUT_STRIP_TRAILING_WHITESPACE) -file(WRITE "${IE_SHELLCHECK_OUTPUT}" "${error_message}") +file(WRITE "${SHELLCHECK_OUTPUT}" "${error_message}") if(NOT exit_code EQUAL 0) message(FATAL_ERROR "${error_message}") diff --git a/cmake/developer_package/tbb/TBBConfig.cmake b/cmake/developer_package/tbb/TBBConfig.cmake index a192f6e4a3a884..135e2719d427fe 100644 --- a/cmake/developer_package/tbb/TBBConfig.cmake +++ b/cmake/developer_package/tbb/TBBConfig.cmake @@ -14,13 +14,13 @@ # Path to IE own version of TBBConfig.cmake old TBB version without cmake config. 
if(APPLE) - set(IE_OWN_TBB_CONFIG tbb/mac) + set(_OV_OWN_TBB_CONFIG tbb/mac) elseif(UNIX) - set(IE_OWN_TBB_CONFIG tbb/lnx) + set(_OV_OWN_TBB_CONFIG tbb/lnx) elseif(WIN) - set(IE_OWN_TBB_CONFIG tbb/win) + set(_OV_OWN_TBB_CONFIG tbb/win) else() - unset(IE_OWN_TBB_CONFIG) + unset(_OV_OWN_TBB_CONFIG) endif() unset(TBB_DIR) @@ -29,8 +29,10 @@ unset(TBB_DIR CACHE) find_package(TBB 2017.0 CONFIG - PATHS "${IEDevScripts_DIR}/${IE_OWN_TBB_CONFIG}" + PATHS "${OpenVINODeveloperScripts_DIR}/${_OV_OWN_TBB_CONFIG}" NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH) find_package_handle_standard_args(TBB CONFIG_MODE) + +unset(_OV_OWN_TBB_CONFIG) diff --git a/cmake/developer_package/version.cmake b/cmake/developer_package/version.cmake index 9a461c43a73499..f1e12c6531b532 100644 --- a/cmake/developer_package/version.cmake +++ b/cmake/developer_package/version.cmake @@ -166,6 +166,8 @@ endif() ov_parse_ci_build_number() macro (addVersionDefines FILE) + message(WARNING "'addVersionDefines' is deprecated. Please, use 'ov_add_version_defines'") + set(__version_file ${FILE}) if(NOT IS_ABSOLUTE ${__version_file}) set(__version_file "${CMAKE_CURRENT_SOURCE_DIR}/${__version_file}") diff --git a/cmake/developer_package/vs_version/vs_version.cmake b/cmake/developer_package/vs_version/vs_version.cmake index 6a6209e49cb312..ce7956798291ee 100644 --- a/cmake/developer_package/vs_version/vs_version.cmake +++ b/cmake/developer_package/vs_version/vs_version.cmake @@ -73,7 +73,7 @@ function(ov_add_vs_version_file) set(OV_VS_VER_INTERNALNAME_STR ${VS_VER_NAME}) set(vs_version_output "${CMAKE_CURRENT_BINARY_DIR}/vs_version.rc") - configure_file("${IEDevScripts_DIR}/vs_version/vs_version.rc.in" "${vs_version_output}" @ONLY) + configure_file("${OpenVINODeveloperScripts_DIR}/vs_version/vs_version.rc.in" "${vs_version_output}" @ONLY) source_group("src" FILES ${vs_version_output}) target_sources(${VS_VER_NAME} PRIVATE ${vs_version_output}) diff --git a/cmake/developer_package/whole_archive.cmake b/cmake/developer_package/whole_archive.cmake index ae5d56aa5d0337..0ad00055fbfb0e 100644 --- a/cmake/developer_package/whole_archive.cmake +++ b/cmake/developer_package/whole_archive.cmake @@ -5,15 +5,14 @@ #[[ function links static library without removing any symbol from it. 
-ieTargetLinkWholeArchive( [ ...]) +ov_target_link_whole_archive( [ ...]) Example: -ieTargetLinkWholeArchive("FunctionalTests" "CommonLib" "AnotherLib") +ov_target_link_whole_archive("FunctionalTests" "CommonLib" "AnotherLib") #]] -function(ieTargetLinkWholeArchive targetName) - set(libs) - foreach(staticLib ${ARGN}) +function(ov_target_link_whole_archive targetName) + foreach(staticLib IN LISTS ARGN) if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") # CMake does not support generator expression in LINK_FLAGS, so we workaround it a little bit: # passing same static library as normal link (to get build deps working, and includes too), than using WHOLEARCHIVE option @@ -52,3 +51,10 @@ function(ieTargetLinkWholeArchive targetName) target_link_libraries(${targetName} PRIVATE ${libs}) endif() endfunction() + +# deprecated + +function(ieTargetLinkWholeArchive) + message(WARNING "'ieTargetLinkWholeArchive' is deprecated, use 'ov_target_link_whole_archive' instead") + ov_target_link_whole_archive(${ARGN}) +endfunction() diff --git a/cmake/features.cmake b/cmake/features.cmake index 01a219e0aaf6a4..48c50bc3f3ea52 100644 --- a/cmake/features.cmake +++ b/cmake/features.cmake @@ -14,12 +14,12 @@ else() set(ENABLE_INTEL_CPU_DEFAULT ON) endif() -ie_dependent_option (ENABLE_INTEL_CPU "CPU plugin for OpenVINO Runtime" ${ENABLE_INTEL_CPU_DEFAULT} +ov_dependent_option (ENABLE_INTEL_CPU "CPU plugin for OpenVINO Runtime" ${ENABLE_INTEL_CPU_DEFAULT} "RISCV64 OR X86 OR X86_64 OR AARCH64 OR ARM" OFF) -ie_dependent_option (ENABLE_ARM_COMPUTE_CMAKE "Enable ARM Compute build via cmake" OFF "ENABLE_INTEL_CPU" OFF) +ov_dependent_option (ENABLE_ARM_COMPUTE_CMAKE "Enable ARM Compute build via cmake" OFF "ENABLE_INTEL_CPU" OFF) -ie_option (ENABLE_TESTS "unit, behavior and functional tests" OFF) +ov_option (ENABLE_TESTS "unit, behavior and functional tests" OFF) if(ENABLE_TESTS) include(CTest) @@ -32,7 +32,7 @@ else() set(ENABLE_INTEL_GPU_DEFAULT OFF) endif() -ie_dependent_option (ENABLE_INTEL_GPU "GPU OpenCL-based plugin for OpenVINO Runtime" ${ENABLE_INTEL_GPU_DEFAULT} "X86_64 OR AARCH64;NOT APPLE;NOT WINDOWS_STORE;NOT WINDOWS_PHONE" OFF) +ov_dependent_option (ENABLE_INTEL_GPU "GPU OpenCL-based plugin for OpenVINO Runtime" ${ENABLE_INTEL_GPU_DEFAULT} "X86_64 OR AARCH64;NOT APPLE;NOT WINDOWS_STORE;NOT WINDOWS_PHONE" OFF) if (ANDROID OR MINGW OR (CMAKE_COMPILER_IS_GNUCXX AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 7.0) OR (NOT BUILD_SHARED_LIBS AND ENABLE_INTEL_CPU)) # oneDNN doesn't support old compilers and android builds for now, so we'll build GPU plugin without oneDNN @@ -42,31 +42,31 @@ else() set(ENABLE_ONEDNN_FOR_GPU_DEFAULT ON) endif() -ie_dependent_option (ENABLE_ONEDNN_FOR_GPU "Enable oneDNN with GPU support" ${ENABLE_ONEDNN_FOR_GPU_DEFAULT} "ENABLE_INTEL_GPU" OFF) +ov_dependent_option (ENABLE_ONEDNN_FOR_GPU "Enable oneDNN with GPU support" ${ENABLE_ONEDNN_FOR_GPU_DEFAULT} "ENABLE_INTEL_GPU" OFF) -ie_option (ENABLE_DEBUG_CAPS "enable OpenVINO debug capabilities at runtime" OFF) -ie_dependent_option (ENABLE_GPU_DEBUG_CAPS "enable GPU debug capabilities at runtime" ON "ENABLE_DEBUG_CAPS;ENABLE_INTEL_GPU" OFF) -ie_dependent_option (ENABLE_CPU_DEBUG_CAPS "enable CPU debug capabilities at runtime" ON "ENABLE_DEBUG_CAPS;ENABLE_INTEL_CPU" OFF) +ov_option (ENABLE_DEBUG_CAPS "enable OpenVINO debug capabilities at runtime" OFF) +ov_dependent_option (ENABLE_GPU_DEBUG_CAPS "enable GPU debug capabilities at runtime" ON "ENABLE_DEBUG_CAPS;ENABLE_INTEL_GPU" OFF) +ov_dependent_option (ENABLE_CPU_DEBUG_CAPS "enable CPU debug 
capabilities at runtime" ON "ENABLE_DEBUG_CAPS;ENABLE_INTEL_CPU" OFF) -ie_option (ENABLE_PROFILING_ITT "Build with ITT tracing. Optionally configure pre-built ittnotify library though INTEL_VTUNE_DIR variable." OFF) +ov_option (ENABLE_PROFILING_ITT "Build with ITT tracing. Optionally configure pre-built ittnotify library though INTEL_VTUNE_DIR variable." OFF) -ie_option_enum(ENABLE_PROFILING_FILTER "Enable or disable ITT counter groups.\ +ov_option_enum(ENABLE_PROFILING_FILTER "Enable or disable ITT counter groups.\ Supported values:\ ALL - enable all ITT counters (default value)\ FIRST_INFERENCE - enable only first inference time counters" ALL ALLOWED_VALUES ALL FIRST_INFERENCE) -ie_option (ENABLE_PROFILING_FIRST_INFERENCE "Build with ITT tracing of first inference time." ON) +ov_option (ENABLE_PROFILING_FIRST_INFERENCE "Build with ITT tracing of first inference time." ON) -ie_option_enum(SELECTIVE_BUILD "Enable OpenVINO conditional compilation or statistics collection. \ +ov_option_enum(SELECTIVE_BUILD "Enable OpenVINO conditional compilation or statistics collection. \ In case SELECTIVE_BUILD is enabled, the SELECTIVE_BUILD_STAT variable should contain the path to the collected IntelSEAPI statistics. \ Usage: -DSELECTIVE_BUILD=ON -DSELECTIVE_BUILD_STAT=/path/*.csv" OFF ALLOWED_VALUES ON OFF COLLECT) -ie_option (ENABLE_DOCS "Build docs using Doxygen" OFF) +ov_option (ENABLE_DOCS "Build docs using Doxygen" OFF) find_package(PkgConfig QUIET) -ie_dependent_option (ENABLE_PKGCONFIG_GEN "Enable openvino.pc pkg-config file generation" ON "LINUX OR APPLE;PkgConfig_FOUND;BUILD_SHARED_LIBS" OFF) +ov_dependent_option (ENABLE_PKGCONFIG_GEN "Enable openvino.pc pkg-config file generation" ON "LINUX OR APPLE;PkgConfig_FOUND;BUILD_SHARED_LIBS" OFF) # # OpenVINO Runtime specific options @@ -82,7 +82,7 @@ endif() set(THREADING "${THREADING_DEFAULT}" CACHE STRING "Threading") set_property(CACHE THREADING PROPERTY STRINGS "TBB" "TBB_AUTO" "OMP" "SEQ") -list (APPEND IE_OPTIONS THREADING) +list (APPEND OV_OPTIONS THREADING) if (NOT THREADING STREQUAL "TBB" AND NOT THREADING STREQUAL "TBB_AUTO" AND NOT THREADING STREQUAL "OMP" AND @@ -97,50 +97,50 @@ else() set(ENABLE_TBBBIND_2_5_DEFAULT OFF) endif() -ie_dependent_option (ENABLE_TBBBIND_2_5 "Enable TBBBind_2_5 static usage in OpenVINO runtime" ${ENABLE_TBBBIND_2_5_DEFAULT} "THREADING MATCHES TBB; NOT APPLE" OFF) -ie_dependent_option (ENABLE_TBB_RELEASE_ONLY "Only Release TBB libraries are linked to the OpenVINO Runtime binaries" ON "THREADING MATCHES TBB;LINUX" OFF) +ov_dependent_option (ENABLE_TBBBIND_2_5 "Enable TBBBind_2_5 static usage in OpenVINO runtime" ${ENABLE_TBBBIND_2_5_DEFAULT} "THREADING MATCHES TBB; NOT APPLE" OFF) +ov_dependent_option (ENABLE_TBB_RELEASE_ONLY "Only Release TBB libraries are linked to the OpenVINO Runtime binaries" ON "THREADING MATCHES TBB;LINUX" OFF) -ie_dependent_option (ENABLE_INTEL_GNA "GNA support for OpenVINO Runtime" ON +ov_dependent_option (ENABLE_INTEL_GNA "GNA support for OpenVINO Runtime" ON "NOT APPLE;NOT ANDROID;X86_64;CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 5.4" OFF) -ie_dependent_option (ENABLE_INTEL_GNA_DEBUG "GNA debug build" OFF "ENABLE_INTEL_GNA" OFF) -ie_dependent_option (ENABLE_V7_SERIALIZE "enables serialization to IR v7" OFF "ENABLE_INTEL_GNA" OFF) -ie_dependent_option (ENABLE_IR_V7_READER "Enables IR v7 reader" ${BUILD_SHARED_LIBS} "ENABLE_TESTS;ENABLE_INTEL_GNA" OFF) +ov_dependent_option (ENABLE_INTEL_GNA_DEBUG "GNA debug build" OFF "ENABLE_INTEL_GNA" OFF) +ov_dependent_option 
(ENABLE_V7_SERIALIZE "enables serialization to IR v7" OFF "ENABLE_INTEL_GNA" OFF) +ov_dependent_option (ENABLE_IR_V7_READER "Enables IR v7 reader" ${BUILD_SHARED_LIBS} "ENABLE_TESTS;ENABLE_INTEL_GNA" OFF) -ie_dependent_option (ENABLE_GAPI_PREPROCESSING "Enables G-API preprocessing" ON "NOT MINGW64" OFF) +ov_dependent_option (ENABLE_GAPI_PREPROCESSING "Enables G-API preprocessing" ON "NOT MINGW64" OFF) -ie_option (ENABLE_MULTI "Enables MULTI Device Plugin" ON) -ie_option (ENABLE_AUTO "Enables AUTO Device Plugin" ON) -ie_option (ENABLE_AUTO_BATCH "Enables Auto-Batching Plugin" ON) -ie_option (ENABLE_HETERO "Enables Hetero Device Plugin" ON) -ie_option (ENABLE_TEMPLATE "Enable template plugin" ON) +ov_option (ENABLE_MULTI "Enables MULTI Device Plugin" ON) +ov_option (ENABLE_AUTO "Enables AUTO Device Plugin" ON) +ov_option (ENABLE_AUTO_BATCH "Enables Auto-Batching Plugin" ON) +ov_option (ENABLE_HETERO "Enables Hetero Device Plugin" ON) +ov_option (ENABLE_TEMPLATE "Enable template plugin" ON) -ie_dependent_option (ENABLE_PLUGINS_XML "Generate plugins.xml configuration file or not" OFF "BUILD_SHARED_LIBS" OFF) +ov_dependent_option (ENABLE_PLUGINS_XML "Generate plugins.xml configuration file or not" OFF "BUILD_SHARED_LIBS" OFF) -ie_dependent_option (GAPI_TEST_PERF "if GAPI unit tests should examine performance" OFF "ENABLE_TESTS;ENABLE_GAPI_PREPROCESSING" OFF) +ov_dependent_option (GAPI_TEST_PERF "if GAPI unit tests should examine performance" OFF "ENABLE_TESTS;ENABLE_GAPI_PREPROCESSING" OFF) -ie_dependent_option (ENABLE_FUNCTIONAL_TESTS "functional tests" ON "ENABLE_TESTS" OFF) +ov_dependent_option (ENABLE_FUNCTIONAL_TESTS "functional tests" ON "ENABLE_TESTS" OFF) -ie_option (ENABLE_SAMPLES "console samples are part of OpenVINO Runtime package" ON) +ov_option (ENABLE_SAMPLES "console samples are part of OpenVINO Runtime package" ON) set(OPENVINO_EXTRA_MODULES "" CACHE STRING "Extra paths for extra modules to include into OpenVINO build") find_host_package(Python3 QUIET COMPONENTS Interpreter) if(Python3_Interpreter_FOUND) - ie_option(ENABLE_OV_ONNX_FRONTEND "Enable ONNX FrontEnd" ON) + ov_option(ENABLE_OV_ONNX_FRONTEND "Enable ONNX FrontEnd" ON) else() - ie_option(ENABLE_OV_ONNX_FRONTEND "Enable ONNX FrontEnd" OFF) + ov_option(ENABLE_OV_ONNX_FRONTEND "Enable ONNX FrontEnd" OFF) endif() -ie_option(ENABLE_OV_PADDLE_FRONTEND "Enable PaddlePaddle FrontEnd" ON) -ie_option(ENABLE_OV_IR_FRONTEND "Enable IR FrontEnd" ON) -ie_option(ENABLE_OV_PYTORCH_FRONTEND "Enable PyTorch FrontEnd" ON) -ie_option(ENABLE_OV_IR_FRONTEND "Enable IR FrontEnd" ON) -ie_option(ENABLE_OV_TF_FRONTEND "Enable TensorFlow FrontEnd" ON) -ie_option(ENABLE_OV_TF_LITE_FRONTEND "Enable TensorFlow Lite FrontEnd" ON) -ie_dependent_option(ENABLE_SNAPPY_COMPRESSION "Enables compression support for TF FE" ON +ov_option(ENABLE_OV_PADDLE_FRONTEND "Enable PaddlePaddle FrontEnd" ON) +ov_option(ENABLE_OV_IR_FRONTEND "Enable IR FrontEnd" ON) +ov_option(ENABLE_OV_PYTORCH_FRONTEND "Enable PyTorch FrontEnd" ON) +ov_option(ENABLE_OV_IR_FRONTEND "Enable IR FrontEnd" ON) +ov_option(ENABLE_OV_TF_FRONTEND "Enable TensorFlow FrontEnd" ON) +ov_option(ENABLE_OV_TF_LITE_FRONTEND "Enable TensorFlow Lite FrontEnd" ON) +ov_dependent_option(ENABLE_SNAPPY_COMPRESSION "Enables compression support for TF FE" ON "ENABLE_OV_TF_FRONTEND" OFF) -ie_dependent_option (ENABLE_STRICT_DEPENDENCIES "Skip configuring \"convinient\" dependencies for efficient parallel builds" ON "ENABLE_TESTS;ENABLE_OV_ONNX_FRONTEND" OFF) +ov_dependent_option (ENABLE_STRICT_DEPENDENCIES "Skip 
configuring \"convinient\" dependencies for efficient parallel builds" ON "ENABLE_TESTS;ENABLE_OV_ONNX_FRONTEND" OFF) if(CMAKE_HOST_LINUX AND LINUX) # Debian packages are enabled on Ubuntu systems @@ -175,28 +175,28 @@ else() set(ENABLE_SYSTEM_TBB_DEFAULT ${ENABLE_SYSTEM_LIBS_DEFAULT}) endif() -ie_dependent_option (ENABLE_SYSTEM_TBB "Enables use of system TBB" ${ENABLE_SYSTEM_TBB_DEFAULT} +ov_dependent_option (ENABLE_SYSTEM_TBB "Enables use of system TBB" ${ENABLE_SYSTEM_TBB_DEFAULT} "THREADING MATCHES TBB" OFF) # TODO: turn it off by default during the work on cross-os distribution, because pugixml is not # available out of box on all systems (like RHEL, UBI) -ie_option (ENABLE_SYSTEM_PUGIXML "Enables use of system PugiXML" ${ENABLE_SYSTEM_PUGIXML_DEFAULT}) +ov_option (ENABLE_SYSTEM_PUGIXML "Enables use of system PugiXML" ${ENABLE_SYSTEM_PUGIXML_DEFAULT}) # the option is on by default, because we use only flatc compiler and don't use any libraries -ie_dependent_option(ENABLE_SYSTEM_FLATBUFFERS "Enables use of system flatbuffers" ${ENABLE_SYSTEM_FLATBUFFERS_DEFAULT} +ov_dependent_option(ENABLE_SYSTEM_FLATBUFFERS "Enables use of system flatbuffers" ${ENABLE_SYSTEM_FLATBUFFERS_DEFAULT} "ENABLE_OV_TF_LITE_FRONTEND" OFF) -ie_dependent_option (ENABLE_SYSTEM_OPENCL "Enables use of system OpenCL" ${ENABLE_SYSTEM_LIBS_DEFAULT} +ov_dependent_option (ENABLE_SYSTEM_OPENCL "Enables use of system OpenCL" ${ENABLE_SYSTEM_LIBS_DEFAULT} "ENABLE_INTEL_GPU" OFF) # the option is turned off by default, because we compile our own static version of protobuf # with LTO and -fPIC options, while system one does not have such flags -ie_dependent_option (ENABLE_SYSTEM_PROTOBUF "Enables use of system Protobuf" OFF +ov_dependent_option (ENABLE_SYSTEM_PROTOBUF "Enables use of system Protobuf" OFF "ENABLE_OV_ONNX_FRONTEND OR ENABLE_OV_PADDLE_FRONTEND OR ENABLE_OV_TF_FRONTEND" OFF) # the option is turned off by default, because we don't want to have a dependency on libsnappy.so -ie_dependent_option (ENABLE_SYSTEM_SNAPPY "Enables use of system version of Snappy" OFF +ov_dependent_option (ENABLE_SYSTEM_SNAPPY "Enables use of system version of Snappy" OFF "ENABLE_SNAPPY_COMPRESSION" OFF) -ie_dependent_option (ENABLE_PYTHON_PACKAGING "Enables packaging of Python API in APT / YUM" OFF +ov_dependent_option (ENABLE_PYTHON_PACKAGING "Enables packaging of Python API in APT / YUM" OFF "ENABLE_PYTHON;UNIX" OFF) -ie_option(ENABLE_OPENVINO_DEBUG "Enable output for OPENVINO_DEBUG statements" OFF) +ov_option(ENABLE_OPENVINO_DEBUG "Enable output for OPENVINO_DEBUG statements" OFF) if(NOT BUILD_SHARED_LIBS AND ENABLE_OV_TF_FRONTEND) set(FORCE_FRONTENDS_USE_PROTOBUF ON) @@ -216,4 +216,4 @@ if (ENABLE_PROFILING_RAW) add_definitions(-DENABLE_PROFILING_RAW=1) endif() -print_enabled_features() +ov_print_enabled_features() diff --git a/cmake/templates/InferenceEngineConfig.cmake.in b/cmake/templates/InferenceEngineConfig.cmake.in index ef3fad4b9ff0a4..5c4a9ca5c144e2 100644 --- a/cmake/templates/InferenceEngineConfig.cmake.in +++ b/cmake/templates/InferenceEngineConfig.cmake.in @@ -25,6 +25,8 @@ @PACKAGE_INIT@ +message(WARNING "find_package(InferenceEngine) is deprecated and will be removed in 2024.0 release. 
Please, use find_package(OpenVINO)") + if(NOT DEFINED CMAKE_FIND_PACKAGE_NAME) set(CMAKE_FIND_PACKAGE_NAME InferenceEngine) set(_ie_need_package_name_reset ON) @@ -77,7 +79,7 @@ endforeach() set(PACKAGE_PREFIX_DIR ${_ie_package_prefix_dir}) unset(_ie_package_prefix_dir) -set_and_check(InferenceEngine_INCLUDE_DIRS "@PACKAGE_IE_INCLUDE_DIR@") +set_and_check(InferenceEngine_INCLUDE_DIRS "@PACKAGE_OV_INCLUDE_DIR@") check_required_components(${CMAKE_FIND_PACKAGE_NAME}) diff --git a/cmake/templates/InferenceEngineDeveloperPackageConfig.cmake.in b/cmake/templates/InferenceEngineDeveloperPackageConfig.cmake.in index 4249559e027031..c4a9c49a481936 100644 --- a/cmake/templates/InferenceEngineDeveloperPackageConfig.cmake.in +++ b/cmake/templates/InferenceEngineDeveloperPackageConfig.cmake.in @@ -6,37 +6,39 @@ include(CMakeFindDependencyMacro) +message(WARNING "find_package(InferenceEngineDeveloperPackage) is deprecated and will be removed in 2024.0 release. Please, use find_package(OpenVINODeveloperPackage)") + # TODO: remove after changing [private plugins] set_and_check(OpenVINO_SOURCE_DIR "@OpenVINO_SOURCE_DIR@") # NPU set_and_check(OpenVINO_MAIN_SOURCE_DIR "@OpenVINO_SOURCE_DIR@") # NPU # Variables to export in plugin's projects -set(ie_options "@IE_OPTIONS@") -list(APPEND ie_options CMAKE_CXX_COMPILER_LAUNCHER CMAKE_C_COMPILER_LAUNCHER +set(ov_options "@OV_OPTIONS@") +list(APPEND ov_options CMAKE_CXX_COMPILER_LAUNCHER CMAKE_C_COMPILER_LAUNCHER CMAKE_CXX_LINKER_LAUNCHER CMAKE_C_LINKER_LAUNCHER CMAKE_SKIP_RPATH CMAKE_INSTALL_PREFIX CPACK_GENERATOR) if(APPLE) - list(APPEND ie_options CMAKE_OSX_ARCHITECTURES CMAKE_OSX_DEPLOYMENT_TARGET) + list(APPEND ov_options CMAKE_OSX_ARCHITECTURES CMAKE_OSX_DEPLOYMENT_TARGET) endif() -get_property(_IE_GENERATOR_MULTI_CONFIG GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG) -if(_IE_GENERATOR_MULTI_CONFIG) - list(APPEND ie_options CMAKE_CONFIGURATION_TYPES) +get_property(_OV_GENERATOR_MULTI_CONFIG GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG) +if(_OV_GENERATOR_MULTI_CONFIG) + list(APPEND ov_options CMAKE_CONFIGURATION_TYPES) if(CMAKE_GENERATOR MATCHES "^Ninja Multi-Config$") - list(APPEND ie_options CMAKE_DEFAULT_BUILD_TYPE) + list(APPEND ov_options CMAKE_DEFAULT_BUILD_TYPE) endif() else() - list(APPEND ie_options CMAKE_BUILD_TYPE) + list(APPEND ov_options CMAKE_BUILD_TYPE) endif() -unset(_IE_GENERATOR_MULTI_CONFIG) +unset(_OV_GENERATOR_MULTI_CONFIG) file(TO_CMAKE_PATH "${CMAKE_CURRENT_LIST_DIR}" cache_path) message(STATUS "The following CMake options are exported from Inference Engine Developer package") message(" ") -foreach(option IN LISTS ie_options) +foreach(option IN LISTS ov_options) if(NOT DEFINED "${option}") load_cache("${cache_path}" READ_WITH_PREFIX "" ${option}) endif() @@ -56,7 +58,7 @@ set(CMAKE_COMPILE_WARNING_AS_ERROR OFF) # Content # -find_dependency(IEDevScripts +find_dependency(OpenVINODeveloperScripts PATHS "${OpenVINO_SOURCE_DIR}/cmake/developer_package" NO_CMAKE_FIND_ROOT_PATH NO_DEFAULT_PATH) @@ -160,13 +162,13 @@ if(ENABLE_SYSTEM_PUGIXML) endif() endif() -set(_IE_nlohmann_json_FOUND "@nlohmann_json_FOUND@") -if(_IE_nlohmann_json_FOUND) +set(_ov_nlohmann_json_FOUND "@nlohmann_json_FOUND@") +if(_ov_nlohmann_json_FOUND) find_dependency(nlohmann_json) set_target_properties(nlohmann_json::nlohmann_json PROPERTIES IMPORTED_GLOBAL ON) add_library(IE::nlohmann_json ALIAS nlohmann_json::nlohmann_json) endif() -unset(_IE_nlohmann_json_FOUND) +unset(_ov_nlohmann_json_FOUND) # inherit OpenCV from main IE project if enabled if("@OpenCV_FOUND@") diff --git 
a/cmake/templates/OpenVINOConfig.cmake.in b/cmake/templates/OpenVINOConfig.cmake.in index 7dda80d8a312fd..470be5d17b1cf5 100644 --- a/cmake/templates/OpenVINOConfig.cmake.in +++ b/cmake/templates/OpenVINOConfig.cmake.in @@ -176,7 +176,7 @@ macro(_ov_find_tbb) set(enable_system_tbb "@ENABLE_SYSTEM_TBB@") if(NOT enable_system_tbb) - set_and_check(_tbb_dir "@PACKAGE_IE_TBB_DIR@") + set_and_check(_tbb_dir "@PACKAGE_OV_TBB_DIR@") # see https://stackoverflow.com/questions/28070810/cmake-generate-error-on-windows-as-it-uses-as-escape-seq if(DEFINED ENV{TBBROOT}) @@ -218,7 +218,7 @@ macro(_ov_find_tbb) set(install_tbbbind "@install_tbbbind@") if(install_tbbbind) - set_and_check(_tbb_bind_dir "@PACKAGE_IE_TBBBIND_DIR@") + set_and_check(_tbb_bind_dir "@PACKAGE_OV_TBBBIND_DIR@") _ov_find_dependency(TBBBIND_2_5 PATHS ${_tbb_bind_dir} NO_CMAKE_FIND_ROOT_PATH diff --git a/cmake/templates/OpenVINODeveloperPackageConfig.cmake.in b/cmake/templates/OpenVINODeveloperPackageConfig.cmake.in index 074139df5f3bf8..04cf8a219ae723 100644 --- a/cmake/templates/OpenVINODeveloperPackageConfig.cmake.in +++ b/cmake/templates/OpenVINODeveloperPackageConfig.cmake.in @@ -10,7 +10,7 @@ set_and_check(OpenVINO_SOURCE_DIR "@OpenVINO_SOURCE_DIR@") # Variables to export in plugin's projects -set(ov_options "@IE_OPTIONS@") +set(ov_options "@OV_OPTIONS@") list(APPEND ov_options CMAKE_CXX_COMPILER_LAUNCHER CMAKE_C_COMPILER_LAUNCHER CMAKE_CXX_LINKER_LAUNCHER CMAKE_C_LINKER_LAUNCHER CMAKE_SKIP_RPATH CMAKE_INSTALL_PREFIX CPACK_GENERATOR) @@ -20,7 +20,7 @@ if(APPLE) endif() get_property(_OV_GENERATOR_MULTI_CONFIG GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG) -if(_IE_GENERATOR_MULTI_CONFIG) +if(_OV_GENERATOR_MULTI_CONFIG) list(APPEND ov_options CMAKE_CONFIGURATION_TYPES) if(CMAKE_GENERATOR MATCHES "^Ninja Multi-Config$") list(APPEND ov_options CMAKE_DEFAULT_BUILD_TYPE) @@ -61,7 +61,7 @@ set(CMAKE_COMPILE_WARNING_AS_ERROR OFF) # Content # -find_dependency(IEDevScripts +find_dependency(OpenVINODeveloperScripts PATHS "${OpenVINO_SOURCE_DIR}/cmake/developer_package" NO_CMAKE_FIND_ROOT_PATH NO_DEFAULT_PATH) diff --git a/cmake/templates/ngraphConfig.cmake.in b/cmake/templates/ngraphConfig.cmake.in index ee889b545acd0a..a0111c2302195f 100644 --- a/cmake/templates/ngraphConfig.cmake.in +++ b/cmake/templates/ngraphConfig.cmake.in @@ -42,6 +42,8 @@ include(CMakeFindDependencyMacro) +message(WARNING "find_package(ngraph) is deprecated and will be removed in 2024.0 release. 
Please, use find_package(OpenVINO)") + find_dependency(OpenVINO PATHS "${CMAKE_CURRENT_LIST_DIR}" "${CMAKE_CURRENT_LIST_DIR}/../openvino${ngraph_VERSION}" diff --git a/samples/c/common/opencv_c_wrapper/CMakeLists.txt b/samples/c/common/opencv_c_wrapper/CMakeLists.txt index 1f2eea3bebcfd2..11ae7efbf35787 100644 --- a/samples/c/common/opencv_c_wrapper/CMakeLists.txt +++ b/samples/c/common/opencv_c_wrapper/CMakeLists.txt @@ -28,6 +28,6 @@ target_include_directories(${TARGET_NAME} PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/in set_target_properties(${TARGET_NAME} PROPERTIES FOLDER c_samples) -if(COMMAND ov_add_clang_format_target AND NOT IE_SAMPLE_EXCLUDE_CLANG_FORMAT) +if(COMMAND ov_add_clang_format_target) ov_add_clang_format_target(${TARGET_NAME}_clang FOR_TARGETS ${TARGET_NAME}) endif() diff --git a/samples/c/hello_classification/CMakeLists.txt b/samples/c/hello_classification/CMakeLists.txt index 199744115257fd..031b9d44037a89 100644 --- a/samples/c/hello_classification/CMakeLists.txt +++ b/samples/c/hello_classification/CMakeLists.txt @@ -2,6 +2,6 @@ # SPDX-License-Identifier: Apache-2.0 # -ie_add_sample(NAME hello_classification_c +ov_add_sample(NAME hello_classification_c SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/main.c" DEPENDENCIES opencv_c_wrapper) diff --git a/samples/c/hello_nv12_input_classification/CMakeLists.txt b/samples/c/hello_nv12_input_classification/CMakeLists.txt index a517baf9b40ec3..83ae06b52052d6 100644 --- a/samples/c/hello_nv12_input_classification/CMakeLists.txt +++ b/samples/c/hello_nv12_input_classification/CMakeLists.txt @@ -2,5 +2,5 @@ # SPDX-License-Identifier: Apache-2.0 # -ie_add_sample(NAME hello_nv12_input_classification_c +ov_add_sample(NAME hello_nv12_input_classification_c SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/main.c") diff --git a/samples/cpp/CMakeLists.txt b/samples/cpp/CMakeLists.txt index 038895b1c07429..87bf32eb3179c9 100644 --- a/samples/cpp/CMakeLists.txt +++ b/samples/cpp/CMakeLists.txt @@ -49,18 +49,18 @@ endif() if(OpenVINO_SOURCE_DIR) # in case if samples are built from IE repo - set(IE_MAIN_SAMPLES_DIR "${OpenVINO_SOURCE_DIR}") + set(OV_MAIN_SAMPLES_DIR "${OpenVINO_SOURCE_DIR}") set(OpenVINO_DIR "${CMAKE_BINARY_DIR}") else() # in case if samples are built out of IE repo - set(IE_MAIN_SAMPLES_DIR ${CMAKE_CURRENT_BINARY_DIR}) + set(OV_MAIN_SAMPLES_DIR ${CMAKE_CURRENT_BINARY_DIR}) endif() -set (CMAKE_LIBRARY_OUTPUT_DIRECTORY ${IE_MAIN_SAMPLES_DIR}/${BIN_FOLDER}) -set (CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${IE_MAIN_SAMPLES_DIR}/${BIN_FOLDER}) -set (CMAKE_COMPILE_PDB_OUTPUT_DIRECTORY ${IE_MAIN_SAMPLES_DIR}/${BIN_FOLDER}) -set (CMAKE_PDB_OUTPUT_DIRECTORY ${IE_MAIN_SAMPLES_DIR}/${BIN_FOLDER}) -set (CMAKE_RUNTIME_OUTPUT_DIRECTORY ${IE_MAIN_SAMPLES_DIR}/${BIN_FOLDER}) +set (CMAKE_LIBRARY_OUTPUT_DIRECTORY ${OV_MAIN_SAMPLES_DIR}/${BIN_FOLDER}) +set (CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${OV_MAIN_SAMPLES_DIR}/${BIN_FOLDER}) +set (CMAKE_COMPILE_PDB_OUTPUT_DIRECTORY ${OV_MAIN_SAMPLES_DIR}/${BIN_FOLDER}) +set (CMAKE_PDB_OUTPUT_DIRECTORY ${OV_MAIN_SAMPLES_DIR}/${BIN_FOLDER}) +set (CMAKE_RUNTIME_OUTPUT_DIRECTORY ${OV_MAIN_SAMPLES_DIR}/${BIN_FOLDER}) if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -D_SCL_SECURE_NO_WARNINGS -D_CRT_SECURE_NO_WARNINGS") @@ -155,7 +155,7 @@ if (DEFINED OpenVINO_SOURCE_DIR AND NOT ENABLE_SAMPLES) return() endif() -function(add_samples_to_build) +function(ov_add_samples_to_build) # check each passed sample subdirectory foreach (dir ${ARGN}) if (IS_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/${dir}) @@ -175,37 +175,37 @@ 
function(add_samples_to_build) endif() endif() endforeach() -endfunction(add_samples_to_build) +endfunction(ov_add_samples_to_build) include(CMakeParseArguments) # -# ie_add_sample(NAME +# ov_add_sample(NAME # SOURCES # [HEADERS
] # [INCLUDE_DIRECTORIES ] # [DEPENDENCIES ] # [EXCLUDE_CLANG_FORMAT] # -macro(ie_add_sample) +macro(ov_add_sample) set(options EXCLUDE_CLANG_FORMAT) set(oneValueArgs NAME) set(multiValueArgs SOURCES HEADERS DEPENDENCIES INCLUDE_DIRECTORIES) - cmake_parse_arguments(IE_SAMPLE "${options}" "${oneValueArgs}" + cmake_parse_arguments(SAMPLE "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN} ) # Create named folders for the sources within the .vcproj # Empty name lists them directly under the .vcproj - source_group("src" FILES ${IE_SAMPLE_SOURCES}) - if(IE_SAMPLE_HEADERS) - source_group("include" FILES ${IE_SAMPLE_HEADERS}) + source_group("src" FILES ${SAMPLE_SOURCES}) + if(SAMPLE_HEADERS) + source_group("include" FILES ${SAMPLE_HEADERS}) endif() # Create executable file from sources - add_executable(${IE_SAMPLE_NAME} ${IE_SAMPLE_SOURCES} ${IE_SAMPLE_HEADERS}) + add_executable(${SAMPLE_NAME} ${SAMPLE_SOURCES} ${SAMPLE_HEADERS}) set(folder_name cpp_samples) - if(IE_SAMPLE_NAME MATCHES ".*_c$") + if(SAMPLE_NAME MATCHES ".*_c$") set(c_sample ON) set(folder_name c_samples) endif() @@ -222,34 +222,46 @@ macro(ie_add_sample) set(ov_link_libraries openvino::runtime) endif() - set_target_properties(${IE_SAMPLE_NAME} PROPERTIES FOLDER ${folder_name} - COMPILE_PDB_NAME ${IE_SAMPLE_NAME}) + set_target_properties(${SAMPLE_NAME} PROPERTIES FOLDER ${folder_name} + COMPILE_PDB_NAME ${SAMPLE_NAME}) - if(IE_SAMPLE_INCLUDE_DIRECTORIES) - target_include_directories(${IE_SAMPLE_NAME} PRIVATE ${IE_SAMPLE_INCLUDE_DIRECTORIES}) + if(SAMPLE_INCLUDE_DIRECTORIES) + target_include_directories(${SAMPLE_NAME} PRIVATE ${SAMPLE_INCLUDE_DIRECTORIES}) endif() - target_include_directories(${IE_SAMPLE_NAME} PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/../common") + target_include_directories(${SAMPLE_NAME} PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/../common") - target_link_libraries(${IE_SAMPLE_NAME} PRIVATE ${ov_link_libraries} Threads::Threads ${IE_SAMPLE_DEPENDENCIES}) + target_link_libraries(${SAMPLE_NAME} PRIVATE ${ov_link_libraries} Threads::Threads ${SAMPLE_DEPENDENCIES}) - install(TARGETS ${IE_SAMPLE_NAME} + install(TARGETS ${SAMPLE_NAME} RUNTIME DESTINATION samples_bin/ COMPONENT samples_bin EXCLUDE_FROM_ALL) # create global target with all samples / demo apps - if(NOT TARGET ie_samples) - add_custom_target(ie_samples ALL) + if(NOT TARGET ov_samples) + add_custom_target(ov_samples ALL) endif() - add_dependencies(ie_samples ${IE_SAMPLE_NAME}) + add_dependencies(ov_samples ${SAMPLE_NAME}) - if(COMMAND ov_add_clang_format_target AND NOT IE_SAMPLE_EXCLUDE_CLANG_FORMAT) - ov_add_clang_format_target(${IE_SAMPLE_NAME}_clang FOR_SOURCES ${IE_SAMPLE_SOURCES} ${IE_SAMPLE_HEADERS}) + if(COMMAND ov_add_clang_format_target AND NOT SAMPLE_EXCLUDE_CLANG_FORMAT) + ov_add_clang_format_target(${SAMPLE_NAME}_clang FOR_SOURCES ${SAMPLE_SOURCES} ${SAMPLE_HEADERS}) endif() if(COMMAND ov_ncc_naming_style AND NOT c_sample) - ov_ncc_naming_style(FOR_TARGET "${IE_SAMPLE_NAME}" + ov_ncc_naming_style(FOR_TARGET "${SAMPLE_NAME}" SOURCE_DIRECTORIES "${CMAKE_CURRENT_SOURCE_DIR}") endif() + + unset(options) + unset(oneValueArgs) + unset(multiValueArgs) + unset(c_sample) + unset(folder_name) + unset(ov_link_libraries) + unset(SAMPLE_NAME) + unset(SAMPLE_HEADERS) + unset(SAMPLE_DEPENDENCIES) + unset(SAMPLE_EXCLUDE_CLANG_FORMAT) + unset(SAMPLE_INCLUDE_DIRECTORIES) endmacro() # collect all samples subdirectories @@ -261,4 +273,4 @@ if(EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/thirdparty") endif() list(REMOVE_ITEM samples_dirs common) 
-add_samples_to_build(${samples_dirs}) +ov_add_samples_to_build(${samples_dirs}) diff --git a/samples/cpp/benchmark/sync_benchmark/CMakeLists.txt b/samples/cpp/benchmark/sync_benchmark/CMakeLists.txt index 39a1b86f3f0baf..c9350435612005 100644 --- a/samples/cpp/benchmark/sync_benchmark/CMakeLists.txt +++ b/samples/cpp/benchmark/sync_benchmark/CMakeLists.txt @@ -2,6 +2,6 @@ # SPDX-License-Identifier: Apache-2.0 # -ie_add_sample(NAME sync_benchmark +ov_add_sample(NAME sync_benchmark SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/main.cpp" DEPENDENCIES ie_samples_utils) diff --git a/samples/cpp/benchmark/throughput_benchmark/CMakeLists.txt b/samples/cpp/benchmark/throughput_benchmark/CMakeLists.txt index 682feee8cef0ca..be9730bf872822 100644 --- a/samples/cpp/benchmark/throughput_benchmark/CMakeLists.txt +++ b/samples/cpp/benchmark/throughput_benchmark/CMakeLists.txt @@ -2,6 +2,6 @@ # SPDX-License-Identifier: Apache-2.0 # -ie_add_sample(NAME throughput_benchmark +ov_add_sample(NAME throughput_benchmark SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/main.cpp" DEPENDENCIES ie_samples_utils) diff --git a/samples/cpp/benchmark_app/CMakeLists.txt b/samples/cpp/benchmark_app/CMakeLists.txt index c79615130c268a..863c2278058f5f 100644 --- a/samples/cpp/benchmark_app/CMakeLists.txt +++ b/samples/cpp/benchmark_app/CMakeLists.txt @@ -7,7 +7,7 @@ set(TARGET_NAME "benchmark_app") file (GLOB SRC ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp) file (GLOB HDR ${CMAKE_CURRENT_SOURCE_DIR}/*.hpp) -ie_add_sample(NAME ${TARGET_NAME} +ov_add_sample(NAME ${TARGET_NAME} SOURCES ${SRC} HEADERS ${HDR} DEPENDENCIES ${GFLAGS_TARGET} format_reader ie_samples_utils) diff --git a/samples/cpp/classification_sample_async/CMakeLists.txt b/samples/cpp/classification_sample_async/CMakeLists.txt index 8b9f4a946f5d87..ea208418b768c6 100644 --- a/samples/cpp/classification_sample_async/CMakeLists.txt +++ b/samples/cpp/classification_sample_async/CMakeLists.txt @@ -2,7 +2,7 @@ # SPDX-License-Identifier: Apache-2.0 # -ie_add_sample(NAME classification_sample_async +ov_add_sample(NAME classification_sample_async SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/main.cpp" HEADERS "${CMAKE_CURRENT_SOURCE_DIR}/classification_sample_async.h" DEPENDENCIES ${GFLAGS_TARGET} format_reader ie_samples_utils) diff --git a/samples/cpp/hello_classification/CMakeLists.txt b/samples/cpp/hello_classification/CMakeLists.txt index 8eb7dc23ffb83e..1d397db3d3f8ff 100644 --- a/samples/cpp/hello_classification/CMakeLists.txt +++ b/samples/cpp/hello_classification/CMakeLists.txt @@ -2,7 +2,7 @@ # SPDX-License-Identifier: Apache-2.0 # -ie_add_sample(NAME hello_classification +ov_add_sample(NAME hello_classification SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/main.cpp" DEPENDENCIES format_reader ie_samples_utils) diff --git a/samples/cpp/hello_nv12_input_classification/CMakeLists.txt b/samples/cpp/hello_nv12_input_classification/CMakeLists.txt index d16c619c696581..1e0e73ca8ec35e 100644 --- a/samples/cpp/hello_nv12_input_classification/CMakeLists.txt +++ b/samples/cpp/hello_nv12_input_classification/CMakeLists.txt @@ -2,6 +2,6 @@ # SPDX-License-Identifier: Apache-2.0 # -ie_add_sample(NAME hello_nv12_input_classification +ov_add_sample(NAME hello_nv12_input_classification SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/main.cpp" DEPENDENCIES format_reader ie_samples_utils) diff --git a/samples/cpp/hello_query_device/CMakeLists.txt b/samples/cpp/hello_query_device/CMakeLists.txt index 6e3f28c2ad163a..b7947a58cf3e60 100644 --- a/samples/cpp/hello_query_device/CMakeLists.txt +++ 
b/samples/cpp/hello_query_device/CMakeLists.txt @@ -2,6 +2,6 @@ # SPDX-License-Identifier: Apache-2.0 # -ie_add_sample(NAME hello_query_device +ov_add_sample(NAME hello_query_device SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/main.cpp" DEPENDENCIES ie_samples_utils) diff --git a/samples/cpp/hello_reshape_ssd/CMakeLists.txt b/samples/cpp/hello_reshape_ssd/CMakeLists.txt index b1aa85349b7b86..6405c769f24307 100644 --- a/samples/cpp/hello_reshape_ssd/CMakeLists.txt +++ b/samples/cpp/hello_reshape_ssd/CMakeLists.txt @@ -2,6 +2,6 @@ # SPDX-License-Identifier: Apache-2.0 # -ie_add_sample(NAME hello_reshape_ssd +ov_add_sample(NAME hello_reshape_ssd SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/main.cpp" DEPENDENCIES format_reader ie_samples_utils) diff --git a/samples/cpp/model_creation_sample/CMakeLists.txt b/samples/cpp/model_creation_sample/CMakeLists.txt index f7d100c6bb0079..9d42ef3602a15b 100644 --- a/samples/cpp/model_creation_sample/CMakeLists.txt +++ b/samples/cpp/model_creation_sample/CMakeLists.txt @@ -4,7 +4,7 @@ set(TARGET_NAME "model_creation_sample") -ie_add_sample(NAME model_creation_sample +ov_add_sample(NAME model_creation_sample SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/main.cpp" HEADERS "${CMAKE_CURRENT_SOURCE_DIR}/model_creation_sample.hpp" DEPENDENCIES format_reader ie_samples_utils) diff --git a/samples/cpp/speech_sample/CMakeLists.txt b/samples/cpp/speech_sample/CMakeLists.txt index 092ebf14807680..a9e8c0e9d256fa 100644 --- a/samples/cpp/speech_sample/CMakeLists.txt +++ b/samples/cpp/speech_sample/CMakeLists.txt @@ -46,7 +46,7 @@ endif() # add sample -ie_add_sample(NAME speech_sample +ov_add_sample(NAME speech_sample SOURCES ${SRC} HEADERS ${HDR} DEPENDENCIES ${GFLAGS_TARGET} cnpy ie_samples_utils) diff --git a/scripts/CMakeLists.txt b/scripts/CMakeLists.txt index 68833c2fd5b8da..c76904c7270c03 100644 --- a/scripts/CMakeLists.txt +++ b/scripts/CMakeLists.txt @@ -15,7 +15,7 @@ set(shellcheck_skip_list "${OpenVINO_SOURCE_DIR}/src/bindings/python/thirdparty/pybind11" "${TEMP}") -ie_shellcheck_process(DIRECTORY "${OpenVINO_SOURCE_DIR}" +ov_shellcheck_process(DIRECTORY "${OpenVINO_SOURCE_DIR}" SKIP ${shellcheck_skip_list}) # diff --git a/src/bindings/c/tests/test_model_repo.cpp b/src/bindings/c/tests/test_model_repo.cpp index c221df9e3f8fe9..b7af72817d3b77 100644 --- a/src/bindings/c/tests/test_model_repo.cpp +++ b/src/bindings/c/tests/test_model_repo.cpp @@ -38,7 +38,7 @@ std::string generate_test_xml_file() { plugin_xml_file << ov::util::FileTraits::file_separator; plugin_xml_file << ov::util::FileTraits::library_prefix(); plugin_xml_file << "mock_engine"; - plugin_xml_file << IE_BUILD_POSTFIX; + plugin_xml_file << OV_BUILD_POSTFIX; plugin_xml_file << ov::util::FileTraits::dot_symbol; plugin_xml_file << ov::util::FileTraits::library_ext(); plugin_xml_file << "\" name=\"CUSTOM\">\n"; diff --git a/src/bindings/python/CMakeLists.txt b/src/bindings/python/CMakeLists.txt index c8b55e3b280e01..2093c315a06691 100644 --- a/src/bindings/python/CMakeLists.txt +++ b/src/bindings/python/CMakeLists.txt @@ -152,7 +152,7 @@ endfunction() ov_check_init_files_alignment() -ie_option(ENABLE_PYTHON "Enables OpenVINO Python API build" ${ENABLE_PYTHON_DEFAULT}) +ov_option(ENABLE_PYTHON "Enables OpenVINO Python API build" ${ENABLE_PYTHON_DEFAULT}) # # Check for wheel package @@ -196,7 +196,7 @@ endif() # this option should not be a part of OpenVINODeveloperPackage # since wheels can be built only together with main OV build -ie_dependent_option(ENABLE_WHEEL "Build wheel packages for PyPI" ${ENABLE_WHEEL_DEFAULT} 
"ENABLE_PYTHON" OFF) +ov_dependent_option(ENABLE_WHEEL "Build wheel packages for PyPI" ${ENABLE_WHEEL_DEFAULT} "ENABLE_PYTHON" OFF) if(NOT ENABLE_PYTHON) if(CMAKE_SOURCE_DIR STREQUAL OpenVINOPython_SOURCE_DIR) @@ -400,5 +400,5 @@ if(OpenVINODeveloperPackage_FOUND) # provides a callback function to describe each component in repo include("${OpenVINO_SOURCE_DIR}/cmake/packaging/packaging.cmake") - ie_cpack(${IE_CPACK_COMPONENTS_ALL}) + ov_cpack(${OV_CPACK_COMPONENTS_ALL}) endif() diff --git a/src/bindings/python/src/compatibility/openvino/inference_engine/CMakeLists.txt b/src/bindings/python/src/compatibility/openvino/inference_engine/CMakeLists.txt index 8fcfa90e66236a..92cd19c7c8f3f9 100644 --- a/src/bindings/python/src/compatibility/openvino/inference_engine/CMakeLists.txt +++ b/src/bindings/python/src/compatibility/openvino/inference_engine/CMakeLists.txt @@ -64,7 +64,7 @@ endif() function(python_ov_disable_deprecated_warnings) ov_disable_deprecated_warnings() set(pyx_file "${CMAKE_CURRENT_BINARY_DIR}/ie_api.cxx" "${CMAKE_CURRENT_BINARY_DIR}/constants.cxx") - set_source_files_properties(${pyx_file} PROPERTIES COMPILE_OPTIONS ${ie_c_cxx_deprecated}) + set_source_files_properties(${pyx_file} PROPERTIES COMPILE_OPTIONS ${ov_c_cxx_deprecated}) endfunction() python_ov_disable_deprecated_warnings() diff --git a/src/cmake/install_tbb.cmake b/src/cmake/install_tbb.cmake index d5e3e7037ed3f9..19a30ee624c918 100644 --- a/src/cmake/install_tbb.cmake +++ b/src/cmake/install_tbb.cmake @@ -67,13 +67,13 @@ unset(_ov_dynamic_tbbbind_2_5_found) # define variables for OpenVINOConfig.cmake if(THREADING MATCHES "^(TBB|TBB_AUTO)$") - set(IE_TBB_DIR "${TBB_DIR}") - list(APPEND PATH_VARS "IE_TBB_DIR") + set(OV_TBB_DIR "${TBB_DIR}") + list(APPEND PATH_VARS "OV_TBB_DIR") endif() if(install_tbbbind) - set(IE_TBBBIND_DIR "${TBBBIND_2_5_DIR}") - list(APPEND PATH_VARS "IE_TBBBIND_DIR") + set(OV_TBBBIND_DIR "${TBBBIND_2_5_DIR}") + list(APPEND PATH_VARS "OV_TBBBIND_DIR") endif() # install only downloaded | custom TBB, system one is not installed @@ -150,14 +150,14 @@ if(THREADING MATCHES "^(TBB|TBB_AUTO)$" AND endif() if(TBB_DIR MATCHES "^${TBBROOT}.*") - file(RELATIVE_PATH IE_TBB_DIR_INSTALL "${TBBROOT}" "${TBB_DIR}") - set(IE_TBB_DIR_INSTALL "${IE_TBBROOT_INSTALL}/${IE_TBB_DIR_INSTALL}") + file(RELATIVE_PATH OV_TBB_DIR_INSTALL "${TBBROOT}" "${TBB_DIR}") + set(OV_TBB_DIR_INSTALL "${IE_TBBROOT_INSTALL}/${OV_TBB_DIR_INSTALL}") else() # TBB_DIR is not a subdirectory of TBBROOT # example: old TBB 2017 with no cmake support at all # - TBBROOT point to actual root of TBB # - TBB_DIR points to cmake/developer_package/tbb/ - set(IE_TBB_DIR_INSTALL "${TBB_DIR}") + set(OV_TBB_DIR_INSTALL "${TBB_DIR}") endif() # try to select proper library directory @@ -185,21 +185,21 @@ if(THREADING MATCHES "^(TBB|TBB_AUTO)$" AND set(pkg_config_tbb_lib_dir "${IE_TBBROOT_INSTALL}/${tbb_libs_dir}") elseif(tbb_downloaded) - set(IE_TBB_DIR_INSTALL "runtime/3rdparty/tbb") + set(OV_TBB_DIR_INSTALL "runtime/3rdparty/tbb") if(WIN32) install(DIRECTORY "${TBBROOT}/bin" - DESTINATION "${IE_TBB_DIR_INSTALL}" + DESTINATION "${OV_TBB_DIR_INSTALL}" COMPONENT tbb) else() install(DIRECTORY "${TBBROOT}/lib" - DESTINATION "${IE_TBB_DIR_INSTALL}" + DESTINATION "${OV_TBB_DIR_INSTALL}" COMPONENT tbb PATTERN "cmake" EXCLUDE) endif() install(FILES "${TBBROOT}/LICENSE" - DESTINATION "${IE_TBB_DIR_INSTALL}" + DESTINATION "${OV_TBB_DIR_INSTALL}" COMPONENT tbb) # install development files @@ -212,29 +212,29 @@ if(THREADING MATCHES "^(TBB|TBB_AUTO)$" AND if(EXISTS 
"${TBBROOT}/lib/cmake") # oneTBB case install(DIRECTORY "${TBBROOT}/lib/cmake" - DESTINATION "${IE_TBB_DIR_INSTALL}/lib" + DESTINATION "${OV_TBB_DIR_INSTALL}/lib" COMPONENT tbb_dev) else() # tbb2020 case install(FILES "${TBBROOT}/cmake/TBBConfig.cmake" "${TBBROOT}/cmake/TBBConfigVersion.cmake" - DESTINATION "${IE_TBB_DIR_INSTALL}/cmake" + DESTINATION "${OV_TBB_DIR_INSTALL}/cmake" COMPONENT tbb_dev) endif() install(DIRECTORY "${TBBROOT}/include" - DESTINATION "${IE_TBB_DIR_INSTALL}" + DESTINATION "${OV_TBB_DIR_INSTALL}" COMPONENT tbb_dev) if(WIN32) # .lib files are needed only for Windows install(DIRECTORY "${TBBROOT}/lib" - DESTINATION "${IE_TBB_DIR_INSTALL}" + DESTINATION "${OV_TBB_DIR_INSTALL}" COMPONENT tbb_dev PATTERN "cmake" EXCLUDE) endif() - set(pkg_config_tbb_lib_dir "${IE_TBB_DIR_INSTALL}/lib") + set(pkg_config_tbb_lib_dir "${OV_TBB_DIR_INSTALL}/lib") else() message(WARNING "TBB of unknown origin. TBB files are not installed") endif() @@ -245,16 +245,16 @@ endif() # install tbbbind for static OpenVINO case if(install_tbbbind) - set(IE_TBBBIND_DIR_INSTALL "runtime/3rdparty/tbb_bind_2_5") + set(OV_TBBBIND_DIR_INSTALL "runtime/3rdparty/tbb_bind_2_5") install(DIRECTORY "${TBBBIND_2_5_ROOT}/lib" - DESTINATION "${IE_TBBBIND_DIR_INSTALL}" + DESTINATION "${OV_TBBBIND_DIR_INSTALL}" COMPONENT tbb) install(FILES "${TBBBIND_2_5_ROOT}/LICENSE" - DESTINATION "${IE_TBBBIND_DIR_INSTALL}" + DESTINATION "${OV_TBBBIND_DIR_INSTALL}" COMPONENT tbb) install(FILES "${TBBBIND_2_5_ROOT}/cmake/TBBBIND_2_5Config.cmake" - DESTINATION "${IE_TBBBIND_DIR_INSTALL}/cmake" + DESTINATION "${OV_TBBBIND_DIR_INSTALL}/cmake" COMPONENT tbb_dev) endif() diff --git a/src/cmake/openvino.cmake b/src/cmake/openvino.cmake index 73df6bf480719d..068ae2b0cd9816 100644 --- a/src/cmake/openvino.cmake +++ b/src/cmake/openvino.cmake @@ -157,7 +157,7 @@ install(EXPORT OpenVINOTargets # build tree -list(APPEND PATH_VARS "IE_INCLUDE_DIR") +list(APPEND PATH_VARS "OV_INCLUDE_DIR") # TODO: remove obsolete variable for API 1.0 before 2024.0 if(ENABLE_INTEL_GNA) list(APPEND PATH_VARS "GNA_PATH") endif() @@ -170,8 +170,8 @@ if(ENABLE_ONEDNN_FOR_GPU) endif() set(PUBLIC_HEADERS_DIR "${OpenVINO_SOURCE_DIR}/src/inference/include") -set(IE_INCLUDE_DIR "${PUBLIC_HEADERS_DIR}/ie") -set(IE_TBB_DIR "${TBB_DIR}") +set(OV_INCLUDE_DIR "${PUBLIC_HEADERS_DIR}/ie") +set(OV_TBB_DIR "${TBB_DIR}") configure_package_config_file("${OpenVINO_SOURCE_DIR}/cmake/templates/InferenceEngineConfig.cmake.in" "${CMAKE_BINARY_DIR}/InferenceEngineConfig.cmake" @@ -190,9 +190,9 @@ list(APPEND INSTALL_PATH_VARS "OPENVINO_LIB_DIR") # will be done by inside OpenVINOConfig.cmak / ACLConfig.cmake string(REPLACE "$" "" OPENVINO_LIB_DIR "${OV_CPACK_LIBRARYDIR}") -set(IE_INCLUDE_DIR "${OV_CPACK_INCLUDEDIR}/ie") -set(IE_TBB_DIR "${IE_TBB_DIR_INSTALL}") -set(IE_TBBBIND_DIR "${IE_TBBBIND_DIR_INSTALL}") +set(OV_INCLUDE_DIR "${OV_CPACK_INCLUDEDIR}/ie") +set(OV_TBB_DIR "${OV_TBB_DIR_INSTALL}") +set(OV_TBBBIND_DIR "${OV_TBBBIND_DIR_INSTALL}") set(GNA_PATH "${OV_CPACK_RUNTIMEDIR}") if(WIN32) set(GNA_PATH "${OV_CPACK_LIBRARYDIR}/../Release") diff --git a/src/cmake/ov_parallel.cmake b/src/cmake/ov_parallel.cmake index f669b7b1562d9e..a9d4d391e4543f 100644 --- a/src/cmake/ov_parallel.cmake +++ b/src/cmake/ov_parallel.cmake @@ -170,7 +170,7 @@ macro(ov_find_package_tbb) # fallback variant for TBB 2018 and older where TBB have not had cmake interface if(DEFINED TBBROOT OR DEFINED ENV{TBBROOT}) # note: if TBB older than 2017.0 is passed, cmake will skip it and THREADING=SEQ will be used - 
set(_tbb_paths PATHS "${IEDevScripts_DIR}/tbb") + set(_tbb_paths PATHS "${OpenVINODeveloperScripts_DIR}/tbb") endif() # try to find one more time @@ -279,7 +279,7 @@ function(ov_set_threading_interface_for TARGET_NAME) message(WARNING "Unknown target type") endif() - function(ie_target_link_libraries TARGET_NAME LINK_TYPE) + function(_ov_target_link_libraries TARGET_NAME LINK_TYPE) target_link_libraries(${TARGET_NAME} ${LINK_TYPE} ${ARGN}) # include directories as SYSTEM @@ -314,7 +314,7 @@ function(ov_set_threading_interface_for TARGET_NAME) if (TBB_FOUND) set(IE_THREAD_DEFINE "IE_THREAD_TBB") set(OV_THREAD_DEFINE "OV_THREAD_TBB") - ie_target_link_libraries(${TARGET_NAME} ${LINK_TYPE} TBB::tbb) + _ov_target_link_libraries(${TARGET_NAME} ${LINK_TYPE} TBB::tbb) target_compile_definitions(${TARGET_NAME} ${COMPILE_DEF_TYPE} TBB_PREVIEW_WAITING_FOR_WORKERS=1) else () set(THREADING "SEQ" PARENT_SCOPE) @@ -365,7 +365,7 @@ function(ov_set_threading_interface_for TARGET_NAME) if (WIN32) target_compile_options(${TARGET_NAME} ${LINK_TYPE} ${OpenMP_CXX_FLAGS} /openmp) target_compile_options(${TARGET_NAME} ${LINK_TYPE} ${OpenMP_CXX_FLAGS} /Qopenmp) - ie_target_link_libraries(${TARGET_NAME} ${LINK_TYPE} "-nodefaultlib:vcomp") + _ov_target_link_libraries(${TARGET_NAME} ${LINK_TYPE} "-nodefaultlib:vcomp") else() target_compile_options(${TARGET_NAME} ${LINK_TYPE} ${OpenMP_CXX_FLAGS} -fopenmp) endif () @@ -373,18 +373,18 @@ function(ov_set_threading_interface_for TARGET_NAME) # Debug binaries are optional. if (OMP_LIBRARIES_DEBUG AND NOT LINUX) if (WIN32) - ie_target_link_libraries(${TARGET_NAME} ${LINK_TYPE} "$<$:${OMP_LIBRARIES_DEBUG}>;$<$>:${OMP_LIBRARIES_RELEASE}>") + _ov_target_link_libraries(${TARGET_NAME} ${LINK_TYPE} "$<$:${OMP_LIBRARIES_DEBUG}>;$<$>:${OMP_LIBRARIES_RELEASE}>") else() # TODO: handle multi-config generators case if (CMAKE_BUILD_TYPE STREQUAL "Debug") - ie_target_link_libraries(${TARGET_NAME} ${LINK_TYPE} ${OMP_LIBRARIES_DEBUG}) + _ov_target_link_libraries(${TARGET_NAME} ${LINK_TYPE} ${OMP_LIBRARIES_DEBUG}) else() - ie_target_link_libraries(${TARGET_NAME} ${LINK_TYPE} ${OMP_LIBRARIES_RELEASE}) + _ov_target_link_libraries(${TARGET_NAME} ${LINK_TYPE} ${OMP_LIBRARIES_RELEASE}) endif () endif () else () # Link Release library to all configurations. - ie_target_link_libraries(${TARGET_NAME} ${LINK_TYPE} ${OMP_LIBRARIES_RELEASE}) + _ov_target_link_libraries(${TARGET_NAME} ${LINK_TYPE} ${OMP_LIBRARIES_RELEASE}) endif () endif () endif () @@ -394,11 +394,13 @@ function(ov_set_threading_interface_for TARGET_NAME) if (NOT THREADING STREQUAL "SEQ") find_package(Threads REQUIRED) - ie_target_link_libraries(${TARGET_NAME} ${LINK_TYPE} Threads::Threads) + _ov_target_link_libraries(${TARGET_NAME} ${LINK_TYPE} Threads::Threads) endif() endfunction(ov_set_threading_interface_for) +# deprecated + function(set_ie_threading_interface_for TARGET_NAME) - message(WARNING "This function is deprecated. Please use ov_set_threading_interface_for(TARGET_NAME) instead.") + message(WARNING "'set_ie_threading_interface_for' is deprecated. 
Please use 'ov_set_threading_interface_for' instead.") ov_set_threading_interface_for(${TARGET_NAME}) endfunction(set_ie_threading_interface_for) diff --git a/src/common/preprocessing/src/CMakeLists.txt b/src/common/preprocessing/src/CMakeLists.txt index dbc3a06369edf7..ccab33a652c633 100644 --- a/src/common/preprocessing/src/CMakeLists.txt +++ b/src/common/preprocessing/src/CMakeLists.txt @@ -37,7 +37,7 @@ if(ENABLE_SSE42) list(APPEND LIBRARY_HEADERS ${SSE_HEADERS}) list(APPEND LIBRARY_SRC ${SSE_SRC}) - ie_sse42_optimization_flags(sse4_2_flags) + ov_sse42_optimization_flags(sse4_2_flags) set_source_files_properties(${SSE_SRC} PROPERTIES COMPILE_OPTIONS "${sse4_2_flags}") add_definitions(-DHAVE_SSE=1) endif() @@ -49,7 +49,7 @@ if(ENABLE_AVX2) list(APPEND LIBRARY_HEADERS ${AVX2_HEADERS}) list(APPEND LIBRARY_SRC ${AVX2_SRC}) - ie_avx2_optimization_flags(avx2_flags) + ov_avx2_optimization_flags(avx2_flags) set_source_files_properties(${AVX2_SRC} PROPERTIES COMPILE_OPTIONS "${avx2_flags}") add_definitions(-DHAVE_AVX2=1) endif() @@ -75,7 +75,7 @@ if(ENABLE_AVX512F AND NOT GNU_5_DEBUG_CASE) endif() if(ARM OR AARCH64) - ie_arm_neon_optimization_flags(neon_flags) + ov_arm_neon_optimization_flags(neon_flags) file(GLOB NEON_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/arm_neon/*.hpp) file(GLOB NEON_SRC ${CMAKE_CURRENT_SOURCE_DIR}/arm_neon/*.cpp) diff --git a/src/common/preprocessing/src/ie_preprocess_data.hpp b/src/common/preprocessing/src/ie_preprocess_data.hpp index f16989e230ca90..79d8e84c140b0b 100644 --- a/src/common/preprocessing/src/ie_preprocess_data.hpp +++ b/src/common/preprocessing/src/ie_preprocess_data.hpp @@ -84,7 +84,7 @@ class PreProcessDataPlugin { // 2. in the same folder as libopenvino.so ov::util::FilePath ovLibraryPath = getInferenceEngineLibraryPath(); - ov::util::FilePath libraryName = ov::util::to_file_path(std::string("openvino_gapi_preproc") + std::string(IE_BUILD_POSTFIX)); + ov::util::FilePath libraryName = ov::util::to_file_path(std::string("openvino_gapi_preproc") + std::string(OV_BUILD_POSTFIX)); libraryName = FileUtils::makePluginLibraryName({}, libraryName); std::ostringstream str; diff --git a/src/core/tests/CMakeLists.txt b/src/core/tests/CMakeLists.txt index 433395d8a4bc0f..45751efe2012b2 100644 --- a/src/core/tests/CMakeLists.txt +++ b/src/core/tests/CMakeLists.txt @@ -57,10 +57,10 @@ target_include_directories(${TARGET_NAME} PRIVATE ${OV_CORE_SRC_DIR}/src target_compile_definitions(${TARGET_NAME} PRIVATE SHARED_LIB_PREFIX="${CMAKE_SHARED_LIBRARY_PREFIX}" - SHARED_LIB_SUFFIX="${IE_BUILD_POSTFIX}${CMAKE_SHARED_LIBRARY_SUFFIX}" + SHARED_LIB_SUFFIX="${OV_BUILD_POSTFIX}${CMAKE_SHARED_LIBRARY_SUFFIX}" FRONTEND_LIB_PREFIX="${CMAKE_SHARED_LIBRARY_PREFIX}${FRONTEND_NAME_PREFIX}" # Assume .so is an existed symlink to .so (or .so - FRONTEND_LIB_SUFFIX="${FRONTEND_NAME_SUFFIX}${IE_BUILD_POSTFIX}${CMAKE_SHARED_LIBRARY_SUFFIX}" + FRONTEND_LIB_SUFFIX="${FRONTEND_NAME_SUFFIX}${OV_BUILD_POSTFIX}${CMAKE_SHARED_LIBRARY_SUFFIX}" ) if(RISCV64) diff --git a/src/core/tests/extension.cpp b/src/core/tests/extension.cpp index e62ed76fd72b58..5dd442c65e0449 100644 --- a/src/core/tests/extension.cpp +++ b/src/core/tests/extension.cpp @@ -15,7 +15,7 @@ inline std::string get_extension_path() { return ov::util::make_plugin_library_name(ov::test::utils::getExecutableDirectory(), - std::string("openvino_template_extension") + IE_BUILD_POSTFIX); + std::string("openvino_template_extension") + OV_BUILD_POSTFIX); } TEST(extension, load_extension) { diff --git a/src/frontends/common/CMakeLists.txt 
b/src/frontends/common/CMakeLists.txt index 1cb869079213d2..4fd41e6f4d3601 100644 --- a/src/frontends/common/CMakeLists.txt +++ b/src/frontends/common/CMakeLists.txt @@ -39,7 +39,7 @@ target_link_libraries(${TARGET_NAME}_obj PRIVATE openvino::util openvino::core:: set_target_properties(${TARGET_NAME}_obj PROPERTIES INTERPROCEDURAL_OPTIMIZATION_RELEASE OFF) -set(FRONTEND_LIB_SUFFIX "${FRONTEND_NAME_SUFFIX}${IE_BUILD_POSTFIX}") +set(FRONTEND_LIB_SUFFIX "${FRONTEND_NAME_SUFFIX}${OV_BUILD_POSTFIX}") if(APPLE) set(FRONTEND_LIB_SUFFIX "${FRONTEND_LIB_SUFFIX}${OpenVINO_VERSION_SUFFIX}${CMAKE_SHARED_LIBRARY_SUFFIX}") else() diff --git a/src/frontends/onnx/tests/CMakeLists.txt b/src/frontends/onnx/tests/CMakeLists.txt index 22bd85d5b92ee3..f0f8891b4a4945 100644 --- a/src/frontends/onnx/tests/CMakeLists.txt +++ b/src/frontends/onnx/tests/CMakeLists.txt @@ -133,7 +133,7 @@ target_include_directories(ov_onnx_frontend_tests PRIVATE "${CMAKE_CURRENT_SOURC target_compile_definitions(ov_onnx_frontend_tests PRIVATE SHARED_LIB_PREFIX="${CMAKE_SHARED_LIBRARY_PREFIX}" - SHARED_LIB_SUFFIX="${IE_BUILD_POSTFIX}${CMAKE_SHARED_LIBRARY_SUFFIX}") + SHARED_LIB_SUFFIX="${OV_BUILD_POSTFIX}${CMAKE_SHARED_LIBRARY_SUFFIX}") set(ONNX_OPSET_VERSION 17 CACHE INTERNAL "Supported version of ONNX operator set") target_compile_definitions(ov_onnx_frontend_tests PRIVATE ONNX_OPSET_VERSION=${ONNX_OPSET_VERSION}) diff --git a/src/frontends/tests/frontend/shared/CMakeLists.txt b/src/frontends/tests/frontend/shared/CMakeLists.txt index f99a3d82e9f816..f5d2809205db0e 100644 --- a/src/frontends/tests/frontend/shared/CMakeLists.txt +++ b/src/frontends/tests/frontend/shared/CMakeLists.txt @@ -28,6 +28,6 @@ add_dependencies(${TARGET_NAME} test_builtin_extensions) target_compile_definitions(${TARGET_NAME} PRIVATE SHARED_LIB_PREFIX="${CMAKE_SHARED_LIBRARY_PREFIX}" - SHARED_LIB_SUFFIX="${IE_BUILD_POSTFIX}${CMAKE_SHARED_LIBRARY_SUFFIX}") + SHARED_LIB_SUFFIX="${OV_BUILD_POSTFIX}${CMAKE_SHARED_LIBRARY_SUFFIX}") ov_add_clang_format_target(${TARGET_NAME}_clang FOR_TARGETS ${TARGET_NAME}) diff --git a/src/frontends/tests/frontend/shared/src/conversion.cpp b/src/frontends/tests/frontend/shared/src/conversion.cpp index 286a5bd638adf4..34f3e9d6897eaa 100644 --- a/src/frontends/tests/frontend/shared/src/conversion.cpp +++ b/src/frontends/tests/frontend/shared/src/conversion.cpp @@ -30,7 +30,7 @@ void FrontEndConversionExtensionTest::initParamTest() { inline std::string get_lib_path(const std::string& lib_name) { return ov::util::make_plugin_library_name(ov::test::utils::getExecutableDirectory(), - lib_name + IE_BUILD_POSTFIX); + lib_name + OV_BUILD_POSTFIX); } /////////////////////////////////////////////////////////////////// diff --git a/src/frontends/tests/frontend/shared/src/library_extension.cpp b/src/frontends/tests/frontend/shared/src/library_extension.cpp index 75f68869775520..a2257f8fca116b 100644 --- a/src/frontends/tests/frontend/shared/src/library_extension.cpp +++ b/src/frontends/tests/frontend/shared/src/library_extension.cpp @@ -31,7 +31,7 @@ void FrontendLibraryExtensionTest::initParamTest() { inline std::string get_lib_path(const std::string& lib_name) { return ov::util::make_plugin_library_name(ov::test::utils::getExecutableDirectory(), - lib_name + IE_BUILD_POSTFIX); + lib_name + OV_BUILD_POSTFIX); } /////////////////////////////////////////////////////////////////// diff --git a/src/inference/CMakeLists.txt b/src/inference/CMakeLists.txt index 19bf83c27e18f9..3e61c370d20482 100644 --- a/src/inference/CMakeLists.txt +++ 
b/src/inference/CMakeLists.txt @@ -20,8 +20,8 @@ file (GLOB LIBRARY_SRC ${CMAKE_CURRENT_SOURCE_DIR}/src/cpp_interfaces/interface/*.cpp ) -set(IE_STATIC_DEPENDENT_FILES ${CMAKE_CURRENT_SOURCE_DIR}/src/file_utils.cpp) -list(REMOVE_ITEM LIBRARY_SRC ${IE_STATIC_DEPENDENT_FILES}) +set(OV_STATIC_DEPENDENT_FILES ${CMAKE_CURRENT_SOURCE_DIR}/src/file_utils.cpp) +list(REMOVE_ITEM LIBRARY_SRC ${OV_STATIC_DEPENDENT_FILES}) if(BUILD_SHARED_LIBS OR ENABLE_IR_V7_READER) # TODO: remove together with GNA plugin @@ -66,7 +66,7 @@ if(ENABLE_SSE42) list(APPEND LIBRARY_HEADERS ${SSE_HEADERS}) list(APPEND LIBRARY_SRC ${SSE_SRC}) - ie_sse42_optimization_flags(sse4_2_flags) + ov_sse42_optimization_flags(sse4_2_flags) set_source_files_properties(${SSE_SRC} PROPERTIES COMPILE_OPTIONS "${sse4_2_flags}") add_definitions(-DHAVE_SSE=1) @@ -118,7 +118,7 @@ ov_ncc_naming_style(FOR_TARGET ${TARGET_NAME}_plugin_api # Create object library add_library(${TARGET_NAME}_obj OBJECT - ${IE_STATIC_DEPENDENT_FILES} + ${OV_STATIC_DEPENDENT_FILES} ${LIBRARY_SRC} ${LIBRARY_HEADERS} ${PUBLIC_HEADERS}) @@ -174,7 +174,7 @@ target_link_libraries(${TARGET_NAME} INTERFACE openvino::runtime) target_include_directories(${TARGET_NAME} INTERFACE $ $) -ov_add_clang_format_target(${TARGET_NAME}_clang FOR_SOURCES ${IE_STATIC_DEPENDENT_FILES} ${LIBRARY_SRC} ${LIBRARY_HEADERS} ${PUBLIC_HEADERS}) +ov_add_clang_format_target(${TARGET_NAME}_clang FOR_SOURCES ${OV_STATIC_DEPENDENT_FILES} ${LIBRARY_SRC} ${LIBRARY_HEADERS} ${PUBLIC_HEADERS}) ov_ncc_naming_style(FOR_TARGET ${TARGET_NAME}_obj SOURCE_DIRECTORIES "${CMAKE_CURRENT_SOURCE_DIR}/include/openvino" diff --git a/src/inference/src/ie_network_reader.cpp b/src/inference/src/ie_network_reader.cpp index 8e7c8ecb823033..7fe34b42ed7948 100644 --- a/src/inference/src/ie_network_reader.cpp +++ b/src/inference/src/ie_network_reader.cpp @@ -113,7 +113,7 @@ void registerReaders() { // try to load IR reader v7 if library exists try { reader_irv7 = - std::make_shared(std::string("inference_engine_ir_v7_reader") + std::string(IE_BUILD_POSTFIX)); + std::make_shared(std::string("inference_engine_ir_v7_reader") + std::string(OV_BUILD_POSTFIX)); } catch (const std::runtime_error&) { // runtime error is thrown in case of library cannot be loaded } diff --git a/src/inference/tests/functional/caching_test.cpp b/src/inference/tests/functional/caching_test.cpp index 8a313792b87af5..2668eafc44a34e 100644 --- a/src/inference/tests/functional/caching_test.cpp +++ b/src/inference/tests/functional/caching_test.cpp @@ -161,7 +161,7 @@ class CachingTest : public ::testing::TestWithParam( ov::util::make_plugin_library_name(ov::test::utils::getExecutableDirectory(), - std::string("template_extension") + IE_BUILD_POSTFIX)); + std::string("template_extension") + OV_BUILD_POSTFIX)); ie.AddExtension(extension); } catch (const InferenceEngine::Exception& ex) { ASSERT_STR_CONTAINS(ex.what(), "name: custom_opset. 
Opset"); @@ -95,7 +95,7 @@ TEST_F(IECoreThreadingTests, RegisterPlugin) { [&]() { const std::string deviceName = std::to_string(index++); ie.RegisterPlugin(ov::util::make_plugin_library_name(ov::test::utils::getExecutableDirectory(), - std::string("mock_engine") + IE_BUILD_POSTFIX), + std::string("mock_engine") + OV_BUILD_POSTFIX), deviceName); ie.GetVersions(deviceName); ie.UnregisterPlugin(deviceName); @@ -118,7 +118,7 @@ TEST_F(IECoreThreadingTests, RegisterPlugins) { file << ov::util::FileTraits::file_separator; file << ov::util::FileTraits::library_prefix(); file << "mock_engine"; - file << IE_BUILD_POSTFIX; + file << OV_BUILD_POSTFIX; file << ov::util::FileTraits::dot_symbol; file << ov::util::FileTraits::library_ext(); file << "\" name=\""; diff --git a/src/inference/tests/functional/get_supported_property_test.cpp b/src/inference/tests/functional/get_supported_property_test.cpp index b92fbdf18a9962..c1fea82d2552de 100644 --- a/src/inference/tests/functional/get_supported_property_test.cpp +++ b/src/inference/tests/functional/get_supported_property_test.cpp @@ -33,7 +33,7 @@ class GetPropertyTest : public ::testing::TestWithParam { injectProxyEngine(plugin.get()); core.register_plugin(ov::util::make_plugin_library_name(ov::test::utils::getExecutableDirectory(), - std::string("mock_engine") + IE_BUILD_POSTFIX), + std::string("mock_engine") + OV_BUILD_POSTFIX), m_plugin_name); m_mock_plugin = plugin; } diff --git a/src/inference/tests/functional/ov_core_threading.cpp b/src/inference/tests/functional/ov_core_threading.cpp index c935c8952358f1..96f954b5dcbd6c 100644 --- a/src/inference/tests/functional/ov_core_threading.cpp +++ b/src/inference/tests/functional/ov_core_threading.cpp @@ -64,7 +64,7 @@ class CoreThreadingTests : public ::testing::Test { OPENVINO_SUPPRESS_DEPRECATED_START auto extension = std::make_shared( ov::util::make_plugin_library_name(ov::test::utils::getExecutableDirectory(), - std::string("template_extension") + IE_BUILD_POSTFIX)); + std::string("template_extension") + OV_BUILD_POSTFIX)); core.add_extension(extension); OPENVINO_SUPPRESS_DEPRECATED_END } catch (const ov::Exception& ex) { @@ -95,7 +95,7 @@ TEST_F(CoreThreadingTests, RegisterPlugin) { [&]() { const std::string deviceName = std::to_string(index++); core.register_plugin(ov::util::make_plugin_library_name(ov::test::utils::getExecutableDirectory(), - std::string("mock_engine") + IE_BUILD_POSTFIX), + std::string("mock_engine") + OV_BUILD_POSTFIX), deviceName); core.get_versions(deviceName); core.unload_plugin(deviceName); @@ -118,7 +118,7 @@ TEST_F(CoreThreadingTests, RegisterPlugins) { file << ov::util::FileTraits::file_separator; file << ov::util::FileTraits::library_prefix(); file << "mock_engine"; - file << IE_BUILD_POSTFIX; + file << OV_BUILD_POSTFIX; file << ov::util::FileTraits::dot_symbol; file << ov::util::FileTraits::library_ext(); file << "\" name=\""; diff --git a/src/inference/tests/functional/ov_extension_test.cpp b/src/inference/tests/functional/ov_extension_test.cpp index 41cfd4813b3481..394526c1bc47da 100644 --- a/src/inference/tests/functional/ov_extension_test.cpp +++ b/src/inference/tests/functional/ov_extension_test.cpp @@ -15,23 +15,23 @@ namespace { std::string getOVExtensionPath() { return ov::util::make_plugin_library_name(ov::test::utils::getExecutableDirectory(), - std::string("openvino_template_extension") + IE_BUILD_POSTFIX); + std::string("openvino_template_extension") + OV_BUILD_POSTFIX); } std::string getOldExtensionPath() { return 
ov::util::make_plugin_library_name(ov::test::utils::getExecutableDirectory(), - std::string("template_extension") + IE_BUILD_POSTFIX); + std::string("template_extension") + OV_BUILD_POSTFIX); } std::string getIncorrectExtensionPath() { return ov::util::make_plugin_library_name(ov::test::utils::getExecutableDirectory(), - std::string("incorrect") + IE_BUILD_POSTFIX); + std::string("incorrect") + OV_BUILD_POSTFIX); } std::string getRelativeOVExtensionPath() { std::string absolutePath = ov::util::make_plugin_library_name(ov::test::utils::getExecutableDirectory(), - std::string("openvino_template_extension") + IE_BUILD_POSTFIX); + std::string("openvino_template_extension") + OV_BUILD_POSTFIX); return ov::test::utils::getRelativePath(ov::test::utils::getCurrentWorkingDir(), absolutePath); } diff --git a/src/inference/tests/functional/ov_register_plugin_test.cpp b/src/inference/tests/functional/ov_register_plugin_test.cpp index 0708cd347d916d..a3830743efb919 100644 --- a/src/inference/tests/functional/ov_register_plugin_test.cpp +++ b/src/inference/tests/functional/ov_register_plugin_test.cpp @@ -45,7 +45,7 @@ TEST(RegisterPluginTests, getVersionforRegisteredPluginThrows) { // Registered plugin with invalid so here ASSERT_NO_THROW(core.register_plugin( ov::util::make_plugin_library_name(ov::test::utils::getExecutableDirectory(), - std::string("mock_registered_engine") + IE_BUILD_POSTFIX), + std::string("mock_registered_engine") + OV_BUILD_POSTFIX), mock_plugin_name)); ASSERT_THROW(core.get_versions("MOCK_REGISTERED_HARDWARE"), ov::Exception); } @@ -72,7 +72,7 @@ TEST(RegisterPluginTests, getVersionforNoRegisteredPluginNoThrows) { ASSERT_NO_THROW( core.register_plugin(ov::util::make_plugin_library_name(ov::test::utils::getExecutableDirectory(), - std::string("mock_engine") + IE_BUILD_POSTFIX), + std::string("mock_engine") + OV_BUILD_POSTFIX), mock_plugin_name)); ASSERT_NO_THROW(core.get_versions("MOCK_HARDWARE")); } @@ -87,7 +87,7 @@ TEST(RegisterPluginTests, registerNewPluginNoThrows) { std::string mock_plugin_name{"MOCK_HARDWARE"}; ASSERT_NO_THROW( core.register_plugin(ov::util::make_plugin_library_name(ov::test::utils::getExecutableDirectory(), - std::string("mock_engine") + IE_BUILD_POSTFIX), + std::string("mock_engine") + OV_BUILD_POSTFIX), mock_plugin_name)); ASSERT_NO_THROW(core.get_property(mock_plugin_name, ov::supported_properties)); @@ -104,10 +104,10 @@ TEST(RegisterPluginTests, registerExistingPluginThrows) { std::string mock_plugin_name{"MOCK_HARDWARE"}; ASSERT_NO_THROW( core.register_plugin(ov::util::make_plugin_library_name(ov::test::utils::getExecutableDirectory(), - std::string("mock_engine") + IE_BUILD_POSTFIX), + std::string("mock_engine") + OV_BUILD_POSTFIX), mock_plugin_name)); ASSERT_THROW(core.register_plugin(ov::util::make_plugin_library_name(ov::test::utils::getExecutableDirectory(), - std::string("mock_engine") + IE_BUILD_POSTFIX), + std::string("mock_engine") + OV_BUILD_POSTFIX), mock_plugin_name), ov::Exception); } @@ -118,7 +118,7 @@ inline std::string getPluginFile() { std::ostringstream stream; stream << ""; ov::test::utils::createFile(filename, stream.str()); return filename; diff --git a/src/inference/tests/functional/ov_shared_object_test.cpp b/src/inference/tests/functional/ov_shared_object_test.cpp index 1d60528fdcca40..3c268ea94fa610 100644 --- a/src/inference/tests/functional/ov_shared_object_test.cpp +++ b/src/inference/tests/functional/ov_shared_object_test.cpp @@ -16,7 +16,7 @@ class SharedObjectOVTests : public ::testing::Test { protected: std::string 
get_mock_engine_name() { return FileUtils::makePluginLibraryName(ov::test::utils::getExecutableDirectory(), - std::string("mock_engine") + IE_BUILD_POSTFIX); + std::string("mock_engine") + OV_BUILD_POSTFIX); } void loadDll(const string& libraryName) { diff --git a/src/inference/tests/unit/ie_extension_test.cpp b/src/inference/tests/unit/ie_extension_test.cpp index 3a68c157de0b7b..336bf5da71c9b7 100644 --- a/src/inference/tests/unit/ie_extension_test.cpp +++ b/src/inference/tests/unit/ie_extension_test.cpp @@ -23,7 +23,7 @@ OPENVINO_SUPPRESS_DEPRECATED_START static std::string getExtensionPath() { return FileUtils::makePluginLibraryName(ov::test::utils::getExecutableDirectory(), - std::string("template_extension") + IE_BUILD_POSTFIX); + std::string("template_extension") + OV_BUILD_POSTFIX); } TEST(ExtensionTests, testGetOpSets) { diff --git a/src/plugins/auto/tests/functional/behavior/auto_func_test.cpp b/src/plugins/auto/tests/functional/behavior/auto_func_test.cpp index 1ba14b66d57207..123a41e3524744 100644 --- a/src/plugins/auto/tests/functional/behavior/auto_func_test.cpp +++ b/src/plugins/auto/tests/functional/behavior/auto_func_test.cpp @@ -30,7 +30,7 @@ namespace { std::string get_mock_engine_path() { std::string mockEngineName("mock_engine"); return ov::util::make_plugin_library_name(ov::test::utils::getExecutableDirectory(), - mockEngineName + IE_BUILD_POSTFIX); + mockEngineName + OV_BUILD_POSTFIX); } template @@ -623,7 +623,7 @@ void ov::auto_plugin::tests::AutoFuncTests::reg_plugin(ov::Core& core, injectProxyEngine(plugin.get()); core.register_plugin(ov::util::make_plugin_library_name(ov::test::utils::getExecutableDirectory(), - std::string("mock_engine") + IE_BUILD_POSTFIX), + std::string("mock_engine") + OV_BUILD_POSTFIX), device_name, properties); m_mock_plugins.emplace_back(plugin); diff --git a/src/plugins/hetero/tests/functional/hetero_tests.cpp b/src/plugins/hetero/tests/functional/hetero_tests.cpp index 4228a5c14ce8e6..55440556f0552b 100644 --- a/src/plugins/hetero/tests/functional/hetero_tests.cpp +++ b/src/plugins/hetero/tests/functional/hetero_tests.cpp @@ -32,7 +32,7 @@ namespace { std::string get_mock_engine_path() { std::string mock_engine_name("mock_engine"); return ov::util::make_plugin_library_name(ov::test::utils::getExecutableDirectory(), - mock_engine_name + IE_BUILD_POSTFIX); + mock_engine_name + OV_BUILD_POSTFIX); } template diff --git a/src/plugins/intel_cpu/CMakeLists.txt b/src/plugins/intel_cpu/CMakeLists.txt index 4212bacb8cc055..10b82b2b75ffcb 100644 --- a/src/plugins/intel_cpu/CMakeLists.txt +++ b/src/plugins/intel_cpu/CMakeLists.txt @@ -44,7 +44,7 @@ if(X86 OR X86_64 OR AARCH64) else() set(ENABLE_MLAS_FOR_CPU_DEFAULT OFF) endif() -ie_option(ENABLE_MLAS_FOR_CPU "Enable MLAS for OpenVINO CPU Plugin" ${ENABLE_MLAS_FOR_CPU_DEFAULT}) +ov_option(ENABLE_MLAS_FOR_CPU "Enable MLAS for OpenVINO CPU Plugin" ${ENABLE_MLAS_FOR_CPU_DEFAULT}) add_subdirectory(thirdparty) diff --git a/src/plugins/intel_cpu/tests/functional/extension/extension.cpp b/src/plugins/intel_cpu/tests/functional/extension/extension.cpp index df4fe4b323836e..b2f9c1c59ac0c1 100644 --- a/src/plugins/intel_cpu/tests/functional/extension/extension.cpp +++ b/src/plugins/intel_cpu/tests/functional/extension/extension.cpp @@ -201,7 +201,7 @@ TEST(Extension, XmlModelWithCustomAbs) { static std::string get_extension_path() { return FileUtils::makePluginLibraryName(ov::test::utils::getExecutableDirectory(), - std::string("template_extension") + IE_BUILD_POSTFIX); + std::string("template_extension") + 
OV_BUILD_POSTFIX); } diff --git a/src/plugins/intel_gna/CMakeLists.txt b/src/plugins/intel_gna/CMakeLists.txt index 566253a3c6a318..107ea8a5a0cdc9 100644 --- a/src/plugins/intel_gna/CMakeLists.txt +++ b/src/plugins/intel_gna/CMakeLists.txt @@ -23,7 +23,7 @@ file(GLOB_RECURSE HEADERS # build avx2.cpp with AVX2 support, only for Windows if(ENABLE_AVX2 AND CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") - ie_avx2_optimization_flags(avx2_flags) + ov_avx2_optimization_flags(avx2_flags) set_source_files_properties(${CMAKE_CURRENT_SOURCE_DIR}/src/pre_post_process/hw_accelerated_converter_avx2.cpp PROPERTIES COMPILE_OPTIONS "${avx2_flags}") add_compile_definitions(HAVE_AVX2=1) endif() diff --git a/src/plugins/intel_gna/tests/deprecated/helpers/tests_common.hpp b/src/plugins/intel_gna/tests/deprecated/helpers/tests_common.hpp index f164fc1e8ca361..c9b5cea167f1dd 100644 --- a/src/plugins/intel_gna/tests/deprecated/helpers/tests_common.hpp +++ b/src/plugins/intel_gna/tests/deprecated/helpers/tests_common.hpp @@ -44,7 +44,7 @@ class TestsCommon : public ::testing::Test { void SetUp() override; static std::string make_so_name(const std::string & input) { - return ov::test::utils::pre + input + IE_BUILD_POSTFIX + ov::test::utils::ext; + return ov::test::utils::pre + input + OV_BUILD_POSTFIX + ov::test::utils::ext; } void TearDown() override; diff --git a/src/plugins/intel_gpu/src/graph/CMakeLists.txt b/src/plugins/intel_gpu/src/graph/CMakeLists.txt index 67a0a3282cda13..731b580718ea84 100644 --- a/src/plugins/intel_gpu/src/graph/CMakeLists.txt +++ b/src/plugins/intel_gpu/src/graph/CMakeLists.txt @@ -61,7 +61,7 @@ endif() ov_install_static_lib(${TARGET_NAME} ${OV_CPACK_COMP_CORE}) if(ENABLE_SSE42) - ie_sse42_optimization_flags(sse4_2_flags) + ov_sse42_optimization_flags(sse4_2_flags) set_source_files_properties(impls/cpu/detection_output.cpp PROPERTIES COMPILE_FLAGS "${sse4_2_flags}" COMPILE_DEFINITIONS "HAVE_SSE") diff --git a/src/plugins/intel_gpu/tests/unit/CMakeLists.txt b/src/plugins/intel_gpu/tests/unit/CMakeLists.txt index bf9858d1575e6b..3dda088627b833 100644 --- a/src/plugins/intel_gpu/tests/unit/CMakeLists.txt +++ b/src/plugins/intel_gpu/tests/unit/CMakeLists.txt @@ -82,7 +82,7 @@ elseif((NOT ANDROID) AND (UNIX)) endif() if(ENABLE_SSE42) - ie_sse42_optimization_flags(sse4_2_flags) + ov_sse42_optimization_flags(sse4_2_flags) set_source_files_properties(${SOURCES_ALL} PROPERTIES COMPILE_FLAGS "${sse4_2_flags}") endif() diff --git a/src/plugins/proxy/tests/proxy_tests.cpp b/src/plugins/proxy/tests/proxy_tests.cpp index 10d944ba3fe2f2..0343e40ee240e2 100644 --- a/src/plugins/proxy/tests/proxy_tests.cpp +++ b/src/plugins/proxy/tests/proxy_tests.cpp @@ -28,7 +28,7 @@ namespace { std::string get_mock_engine_path() { std::string mockEngineName("mock_engine"); return ov::util::make_plugin_library_name(ov::test::utils::getExecutableDirectory(), - mockEngineName + IE_BUILD_POSTFIX); + mockEngineName + OV_BUILD_POSTFIX); } template @@ -492,7 +492,7 @@ void ov::proxy::tests::ProxyTests::reg_plugin(ov::Core& core, injectProxyEngine(plugin.get()); core.register_plugin(ov::util::make_plugin_library_name(ov::test::utils::getExecutableDirectory(), - std::string("mock_engine") + IE_BUILD_POSTFIX), + std::string("mock_engine") + OV_BUILD_POSTFIX), device_name, properties); m_mock_plugins.emplace_back(plugin); diff --git a/src/plugins/template/CMakeLists.txt b/src/plugins/template/CMakeLists.txt index 02633f09abee92..4a3691186302aa 100644 --- a/src/plugins/template/CMakeLists.txt +++ b/src/plugins/template/CMakeLists.txt @@ 
-31,6 +31,6 @@ endif() # install if(OpenVINODeveloperPackage_FOUND) - ie_cpack(template) + ov_cpack(template) endif() # [cmake:main] diff --git a/src/plugins/template/backend/CMakeLists.txt b/src/plugins/template/backend/CMakeLists.txt index 74e10c01863dfe..2836d0c34b6c4f 100644 --- a/src/plugins/template/backend/CMakeLists.txt +++ b/src/plugins/template/backend/CMakeLists.txt @@ -36,7 +36,7 @@ ov_build_target_faster(interpreter_backend UNITY) target_compile_definitions(interpreter_backend PRIVATE SHARED_LIB_PREFIX="${CMAKE_SHARED_LIBRARY_PREFIX}" - SHARED_LIB_SUFFIX="${IE_BUILD_POSTFIX}${CMAKE_SHARED_LIBRARY_SUFFIX}" + SHARED_LIB_SUFFIX="${OV_BUILD_POSTFIX}${CMAKE_SHARED_LIBRARY_SUFFIX}" ) target_link_libraries(interpreter_backend PRIVATE openvino::builders openvino::reference openvino::util openvino::runtime::dev openvino::shape_inference) diff --git a/src/tests/CMakeLists.txt b/src/tests/CMakeLists.txt index 944fc8d0d52054..d0a302ad12bec4 100644 --- a/src/tests/CMakeLists.txt +++ b/src/tests/CMakeLists.txt @@ -2,11 +2,11 @@ # SPDX-License-Identifier: Apache-2.0 # -set(IE_TESTS_ROOT ${CMAKE_CURRENT_SOURCE_DIR}) +set(OV_TESTS_ROOT ${CMAKE_CURRENT_SOURCE_DIR}) enable_testing() -ie_option(ENABLE_CONFORMANCE_PGQL "Enables support of PostgreSQL-based reporting from test tools" OFF) +ov_option(ENABLE_CONFORMANCE_PGQL "Enables support of PostgreSQL-based reporting from test tools" OFF) mark_as_advanced(FORCE ENABLE_CONFORMANCE_PGQL) if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") diff --git a/src/tests/functional/plugin/shared/include/base/behavior_test_utils.hpp b/src/tests/functional/plugin/shared/include/base/behavior_test_utils.hpp index c3ac8e0f5c1a0a..6fb1ca5e3d0a55 100644 --- a/src/tests/functional/plugin/shared/include/base/behavior_test_utils.hpp +++ b/src/tests/functional/plugin/shared/include/base/behavior_test_utils.hpp @@ -87,7 +87,7 @@ inline InferenceEngine::Core createIECoreWithTemplate() { PluginCache::get().reset(); InferenceEngine::Core ie; #ifndef OPENVINO_STATIC_LIBRARY - std::string pluginName = "openvino_template_plugin" IE_BUILD_POSTFIX; + std::string pluginName = "openvino_template_plugin" OV_BUILD_POSTFIX; ie.RegisterPlugin(ov::util::make_plugin_library_name(ov::test::utils::getExecutableDirectory(), pluginName), ov::test::utils::DEVICE_TEMPLATE); #endif // !OPENVINO_STATIC_LIBRARY diff --git a/src/tests/functional/plugin/shared/include/base/ov_behavior_test_utils.hpp b/src/tests/functional/plugin/shared/include/base/ov_behavior_test_utils.hpp index 347660d0aa3252..5e29386b9d6a69 100644 --- a/src/tests/functional/plugin/shared/include/base/ov_behavior_test_utils.hpp +++ b/src/tests/functional/plugin/shared/include/base/ov_behavior_test_utils.hpp @@ -161,7 +161,7 @@ inline ov::Core createCoreWithTemplate() { ov::Core core; #ifndef OPENVINO_STATIC_LIBRARY std::string pluginName = "openvino_template_plugin"; - pluginName += IE_BUILD_POSTFIX; + pluginName += OV_BUILD_POSTFIX; core.register_plugin(ov::util::make_plugin_library_name(ov::test::utils::getExecutableDirectory(), pluginName), ov::test::utils::DEVICE_TEMPLATE); #endif // !OPENVINO_STATIC_LIBRARY diff --git a/src/tests/functional/plugin/shared/include/behavior/ov_plugin/core_integration.hpp b/src/tests/functional/plugin/shared/include/behavior/ov_plugin/core_integration.hpp index 3be422c23f4e2f..a6c0f73751bff7 100644 --- a/src/tests/functional/plugin/shared/include/behavior/ov_plugin/core_integration.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/ov_plugin/core_integration.hpp @@ -65,8 +65,8 @@ class 
OVClassBasicTestP : public OVPluginTestBase, std::tie(pluginName, target_device) = GetParam(); SKIP_IF_CURRENT_TEST_IS_DISABLED(); APIBaseTest::SetUp(); - pluginName += IE_BUILD_POSTFIX; - if (pluginName == (std::string("openvino_template_plugin") + IE_BUILD_POSTFIX)) { + pluginName += OV_BUILD_POSTFIX; + if (pluginName == (std::string("openvino_template_plugin") + OV_BUILD_POSTFIX)) { pluginName = ov::util::make_plugin_library_name(ov::test::utils::getExecutableDirectory(), pluginName); } } @@ -197,7 +197,7 @@ inline std::string getPluginFile() { std::ostringstream stream; stream << ""; ov::test::utils::createFile(filename, stream.str()); return filename; diff --git a/src/tests/functional/plugin/shared/include/behavior/ov_plugin/properties_tests.hpp b/src/tests/functional/plugin/shared/include/behavior/ov_plugin/properties_tests.hpp index edfcc5920b07a0..cecd117f3529a7 100644 --- a/src/tests/functional/plugin/shared/include/behavior/ov_plugin/properties_tests.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/ov_plugin/properties_tests.hpp @@ -123,8 +123,8 @@ class OVBasicPropertiesTestsP : public OVPluginTestBase, std::tie(pluginName, target_device) = GetParam(); SKIP_IF_CURRENT_TEST_IS_DISABLED(); APIBaseTest::SetUp(); - pluginName += IE_BUILD_POSTFIX; - if (pluginName == (std::string("openvino_template_plugin") + IE_BUILD_POSTFIX)) { + pluginName += OV_BUILD_POSTFIX; + if (pluginName == (std::string("openvino_template_plugin") + OV_BUILD_POSTFIX)) { pluginName = ov::util::make_plugin_library_name(ov::test::utils::getExecutableDirectory(), pluginName); } } diff --git a/src/tests/functional/plugin/shared/include/behavior/plugin/core_integration.hpp b/src/tests/functional/plugin/shared/include/behavior/plugin/core_integration.hpp index f0ae5358605656..d49e0dfd17c2bd 100644 --- a/src/tests/functional/plugin/shared/include/behavior/plugin/core_integration.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/plugin/core_integration.hpp @@ -55,8 +55,8 @@ class IEClassBasicTestP : public BehaviorTestsUtils::IEPluginTestBase, std::tie(pluginName, target_device) = GetParam(); SKIP_IF_CURRENT_TEST_IS_DISABLED(); ov::test::behavior::APIBaseTest::SetUp(); - pluginName += IE_BUILD_POSTFIX; - if (pluginName == (std::string("openvino_template_plugin") + IE_BUILD_POSTFIX)) { + pluginName += OV_BUILD_POSTFIX; + if (pluginName == (std::string("openvino_template_plugin") + OV_BUILD_POSTFIX)) { pluginName = ov::util::make_plugin_library_name(ov::test::utils::getExecutableDirectory(), pluginName); } } @@ -149,7 +149,7 @@ inline std::string getPluginFile() { std::ostringstream stream; stream << ""; ov::test::utils::createFile(filename, stream.str()); return filename; diff --git a/src/tests/functional/plugin/shared/src/behavior/plugin/hetero_synthetic.cpp b/src/tests/functional/plugin/shared/src/behavior/plugin/hetero_synthetic.cpp index 005e71be01fc36..bd165465edc894 100644 --- a/src/tests/functional/plugin/shared/src/behavior/plugin/hetero_synthetic.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/plugin/hetero_synthetic.cpp @@ -119,11 +119,11 @@ void HeteroSyntheticTest::SetUp() { try { if (pluginParameter._location == "openvino_template_plugin") { PluginCache::get().ie()->RegisterPlugin(ov::util::make_plugin_library_name( - ov::test::utils::getExecutableDirectory(), pluginParameter._location + IE_BUILD_POSTFIX), + ov::test::utils::getExecutableDirectory(), pluginParameter._location + OV_BUILD_POSTFIX), pluginParameter._name); } else { 
PluginCache::get().ie()->RegisterPlugin(pluginParameter._location - + IE_BUILD_POSTFIX, pluginParameter._name); + + OV_BUILD_POSTFIX, pluginParameter._name); } } catch (InferenceEngine::Exception& ex) { if (std::string{ex.what()}.find("Device with \"" + pluginParameter._name diff --git a/src/tests/test_utils/common_test_utils/CMakeLists.txt b/src/tests/test_utils/common_test_utils/CMakeLists.txt index 3d63059962c4f1..abf36d4fa3864d 100644 --- a/src/tests/test_utils/common_test_utils/CMakeLists.txt +++ b/src/tests/test_utils/common_test_utils/CMakeLists.txt @@ -79,7 +79,7 @@ function(add_common_utils ADD_TARGET_NAME) $ PRIVATE $) - target_include_directories(${ADD_TARGET_NAME} SYSTEM PUBLIC ${IE_TESTS_ROOT}/test_utils) + target_include_directories(${ADD_TARGET_NAME} SYSTEM PUBLIC ${OV_TESTS_ROOT}/test_utils) target_compile_definitions(${ADD_TARGET_NAME} PUBLIC ${ARGN}) diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/file_utils.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/file_utils.hpp index 6c5e66faabe4a0..58920ee8cf4379 100644 --- a/src/tests/test_utils/common_test_utils/include/common_test_utils/file_utils.hpp +++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/file_utils.hpp @@ -288,7 +288,7 @@ namespace { inline std::string get_mock_engine_path() { std::string mockEngineName("mock_engine"); return ov::util::make_plugin_library_name(ov::test::utils::getExecutableDirectory(), - mockEngineName + IE_BUILD_POSTFIX); + mockEngineName + OV_BUILD_POSTFIX); } template diff --git a/src/tests/test_utils/common_test_utils/src/test_case.cpp b/src/tests/test_utils/common_test_utils/src/test_case.cpp index 213af43397d985..2fb703e7603ee5 100644 --- a/src/tests/test_utils/common_test_utils/src/test_case.cpp +++ b/src/tests/test_utils/common_test_utils/src/test_case.cpp @@ -192,7 +192,7 @@ TestCase::TestCase(const std::shared_ptr& function, const std::string // Register template plugin m_core.register_plugin( ov::util::make_plugin_library_name(ov::test::utils::getExecutableDirectory(), - std::string("openvino_template_plugin") + IE_BUILD_POSTFIX), + std::string("openvino_template_plugin") + OV_BUILD_POSTFIX), "TEMPLATE"); } catch (...) 
{ } diff --git a/src/tests/test_utils/functional_test_utils/src/ov_plugin_cache.cpp b/src/tests/test_utils/functional_test_utils/src/ov_plugin_cache.cpp index 5d32d1a7b6abda..118368f7a1180f 100644 --- a/src/tests/test_utils/functional_test_utils/src/ov_plugin_cache.cpp +++ b/src/tests/test_utils/functional_test_utils/src/ov_plugin_cache.cpp @@ -58,7 +58,7 @@ std::shared_ptr PluginCache::core(const std::string& deviceToCheck) { // register template plugin if it is needed try { std::string pluginName = "openvino_template_plugin"; - pluginName += IE_BUILD_POSTFIX; + pluginName += OV_BUILD_POSTFIX; ov_core->register_plugin( ov::util::make_plugin_library_name(ov::test::utils::getExecutableDirectory(), pluginName), "TEMPLATE"); diff --git a/src/tests/test_utils/functional_test_utils/src/plugin_cache.cpp b/src/tests/test_utils/functional_test_utils/src/plugin_cache.cpp index 99fff0a5ef90e7..3db56da99c3a0e 100644 --- a/src/tests/test_utils/functional_test_utils/src/plugin_cache.cpp +++ b/src/tests/test_utils/functional_test_utils/src/plugin_cache.cpp @@ -57,7 +57,7 @@ std::shared_ptr PluginCache::ie(const std::string& device // register template plugin if it is needed try { std::string pluginName = "openvino_template_plugin"; - pluginName += IE_BUILD_POSTFIX; + pluginName += OV_BUILD_POSTFIX; ie_core->RegisterPlugin( ov::util::make_plugin_library_name(ov::test::utils::getExecutableDirectory(), pluginName), "TEMPLATE"); diff --git a/tests/fuzz/CMakeLists.txt b/tests/fuzz/CMakeLists.txt index af134ae28e8cdc..fa206f9362d949 100644 --- a/tests/fuzz/CMakeLists.txt +++ b/tests/fuzz/CMakeLists.txt @@ -11,7 +11,7 @@ set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS "Release" "Debug" "RelWithD set(OpenVINO_SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/../../") -find_package(IEDevScripts REQUIRED +find_package(OpenVINODeveloperScripts REQUIRED PATHS "${OpenVINO_SOURCE_DIR}/cmake/developer_package" NO_CMAKE_FIND_ROOT_PATH NO_DEFAULT_PATH) diff --git a/thirdparty/ocl/CMakeLists.txt b/thirdparty/ocl/CMakeLists.txt index 0ebe3fd6d1df39..0a402752d3397e 100644 --- a/thirdparty/ocl/CMakeLists.txt +++ b/thirdparty/ocl/CMakeLists.txt @@ -4,9 +4,9 @@ function(get_lib_name TARGET_NAME LIBRARY_NAME) if(WIN32) - set(LIB_SUFFIX "${IE_BUILD_POSTFIX}${CMAKE_LINK_LIBRARY_SUFFIX}") + set(LIB_SUFFIX "${OV_BUILD_POSTFIX}${CMAKE_LINK_LIBRARY_SUFFIX}") else() - set(LIB_SUFFIX "${IE_BUILD_POSTFIX}${CMAKE_SHARED_LIBRARY_SUFFIX}") + set(LIB_SUFFIX "${OV_BUILD_POSTFIX}${CMAKE_SHARED_LIBRARY_SUFFIX}") endif() set("${LIBRARY_NAME}" "${CMAKE_SHARED_MODULE_PREFIX}${TARGET_NAME}${LIB_SUFFIX}" PARENT_SCOPE) diff --git a/tools/benchmark_tool/CMakeLists.txt b/tools/benchmark_tool/CMakeLists.txt index ccab4ec9ca8eb8..9787b5f7b5cca5 100644 --- a/tools/benchmark_tool/CMakeLists.txt +++ b/tools/benchmark_tool/CMakeLists.txt @@ -14,8 +14,8 @@ if(NOT DEFINED OpenVINO_SOURCE_DIR) get_filename_component(OpenVINO_SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/../.." 
REALPATH) endif() -if(NOT IEDevScripts_FOUND) - find_package(IEDevScripts REQUIRED +if(NOT OpenVINODeveloperScripts_FOUND) + find_package(OpenVINODeveloperScripts REQUIRED PATHS "${OpenVINO_SOURCE_DIR}/cmake/developer_package" NO_CMAKE_FIND_ROOT_PATH NO_DEFAULT_PATH) @@ -40,5 +40,5 @@ install(DIRECTORY ${OpenVINOBenchmarkTool_SOURCE_DIR}/openvino # if(CMAKE_SOURCE_DIR STREQUAL OpenVINOBenchmarkTool_SOURCE_DIR) - ie_cpack(${IE_CPACK_COMPONENTS_ALL}) + ov_cpack(${OV_CPACK_COMPONENTS_ALL}) endif() diff --git a/tools/openvino_dev/CMakeLists.txt b/tools/openvino_dev/CMakeLists.txt index 061e9c4e8e6b82..12a24082a83a8e 100644 --- a/tools/openvino_dev/CMakeLists.txt +++ b/tools/openvino_dev/CMakeLists.txt @@ -14,8 +14,8 @@ if(NOT DEFINED OpenVINO_SOURCE_DIR) get_filename_component(OpenVINO_SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/../.." REALPATH) endif() -if(NOT IEDevScripts_FOUND) - find_package(IEDevScripts REQUIRED +if(NOT OpenVINODeveloperScripts_FOUND) + find_package(OpenVINODeveloperScripts REQUIRED PATHS "${OpenVINO_SOURCE_DIR}/cmake/developer_package" NO_CMAKE_FIND_ROOT_PATH NO_DEFAULT_PATH) diff --git a/tools/ovc/CMakeLists.txt b/tools/ovc/CMakeLists.txt index d5c3ddfe3cca4a..cea078768604f7 100644 --- a/tools/ovc/CMakeLists.txt +++ b/tools/ovc/CMakeLists.txt @@ -14,8 +14,8 @@ if(NOT DEFINED OpenVINO_SOURCE_DIR) get_filename_component(OpenVINO_SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/../.." REALPATH) endif() -if(NOT IEDevScripts_FOUND) - find_package(IEDevScripts REQUIRED +if(NOT OpenVINODeveloperScripts_FOUND) + find_package(OpenVINODeveloperScripts REQUIRED PATHS "${OpenVINO_SOURCE_DIR}/cmake/developer_package" NO_CMAKE_FIND_ROOT_PATH NO_DEFAULT_PATH) @@ -40,5 +40,5 @@ install(DIRECTORY ${OpenVINOConverter_SOURCE_DIR}/openvino # if(CMAKE_SOURCE_DIR STREQUAL OpenVINOConverter_SOURCE_DIR) - ie_cpack(${IE_CPACK_COMPONENTS_ALL}) + ov_cpack(${OV_CPACK_COMPONENTS_ALL}) endif() From b54e4c9924ad3145ec68fa80dcec3cc90c6fd946 Mon Sep 17 00:00:00 2001 From: Oleg Pipikin Date: Tue, 10 Oct 2023 00:35:32 +0200 Subject: [PATCH 105/257] Refactor MatMulTest, MaxMinLayerTest, Mvn1LayerTest (#20292) * Refactor MatMulTest * Refactor MaxMinLayerTest * Refactor Mvn1LayerTest --- .../single_layer_tests/mat_mul.cpp | 120 +++++++---- .../single_layer_tests/minimum_maximum.cpp | 40 ++-- .../single_layer_tests/mvn.cpp | 198 +++++++++++------- .../include/single_op_tests/mat_mul.hpp | 15 ++ .../single_op_tests/minimum_maximum.hpp | 15 ++ .../shared/include/single_op_tests/mvn.hpp | 19 ++ .../shared_test_classes/single_op/mat_mul.hpp | 34 +++ .../single_op/minimum_maximum.hpp | 32 +++ .../shared_test_classes/single_op/mvn.hpp | 54 +++++ .../src/single_op/mat_mul.cpp | 81 +++++++ .../src/single_op/minimum_maximum.cpp | 85 ++++++++ .../shared_test_classes/src/single_op/mvn.cpp | 135 ++++++++++++ 12 files changed, 691 insertions(+), 137 deletions(-) create mode 100644 src/tests/functional/plugin/shared/include/single_op_tests/mat_mul.hpp create mode 100644 src/tests/functional/plugin/shared/include/single_op_tests/minimum_maximum.hpp create mode 100644 src/tests/functional/plugin/shared/include/single_op_tests/mvn.hpp create mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/mat_mul.hpp create mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/minimum_maximum.hpp create mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/mvn.hpp create mode 100644 
src/tests/functional/shared_test_classes/src/single_op/mat_mul.cpp create mode 100644 src/tests/functional/shared_test_classes/src/single_op/minimum_maximum.cpp create mode 100644 src/tests/functional/shared_test_classes/src/single_op/mvn.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/mat_mul.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/mat_mul.cpp index b1925b9b8c0ab4..735dba7e8591a8 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/mat_mul.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/mat_mul.cpp @@ -4,60 +4,100 @@ #include -#include "single_layer_tests/mat_mul.hpp" - -using namespace LayerTestsDefinitions; +#include "single_op_tests/mat_mul.hpp" namespace { +using ov::test::MatMulLayerTest; +using ov::test::utils::InputLayerType; + +const std::vector model_types = { + ov::element::f32, + ov::element::i32, +}; -const std::vector inputPrecisions = { - InferenceEngine::Precision::FP32, - InferenceEngine::Precision::I32, +std::vector> input_shapes_no_transpose_static { + { {1, 4, 5, 6}, {1, 4, 6, 4} }, + { {4, 5, 6}, {6, 3} }, + { {9, 9, 9}, {9, 9} }, + { {1, 2, 3}, {1, 3, 10} }, + { {1, 2, 3}, {1, 1, 3, 2} }, + { {1, 3, 2, 4}, {2, 1, 4, 2} }, + { {2, 1, 2, 4}, {1, 3, 4, 2} }, + { {3, 2, 4}, {2, 1, 4, 2} }, + { {2, 1, 4, 2}, {3, 2, 4} }, + { {3}, {2, 2, 3, 1} }, + { {2, 2, 1, 3}, {3} }, + { {1, 5}, {5, 1} }, + { {1, 5}, {5} }, + { {5}, {5, 1} }, + { {5}, {5} }, }; -const std::vector shapeRelatedParams = { - { { {1, 4, 5, 6}, false }, { {1, 4, 6, 4}, false } }, - { { {4, 5, 6}, false }, { {6, 3}, false } }, - { { {9, 9, 9}, false }, { {9, 9}, false } }, - { { {1, 2, 3}, false }, { {1, 10, 3}, true } }, - { { {1, 2, 3}, false }, { {1, 3, 10}, false } }, - { { {1, 2, 3}, false }, { {1, 1, 3, 2}, false } }, - { { {1, 3, 2, 4}, false }, { {2, 1, 4, 2}, false } }, - { { {2, 1, 2, 4}, false }, { {1, 3, 4, 2}, false } }, - { { {3, 2, 4}, false }, { {2, 1, 4, 2}, false } }, - { { {2, 1, 4, 2}, false }, { {3, 2, 4}, false } }, - { { {2, 1, 2, 3}, true }, { {3, 2, 4}, false } }, - { { {2, 1, 3, 2}, false }, { {3, 4, 2}, true } }, - { { {2, 1, 2, 3}, true }, { {3, 4, 2}, true } }, - { { {3}, false }, { {2, 2, 3, 1}, false } }, - { { {2, 2, 1, 3}, false }, { {3}, false } }, - { { {1, 5}, false }, { {5, 1}, false } }, - { { {5, 1}, true }, { {5, 1}, false } }, - { { {1, 5}, false }, { {10, 5}, true } }, - { { {1, 5}, false }, { {5}, false } }, - { { {5}, false }, { {5, 1}, false } }, - { { {5}, false }, { {5}, false } }, - { { {5}, true }, { {5}, true } } +std::vector> input_shapes_first_transpose_static { + { {2, 1, 2, 3}, {3, 2, 4} }, + { {5, 1}, {5, 1} }, }; -std::vector secondaryInputTypes = { - ngraph::helpers::InputLayerType::CONSTANT, - ngraph::helpers::InputLayerType::PARAMETER, +std::vector> input_shapes_second_transpose_static { + { {1, 2, 3}, {1, 10, 3} }, + { {2, 1, 3, 2}, {3, 4, 2} }, + { {1, 5}, {10, 5} }, +}; + +std::vector> input_shapes_both_transpose_static { + { {2, 1, 2, 3}, {3, 4, 2} }, + { {5}, {5}, }, +}; + + +std::vector secondary_input_types = { + InputLayerType::CONSTANT, + InputLayerType::PARAMETER, }; std::map additional_config = {}; -INSTANTIATE_TEST_SUITE_P(smoke_MatMul, MatMulTest, +INSTANTIATE_TEST_SUITE_P(smoke_MatMul_NoTranspose, MatMulLayerTest, + ::testing::Combine( + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_no_transpose_static)), + 
::testing::Values(std::make_pair(false, false)), + ::testing::ValuesIn(model_types), + ::testing::ValuesIn(secondary_input_types), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::Values(additional_config)), + MatMulLayerTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_MatMul_FirstTranspose, MatMulLayerTest, + ::testing::Combine( + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_first_transpose_static)), + ::testing::Values(std::make_pair(true, false)), + ::testing::ValuesIn(model_types), + ::testing::ValuesIn(secondary_input_types), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::Values(additional_config)), + MatMulLayerTest::getTestCaseName); + + +INSTANTIATE_TEST_SUITE_P(smoke_MatMul_SecondTranspose, MatMulLayerTest, + ::testing::Combine( + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_second_transpose_static)), + ::testing::Values(std::make_pair(false, true)), + ::testing::ValuesIn(model_types), + ::testing::ValuesIn(secondary_input_types), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::Values(additional_config)), + MatMulLayerTest::getTestCaseName); + + +INSTANTIATE_TEST_SUITE_P(smoke_MatMul_BothTranspose, MatMulLayerTest, ::testing::Combine( - ::testing::ValuesIn(shapeRelatedParams), - ::testing::ValuesIn(inputPrecisions), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::ValuesIn(secondaryInputTypes), + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_both_transpose_static)), + ::testing::Values(std::make_pair(true, true)), + ::testing::ValuesIn(model_types), + ::testing::ValuesIn(secondary_input_types), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::Values(additional_config)), - MatMulTest::getTestCaseName); + MatMulLayerTest::getTestCaseName); } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/minimum_maximum.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/minimum_maximum.cpp index 5330cf472cff40..6244afb8216c04 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/minimum_maximum.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/minimum_maximum.cpp @@ -2,15 +2,15 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include "single_layer_tests/minimum_maximum.hpp" +#include "single_op_tests/minimum_maximum.hpp" #include "common_test_utils/test_constants.hpp" -using namespace LayerTestsDefinitions; - namespace { +using ov::test::MaxMinLayerTest; +using ov::test::utils::InputLayerType; +using ov::test::utils::MinMaxOpType; -const std::vector>> inShapes = { +const std::vector> input_shapes_static = { {{2}, {1}}, {{1, 1, 1, 3}, {1}}, {{1, 2, 4}, {1}}, @@ -20,31 +20,27 @@ const std::vector>> inShapes = { {{8, 1, 6, 1}, {7, 1, 5}}, }; -const std::vector netPrecisions = { - InferenceEngine::Precision::FP32, - InferenceEngine::Precision::FP16, +const std::vector model_types = { + ov::element::f32, + ov::element::f16, }; -const std::vector opType = { - ngraph::helpers::MinMaxOpType::MINIMUM, - ngraph::helpers::MinMaxOpType::MAXIMUM, +const std::vector op_types = { + MinMaxOpType::MINIMUM, + MinMaxOpType::MAXIMUM, }; -const std::vector inputType = { - ngraph::helpers::InputLayerType::CONSTANT, - 
ngraph::helpers::InputLayerType::PARAMETER, +const std::vector second_input_types = { + InputLayerType::CONSTANT, + InputLayerType::PARAMETER, }; INSTANTIATE_TEST_SUITE_P(smoke_maximum, MaxMinLayerTest, ::testing::Combine( - ::testing::ValuesIn(inShapes), - ::testing::ValuesIn(opType), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::ValuesIn(inputType), + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_static)), + ::testing::ValuesIn(op_types), + ::testing::ValuesIn(model_types), + ::testing::ValuesIn(second_input_types), ::testing::Values(ov::test::utils::DEVICE_CPU)), MaxMinLayerTest::getTestCaseName); diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/mvn.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/mvn.cpp index 8cf667daaebb75..9daa95881ec134 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/mvn.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/mvn.cpp @@ -4,38 +4,40 @@ #include -#include "single_layer_tests/mvn.hpp" +#include "single_op_tests/mvn.hpp" #include "common_test_utils/test_constants.hpp" -using namespace LayerTestsDefinitions; - -const std::vector emptyAcrossChannels = {{}}; -const std::vector emptyReductionAxes = {{}}; - -const std::vector> inputShapes = { - {8}, - {1, 16}, - {3, 19}, - {1, 32, 17}, - {1, 37, 9}, - {1, 16, 5, 8}, - {2, 19, 5, 10}, - {7, 32, 2, 8}, - {5, 8, 3, 5}, - {4, 41, 6, 9}, - {1, 32, 8, 1, 6}, - {1, 9, 1, 15, 9}, - {6, 64, 6, 1, 18}, - {2, 31, 2, 9, 1}, - {10, 16, 5, 10, 6} -}; - -const std::vector acrossChannels = { +namespace { +using ov::test::Mvn1LayerTest; +using ov::test::Mvn6LayerTest; + +const std::vector empty_across_channels = {{}}; +const std::vector empty_reduction_axes = {{}}; + +const std::vector> input_shapes_static = { + {{8}}, + {{1, 16}}, + {{3, 19}}, + {{1, 32, 17}}, + {{1, 37, 9}}, + {{1, 16, 5, 8}}, + {{2, 19, 5, 10}}, + {{7, 32, 2, 8}}, + {{5, 8, 3, 5}}, + {{4, 41, 6, 9}}, + {{1, 32, 8, 1, 6}}, + {{1, 9, 1, 15, 9}}, + {{6, 64, 6, 1, 18}}, + {{2, 31, 2, 9, 1}}, + {{10, 16, 5, 10, 6}} +}; + +const std::vector across_channels = { true, false }; -const std::vector normalizeVariance = { +const std::vector normalize_variance = { true, false }; @@ -44,39 +46,44 @@ const std::vector epsilon = { 0.000000001 }; -std::vector dataPrecisions = { - InferenceEngine::Precision::FP16, - InferenceEngine::Precision::FP32 +std::vector model_types = { + ov::element::f16, + ov::element::f32 }; -const auto MvnAcrossChannels = ::testing::Combine( - ::testing::ValuesIn(inputShapes), - ::testing::ValuesIn(dataPrecisions), - ::testing::ValuesIn(emptyReductionAxes), - ::testing::ValuesIn(acrossChannels), - ::testing::ValuesIn(normalizeVariance), +const auto Mvnacross_channels = ::testing::Combine( + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_static)), + ::testing::ValuesIn(model_types), + ::testing::ValuesIn(empty_reduction_axes), + ::testing::ValuesIn(across_channels), + ::testing::ValuesIn(normalize_variance), ::testing::ValuesIn(epsilon), ::testing::Values(ov::test::utils::DEVICE_CPU) ); +const std::vector> input_shapes_reduction_axes_static = { + {{1, 10, 5, 17}}, + {{1, 3, 8, 9}} +}; + const 
auto MvnReductionAxes = ::testing::Combine( - ::testing::ValuesIn(std::vector>{{1, 10, 5, 17}, {1, 3, 8, 9}}), - ::testing::Values(InferenceEngine::Precision::FP32), + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_reduction_axes_static)), + ::testing::Values(ov::element::f32), ::testing::ValuesIn(std::vector{{1, 2, 3}, {2, 3}}), - ::testing::ValuesIn(emptyAcrossChannels), - ::testing::ValuesIn(normalizeVariance), + ::testing::ValuesIn(empty_across_channels), + ::testing::ValuesIn(normalize_variance), ::testing::ValuesIn(epsilon), ::testing::Values(ov::test::utils::DEVICE_CPU) ); -INSTANTIATE_TEST_SUITE_P(smoke_TestsMVN_AcrossChannels, Mvn1LayerTest, MvnAcrossChannels, Mvn1LayerTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_TestsMVN_across_channels, Mvn1LayerTest, Mvnacross_channels, Mvn1LayerTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_TestsMVN_ReductionAxes, Mvn1LayerTest, MvnReductionAxes, Mvn1LayerTest::getTestCaseName); -std::vector idxPrecisions = { - InferenceEngine::Precision::I32, - InferenceEngine::Precision::I64 +std::vector idx_types = { + ov::element::i32, + ov::element::i64 }; const std::vector epsMode = { @@ -88,98 +95,139 @@ const std::vector epsilonF = { 0.0001f }; +const std::vector> input_shapes_5d_static = { + {{1, 10, 5, 7, 8}}, + {{1, 3, 8, 9, 49}} +}; + INSTANTIATE_TEST_SUITE_P(smoke_MVN_5D, Mvn6LayerTest, ::testing::Combine( - ::testing::ValuesIn(std::vector>{{1, 10, 5, 7, 8}, {1, 3, 8, 9, 49}}), - ::testing::ValuesIn(dataPrecisions), - ::testing::ValuesIn(idxPrecisions), + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_5d_static)), + ::testing::ValuesIn(model_types), + ::testing::ValuesIn(idx_types), ::testing::ValuesIn(std::vector>{{1, 2, 3, 4}, {2, 3, 4}, {-3, -2, -1}, {-1, -4, -2, -3}}), - ::testing::ValuesIn(normalizeVariance), + ::testing::ValuesIn(normalize_variance), ::testing::ValuesIn(epsilonF), ::testing::ValuesIn(epsMode), ::testing::Values(ov::test::utils::DEVICE_CPU)), Mvn6LayerTest::getTestCaseName); +const std::vector> input_shapes_4d_static = { + {{1, 10, 5, 17}}, + {{1, 3, 8, 9}} +}; + INSTANTIATE_TEST_SUITE_P(smoke_MVN_4D, Mvn6LayerTest, ::testing::Combine( - ::testing::ValuesIn(std::vector>{{1, 10, 5, 17}, {1, 3, 8, 9}}), - ::testing::ValuesIn(dataPrecisions), - ::testing::ValuesIn(idxPrecisions), + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_4d_static)), + ::testing::ValuesIn(model_types), + ::testing::ValuesIn(idx_types), ::testing::ValuesIn(std::vector>{{1, 2, 3}, {2, 3}, {-2, -1}, {-2, -1, -3}}), - ::testing::ValuesIn(normalizeVariance), + ::testing::ValuesIn(normalize_variance), ::testing::ValuesIn(epsilonF), ::testing::ValuesIn(epsMode), ::testing::Values(ov::test::utils::DEVICE_CPU)), Mvn6LayerTest::getTestCaseName); +const std::vector> input_shapes_3d_static = { + {{1, 32, 17}}, + {{1, 37, 9}} +}; + INSTANTIATE_TEST_SUITE_P(smoke_MVN_3D, Mvn6LayerTest, ::testing::Combine( - ::testing::ValuesIn(std::vector>{{1, 32, 17}, {1, 37, 9}}), - ::testing::ValuesIn(dataPrecisions), - ::testing::ValuesIn(idxPrecisions), + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_3d_static)), + ::testing::ValuesIn(model_types), + ::testing::ValuesIn(idx_types), ::testing::ValuesIn(std::vector>{{1, 2}, {2}, {-1}, {-1, -2}}), - ::testing::ValuesIn(normalizeVariance), + ::testing::ValuesIn(normalize_variance), ::testing::ValuesIn(epsilonF), ::testing::ValuesIn(epsMode), 
::testing::Values(ov::test::utils::DEVICE_CPU)), Mvn6LayerTest::getTestCaseName); +const std::vector> input_shapes_2d_static = { + {{3, 5}}, + {{2, 55}} +}; + INSTANTIATE_TEST_SUITE_P(smoke_MVN_2D, Mvn6LayerTest, ::testing::Combine( - ::testing::ValuesIn(std::vector>{{3, 5}, {2, 55}}), - ::testing::ValuesIn(dataPrecisions), - ::testing::ValuesIn(idxPrecisions), + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_2d_static)), + ::testing::ValuesIn(model_types), + ::testing::ValuesIn(idx_types), ::testing::ValuesIn(std::vector>{{1}}), - ::testing::ValuesIn(normalizeVariance), + ::testing::ValuesIn(normalize_variance), ::testing::ValuesIn(epsilonF), ::testing::ValuesIn(epsMode), ::testing::Values(ov::test::utils::DEVICE_CPU)), Mvn6LayerTest::getTestCaseName); +const std::vector> input_shapes_1d_static = { + {{3}}, + {{9}}, + {{55}} +}; + INSTANTIATE_TEST_SUITE_P(smoke_MVN_1D, Mvn6LayerTest, ::testing::Combine( - ::testing::ValuesIn(std::vector>{{3}, {9}, {55}}), - ::testing::ValuesIn(dataPrecisions), - ::testing::ValuesIn(idxPrecisions), + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_1d_static)), + ::testing::ValuesIn(model_types), + ::testing::ValuesIn(idx_types), ::testing::ValuesIn(std::vector>{{0}}), - ::testing::ValuesIn(normalizeVariance), + ::testing::ValuesIn(normalize_variance), ::testing::ValuesIn(epsilonF), ::testing::ValuesIn(epsMode), ::testing::Values(ov::test::utils::DEVICE_CPU)), Mvn6LayerTest::getTestCaseName); +const std::vector> input_shapes_decomposition_3d_static = { + {{1, 32, 17}}, + {{1, 37, 9}} +}; + INSTANTIATE_TEST_SUITE_P(smoke_Decomposition_3D, Mvn6LayerTest, ::testing::Combine( - ::testing::ValuesIn(std::vector>{{1, 32, 17}, {1, 37, 9}}), - ::testing::ValuesIn(dataPrecisions), - ::testing::ValuesIn(idxPrecisions), + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_decomposition_3d_static)), + ::testing::ValuesIn(model_types), + ::testing::ValuesIn(idx_types), ::testing::ValuesIn(std::vector>{{0, 1, 2}, {0}, {1}}), - ::testing::ValuesIn(normalizeVariance), + ::testing::ValuesIn(normalize_variance), ::testing::ValuesIn(epsilonF), ::testing::ValuesIn(epsMode), ::testing::Values(ov::test::utils::DEVICE_CPU)), Mvn6LayerTest::getTestCaseName); +const std::vector> input_shapes_decomposition_4d_static = { + {{1, 16, 5, 8}}, + {{2, 19, 5, 10}} +}; + INSTANTIATE_TEST_SUITE_P(smoke_Decomposition_4D, Mvn6LayerTest, ::testing::Combine( - ::testing::ValuesIn(std::vector>{{1, 16, 5, 8}, {2, 19, 5, 10}}), - ::testing::ValuesIn(dataPrecisions), - ::testing::ValuesIn(idxPrecisions), + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_decomposition_4d_static)), + ::testing::ValuesIn(model_types), + ::testing::ValuesIn(idx_types), ::testing::ValuesIn(std::vector>{{0, 1, 2, 3}, {0, 1, 2}, {0, 3}, {0}, {1}, {2}, {3}}), - ::testing::ValuesIn(normalizeVariance), + ::testing::ValuesIn(normalize_variance), ::testing::ValuesIn(epsilonF), ::testing::ValuesIn(epsMode), ::testing::Values(ov::test::utils::DEVICE_CPU)), Mvn6LayerTest::getTestCaseName); +const std::vector> input_shapes_decomposition_10d_static = { + {{1, 3, 5, 4, 2, 6, 5, 3, 2, 1}}, +}; + INSTANTIATE_TEST_SUITE_P(smoke_Decomposition_10D, Mvn6LayerTest, ::testing::Combine( - ::testing::ValuesIn(std::vector>{{1, 3, 5, 4, 2, 6, 5, 3, 2, 1}}), - ::testing::ValuesIn(dataPrecisions), - ::testing::ValuesIn(idxPrecisions), + 
::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_decomposition_10d_static)), + ::testing::ValuesIn(model_types), + ::testing::ValuesIn(idx_types), ::testing::ValuesIn(std::vector>{{0, 1, 5, 8, 9}, {0, 1, 2, 3}, {0, 1, 2}, {0, 3}, {0}, {3}, {5}, {9}}), - ::testing::ValuesIn(normalizeVariance), + ::testing::ValuesIn(normalize_variance), ::testing::ValuesIn(epsilonF), ::testing::ValuesIn(epsMode), ::testing::Values(ov::test::utils::DEVICE_CPU)), Mvn6LayerTest::getTestCaseName); +} // namespace \ No newline at end of file diff --git a/src/tests/functional/plugin/shared/include/single_op_tests/mat_mul.hpp b/src/tests/functional/plugin/shared/include/single_op_tests/mat_mul.hpp new file mode 100644 index 00000000000000..217b5825da9b7e --- /dev/null +++ b/src/tests/functional/plugin/shared/include/single_op_tests/mat_mul.hpp @@ -0,0 +1,15 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "shared_test_classes/single_op/mat_mul.hpp" + +namespace ov { +namespace test { +TEST_P(MatMulLayerTest, Inference) { + run(); +}; +} // namespace test +} // namespace ov diff --git a/src/tests/functional/plugin/shared/include/single_op_tests/minimum_maximum.hpp b/src/tests/functional/plugin/shared/include/single_op_tests/minimum_maximum.hpp new file mode 100644 index 00000000000000..ae367d97a07ec0 --- /dev/null +++ b/src/tests/functional/plugin/shared/include/single_op_tests/minimum_maximum.hpp @@ -0,0 +1,15 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "shared_test_classes/single_op/minimum_maximum.hpp" + +namespace ov { +namespace test { +TEST_P(MaxMinLayerTest, Inference){ + run(); +}; +} // namespace test +} // namespace ov diff --git a/src/tests/functional/plugin/shared/include/single_op_tests/mvn.hpp b/src/tests/functional/plugin/shared/include/single_op_tests/mvn.hpp new file mode 100644 index 00000000000000..6ad139d046621e --- /dev/null +++ b/src/tests/functional/plugin/shared/include/single_op_tests/mvn.hpp @@ -0,0 +1,19 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "shared_test_classes/single_op/mvn.hpp" + +namespace ov { +namespace test { +TEST_P(Mvn1LayerTest, Inference) { + run(); +}; + +TEST_P(Mvn6LayerTest, Inference) { + run(); +}; +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/mat_mul.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/mat_mul.hpp new file mode 100644 index 00000000000000..61fc57a100cd75 --- /dev/null +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/mat_mul.hpp @@ -0,0 +1,34 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include + +#include "shared_test_classes/base/ov_subgraph.hpp" +#include "common_test_utils/test_enums.hpp" + +namespace ov { +namespace test { +typedef std::tuple< + std::vector, // Input Shapes + std::pair, // Transpose inputs + ov::element::Type, // Model type + ov::test::utils::InputLayerType, // Secondary input type + std::string, // Device name + std::map // Additional network configuration +> MatMulLayerTestParamsSet; + +class MatMulLayerTest : public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseTest { +public: + static std::string 
getTestCaseName(const testing::TestParamInfo &obj); + +protected: + void SetUp() override; +}; +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/minimum_maximum.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/minimum_maximum.hpp new file mode 100644 index 00000000000000..d0af98986af336 --- /dev/null +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/minimum_maximum.hpp @@ -0,0 +1,32 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include + +#include "shared_test_classes/base/ov_subgraph.hpp" +#include "common_test_utils/test_enums.hpp" + +namespace ov { +namespace test { +using MaxMinParamsTuple = typename std::tuple< + std::vector, // Input shapes + ov::test::utils::MinMaxOpType, // Operation type + ov::element::Type, // Model type + ov::test::utils::InputLayerType, // Secondary input type + std::string>; // Device name + +class MaxMinLayerTest: + public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseTest { +public: + static std::string getTestCaseName(const testing::TestParamInfo& obj); +protected: + void SetUp() override; +}; +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/mvn.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/mvn.hpp new file mode 100644 index 00000000000000..da93a4706700ee --- /dev/null +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/mvn.hpp @@ -0,0 +1,54 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include + +#include "shared_test_classes/base/ov_subgraph.hpp" + +namespace ov { +namespace test { +typedef std::tuple< + std::vector, // Input shapes + ov::element::Type, // Model type + ov::AxisSet, // Reduction axes + bool, // Across channels + bool, // Normalize variance + double, // Epsilon + std::string // Device name + > mvn1Params; + +class Mvn1LayerTest : public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseTest { +public: + static std::string getTestCaseName(const testing::TestParamInfo& obj); + +protected: + void SetUp() override; +}; + +typedef std::tuple< + std::vector, // Input shapes + ov::element::Type, // Model type + ov::element::Type, // Axes type + std::vector, // Axes + bool, // Normalize variance + float, // Epsilon + std::string, // Epsilon mode + std::string // Device name + > mvn6Params; + +class Mvn6LayerTest : public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseTest { +public: + static std::string getTestCaseName(const testing::TestParamInfo& obj); + +protected: + void SetUp() override; +}; +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/src/single_op/mat_mul.cpp b/src/tests/functional/shared_test_classes/src/single_op/mat_mul.cpp new file mode 100644 index 00000000000000..4414febffd265b --- /dev/null +++ b/src/tests/functional/shared_test_classes/src/single_op/mat_mul.cpp @@ -0,0 +1,81 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "shared_test_classes/single_op/mat_mul.hpp" + +#include "common_test_utils/ov_tensor_utils.hpp" +#include "openvino/op/parameter.hpp" +#include "openvino/op/result.hpp" +#include 
"openvino/op/constant.hpp" +#include "openvino/op/matmul.hpp" + +namespace ov { +namespace test { +using ov::test::utils::InputLayerType; + +std::string MatMulLayerTest::getTestCaseName(const testing::TestParamInfo &obj) { + std::vector shapes; + std::pair transpose; + ov::element::Type model_type; + InputLayerType secondary_input_type; + std::string target_device; + std::map additional_config; + std::tie(shapes, transpose, model_type, secondary_input_type, target_device, additional_config) = obj.param; + + std::ostringstream result; + result << "IS=("; + for (size_t i = 0lu; i < shapes.size(); i++) { + result << ov::test::utils::partialShape2str({shapes[i].first}) << (i < shapes.size() - 1lu ? "_" : ""); + } + result << ")_TS="; + for (size_t i = 0lu; i < shapes.front().second.size(); i++) { + result << "{"; + for (size_t j = 0lu; j < shapes.size(); j++) { + result << ov::test::utils::vec2str(shapes[j].second[i]) << (j < shapes.size() - 1lu ? "_" : ""); + } + result << "}_"; + } + result << "transpose_a=" << transpose.first << "_"; + result << "transpose_b=" << transpose.second << "_"; + result << "secondary_input_type=" << secondary_input_type << "_"; + result << "modelType=" << model_type.get_type_name() << "_"; + result << "trgDev=" << target_device; + result << "config=("; + for (const auto& configEntry : additional_config) { + result << configEntry.first << ", " << configEntry.second << ";"; + } + result << ")"; + return result.str(); +} + +void MatMulLayerTest::SetUp() { + std::vector shapes; + std::pair transpose; + ov::element::Type model_type; + InputLayerType secondary_input_type; + std::map additional_config; + std::tie(shapes, transpose, model_type, secondary_input_type, targetDevice, additional_config) = this->GetParam(); + init_input_shapes(shapes); + configuration.insert(additional_config.begin(), additional_config.end()); + + ov::ParameterVector params {std::make_shared(model_type, inputDynamicShapes[0])}; + ov::NodeVector inputs {params[0]}; + + if (InputLayerType::PARAMETER == secondary_input_type) { + auto param = std::make_shared(model_type, inputDynamicShapes[1]); + params.push_back(param); + inputs.push_back(param); + } else { + auto tensor = ov::test::utils::create_and_fill_tensor(model_type, targetStaticShapes[0][1]); + auto constant = std::make_shared(tensor); + inputs.push_back(constant); + } + auto mat_mul = std::make_shared(inputs[0], inputs[1], transpose.first, transpose.second); + + auto result = std::make_shared(mat_mul); + + function = std::make_shared(result, params, "MatMul"); +} +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/src/single_op/minimum_maximum.cpp b/src/tests/functional/shared_test_classes/src/single_op/minimum_maximum.cpp new file mode 100644 index 00000000000000..9ab3c48284c460 --- /dev/null +++ b/src/tests/functional/shared_test_classes/src/single_op/minimum_maximum.cpp @@ -0,0 +1,85 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "shared_test_classes/single_op/minimum_maximum.hpp" + +#include "common_test_utils/ov_tensor_utils.hpp" +#include "openvino/op/parameter.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/minimum.hpp" +#include "openvino/op/maximum.hpp" + + +namespace ov { +namespace test { +using ov::test::utils::InputLayerType; +using ov::test::utils::MinMaxOpType; + +std::string MaxMinLayerTest::getTestCaseName(const testing::TestParamInfo &obj) { + std::vector shapes; + 
ov::element::Type model_type; + std::string target_name; + InputLayerType second_input_type; + MinMaxOpType op_type; + std::tie(shapes, op_type, model_type, second_input_type, target_name) = obj.param; + std::ostringstream result; + + result << "IS=("; + for (size_t i = 0lu; i < shapes.size(); i++) { + result << ov::test::utils::partialShape2str({shapes[i].first}) << (i < shapes.size() - 1lu ? "_" : ""); + } + result << ")_TS="; + for (size_t i = 0lu; i < shapes.front().second.size(); i++) { + result << "{"; + for (size_t j = 0lu; j < shapes.size(); j++) { + result << ov::test::utils::vec2str(shapes[j].second[i]) << (j < shapes.size() - 1lu ? "_" : ""); + } + result << "}_"; + } + result << "OpType=" << op_type << "_"; + result << "SecondaryInputType=" << second_input_type << "_"; + result << "netPRC=" << model_type.get_type_name() << "_"; + result << "trgDev=" << target_name << "_"; + return result.str(); +} + +void MaxMinLayerTest::SetUp() { + std::vector shapes; + ov::element::Type model_type; + InputLayerType second_input_type; + MinMaxOpType op_type; + std::tie(shapes, op_type, model_type, second_input_type, targetDevice) = this->GetParam(); + init_input_shapes(shapes); + + ov::ParameterVector params {std::make_shared(model_type, inputDynamicShapes[0])}; + ov::NodeVector inputs {params[0]}; + + if (InputLayerType::PARAMETER == second_input_type) { + auto param = std::make_shared(model_type, inputDynamicShapes[1]); + params.push_back(param); + inputs.push_back(param); + } else { + auto tensor = ov::test::utils::create_and_fill_tensor(model_type, targetStaticShapes[0][1]); + auto constant = std::make_shared(tensor); + inputs.push_back(constant); + } + + std::shared_ptr min_max_op; + switch (op_type) { + case MinMaxOpType::MINIMUM: + min_max_op = std::make_shared(inputs[0], inputs[1]); + break; + case MinMaxOpType::MAXIMUM: + min_max_op = std::make_shared(inputs[0], inputs[1]); + break; + default: + throw std::logic_error("Unsupported operation type"); + } + + auto result = std::make_shared(min_max_op); + function = std::make_shared(result, params, "MinMax"); +} +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/src/single_op/mvn.cpp b/src/tests/functional/shared_test_classes/src/single_op/mvn.cpp new file mode 100644 index 00000000000000..666ded5cdd1acb --- /dev/null +++ b/src/tests/functional/shared_test_classes/src/single_op/mvn.cpp @@ -0,0 +1,135 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "shared_test_classes/single_op/mvn.hpp" + +#include "openvino/op/parameter.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/mvn.hpp" + +namespace ov { +namespace test { +std::string Mvn1LayerTest::getTestCaseName(const testing::TestParamInfo& obj) { + std::vector shapes; + ov::element::Type model_type; + ov::AxisSet axes; + bool across_channels, normalize_variance; + double eps; + std::string target_device; + std::tie(shapes, model_type, axes, across_channels, normalize_variance, eps, target_device) = obj.param; + std::ostringstream result; + result << "IS=("; + for (size_t i = 0lu; i < shapes.size(); i++) { + result << ov::test::utils::partialShape2str({shapes[i].first}) << (i < shapes.size() - 1lu ? 
"_" : ""); + } + result << ")_TS="; + for (size_t i = 0lu; i < shapes.front().second.size(); i++) { + result << "{"; + for (size_t j = 0lu; j < shapes.size(); j++) { + result << ov::test::utils::vec2str(shapes[j].second[i]) << (j < shapes.size() - 1lu ? "_" : ""); + } + result << "}_"; + } + result << "ModelType=" << model_type.get_type_name() << "_"; + if (!axes.empty()) { + result << "ReductionAxes=" << ov::test::utils::vec2str(axes.to_vector()) << "_"; + } else { + result << "across_channels=" << (across_channels ? "TRUE" : "FALSE") << "_"; + } + result << "normalize_variance=" << (normalize_variance ? "TRUE" : "FALSE") << "_"; + result << "Epsilon=" << eps << "_"; + result << "TargetDevice=" << target_device; + return result.str(); +} + +void Mvn1LayerTest::SetUp() { + std::vector shapes; + ov::element::Type model_type; + ov::AxisSet axes; + bool across_channels, normalize_variance; + double eps; + std::tie(shapes, model_type, axes, across_channels, normalize_variance, eps, targetDevice) = this->GetParam(); + init_input_shapes(shapes); + + auto param = std::make_shared(model_type, inputDynamicShapes.front()); + + std::shared_ptr mvn; + + if (axes.empty()) { + mvn = std::make_shared(param, across_channels, normalize_variance, eps); + + // OpenVINO MVN implementation implicitly adds 0th dimension to reduction axes set which is not valid behavior + ov::AxisSet axes; + const size_t startAxis = across_channels ? 1 : 2; + const size_t numOfDims = param->output(0).get_partial_shape().size(); + for (size_t i = startAxis; i < numOfDims; i++) + axes.insert(i); + mvn->set_reduction_axes(axes); + } else { + mvn = std::make_shared(param, axes, normalize_variance, eps); + } + + auto result = std::make_shared(mvn); + function = std::make_shared(result, ov::ParameterVector{param}, "MVN1"); +} + +std::string Mvn6LayerTest::getTestCaseName(const testing::TestParamInfo& obj) { + std::vector shapes; + ov::element::Type model_type; + ov::element::Type axis_type; + std::vector axes; + bool normalize_variance; + float eps; + std::string eps_mode; + std::string target_device; + std::tie(shapes, model_type, axis_type, axes, normalize_variance, eps, eps_mode, target_device) = obj.param; + std::ostringstream result; + result << "IS=("; + for (size_t i = 0lu; i < shapes.size(); i++) { + result << ov::test::utils::partialShape2str({shapes[i].first}) << (i < shapes.size() - 1lu ? "_" : ""); + } + result << ")_TS="; + for (size_t i = 0lu; i < shapes.front().second.size(); i++) { + result << "{"; + for (size_t j = 0lu; j < shapes.size(); j++) { + result << ov::test::utils::vec2str(shapes[j].second[i]) << (j < shapes.size() - 1lu ? "_" : ""); + } + result << "}_"; + } + result << "ModelType=" << model_type.get_type_name() << "_"; + result << "AxType=" << axis_type.get_type_name() << "_"; + result << "Ax=" << ov::test::utils::vec2str(axes) << "_"; + result << "NormVariance=" << (normalize_variance ? 
"TRUE" : "FALSE") << "_"; + result << "Eps=" << eps << "_"; + result << "EM=" << eps_mode << "_"; + result << "TargetDevice=" << target_device; + return result.str(); +} + +void Mvn6LayerTest::SetUp() { + std::vector shapes; + ov::element::Type model_type; + ov::element::Type axis_type; + std::vector axes; + bool normalize_variance; + float eps; + std::string eps_mode; + std::tie(shapes, model_type, axis_type, axes, normalize_variance, eps, eps_mode, targetDevice) = this->GetParam(); + init_input_shapes(shapes); + + auto param = std::make_shared(model_type, inputDynamicShapes.front()); + + auto axes_node = ov::op::v0::Constant::create(axis_type, ov::Shape{axes.size()}, axes); + + ov::op::MVNEpsMode nEpsMode = ov::op::MVNEpsMode::INSIDE_SQRT; + if (eps_mode == "outside_sqrt") + nEpsMode = ov::op::MVNEpsMode::OUTSIDE_SQRT; + auto mvn = std::make_shared(param, axes_node, normalize_variance, eps, nEpsMode); + + auto result = std::make_shared(mvn); + function = std::make_shared(result, ov::ParameterVector{param}, "MVN6"); +} +} // namespace test +} // namespace ov From 9361c2c8101a5ae9503342d495fa3cec2307291e Mon Sep 17 00:00:00 2001 From: Oleg Pipikin Date: Tue, 10 Oct 2023 01:59:39 +0200 Subject: [PATCH 106/257] Refactor InterpolateLayerTest, IsInfLayerTest, LogSoftmaxLayerTest, LogicalLayerTest (#20196) * Refactor InterpolateLayerTest * Refactor IsInfLayerTest * Refactor LogSoftmaxLayerTest * Refactor LogicalLayerTest --- .../single_layer_tests/interpolate.cpp | 163 ++++++++-------- .../single_layer_tests/is_inf.cpp | 47 ++--- .../single_layer_tests/log_softmax.cpp | 63 +++---- .../single_layer_tests/logical.cpp | 67 ++++--- .../skip_tests_config.cpp | 2 + .../include/single_op_tests/interpolate.hpp | 18 ++ .../shared/include/single_op_tests/is_inf.hpp | 15 ++ .../include/single_op_tests/log_softmax.hpp | 15 ++ .../include/single_op_tests/logical.hpp | 15 ++ .../shared_test_classes/base/utils/ranges.hpp | 9 +- .../single_op/interpolate.hpp | 55 ++++++ .../shared_test_classes/single_op/is_inf.hpp | 31 +++ .../single_op/log_softmax.hpp | 32 ++++ .../shared_test_classes/single_op/logical.hpp | 36 ++++ .../src/base/utils/generate_inputs.cpp | 39 ++++ .../src/single_op/interpolate.cpp | 177 ++++++++++++++++++ .../src/single_op/is_inf.cpp | 70 +++++++ .../src/single_op/log_softmax.cpp | 54 ++++++ .../src/single_op/logical.cpp | 74 ++++++++ 19 files changed, 792 insertions(+), 190 deletions(-) create mode 100644 src/tests/functional/plugin/shared/include/single_op_tests/interpolate.hpp create mode 100644 src/tests/functional/plugin/shared/include/single_op_tests/is_inf.hpp create mode 100644 src/tests/functional/plugin/shared/include/single_op_tests/log_softmax.hpp create mode 100644 src/tests/functional/plugin/shared/include/single_op_tests/logical.hpp create mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/interpolate.hpp create mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/is_inf.hpp create mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/log_softmax.hpp create mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/logical.hpp create mode 100644 src/tests/functional/shared_test_classes/src/single_op/interpolate.cpp create mode 100644 src/tests/functional/shared_test_classes/src/single_op/is_inf.cpp create mode 100644 src/tests/functional/shared_test_classes/src/single_op/log_softmax.cpp create mode 100644 
src/tests/functional/shared_test_classes/src/single_op/logical.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/interpolate.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/interpolate.cpp index f608a6d8b3b6f8..4da6623cd3a87c 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/interpolate.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/interpolate.cpp @@ -4,190 +4,173 @@ #include -#include "single_layer_tests/interpolate.hpp" +#include "single_op_tests/interpolate.hpp" #include "common_test_utils/test_constants.hpp" -using namespace LayerTestsDefinitions; - namespace { +using ov::test::InterpolateLayerTest; -const std::vector netPrecisions = { - InferenceEngine::Precision::FP16, - InferenceEngine::Precision::FP32, +const std::vector model_types = { + ov::element::f16, + ov::element::f32, }; -const std::vector> inShapes = { - {1, 4, 6, 6}, +const std::vector input_shapes_static = { + {1, 4, 6, 6} }; -const std::vector modesWithoutNearest = { - ngraph::op::v4::Interpolate::InterpolateMode::LINEAR, - ngraph::op::v4::Interpolate::InterpolateMode::LINEAR_ONNX, - ngraph::op::v4::Interpolate::InterpolateMode::CUBIC, +const std::vector modes_without_nearest = { + ov::op::v4::Interpolate::InterpolateMode::LINEAR, + ov::op::v4::Interpolate::InterpolateMode::LINEAR_ONNX, + ov::op::v4::Interpolate::InterpolateMode::CUBIC, }; -const std::vector nearestMode = { - ngraph::op::v4::Interpolate::InterpolateMode::NEAREST, +const std::vector nearest_mode = { + ov::op::v4::Interpolate::InterpolateMode::NEAREST, }; -const std::vector coordinateTransformModes = { - ngraph::op::v4::Interpolate::CoordinateTransformMode::TF_HALF_PIXEL_FOR_NN, - ngraph::op::v4::Interpolate::CoordinateTransformMode::PYTORCH_HALF_PIXEL, - ngraph::op::v4::Interpolate::CoordinateTransformMode::HALF_PIXEL, - ngraph::op::v4::Interpolate::CoordinateTransformMode::ASYMMETRIC, - ngraph::op::v4::Interpolate::CoordinateTransformMode::ALIGN_CORNERS, +const std::vector coordinateTransformModes = { + ov::op::v4::Interpolate::CoordinateTransformMode::TF_HALF_PIXEL_FOR_NN, + ov::op::v4::Interpolate::CoordinateTransformMode::PYTORCH_HALF_PIXEL, + ov::op::v4::Interpolate::CoordinateTransformMode::HALF_PIXEL, + ov::op::v4::Interpolate::CoordinateTransformMode::ASYMMETRIC, + ov::op::v4::Interpolate::CoordinateTransformMode::ALIGN_CORNERS, }; -const std::vector shapeCalculationMode = { - ngraph::op::v4::Interpolate::ShapeCalcMode::SIZES, - ngraph::op::v4::Interpolate::ShapeCalcMode::SCALES, +const std::vector shapeCalculationMode = { + ov::op::v4::Interpolate::ShapeCalcMode::SIZES, + ov::op::v4::Interpolate::ShapeCalcMode::SCALES, }; -const std::vector nearestModes = { - ngraph::op::v4::Interpolate::NearestMode::SIMPLE, - ngraph::op::v4::Interpolate::NearestMode::ROUND_PREFER_FLOOR, - ngraph::op::v4::Interpolate::NearestMode::FLOOR, - ngraph::op::v4::Interpolate::NearestMode::CEIL, - ngraph::op::v4::Interpolate::NearestMode::ROUND_PREFER_CEIL, +const std::vector nearest_modes = { + ov::op::v4::Interpolate::NearestMode::SIMPLE, + ov::op::v4::Interpolate::NearestMode::ROUND_PREFER_FLOOR, + ov::op::v4::Interpolate::NearestMode::FLOOR, + ov::op::v4::Interpolate::NearestMode::CEIL, + ov::op::v4::Interpolate::NearestMode::ROUND_PREFER_CEIL, }; -const std::vector defaultNearestMode = { - ngraph::op::v4::Interpolate::NearestMode::ROUND_PREFER_FLOOR, +const std::vector default_nearest_mode = { 
+ ov::op::v4::Interpolate::NearestMode::ROUND_PREFER_FLOOR, }; const std::vector> pads = { - {0, 0, 1, 1}, - {0, 0, 0, 0}, + {0, 0, 1, 1}, + {0, 0, 0, 0}, }; const std::vector antialias = { // Not enabled in Inference Engine // true, - false, + false, }; -const std::vector cubeCoefs = { - -0.75f, +const std::vector cube_coefs = { + -0.75f, }; -const std::vector> defaultAxes = { +const std::vector> default_axes = { {0, 1, 2, 3} }; -const std::vector> targetShapes = { +const std::vector target_shapes = { {1, 4, 8, 8}, }; -const std::vector> defaultScales = { +const std::vector> default_scales = { {1.f, 1.f, 1.333333f, 1.333333f} }; std::map additional_config = {}; const auto interpolateCasesWithoutNearest = ::testing::Combine( - ::testing::ValuesIn(modesWithoutNearest), + ::testing::ValuesIn(modes_without_nearest), ::testing::ValuesIn(shapeCalculationMode), ::testing::ValuesIn(coordinateTransformModes), - ::testing::ValuesIn(defaultNearestMode), + ::testing::ValuesIn(default_nearest_mode), ::testing::ValuesIn(antialias), ::testing::ValuesIn(pads), ::testing::ValuesIn(pads), - ::testing::ValuesIn(cubeCoefs), - ::testing::ValuesIn(defaultAxes), - ::testing::ValuesIn(defaultScales)); + ::testing::ValuesIn(cube_coefs), + ::testing::ValuesIn(default_axes), + ::testing::ValuesIn(default_scales)); const auto interpolateCases = ::testing::Combine( - ::testing::ValuesIn(nearestMode), + ::testing::ValuesIn(nearest_mode), ::testing::ValuesIn(shapeCalculationMode), ::testing::ValuesIn(coordinateTransformModes), - ::testing::ValuesIn(nearestModes), + ::testing::ValuesIn(nearest_modes), ::testing::ValuesIn(antialias), ::testing::ValuesIn(pads), ::testing::ValuesIn(pads), - ::testing::ValuesIn(cubeCoefs), - ::testing::ValuesIn(defaultAxes), - ::testing::ValuesIn(defaultScales)); + ::testing::ValuesIn(cube_coefs), + ::testing::ValuesIn(default_axes), + ::testing::ValuesIn(default_scales)); INSTANTIATE_TEST_SUITE_P(smoke_Interpolate_Basic, InterpolateLayerTest, ::testing::Combine( interpolateCasesWithoutNearest, - ::testing::ValuesIn(netPrecisions), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::ValuesIn(inShapes), - ::testing::ValuesIn(targetShapes), + ::testing::ValuesIn(model_types), + ::testing::Values(ov::test::static_shapes_to_test_representation(input_shapes_static)), + ::testing::ValuesIn(target_shapes), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::Values(additional_config)), InterpolateLayerTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_Interpolate_Nearest, InterpolateLayerTest, ::testing::Combine( interpolateCases, - ::testing::ValuesIn(netPrecisions), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::ValuesIn(inShapes), - ::testing::ValuesIn(targetShapes), + ::testing::ValuesIn(model_types), + ::testing::Values(ov::test::static_shapes_to_test_representation(input_shapes_static)), + ::testing::ValuesIn(target_shapes), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::Values(additional_config)), InterpolateLayerTest::getTestCaseName); -const std::vector> targetShapesTailTest = { - {1, 4, 2, 11}, // cover down sample and tails process code path +const std::vector target_shapes_tail_test = { + {1, 
4, 2, 11}, // cover down sample and tails process code path }; -const std::vector> defaultScalesTailTest = { +const std::vector> default_scalesTailTest = { {1.f, 1.f, 0.333333f, 1.833333f} }; const auto interpolateCasesWithoutNearestTail = ::testing::Combine( - ::testing::ValuesIn(modesWithoutNearest), + ::testing::ValuesIn(modes_without_nearest), ::testing::ValuesIn(shapeCalculationMode), ::testing::ValuesIn(coordinateTransformModes), - ::testing::ValuesIn(defaultNearestMode), + ::testing::ValuesIn(default_nearest_mode), ::testing::ValuesIn(antialias), ::testing::ValuesIn(pads), ::testing::ValuesIn(pads), - ::testing::ValuesIn(cubeCoefs), - ::testing::ValuesIn(defaultAxes), - ::testing::ValuesIn(defaultScalesTailTest)); + ::testing::ValuesIn(cube_coefs), + ::testing::ValuesIn(default_axes), + ::testing::ValuesIn(default_scalesTailTest)); const auto interpolateCasesTail = ::testing::Combine( - ::testing::ValuesIn(nearestMode), + ::testing::ValuesIn(nearest_mode), ::testing::ValuesIn(shapeCalculationMode), ::testing::ValuesIn(coordinateTransformModes), - ::testing::ValuesIn(nearestModes), + ::testing::ValuesIn(nearest_modes), ::testing::ValuesIn(antialias), ::testing::ValuesIn(pads), ::testing::ValuesIn(pads), - ::testing::ValuesIn(cubeCoefs), - ::testing::ValuesIn(defaultAxes), - ::testing::ValuesIn(defaultScalesTailTest)); + ::testing::ValuesIn(cube_coefs), + ::testing::ValuesIn(default_axes), + ::testing::ValuesIn(default_scalesTailTest)); INSTANTIATE_TEST_SUITE_P(smoke_Interpolate_Basic_Down_Sample_Tail, InterpolateLayerTest, ::testing::Combine( interpolateCasesWithoutNearestTail, - ::testing::ValuesIn(netPrecisions), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::ValuesIn(inShapes), - ::testing::ValuesIn(targetShapesTailTest), + ::testing::ValuesIn(model_types), + ::testing::Values(ov::test::static_shapes_to_test_representation(input_shapes_static)), + ::testing::ValuesIn(target_shapes_tail_test), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::Values(additional_config)), InterpolateLayerTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_Interpolate_Nearest_Down_Sample_Tail, InterpolateLayerTest, ::testing::Combine( interpolateCasesTail, - ::testing::ValuesIn(netPrecisions), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::ValuesIn(inShapes), - ::testing::ValuesIn(targetShapesTailTest), + ::testing::ValuesIn(model_types), + ::testing::Values(ov::test::static_shapes_to_test_representation(input_shapes_static)), + ::testing::ValuesIn(target_shapes_tail_test), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::Values(additional_config)), InterpolateLayerTest::getTestCaseName); diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/is_inf.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/is_inf.cpp index 41e8c44950777f..74bec47ba16896 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/is_inf.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/is_inf.cpp @@ -2,14 +2,12 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include 
"shared_test_classes/single_layer/is_inf.hpp" - -using namespace ov::test; -using namespace ov::test::subgraph; +#include "single_op_tests/is_inf.hpp" namespace { -std::vector> inShapesStatic = { +using ov::test::IsInfLayerTest; + +std::vector> input_shapes_static = { { {{}, {{2}}} }, { {{}, {{2, 200}}} }, { {{}, {{10, 200}}} }, @@ -29,45 +27,40 @@ std::vector> inShapesStatic = { { {{}, {{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}}} } }; -std::vector> inShapesDynamic = { +std::vector> input_shapes_dynamic = { {{{ngraph::Dimension(1, 10), 200}, {{2, 200}, {1, 200}}}} }; -std::vector netPrecisions = { +std::vector model_types = { ov::element::f32 }; -std::vector detectNegative = { +std::vector detect_negative = { true, false }; -std::vector detectPositive = { +std::vector detect_positive = { true, false }; std::map additional_config = {}; -const auto isInfParams = ::testing::Combine( - ::testing::ValuesIn(inShapesStatic), - ::testing::ValuesIn(detectNegative), - ::testing::ValuesIn(detectPositive), - ::testing::ValuesIn(netPrecisions), +const auto is_inf_params = ::testing::Combine( + ::testing::ValuesIn(input_shapes_static), + ::testing::ValuesIn(detect_negative), + ::testing::ValuesIn(detect_positive), + ::testing::ValuesIn(model_types), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::Values(additional_config)); -const auto isInfParamsDyn = ::testing::Combine( - ::testing::ValuesIn(inShapesDynamic), - ::testing::ValuesIn(detectNegative), - ::testing::ValuesIn(detectPositive), - ::testing::ValuesIn(netPrecisions), +const auto is_inf_params_dynamic = ::testing::Combine( + ::testing::ValuesIn(input_shapes_dynamic), + ::testing::ValuesIn(detect_negative), + ::testing::ValuesIn(detect_positive), + ::testing::ValuesIn(model_types), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::Values(additional_config)); - -TEST_P(IsInfLayerTest, CompareWithRefs) { - run(); -} - -INSTANTIATE_TEST_SUITE_P(smoke_static, IsInfLayerTest, isInfParams, IsInfLayerTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_dynamic, IsInfLayerTest, isInfParamsDyn, IsInfLayerTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_static, IsInfLayerTest, is_inf_params, IsInfLayerTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_dynamic, IsInfLayerTest, is_inf_params_dynamic, IsInfLayerTest::getTestCaseName); } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/log_softmax.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/log_softmax.cpp index ea27ac854503d2..d16edfad904e65 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/log_softmax.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/log_softmax.cpp @@ -4,72 +4,61 @@ #include -#include "single_layer_tests/log_softmax.hpp" +#include "single_op_tests/log_softmax.hpp" #include "common_test_utils/test_constants.hpp" -using namespace LayerTestsDefinitions; - namespace { +using ov::test::LogSoftmaxLayerTest; -const std::vector netPrecisions = { - InferenceEngine::Precision::FP32, +const std::vector model_types = { + ov::element::f32, }; -const std::vector inputShapes2D = { - InferenceEngine::SizeVector {1, 100}, - InferenceEngine::SizeVector {100, 1}, - InferenceEngine::SizeVector {10, 10}, +const std::vector> input_shapes_2d = { + {{1, 100}}, + {{100, 1}}, + {{10, 10}}, }; -const std::vector axis2D = { +const std::vector axis_2d = { -2, -1, 0, 1 }; -const auto params2D = 
testing::Combine( - testing::ValuesIn(netPrecisions), - testing::Values(InferenceEngine::Precision::UNSPECIFIED), - testing::Values(InferenceEngine::Precision::UNSPECIFIED), - testing::Values(InferenceEngine::Layout::ANY), - testing::Values(InferenceEngine::Layout::ANY), - testing::ValuesIn(inputShapes2D), - testing::ValuesIn(axis2D), - testing::Values(ov::test::utils::DEVICE_CPU), - testing::Values(std::map()) +const auto params_2d = testing::Combine( + testing::ValuesIn(model_types), + testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_2d)), + testing::ValuesIn(axis_2d), + testing::Values(ov::test::utils::DEVICE_CPU) ); INSTANTIATE_TEST_SUITE_P( smoke_LogSoftmax2D, LogSoftmaxLayerTest, - params2D, + params_2d, LogSoftmaxLayerTest::getTestCaseName ); -const std::vector inputShapes4D = { - InferenceEngine::SizeVector {1, 100, 1, 1}, - InferenceEngine::SizeVector {1, 3, 4, 3}, - InferenceEngine::SizeVector {2, 3, 4, 5}, +const std::vector> input_shapes_4d = { + {{1, 100, 1, 1}}, + {{1, 3, 4, 3}}, + {{2, 3, 4, 5}}, }; -const std::vector axis4D = { +const std::vector axis_4d = { -4, -3, -2, -1, 0, 1, 2, 3 }; -const auto params4D = testing::Combine( - testing::ValuesIn(netPrecisions), - testing::Values(InferenceEngine::Precision::UNSPECIFIED), - testing::Values(InferenceEngine::Precision::UNSPECIFIED), - testing::Values(InferenceEngine::Layout::ANY), - testing::Values(InferenceEngine::Layout::ANY), - testing::ValuesIn(inputShapes4D), - testing::ValuesIn(axis4D), - testing::Values(ov::test::utils::DEVICE_CPU), - testing::Values(std::map()) +const auto params_4d = testing::Combine( + testing::ValuesIn(model_types), + testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_4d)), + testing::ValuesIn(axis_4d), + testing::Values(ov::test::utils::DEVICE_CPU) ); INSTANTIATE_TEST_SUITE_P( smoke_LogSoftmax4D, LogSoftmaxLayerTest, - params4D, + params_4d, LogSoftmaxLayerTest::getTestCaseName ); diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/logical.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/logical.cpp index d8dd2ae9f112fe..6cef30f55580d3 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/logical.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/logical.cpp @@ -2,16 +2,13 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include "single_layer_tests/logical.hpp" +#include "single_op_tests/logical.hpp" #include "common_test_utils/test_constants.hpp" -using namespace LayerTestsDefinitions; -using namespace LayerTestsDefinitions::LogicalParams; - namespace { +using ov::test::LogicalLayerTest; -std::map, std::vector>> inputShapes = { +std::map> input_shapes_static = { {{1}, {{1}, {17}, {1, 1}, {2, 18}, {1, 1, 2}, {2, 2, 3}, {1, 1, 2, 3}}}, {{5}, {{1}, {1, 1}, {2, 5}, {1, 1, 1}, {2, 2, 5}}}, {{2, 200}, {{1}, {200}, {1, 200}, {2, 200}, {2, 2, 200}}}, @@ -20,7 +17,7 @@ std::map, std::vector>> inputShapes = { {{2, 1, 1, 3, 1}, {{1}, {1, 3, 4}, {2, 1, 3, 4}, {1, 1, 1, 1, 1}}}, }; -std::map, std::vector>> inputShapesNot = { +std::map> input_shapes_not_static = { {{1}, {}}, {{5}, {}}, {{2, 200}, {}}, @@ -29,53 +26,53 @@ std::map, std::vector>> inputShapesNot {{2, 1, 1, 3, 1}, {}}, }; -std::vector inputsPrecisions = { - InferenceEngine::Precision::BOOL, -}; +std::vector> combine_shapes(const std::map>& input_shapes_static) { + std::vector> result; + for (const auto& input_shape : 
input_shapes_static) { + for (auto& item : input_shape.second) { + result.push_back({input_shape.first, item}); + } + + if (input_shape.second.empty()) { + result.push_back({input_shape.first, {}}); + } + } + return result; +} -std::vector logicalOpTypes = { - ngraph::helpers::LogicalTypes::LOGICAL_AND, - ngraph::helpers::LogicalTypes::LOGICAL_OR, - ngraph::helpers::LogicalTypes::LOGICAL_XOR, +std::vector model_types = { + ov::element::boolean, }; -std::vector secondInputTypes = { - ngraph::helpers::InputLayerType::CONSTANT, - ngraph::helpers::InputLayerType::PARAMETER, +std::vector logicalOpTypes = { + ov::test::utils::LogicalTypes::LOGICAL_AND, + ov::test::utils::LogicalTypes::LOGICAL_OR, + ov::test::utils::LogicalTypes::LOGICAL_XOR, }; -std::vector netPrecisions = { - InferenceEngine::Precision::FP32, +std::vector secondInputTypes = { + ov::test::utils::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::PARAMETER, }; std::map additional_config = {}; const auto LogicalTestParams = ::testing::Combine( - ::testing::ValuesIn(LogicalLayerTest::combineShapes(inputShapes)), + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(combine_shapes(input_shapes_static))), ::testing::ValuesIn(logicalOpTypes), ::testing::ValuesIn(secondInputTypes), - ::testing::ValuesIn(netPrecisions), - ::testing::ValuesIn(inputsPrecisions), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(InferenceEngine::Layout::ANY), + ::testing::ValuesIn(model_types), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::Values(additional_config)); const auto LogicalTestParamsNot = ::testing::Combine( - ::testing::ValuesIn(LogicalLayerTest::combineShapes(inputShapesNot)), - ::testing::Values(ngraph::helpers::LogicalTypes::LOGICAL_NOT), - ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT), - ::testing::ValuesIn(netPrecisions), - ::testing::ValuesIn(inputsPrecisions), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(InferenceEngine::Layout::ANY), + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(combine_shapes(input_shapes_not_static))), + ::testing::Values(ov::test::utils::LogicalTypes::LOGICAL_NOT), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), + ::testing::ValuesIn(model_types), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::Values(additional_config)); INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs, LogicalLayerTest, LogicalTestParams, LogicalLayerTest::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefsNot, LogicalLayerTest, LogicalTestParamsNot, LogicalLayerTest::getTestCaseName); - } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp index 3daad41fd0fe5a..127002565e2a1f 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp @@ -189,6 +189,8 @@ std::vector disabledTestPatterns() { // Issue: 121313 R"(smoke_GroupConvBackpropData.*paddingDefined/GroupConvBackpropLayerTest.Inference.*f16.*)", R"(smoke_GroupConvBackpropData.*paddingDefined/GroupConvBackpropLayerTest.Inference.*f32.*)", + // Issue: 122094 + 
R"(smoke_Interpolate_Basic_Down_Sample_Tail/InterpolateLayerTest.Inference.*(asymmetric|align_corners).*f16.*)", }; #if defined(__APPLE__) && defined(OPENVINO_ARCH_ARM64) // Issue: 120950 diff --git a/src/tests/functional/plugin/shared/include/single_op_tests/interpolate.hpp b/src/tests/functional/plugin/shared/include/single_op_tests/interpolate.hpp new file mode 100644 index 00000000000000..4d7bee7599896b --- /dev/null +++ b/src/tests/functional/plugin/shared/include/single_op_tests/interpolate.hpp @@ -0,0 +1,18 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "shared_test_classes/single_op/interpolate.hpp" + +namespace ov { +namespace test { +TEST_P(InterpolateLayerTest, Inference) { + run(); +} +TEST_P(Interpolate11LayerTest, Inference) { + run(); +} +} // namespace test +} // namespace ov diff --git a/src/tests/functional/plugin/shared/include/single_op_tests/is_inf.hpp b/src/tests/functional/plugin/shared/include/single_op_tests/is_inf.hpp new file mode 100644 index 00000000000000..04b0aaa1f6a29e --- /dev/null +++ b/src/tests/functional/plugin/shared/include/single_op_tests/is_inf.hpp @@ -0,0 +1,15 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "shared_test_classes/single_op/is_inf.hpp" + +namespace ov { +namespace test { +TEST_P(IsInfLayerTest, Inference) { + run(); +} +} // namespace test +} // namespace ov diff --git a/src/tests/functional/plugin/shared/include/single_op_tests/log_softmax.hpp b/src/tests/functional/plugin/shared/include/single_op_tests/log_softmax.hpp new file mode 100644 index 00000000000000..5d941a1d87f9e9 --- /dev/null +++ b/src/tests/functional/plugin/shared/include/single_op_tests/log_softmax.hpp @@ -0,0 +1,15 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "shared_test_classes/single_op/log_softmax.hpp" + +namespace ov { +namespace test { +TEST_P(LogSoftmaxLayerTest, Inference) { + run(); +} +} // namespace test +} // namespace ov diff --git a/src/tests/functional/plugin/shared/include/single_op_tests/logical.hpp b/src/tests/functional/plugin/shared/include/single_op_tests/logical.hpp new file mode 100644 index 00000000000000..f98027493e8cc7 --- /dev/null +++ b/src/tests/functional/plugin/shared/include/single_op_tests/logical.hpp @@ -0,0 +1,15 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "shared_test_classes/single_op/logical.hpp" + +namespace ov { +namespace test { +TEST_P(LogicalLayerTest, Inference) { + run(); +} +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/ranges.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/ranges.hpp index 17121e8a57aff3..4315fd9ef5d5a6 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/ranges.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/ranges.hpp @@ -22,6 +22,10 @@ #include "openvino/op/dft.hpp" #include "openvino/op/idft.hpp" +#include "openvino/op/logical_and.hpp" +#include "openvino/op/logical_or.hpp" +#include "openvino/op/logical_xor.hpp" +#include "openvino/op/logical_not.hpp" #include #include @@ -86,7 +90,10 @@ static std::map>> i { ov::op::v4::ReduceL1::get_type_info_static(), {{{0, 5}}, {{0, 5, 1000}}} }, { 
ov::op::v4::ReduceL2::get_type_info_static(), {{{0, 5}}, {{0, 5, 1000}}} }, { ov::op::v7::DFT::get_type_info_static(), {{{0, 1}}, {{0, 1, 1000000}}} }, - { ov::op::v7::IDFT::get_type_info_static(), {{{0, 1}}, {{0, 1, 1000000}}} }, + { ov::op::v1::LogicalAnd::get_type_info_static(), {{{0, 2}}, {{0, 2, 1}}} }, + { ov::op::v1::LogicalOr::get_type_info_static(), {{{0, 2}}, {{0, 2, 1}}} }, + { ov::op::v1::LogicalNot::get_type_info_static(), {{{0, 2}}, {{0, 2, 1}}} }, + { ov::op::v1::LogicalXor::get_type_info_static(), {{{0, 2}}, {{0, 2, 1}}} }, }; } // namespace utils diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/interpolate.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/interpolate.hpp new file mode 100644 index 00000000000000..0623f5690fb100 --- /dev/null +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/interpolate.hpp @@ -0,0 +1,55 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include + +#include "shared_test_classes/base/ov_subgraph.hpp" + +namespace ov { +namespace test { +typedef std::tuple< + ov::op::util::InterpolateBase::InterpolateMode, // InterpolateMode + ov::op::util::InterpolateBase::ShapeCalcMode, // ShapeCalculationMode + ov::op::util::InterpolateBase::CoordinateTransformMode, // CoordinateTransformMode + ov::op::util::InterpolateBase::NearestMode, // NearestMode + bool, // AntiAlias + std::vector, // PadBegin + std::vector, // PadEnd + double, // Cube coef + std::vector, // Axes + std::vector // Scales +> InterpolateSpecificParams; + +typedef std::tuple< + InterpolateSpecificParams, + ov::element::Type, // Model type + std::vector, // Input shapes + ov::Shape, // Target shapes + std::string, // Device name + std::map // Additional network configuration +> InterpolateLayerTestParams; + +class InterpolateLayerTest : public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseTest { +public: + static std::string getTestCaseName(const testing::TestParamInfo& obj); + +protected: + void SetUp() override; +}; + +class Interpolate11LayerTest : public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseTest { +public: + static std::string getTestCaseName(const testing::TestParamInfo& obj); + +protected: + void SetUp() override; +}; +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/is_inf.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/is_inf.hpp new file mode 100644 index 00000000000000..7856a0036e7bbb --- /dev/null +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/is_inf.hpp @@ -0,0 +1,31 @@ +// Copyright (C) 2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include + +#include "shared_test_classes/base/ov_subgraph.hpp" + +namespace ov { +namespace test { +using IsInfParams = std::tuple< + std::vector, // Data shape + bool, // Detect negative + bool, // Detect positive + ov::element::Type, // Model type + std::string, // Device name + std::map // Additional config +>; + +class IsInfLayerTest : public testing::WithParamInterface, + virtual public SubgraphBaseTest { +public: + static std::string getTestCaseName(const testing::TestParamInfo& obj); +protected: + void SetUp() override; +}; +} // namespace test +} // namespace ov diff --git 
a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/log_softmax.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/log_softmax.hpp new file mode 100644 index 00000000000000..b803cd35a8abdb --- /dev/null +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/log_softmax.hpp @@ -0,0 +1,32 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include +#include + +#include "shared_test_classes/base/ov_subgraph.hpp" + +namespace ov { +namespace test { +using logSoftmaxLayerTestParams = std::tuple< + ov::element::Type, // Model type + std::vector, // Input shapes + int64_t, // Axis + std::string // Target device +>; + +class LogSoftmaxLayerTest : public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseTest { +public: + static std::string getTestCaseName(const testing::TestParamInfo& obj); + +protected: + void SetUp() override; +}; +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/logical.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/logical.hpp new file mode 100644 index 00000000000000..5dce73dc0552ab --- /dev/null +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/logical.hpp @@ -0,0 +1,36 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + + +#include + +#include "shared_test_classes/base/ov_subgraph.hpp" + +#include "common_test_utils/test_enums.hpp" + +namespace ov { +namespace test { +using InputShapesTuple = std::pair, std::vector>; + +typedef std::tuple< + std::vector, // Input shapes + ov::test::utils::LogicalTypes, // Logical op type + ov::test::utils::InputLayerType, // Second input type + ov::element::Type, // Model type + std::string, // Device name + std::map // Additional model configuration +> LogicalTestParams; + +class LogicalLayerTest : public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseTest { +protected: + void SetUp() override; + +public: + static std::string getTestCaseName(const testing::TestParamInfo& obj); +}; +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/src/base/utils/generate_inputs.cpp b/src/tests/functional/shared_test_classes/src/base/utils/generate_inputs.cpp index 285dbebeaa8c49..6080c4de5ab39b 100644 --- a/src/tests/functional/shared_test_classes/src/base/utils/generate_inputs.cpp +++ b/src/tests/functional/shared_test_classes/src/base/utils/generate_inputs.cpp @@ -1022,6 +1022,7 @@ ov::runtime::Tensor generate(const comparison::fill_tensor(tensor); return tensor; } + ov::runtime::Tensor generate(const std::shared_ptr& node, size_t port, @@ -1032,6 +1033,44 @@ ov::runtime::Tensor generate(const return tensor; } +namespace is_inf { +template +void fill_tensor(ov::Tensor& tensor) { + int range = ov::shape_size(tensor.get_shape()); + float startFrom = -static_cast(range) / 2.f; + + auto pointer = tensor.data(); + testing::internal::Random random(1); + for (size_t i = 0; i < range; i++) { + if (i % 7 == 0) { + pointer[i] = std::numeric_limits::infinity(); + } else if (i % 7 == 1) { + pointer[i] = std::numeric_limits::quiet_NaN(); + } else if (i % 7 == 3) { + pointer[i] = -std::numeric_limits::infinity(); + } else if (i % 7 == 5) { + pointer[i] = -std::numeric_limits::quiet_NaN(); + } else { + 
pointer[i] = static_cast(startFrom + random.Generate(range)); + } + } +} +} // namespace is_inf + +ov::runtime::Tensor generate(const + std::shared_ptr& node, + size_t port, + const ov::element::Type& elemType, + const ov::Shape& targetShape) { + auto tensor = ov::Tensor(elemType, targetShape); + if (elemType == ov::element::f16) { + is_inf::fill_tensor(tensor); + } else { + is_inf::fill_tensor(tensor); + } + return tensor; +} + namespace color_conversion { enum class ColorFormat { i420, diff --git a/src/tests/functional/shared_test_classes/src/single_op/interpolate.cpp b/src/tests/functional/shared_test_classes/src/single_op/interpolate.cpp new file mode 100644 index 00000000000000..5f5a04bd694b11 --- /dev/null +++ b/src/tests/functional/shared_test_classes/src/single_op/interpolate.cpp @@ -0,0 +1,177 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "shared_test_classes/single_op/interpolate.hpp" + +#include "common_test_utils/test_enums.hpp" +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/interpolate.hpp" + +namespace ov { +namespace test { +std::string InterpolateLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { + using ov::test::utils::operator<<; + + InterpolateSpecificParams interpolate_params; + ov::element::Type model_type; + std::vector shapes; + ov::Shape target_shape; + std::string target_device; + std::map additional_config; + std::tie(interpolate_params, model_type, shapes, target_shape, target_device, additional_config) = obj.param; + std::vector pad_begin, pad_end; + std::vector axes; + std::vector scales; + bool antialias; + ov::op::v4::Interpolate::InterpolateMode mode; + ov::op::v4::Interpolate::ShapeCalcMode shape_calc_mode; + ov::op::v4::Interpolate::CoordinateTransformMode coordinate_transform_mode; + ov::op::v4::Interpolate::NearestMode nearest_mode; + double cube_coef; + std::tie(mode, shape_calc_mode, coordinate_transform_mode, nearest_mode, antialias, pad_begin, pad_end, cube_coef, axes, scales) = interpolate_params; + + std::ostringstream result; + result << "IS=("; + for (size_t i = 0lu; i < shapes.size(); i++) { + result << ov::test::utils::partialShape2str({shapes[i].first}) << (i < shapes.size() - 1lu ? "_" : ""); + } + result << ")_TS="; + for (size_t i = 0lu; i < shapes.front().second.size(); i++) { + result << "{"; + for (size_t j = 0lu; j < shapes.size(); j++) { + result << ov::test::utils::vec2str(shapes[j].second[i]) << (j < shapes.size() - 1lu ? 
"_" : ""); + } + result << "}_"; + } + result << "TS=" << ov::test::utils::vec2str(target_shape) << "_"; + result << "InterpolateMode=" << mode << "_"; + result << "ShapeCalcMode=" << shape_calc_mode << "_"; + result << "CoordinateTransformMode=" << coordinate_transform_mode << "_"; + result << "NearestMode=" << nearest_mode << "_"; + result << "cube_coef=" << cube_coef << "_"; + result << "Antialias=" << antialias << "_"; + result << "PB=" << ov::test::utils::vec2str(pad_begin) << "_"; + result << "PE=" << ov::test::utils::vec2str(pad_end) << "_"; + result << "Axes=" << ov::test::utils::vec2str(axes) << "_"; + result << "Scales=" << ov::test::utils::vec2str(scales) << "_"; + result << "netType=" << model_type.get_type_name() << "_"; + result << "trgDev=" << target_device; + return result.str(); +} + +void InterpolateLayerTest::SetUp() { + InterpolateSpecificParams interpolate_params; + ov::element::Type model_type; + std::vector shapes; + ov::Shape target_shape; + std::map additional_config; + std::tie(interpolate_params, model_type, shapes, target_shape, targetDevice, additional_config) = this->GetParam(); + std::vector pad_begin, pad_end; + std::vector axes; + std::vector scales; + bool antialias; + ov::op::v4::Interpolate::InterpolateMode mode; + ov::op::v4::Interpolate::ShapeCalcMode shape_calc_mode; + ov::op::v4::Interpolate::CoordinateTransformMode coordinate_transform_mode; + ov::op::v4::Interpolate::NearestMode nearest_mode; + + configuration.insert(additional_config.begin(), additional_config.end()); + + double cube_coef; + std::tie(mode, shape_calc_mode, coordinate_transform_mode, nearest_mode, antialias, pad_begin, pad_end, cube_coef, axes, scales) = interpolate_params; + init_input_shapes(shapes); + + auto param = std::make_shared(model_type, inputDynamicShapes.front()); + + auto sizes_input = std::make_shared(ov::element::i64, ov::Shape{target_shape.size()}, target_shape); + + auto scales_input = std::make_shared(ov::element::f32, ov::Shape{scales.size()}, scales); + + ov::op::v4::Interpolate::InterpolateAttrs interpolate_attributes{mode, shape_calc_mode, pad_begin, + pad_end, coordinate_transform_mode, nearest_mode, antialias, cube_coef}; + + std::shared_ptr interpolate; + if (axes.empty()) { + interpolate = std::make_shared(param, + sizes_input, + scales_input, + interpolate_attributes); + } else { + auto axesInput = std::make_shared(ov::element::i64, ov::Shape{axes.size()}, axes); + + interpolate = std::make_shared(param, + sizes_input, + scales_input, + axesInput, + interpolate_attributes); + } + auto result = std::make_shared(interpolate); + + function = std::make_shared(result, ov::ParameterVector{param}, "interpolate"); +} + +std::string Interpolate11LayerTest::getTestCaseName(const testing::TestParamInfo& obj) { + return InterpolateLayerTest::getTestCaseName(obj); +} + +namespace { +std::shared_ptr make_scales_or_sizes_input(ov::op::util::InterpolateBase::ShapeCalcMode shape_calc_mode, + const std::vector& sizes, + const std::vector& scales) { + if (shape_calc_mode == ov::op::util::InterpolateBase::ShapeCalcMode::SIZES) + return std::make_shared(ov::element::i64, ov::Shape{sizes.size()}, sizes); + else + return std::make_shared(ov::element::f32, ov::Shape{scales.size()}, scales); +} +} +void Interpolate11LayerTest::SetUp() { + InterpolateSpecificParams interpolate_params; + ov::element::Type model_type; + std::vector shapes; + ov::Shape target_shape; + std::map additional_config; + std::tie(interpolate_params, model_type, shapes, target_shape, targetDevice, 
additional_config) = this->GetParam(); + std::vector pad_begin, pad_end; + std::vector axes; + std::vector scales; + bool antialias; + ov::op::v4::Interpolate::InterpolateMode mode; + ov::op::v4::Interpolate::ShapeCalcMode shape_calc_mode; + ov::op::v4::Interpolate::CoordinateTransformMode coordinate_transform_mode; + ov::op::v4::Interpolate::NearestMode nearest_mode; + + configuration.insert(additional_config.begin(), additional_config.end()); + + double cube_coef; + std::tie(mode, shape_calc_mode, coordinate_transform_mode, nearest_mode, antialias, pad_begin, pad_end, cube_coef, axes, scales) = interpolate_params; + init_input_shapes(shapes); + + auto param = std::make_shared(model_type, inputDynamicShapes.front()); + + auto scales_orsizes_input = make_scales_or_sizes_input(shape_calc_mode, target_shape, scales); + + ov::op::util::InterpolateBase::InterpolateAttrs interpolate_attributes{mode, shape_calc_mode, pad_begin, + pad_end, coordinate_transform_mode, nearest_mode, antialias, cube_coef}; + + std::shared_ptr interpolate{}; + if (axes.empty()) { + interpolate = std::make_shared(param, + scales_orsizes_input, + interpolate_attributes); + } else { + auto axesInput = std::make_shared(ov::element::i64, ov::Shape{axes.size()}, axes); + + interpolate = std::make_shared(param, + scales_orsizes_input, + axesInput, + interpolate_attributes); + } + + auto result = std::make_shared(interpolate); + function = std::make_shared(result, ov::ParameterVector{param}, "interpolate"); +} +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/src/single_op/is_inf.cpp b/src/tests/functional/shared_test_classes/src/single_op/is_inf.cpp new file mode 100644 index 00000000000000..148dc86cdf1250 --- /dev/null +++ b/src/tests/functional/shared_test_classes/src/single_op/is_inf.cpp @@ -0,0 +1,70 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "shared_test_classes/single_op/is_inf.hpp" + +#include "common_test_utils/ov_tensor_utils.hpp" + +namespace ov { +namespace test { +std::string IsInfLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { + std::vector shapes; + ov::element::Type model_type; + bool detect_negative, detect_positive; + std::string target_name; + std::map additional_config; + std::tie(shapes, detect_negative, detect_positive, model_type, target_name, additional_config) = obj.param; + std::ostringstream result; + + result << "IS=("; + for (size_t i = 0lu; i < shapes.size(); i++) { + result << ov::test::utils::partialShape2str({shapes[i].first}) << (i < shapes.size() - 1lu ? "_" : ""); + } + result << ")_TS="; + for (size_t i = 0lu; i < shapes.front().second.size(); i++) { + result << "{"; + for (size_t j = 0lu; j < shapes.size(); j++) { + result << ov::test::utils::vec2str(shapes[j].second[i]) << (j < shapes.size() - 1lu ? "_" : ""); + } + result << "}_"; + } + result << ")_detect_negative=" << (detect_negative ? "True" : "False") << "_"; + result << "detect_positive=" << (detect_positive ? 
"True" : "False") << "_"; + result << "model_type=" << model_type << "_"; + result << "trgDev=" << target_name; + + for (auto const& config_item : additional_config) { + result << "_config_item=" << config_item.first << "=" << config_item.second; + } + return result.str(); +} + +void IsInfLayerTest::SetUp() { + std::vector shapes; + ElementType model_type; + bool detect_negative, detect_positive; + std::map additional_config; + std::tie(shapes, detect_negative, detect_positive, model_type, targetDevice, additional_config) = this->GetParam(); + + init_input_shapes(shapes); + configuration.insert(additional_config.begin(), additional_config.end()); + + ov::ParameterVector parameters; + for (auto&& shape : inputDynamicShapes) { + parameters.push_back(std::make_shared(model_type, shape)); + } + parameters[0]->set_friendly_name("Data"); + + ov::op::v10::IsInf::Attributes attributes {detect_negative, detect_positive}; + auto is_inf = std::make_shared(parameters[0], attributes); + + ov::ResultVector results; + for (int i = 0; i < is_inf->get_output_size(); i++) { + results.push_back(std::make_shared(is_inf->output(i))); + } + + function = std::make_shared(results, parameters, "IsInf"); +} +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/src/single_op/log_softmax.cpp b/src/tests/functional/shared_test_classes/src/single_op/log_softmax.cpp new file mode 100644 index 00000000000000..a7d28a36c6f183 --- /dev/null +++ b/src/tests/functional/shared_test_classes/src/single_op/log_softmax.cpp @@ -0,0 +1,54 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "shared_test_classes/single_op/log_softmax.hpp" + +namespace ov { +namespace test { +std::string LogSoftmaxLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { + ov::element::Type model_type; + std::vector shapes; + int64_t axis; + std::string target_device; + std::tie(model_type, shapes, axis, target_device) = obj.param; + + std::ostringstream result; + result << "IS=("; + for (size_t i = 0lu; i < shapes.size(); i++) { + result << ov::test::utils::partialShape2str({shapes[i].first}) << (i < shapes.size() - 1lu ? "_" : ""); + } + result << ")_TS="; + for (size_t i = 0lu; i < shapes.front().second.size(); i++) { + result << "{"; + for (size_t j = 0lu; j < shapes.size(); j++) { + result << ov::test::utils::vec2str(shapes[j].second[i]) << (j < shapes.size() - 1lu ? 
"_" : ""); + } + result << "}_"; + } + + result << "modelType=" << model_type.get_type_name() << "_"; + result << "axis=" << axis << "_"; + result << "trgDev=" << target_device; + + return result.str(); +} + +void LogSoftmaxLayerTest::SetUp() { + ov::element::Type model_type; + std::vector shapes; + int64_t axis; + + std::tie(model_type, shapes, axis, targetDevice) = GetParam(); + init_input_shapes(shapes); + + auto param = std::make_shared(model_type, inputDynamicShapes.front()); + + const auto log_softmax = std::make_shared(param, axis); + + auto result = std::make_shared(log_softmax); + + function = std::make_shared(result, ov::ParameterVector{param}, "logSoftmax"); +} +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/src/single_op/logical.cpp b/src/tests/functional/shared_test_classes/src/single_op/logical.cpp new file mode 100644 index 00000000000000..d4e73f32cb25da --- /dev/null +++ b/src/tests/functional/shared_test_classes/src/single_op/logical.cpp @@ -0,0 +1,74 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "shared_test_classes/single_op/logical.hpp" + +#include "common_test_utils/ov_tensor_utils.hpp" +#include "ov_models/builders.hpp" + +namespace ov { +namespace test { +std::string LogicalLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { + std::vector shapes; + ov::test::utils::LogicalTypes comparisonOpType; + ov::test::utils::InputLayerType second_input_type; + ov::element::Type model_type; + std::string device_name; + std::map additional_config; + std::tie(shapes, comparisonOpType, second_input_type, model_type, device_name, additional_config) = obj.param; + + std::ostringstream result; + result << "IS=("; + for (size_t i = 0lu; i < shapes.size(); i++) { + result << ov::test::utils::partialShape2str({shapes[i].first}) << (i < shapes.size() - 1lu ? "_" : ""); + } + result << ")_TS="; + for (size_t i = 0lu; i < shapes.front().second.size(); i++) { + result << "{"; + for (size_t j = 0lu; j < shapes.size(); j++) { + result << ov::test::utils::vec2str(shapes[j].second[i]) << (j < shapes.size() - 1lu ? 
"_" : ""); + } + result << "}_"; + } + result << "comparisonOpType=" << comparisonOpType << "_"; + result << "second_input_type=" << second_input_type << "_"; + result << "netPRC=" << model_type.get_type_name() << "_"; + result << "trgDev=" << device_name; + return result.str(); +} + +void LogicalLayerTest::SetUp() { + std::vector shapes; + ov::test::utils::LogicalTypes logical_op_type; + ov::test::utils::InputLayerType second_input_type; + ov::element::Type model_type; + std::map additional_config; + + std::tie(shapes, logical_op_type, second_input_type, model_type, targetDevice, additional_config) = this->GetParam(); + init_input_shapes(shapes); + + configuration.insert(additional_config.begin(), additional_config.end()); + + ov::ParameterVector params {std::make_shared(model_type, inputDynamicShapes[0])}; + + std::shared_ptr logical_node; + if (ov::test::utils::LogicalTypes::LOGICAL_NOT != logical_op_type) { + std::shared_ptr secondInput; + if (ov::test::utils::InputLayerType::CONSTANT == second_input_type) { + auto tensor = ov::test::utils::create_and_fill_tensor(model_type, targetStaticShapes[0][1]); + secondInput = std::make_shared(tensor); + } else { + auto param = std::make_shared(model_type, inputDynamicShapes[1]); + secondInput = param; + params.push_back(param); + } + logical_node = ngraph::builder::makeLogical(params[0], secondInput, logical_op_type); + } else { + logical_node = std::make_shared(params[0]); + } + + function = std::make_shared(logical_node, params, "Logical"); +} +} // namespace test +} // namespace ov From 86d0bdb2db82aed2b369e151e2fc056db511818f Mon Sep 17 00:00:00 2001 From: Tomasz Jankowski Date: Tue, 10 Oct 2023 03:27:22 +0200 Subject: [PATCH 107/257] [Ref] Drop legacy API - leftovers (#20271) * Merge opt_kernel into reference * Remove get_default_order * Use ov:: in jit generators * Remove unused template function * Add reshape parameter for consistency with Op spec * Add brief description and such * Remove unused param from reshape ref * Use C++ casting --- .../ngraph/runtime/opt_kernel/reshape.hpp | 21 - .../group_convolution_backprop_data.hpp | 30 -- .../include/openvino/reference/matmul.hpp | 26 +- .../include/openvino/reference/range.hpp | 2 +- .../include/openvino/reference/reshape.hpp | 30 +- .../include/openvino/reference/reverse.hpp | 5 +- src/core/reference/src/op/convert.cpp | 2 +- src/core/reference/src/op/depth_to_space.cpp | 4 +- src/core/reference/src/op/einsum.cpp | 43 +- src/core/reference/src/op/jit_generator.cpp | 6 +- src/core/reference/src/op/jit_generator.hpp | 11 +- src/core/reference/src/op/reshape.cpp | 352 +++++++++++++++- .../reference/src/op/shuffle_channels.cpp | 4 +- src/core/reference/src/op/space_to_depth.cpp | 4 +- src/core/reference/src/op/strided_slice.cpp | 35 +- src/core/reference/src/op/transpose.cpp | 6 +- .../src/runtime/opt_kernel/reshape.cpp | 375 ------------------ src/core/src/op/batch_to_space.cpp | 38 +- src/core/src/op/reshape.cpp | 14 +- src/core/src/op/shuffle_channels.cpp | 1 - src/core/src/op/space_to_batch.cpp | 38 +- src/core/tests/reshape_opt_kernel.cpp | 14 +- 22 files changed, 478 insertions(+), 583 deletions(-) delete mode 100644 src/core/reference/include/ngraph/runtime/opt_kernel/reshape.hpp delete mode 100644 src/core/reference/src/runtime/opt_kernel/reshape.cpp diff --git a/src/core/reference/include/ngraph/runtime/opt_kernel/reshape.hpp b/src/core/reference/include/ngraph/runtime/opt_kernel/reshape.hpp deleted file mode 100644 index 33f2c5f9465098..00000000000000 --- 
a/src/core/reference/include/ngraph/runtime/opt_kernel/reshape.hpp +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "ngraph/axis_vector.hpp" -#include "ngraph/shape.hpp" - -namespace ngraph { -namespace runtime { -namespace opt_kernel { -void reshape(const char* in, - char* out, - const Shape& in_shape, - const AxisVector& in_axis_order, - const Shape& out_shape, - size_t elem_size); -} -} // namespace runtime -} // namespace ngraph diff --git a/src/core/reference/include/openvino/reference/group_convolution_backprop_data.hpp b/src/core/reference/include/openvino/reference/group_convolution_backprop_data.hpp index 66d2a6f431df92..5c410d044cea62 100644 --- a/src/core/reference/include/openvino/reference/group_convolution_backprop_data.hpp +++ b/src/core/reference/include/openvino/reference/group_convolution_backprop_data.hpp @@ -98,35 +98,5 @@ void group_convolution_backprop_data(const T* in, } } } - -// DEPRECATED, can't be removed currently due to arm-plugin dependency -template ::type> -OPENVINO_DEPRECATED("group_convolution_backprop_data function without output_paddings is deprecated, " - "use the one with output_padding.") -void group_convolution_backprop_data(const INPUT* in, - const FILTER* f, - OUTPUT* out, - const Shape& in_shape, - const Shape& filter_shape, - const Shape& out_shape, - const Strides& strides, - const Strides& dilation, - const CoordinateDiff& pads_begin, - const CoordinateDiff& pads_end) { - const CoordinateDiff output_padding(in_shape.size() - 2, 0); - - group_convolution_backprop_data(in, - f, - out, - in_shape, - filter_shape, - out_shape, - strides, - dilation, - pads_begin, - pads_end, - output_padding); -} - } // namespace reference } // namespace ov diff --git a/src/core/reference/include/openvino/reference/matmul.hpp b/src/core/reference/include/openvino/reference/matmul.hpp index b4a09e0f276d94..32ced574949655 100644 --- a/src/core/reference/include/openvino/reference/matmul.hpp +++ b/src/core/reference/include/openvino/reference/matmul.hpp @@ -9,8 +9,8 @@ #include #include -#include "ngraph/runtime/opt_kernel/reshape.hpp" #include "openvino/reference/broadcast.hpp" +#include "openvino/reference/reshape.hpp" namespace ov { namespace reference { @@ -100,12 +100,12 @@ void matmul(const T* arg0, std::vector tmp(shape_size(arg0_shape)); auto axis_vector = details::get_transpose_order(arg0_shape); std::swap(arg0_shape_tmp[arg0_rank - 1], arg0_shape_tmp[arg0_rank - 2]); - ngraph::runtime::opt_kernel::reshape(reinterpret_cast(arg0_data), - reinterpret_cast(tmp.data()), - arg0_shape, - axis_vector, - arg0_shape_tmp, - sizeof(T)); + reshape(reinterpret_cast(arg0_data), + reinterpret_cast(tmp.data()), + arg0_shape, + axis_vector, + arg0_shape_tmp, + sizeof(T)); arg0_new_data.swap(tmp); arg0_data = arg0_new_data.data(); } @@ -114,12 +114,12 @@ void matmul(const T* arg0, std::vector tmp(shape_size(arg1_shape)); auto axis_vector = details::get_transpose_order(arg1_shape); std::swap(arg1_shape_tmp[arg1_rank - 1], arg1_shape_tmp[arg1_rank - 2]); - ngraph::runtime::opt_kernel::reshape(reinterpret_cast(arg1_data), - reinterpret_cast(tmp.data()), - arg1_shape, - axis_vector, - arg1_shape_tmp, - sizeof(T)); + reshape(reinterpret_cast(arg1_data), + reinterpret_cast(tmp.data()), + arg1_shape, + axis_vector, + arg1_shape_tmp, + sizeof(T)); arg1_new_data.swap(tmp); arg1_data = arg1_new_data.data(); } diff --git a/src/core/reference/include/openvino/reference/range.hpp 
b/src/core/reference/include/openvino/reference/range.hpp index cc9cb2f643ae06..c8fb5c5084b0f5 100644 --- a/src/core/reference/include/openvino/reference/range.hpp +++ b/src/core/reference/include/openvino/reference/range.hpp @@ -14,7 +14,7 @@ namespace ov { namespace reference { // Return type is `void`, only enabled if `T` is a built-in FP -// type, or nGraph's `bfloat16` or `float16` type. +// type, or OpenVINO's `bfloat16` or `float16` type. template typename std::enable_if::value || std::is_same::value || std::is_same::value>::type diff --git a/src/core/reference/include/openvino/reference/reshape.hpp b/src/core/reference/include/openvino/reference/reshape.hpp index b3cdd12df47e06..2e4cb37e163140 100644 --- a/src/core/reference/include/openvino/reference/reshape.hpp +++ b/src/core/reference/include/openvino/reference/reshape.hpp @@ -9,11 +9,35 @@ namespace ov { namespace reference { -void reshape(const char* arg, + +/** + * @brief Basic reshape operation, without axes reorder. + * + * @param in Pointer to input data. + * @param out Pointer to output data. + * @param in_shape Input data shape. + * @param out_shape Output data shape. + * @param elem_size Single data element size im bytes. + */ +inline void reshape(const char* in, char* out, const Shape& in_shape, size_t elem_size) { + std::memcpy(out, in, shape_size(in_shape) * elem_size); +} + +/** + * @brief Permutes data shape and axes. + * + * @param in Pointer to input data. + * @param out Pointer to output data. + * @param in_shape Input data shape. + * @param axes_order Axes order. + * @param out_shape Output data shape. + * @param elem_size Single data element size im bytes. + */ +void reshape(const char* in, char* out, const Shape& in_shape, - const AxisVector& in_axis_order, + const AxisVector& axes_order, const Shape& out_shape, size_t elem_size); -} +} // namespace reference } // namespace ov diff --git a/src/core/reference/include/openvino/reference/reverse.hpp b/src/core/reference/include/openvino/reference/reverse.hpp index b4ea16c50130af..2eeb6afff5048b 100644 --- a/src/core/reference/include/openvino/reference/reverse.hpp +++ b/src/core/reference/include/openvino/reference/reverse.hpp @@ -4,9 +4,8 @@ #pragma once -#include - -#include "openvino/reference/utils/coordinate_transform.hpp" +#include "openvino/core/axis_set.hpp" +#include "openvino/core/shape.hpp" namespace ov { namespace reference { diff --git a/src/core/reference/src/op/convert.cpp b/src/core/reference/src/op/convert.cpp index 2846802d6c43ed..f89cc28a8139de 100644 --- a/src/core/reference/src/op/convert.cpp +++ b/src/core/reference/src/op/convert.cpp @@ -7,7 +7,7 @@ #if defined(OPENVINO_ARCH_X86) || defined(OPENVINO_ARCH_X86_64) # include "jit_generator.hpp" -using namespace ngraph::runtime; +using namespace ov::runtime; #endif namespace ov { diff --git a/src/core/reference/src/op/depth_to_space.cpp b/src/core/reference/src/op/depth_to_space.cpp index 0a0d33596d4961..19024002ea0526 100644 --- a/src/core/reference/src/op/depth_to_space.cpp +++ b/src/core/reference/src/op/depth_to_space.cpp @@ -7,8 +7,8 @@ #include #include -#include "ngraph/runtime/opt_kernel/reshape.hpp" #include "openvino/core/except.hpp" +#include "openvino/reference/reshape.hpp" namespace ov { namespace reference { @@ -96,7 +96,7 @@ void depth_to_space(const char* const in, post_transpose_shape[axis_idx] = dispersed_shape[axes_order[axis_idx]]; } - ngraph::runtime::opt_kernel::reshape(in, out, dispersed_shape, axes_order, post_transpose_shape, elem_size); + reshape(in, out, 
dispersed_shape, axes_order, post_transpose_shape, elem_size); } } // namespace reference diff --git a/src/core/reference/src/op/einsum.cpp b/src/core/reference/src/op/einsum.cpp index abe8f8c14ba547..271982c3986e7f 100644 --- a/src/core/reference/src/op/einsum.cpp +++ b/src/core/reference/src/op/einsum.cpp @@ -249,14 +249,6 @@ Shape compute_matmul_output_shape(const Shape& common_sub_shape, return matmul_output_shape; } -/// @brief Prepares default order axis vector -/// -AxisVector get_default_order(size_t rank) { - AxisVector default_order(rank); - std::iota(begin(default_order), end(default_order), 0); - return default_order; -} - /// \brief Update a vector of inputs and subscripts by removing items for /// inputs with indices input_ind1 and input_ind2 and inserted new input and /// the corresponsing subscript in the tail @@ -296,15 +288,12 @@ ov::Tensor unsqueeze_input(const ov::Tensor& input, std::vector& unsque } auto output = ov::Tensor(input.get_element_type(), output_shape); - const auto order = get_default_order(input_shape.size()); const auto element_type = input.get_element_type(); - reference::reshape(reinterpret_cast(input.data()), - reinterpret_cast(output.data()), - input_shape, - order, - output_shape, - element_type.size()); + reshape(static_cast(input.data()), + static_cast(output.data()), + input_shape, + element_type.size()); return output; } @@ -653,14 +642,11 @@ ov::Tensor reshape_input_for_matmul(const ov::Tensor& input, const auto element_type = input.get_element_type(); const auto& input_shape = input.get_shape(); auto output = ov::Tensor(element_type, new_shape); - const auto order = get_default_order(input_shape.size()); - - reference::reshape(reinterpret_cast(input.data()), - reinterpret_cast(output.data()), - input_shape, - order, - new_shape, - element_type.size()); + + reshape(static_cast(input.data()), + static_cast(output.data()), + input_shape, + element_type.size()); return output; } @@ -930,13 +916,10 @@ void contract_two_inputs(ov::TensorVector& inputs, back_shape.insert(back_shape.end(), separate2_sub_shape.begin(), separate2_sub_shape.end()); auto contract_output = ov::Tensor(matmul_output.get_element_type(), back_shape); - const auto order = get_default_order(matmul_output.get_shape().size()); - reference::reshape(reinterpret_cast(matmul_output.data()), - reinterpret_cast(contract_output.data()), - matmul_output.get_shape(), - order, - back_shape, - matmul_output.get_element_type().size()); + reshape(static_cast(matmul_output.data()), + static_cast(contract_output.data()), + matmul_output.get_shape(), + matmul_output.get_element_type().size()); update_operands(inputs, input_subscripts, input_ind1, input_ind2, contract_output, resultant_subscript); } diff --git a/src/core/reference/src/op/jit_generator.cpp b/src/core/reference/src/op/jit_generator.cpp index 99f224dc68b802..e0585ae442cf07 100644 --- a/src/core/reference/src/op/jit_generator.cpp +++ b/src/core/reference/src/op/jit_generator.cpp @@ -12,9 +12,9 @@ # include # include "jit_generator.hpp" -# include "ngraph/type/float16.hpp" +# include "openvino/core/type/float16.hpp" -namespace ngraph { +namespace ov { namespace runtime { namespace jit { using namespace Xbyak; @@ -186,6 +186,6 @@ void Generator::copy(const Xbyak::Reg64& dst, const Xbyak::Reg64& src, co } } // namespace jit } // namespace runtime -} // namespace ngraph +} // namespace ov #endif // OPENVINO_ARCH_X86 || OPENVINO_ARCH_X86_64 diff --git a/src/core/reference/src/op/jit_generator.hpp 
b/src/core/reference/src/op/jit_generator.hpp index 9e8900abf6345f..e6da95c0440da5 100644 --- a/src/core/reference/src/op/jit_generator.hpp +++ b/src/core/reference/src/op/jit_generator.hpp @@ -11,12 +11,9 @@ #include #include -namespace ngraph -{ - namespace runtime - { - namespace jit - { +namespace ov { +namespace runtime { +namespace jit { #ifdef XBYAK64 static const Xbyak::Operand::Code abi_save_gpr_regs[] = { Xbyak::Operand::RBX, @@ -94,4 +91,4 @@ namespace ngraph }; } } -} + } // namespace ov diff --git a/src/core/reference/src/op/reshape.cpp b/src/core/reference/src/op/reshape.cpp index dec23afda868d7..efbf0933fc32e6 100644 --- a/src/core/reference/src/op/reshape.cpp +++ b/src/core/reference/src/op/reshape.cpp @@ -8,12 +8,290 @@ #include #include "openvino/core/except.hpp" +#include "openvino/core/parallel.hpp" #include "openvino/reference/utils/coordinate_range.hpp" #include "openvino/reference/utils/coordinate_transform.hpp" namespace ov { namespace reference { namespace { +static size_t get_threshold() { + // TODO: find a better way, not hardcoded value + return (1 << 9) * parallel_get_num_threads(); +} + +static inline void copy_element(char* out, const char* in, size_t elem_size) { +#define CASE(type) \ + case sizeof(type): \ + *reinterpret_cast(out) = *reinterpret_cast(in); \ + break; + + switch (elem_size) { + CASE(int32_t) + CASE(int64_t) + CASE(int16_t) + CASE(int8_t) + default: + std::memcpy(out, in, elem_size); + break; + } +#undef CASE +} + +void reshape_2D(const char* in, + char* out, + const Shape& in_shape, + const AxisVector& axes_order, + const Shape& out_shape, + size_t elem_size) { + size_t num_elements = shape_size(in_shape); + if (num_elements <= get_threshold()) { + for (size_t i = 0; i < out_shape[0]; i++) { + size_t off = i; + for (size_t j = 0; j < out_shape[1]; j++) { + copy_element(out, in + off * elem_size, elem_size); + out += elem_size; + off += out_shape[0]; + } + } + } else { + ov::parallel_for2d(out_shape[0], out_shape[1], [in, out, &out_shape, elem_size](size_t i, size_t j) { + size_t in_off = j * out_shape[0] + i; + size_t out_off = i * out_shape[1] + j; + copy_element(out + out_off * elem_size, in + in_off * elem_size, elem_size); + }); + } +} + +static std::vector get_strides(size_t rank, size_t elem_size, const AxisVector& order, const Shape& in_shape) { + std::vector rev_order(rank); + for (size_t i = 0; i < rank; i++) { + rev_order[order[i]] = i; + } + + std::vector strides(rank); + strides[rev_order[rank - 1]] = elem_size; + for (size_t i = rank - 1; i > 0; i--) { + strides[rev_order[i - 1]] = strides[rev_order[i]] * in_shape[i]; + } + + return strides; +} + +void reshape_3D(const char* in, + char* out, + const Shape& in_shape, + const AxisVector& axes_order, + const Shape& out_shape, + size_t elem_size) { + size_t num_elements = shape_size(in_shape); + if (num_elements <= get_threshold()) { + const auto strides = get_strides(3, elem_size, axes_order, in_shape); + + size_t off_0 = 0; + for (size_t i = 0; i < out_shape[0]; i++) { + size_t off_1 = off_0; + for (size_t j = 0; j < out_shape[1]; j++) { + size_t in_off = off_1; + for (size_t k = 0; k < out_shape[2]; k++) { + copy_element(out, in + in_off, elem_size); + out += elem_size; + in_off += strides[2]; + } + off_1 += strides[1]; + } + off_0 += strides[0]; + } + } else { + ov::parallel_for3d(out_shape[0], + out_shape[1], + out_shape[2], + [in, out, axes_order, &in_shape, &out_shape, elem_size](size_t i, size_t j, size_t k) { + size_t in_indexes[3]; + in_indexes[axes_order[0]] = i; + 
in_indexes[axes_order[1]] = j; + in_indexes[axes_order[2]] = k; + size_t in_off = + (in_indexes[0] * in_shape[1] + in_indexes[1]) * in_shape[2] + in_indexes[2]; + size_t out_off = (i * out_shape[1] + j) * out_shape[2] + k; + copy_element(out + out_off * elem_size, in + in_off * elem_size, elem_size); + }); + } +} + +void reshape_4D(const char* in, + char* out, + const Shape& in_shape, + const AxisVector& axes_order, + const Shape& out_shape, + size_t elem_size) { + size_t num_elements = shape_size(in_shape); + if (num_elements <= get_threshold()) { + const auto strides = get_strides(4, elem_size, axes_order, in_shape); + + size_t off_0 = 0; + for (size_t i = 0; i < out_shape[0]; i++) { + size_t off_1 = off_0; + for (size_t j = 0; j < out_shape[1]; j++) { + size_t off_2 = off_1; + for (size_t k = 0; k < out_shape[2]; k++) { + size_t in_off = off_2; + for (size_t l = 0; l < out_shape[3]; l++) { + copy_element(out, in + in_off, elem_size); + out += elem_size; + in_off += strides[3]; + } + off_2 += strides[2]; + } + off_1 += strides[1]; + } + off_0 += strides[0]; + } + } else { + ov::parallel_for4d( + out_shape[0], + out_shape[1], + out_shape[2], + out_shape[3], + [in, out, axes_order, &in_shape, &out_shape, elem_size](size_t i, size_t j, size_t k, size_t l) { + size_t in_indexes[4]; + in_indexes[axes_order[0]] = i; + in_indexes[axes_order[1]] = j; + in_indexes[axes_order[2]] = k; + in_indexes[axes_order[3]] = l; + size_t in_off = + ((in_indexes[0] * in_shape[1] + in_indexes[1]) * in_shape[2] + in_indexes[2]) * in_shape[3] + + in_indexes[3]; + size_t out_off = ((i * out_shape[1] + j) * out_shape[2] + k) * out_shape[3] + l; + copy_element(out + out_off * elem_size, in + in_off * elem_size, elem_size); + }); + } +} + +void reshape_5D(const char* in, + char* out, + const Shape& in_shape, + const AxisVector& axes_order, + const Shape& out_shape, + size_t elem_size) { + size_t num_elements = shape_size(in_shape); + if (num_elements <= get_threshold()) { + const auto strides = get_strides(5, elem_size, axes_order, in_shape); + + size_t off_0 = 0; + for (size_t i = 0; i < out_shape[0]; i++) { + size_t off_1 = off_0; + for (size_t j = 0; j < out_shape[1]; j++) { + size_t off_2 = off_1; + for (size_t k = 0; k < out_shape[2]; k++) { + size_t off_3 = off_2; + for (size_t l = 0; l < out_shape[3]; l++) { + size_t in_off = off_3; + for (size_t m = 0; m < out_shape[4]; m++) { + copy_element(out, in + in_off, elem_size); + out += elem_size; + in_off += strides[4]; + } + off_3 += strides[3]; + } + off_2 += strides[2]; + } + off_1 += strides[1]; + } + off_0 += strides[0]; + } + } else { + ov::parallel_for5d( + out_shape[0], + out_shape[1], + out_shape[2], + out_shape[3], + out_shape[4], + [in, out, axes_order, &in_shape, &out_shape, elem_size](size_t i, size_t j, size_t k, size_t l, size_t m) { + size_t in_indexes[5]; + in_indexes[axes_order[0]] = i; + in_indexes[axes_order[1]] = j; + in_indexes[axes_order[2]] = k; + in_indexes[axes_order[3]] = l; + in_indexes[axes_order[4]] = m; + size_t in_off = + (((in_indexes[0] * in_shape[1] + in_indexes[1]) * in_shape[2] + in_indexes[2]) * in_shape[3] + + in_indexes[3]) * + in_shape[4] + + in_indexes[4]; + size_t out_off = (((i * out_shape[1] + j) * out_shape[2] + k) * out_shape[3] + l) * out_shape[4] + m; + copy_element(out + out_off * elem_size, in + in_off * elem_size, elem_size); + }); + } +} + +void reshape_6D(const char* in, + char* out, + const Shape& in_shape, + const AxisVector& axes_order, + const Shape& out_shape, + size_t elem_size) { + size_t num_elements = 
shape_size(in_shape); + if (num_elements <= get_threshold()) { + const auto strides = get_strides(6, elem_size, axes_order, in_shape); + + size_t off_0 = 0; + for (size_t i = 0; i < out_shape[0]; i++) { + size_t off_1 = off_0; + for (size_t j = 0; j < out_shape[1]; j++) { + size_t off_2 = off_1; + for (size_t k = 0; k < out_shape[2]; k++) { + size_t off_3 = off_2; + for (size_t l = 0; l < out_shape[3]; l++) { + size_t off_4 = off_3; + for (size_t m = 0; m < out_shape[4]; m++) { + size_t in_off = off_4; + for (size_t n = 0; n < out_shape[5]; n++) { + copy_element(out, in + in_off, elem_size); + out += elem_size; + in_off += strides[5]; + } + off_4 += strides[4]; + } + off_3 += strides[3]; + } + off_2 += strides[2]; + } + off_1 += strides[1]; + } + off_0 += strides[0]; + } + } else { + ov::parallel_for6d( + out_shape[0], + out_shape[1], + out_shape[2], + out_shape[3], + out_shape[4], + out_shape[5], + [=, &axes_order, &in_shape, &out_shape](size_t i, size_t j, size_t k, size_t l, size_t m, size_t n) { + size_t in_indexes[6]; + in_indexes[axes_order[0]] = i; + in_indexes[axes_order[1]] = j; + in_indexes[axes_order[2]] = k; + in_indexes[axes_order[3]] = l; + in_indexes[axes_order[4]] = m; + in_indexes[axes_order[5]] = n; + size_t in_off = + ((((in_indexes[0] * in_shape[1] + in_indexes[1]) * in_shape[2] + in_indexes[2]) * in_shape[3] + + in_indexes[3]) * + in_shape[4] + + in_indexes[4]) * + in_shape[5] + + in_indexes[5]; + size_t out_off = ((((i * out_shape[1] + j) * out_shape[2] + k) * out_shape[3] + l) * out_shape[4] + m) * + out_shape[5] + + n; + copy_element(out + out_off * elem_size, in + in_off * elem_size, elem_size); + }); + } +} + std::vector reorder(const std::vector& origin, const AxisVector& order) { std::vector reordered = origin; auto out = begin(reordered); @@ -24,30 +302,76 @@ std::vector reorder(const std::vector& origin, const AxisVector& } return reordered; } + +void reshape_ND(const char* in, + char* out, + const Shape& in_shape, + const AxisVector& axes_order, + const Shape& out_shape, + size_t elem_size) { + char* output = out; + const char* const output_end = out + shape_size(out_shape) * elem_size; + const auto axis_strides = reorder(row_major_strides(in_shape), axes_order); + for (const auto& coordinate : CoordinateTransformBasic(reorder(in_shape, axes_order))) { + if (output >= output_end) { + break; + } + const auto elem_offset = std::inner_product(begin(coordinate), end(coordinate), begin(axis_strides), 0ll); + const auto input = in + elem_offset * elem_size; + copy_element(output, input, elem_size); + output += elem_size; + } +} + +bool no_axis_reordering(const AxisVector& axis_order) { + auto tmp = axis_order; + std::sort(begin(tmp), end(tmp)); + tmp.erase(std::unique(begin(tmp), end(tmp)), end(tmp)); + return tmp == axis_order; +} } // namespace -void reshape(const char* arg, +void reshape(const char* in, char* out, const Shape& in_shape, - const AxisVector& in_axis_order, + const AxisVector& axes_order, const Shape& out_shape, size_t elem_size) { + if (no_axis_reordering(axes_order)) { + std::memcpy(out, in, shape_size(in_shape) * elem_size); + return; + } + if (shape_size(in_shape) == 1) { - std::memcpy(out, arg, elem_size); + copy_element(out, in, elem_size); return; } - char* output = out; - const char* const output_end = out + shape_size(out_shape) * elem_size; - const auto axis_strides = reorder(row_major_strides(in_shape), in_axis_order); - for (const auto& coordinate : CoordinateTransformBasic(reorder(in_shape, in_axis_order))) { - if (output >= 
output_end) { - break; - } - const auto elem_offset = std::inner_product(begin(coordinate), end(coordinate), begin(axis_strides), 0ll); - const auto input = arg + elem_offset * elem_size; - std::memcpy(output, input, elem_size); - output += elem_size; + switch (in_shape.size()) { + case 0: + copy_element(out, in, elem_size); + break; + case 1: + std::memcpy(out, in, in_shape[0] * elem_size); + break; + case 2: + reshape_2D(in, out, in_shape, axes_order, out_shape, elem_size); + break; + case 3: + reshape_3D(in, out, in_shape, axes_order, out_shape, elem_size); + break; + case 4: + reshape_4D(in, out, in_shape, axes_order, out_shape, elem_size); + break; + case 5: + reshape_5D(in, out, in_shape, axes_order, out_shape, elem_size); + break; + case 6: + reshape_6D(in, out, in_shape, axes_order, out_shape, elem_size); + break; + default: + reshape_ND(in, out, in_shape, axes_order, out_shape, elem_size); + break; } } } // namespace reference diff --git a/src/core/reference/src/op/shuffle_channels.cpp b/src/core/reference/src/op/shuffle_channels.cpp index 487d951153834b..bc46c80afb0962 100644 --- a/src/core/reference/src/op/shuffle_channels.cpp +++ b/src/core/reference/src/op/shuffle_channels.cpp @@ -4,7 +4,7 @@ #include "openvino/reference/shuffle_channels.hpp" -#include "ngraph/runtime/opt_kernel/reshape.hpp" +#include "openvino/reference/reshape.hpp" namespace ov { namespace reference { @@ -42,7 +42,7 @@ void shuffle_channels(const char* arg, reshaped_input_shape[1], reshaped_input_shape[3]}; AxisVector axis_vector{0, 2, 1, 3}; - ngraph::runtime::opt_kernel::reshape(arg, out, reshaped_input_shape, axis_vector, transposed_shape, elem_size); + reshape(arg, out, reshaped_input_shape, axis_vector, transposed_shape, elem_size); // Reshaped 4D tensor is interpreted as ND output tensor with original shape of data // input diff --git a/src/core/reference/src/op/space_to_depth.cpp b/src/core/reference/src/op/space_to_depth.cpp index 247efe39412362..67790a7b56a61c 100644 --- a/src/core/reference/src/op/space_to_depth.cpp +++ b/src/core/reference/src/op/space_to_depth.cpp @@ -6,8 +6,8 @@ #include -#include "ngraph/runtime/opt_kernel/reshape.hpp" #include "openvino/core/except.hpp" +#include "openvino/reference/reshape.hpp" namespace ov { namespace reference { @@ -84,7 +84,7 @@ void space_to_depth(const char* const in, post_transpose_shape[axis_idx] = dispersed_shape[axes_order[axis_idx]]; } - ngraph::runtime::opt_kernel::reshape(in, out, dispersed_shape, axes_order, post_transpose_shape, elem_size); + reshape(in, out, dispersed_shape, axes_order, post_transpose_shape, elem_size); } } // namespace reference } // namespace ov diff --git a/src/core/reference/src/op/strided_slice.cpp b/src/core/reference/src/op/strided_slice.cpp index 2ff07ba8500308..6e83305e653059 100644 --- a/src/core/reference/src/op/strided_slice.cpp +++ b/src/core/reference/src/op/strided_slice.cpp @@ -9,17 +9,19 @@ #include #include "ngraph/runtime/aligned_buffer.hpp" -#include "ngraph/runtime/opt_kernel/reshape.hpp" - -using namespace ov; -NGRAPH_SUPPRESS_DEPRECATED_START - -void reference::strided_slice(const char* arg, - char* out, - const Shape& arg_shape, - const op::util::SlicePlan& sp, - size_t elem_type) { - auto hasZeroDims = [](const ov::Shape& shape) -> bool { +#include "openvino/reference/reshape.hpp" +#include "openvino/reference/reverse.hpp" +#include "openvino/reference/slice.hpp" + +namespace ov { +namespace reference { + +void strided_slice(const char* arg, + char* out, + const Shape& arg_shape, + const 
op::util::SlicePlan& sp, + size_t elem_type) { + auto hasZeroDims = [](const Shape& shape) -> bool { return std::any_of(shape.begin(), shape.end(), [](const size_t& dim) { return dim == 0; }); @@ -28,6 +30,7 @@ void reference::strided_slice(const char* arg, return; } + OPENVINO_SUPPRESS_DEPRECATED_START ngraph::runtime::AlignedBuffer slice_out_buffer(shape_size(sp.reshape_in_shape) * elem_type); slice(reinterpret_cast(arg), slice_out_buffer.get_ptr(), @@ -39,12 +42,7 @@ void reference::strided_slice(const char* arg, elem_type); ngraph::runtime::AlignedBuffer reshape_out_buffer(shape_size(sp.reshape_out_shape) * elem_type); - ngraph::runtime::opt_kernel::reshape(slice_out_buffer.get_ptr(), - reshape_out_buffer.get_ptr(), - sp.reshape_in_shape, - ngraph::get_default_order(sp.reshape_in_shape.size()), - sp.reshape_out_shape, - elem_type); + reshape(slice_out_buffer.get_ptr(), reshape_out_buffer.get_ptr(), sp.reshape_in_shape, elem_type); reverse(reshape_out_buffer.get_ptr(), out, @@ -52,4 +50,7 @@ void reference::strided_slice(const char* arg, sp.reshape_out_shape, sp.reverse_axes, elem_type); + OPENVINO_SUPPRESS_DEPRECATED_END } +} // namespace reference +} // namespace ov diff --git a/src/core/reference/src/op/transpose.cpp b/src/core/reference/src/op/transpose.cpp index 5b893ccc5697ed..fbc38ebde38012 100644 --- a/src/core/reference/src/op/transpose.cpp +++ b/src/core/reference/src/op/transpose.cpp @@ -9,8 +9,8 @@ #include #include -#include "ngraph/runtime/opt_kernel/reshape.hpp" #include "openvino/core/shape.hpp" +#include "openvino/reference/reshape.hpp" namespace ov { namespace reference { @@ -20,10 +20,10 @@ void transpose(const char* data, size_t element_size, const int64_t* axes_order, Shape out_shape) { - // To reuse opt_kernel::reshape axes order vector has to be converted to AxisVector + // To reuse reference::reshape axes order vector has to be converted to AxisVector // Negative axes are not supported, it is validated by transpose evaluate method std::vector axis_vector(axes_order, axes_order + data_shape.size()); - ngraph::runtime::opt_kernel::reshape(data, out, data_shape, axis_vector, out_shape, element_size); + reshape(data, out, data_shape, axis_vector, out_shape, element_size); } } // namespace reference } // namespace ov diff --git a/src/core/reference/src/runtime/opt_kernel/reshape.cpp b/src/core/reference/src/runtime/opt_kernel/reshape.cpp deleted file mode 100644 index e0ca720845c3a0..00000000000000 --- a/src/core/reference/src/runtime/opt_kernel/reshape.cpp +++ /dev/null @@ -1,375 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ngraph/runtime/opt_kernel/reshape.hpp" - -#include -#include - -#include "openvino/core/parallel.hpp" -#include "openvino/reference/reshape.hpp" - -using namespace ngraph; - -namespace { -void reshape_in0(const char* in, - char* out, - const Shape& in_shape, - const AxisVector& in_axis_order, - const Shape& out_shape, - size_t elem_size) { - memcpy(out, in, elem_size); -} - -void reshape_in1(const char* in, - char* out, - const Shape& in_shape, - const AxisVector& in_axis_order, - const Shape& out_shape, - size_t elem_size) { - size_t size[1]; - size_t in_index[1]; - size_t* map_index[1]; - for (size_t i = 0; i < 1; i++) { - size[i] = in_shape[in_axis_order[i]]; - map_index[in_axis_order[i]] = &in_index[i]; - } - for (in_index[0] = 0; in_index[0] < size[0]; ++in_index[0]) { - memcpy(out, in + *map_index[0] * elem_size, elem_size); - out += elem_size; - } -} - -static size_t 
get_threshold() { - // TODO: find a better way, not hardcoded value - return (1 << 9) * parallel_get_num_threads(); -} - -static inline void copy_element(char* out, const char* in, size_t elem_size) { -#define CASE(type) \ - case sizeof(type): \ - *reinterpret_cast(out) = *reinterpret_cast(in); \ - break; - - switch (elem_size) { - CASE(int32_t) - CASE(int64_t) - CASE(int16_t) - CASE(int8_t) - default: - std::memcpy(out, in, elem_size); - break; - } -#undef CASE -} - -void reshape_in2(const char* in, - char* out, - const Shape& in_shape, - const AxisVector& in_axis_order, - const Shape& out_shape, - size_t elem_size) { - size_t num_elements = shape_size(in_shape); - if (num_elements <= get_threshold()) { - for (size_t i = 0; i < out_shape[0]; i++) { - size_t off = i; - for (size_t j = 0; j < out_shape[1]; j++) { - copy_element(out, in + off * elem_size, elem_size); - out += elem_size; - off += out_shape[0]; - } - } - } else { - ov::parallel_for2d(out_shape[0], out_shape[1], [in, out, &out_shape, elem_size](size_t i, size_t j) { - size_t in_off = j * out_shape[0] + i; - size_t out_off = i * out_shape[1] + j; - copy_element(out + out_off * elem_size, in + in_off * elem_size, elem_size); - }); - } -} - -static std::vector get_strides(size_t rank, size_t elem_size, const AxisVector& order, const Shape& in_shape) { - std::vector rev_order(rank); - for (size_t i = 0; i < rank; i++) { - rev_order[order[i]] = i; - } - - std::vector strides(rank); - strides[rev_order[rank - 1]] = elem_size; - for (size_t i = rank - 1; i > 0; i--) { - strides[rev_order[i - 1]] = strides[rev_order[i]] * in_shape[i]; - } - - return strides; -} - -void reshape_in3(const char* in, - char* out, - const Shape& in_shape, - const AxisVector& in_axis_order, - const Shape& out_shape, - size_t elem_size) { - size_t num_elements = shape_size(in_shape); - if (num_elements <= get_threshold()) { - const auto strides = get_strides(3, elem_size, in_axis_order, in_shape); - - size_t off_0 = 0; - for (size_t i = 0; i < out_shape[0]; i++) { - size_t off_1 = off_0; - for (size_t j = 0; j < out_shape[1]; j++) { - size_t in_off = off_1; - for (size_t k = 0; k < out_shape[2]; k++) { - copy_element(out, in + in_off, elem_size); - out += elem_size; - in_off += strides[2]; - } - off_1 += strides[1]; - } - off_0 += strides[0]; - } - } else { - ov::parallel_for3d(out_shape[0], - out_shape[1], - out_shape[2], - [in, out, in_axis_order, &in_shape, &out_shape, elem_size](size_t i, size_t j, size_t k) { - size_t in_indexes[3]; - in_indexes[in_axis_order[0]] = i; - in_indexes[in_axis_order[1]] = j; - in_indexes[in_axis_order[2]] = k; - size_t in_off = - (in_indexes[0] * in_shape[1] + in_indexes[1]) * in_shape[2] + in_indexes[2]; - size_t out_off = (i * out_shape[1] + j) * out_shape[2] + k; - copy_element(out + out_off * elem_size, in + in_off * elem_size, elem_size); - }); - } -} - -void reshape_in4(const char* in, - char* out, - const Shape& in_shape, - const AxisVector& in_axis_order, - const Shape& out_shape, - size_t elem_size) { - size_t num_elements = shape_size(in_shape); - if (num_elements <= get_threshold()) { - const auto strides = get_strides(4, elem_size, in_axis_order, in_shape); - - size_t off_0 = 0; - for (size_t i = 0; i < out_shape[0]; i++) { - size_t off_1 = off_0; - for (size_t j = 0; j < out_shape[1]; j++) { - size_t off_2 = off_1; - for (size_t k = 0; k < out_shape[2]; k++) { - size_t in_off = off_2; - for (size_t l = 0; l < out_shape[3]; l++) { - copy_element(out, in + in_off, elem_size); - out += elem_size; - in_off += 
strides[3]; - } - off_2 += strides[2]; - } - off_1 += strides[1]; - } - off_0 += strides[0]; - } - } else { - ov::parallel_for4d( - out_shape[0], - out_shape[1], - out_shape[2], - out_shape[3], - [in, out, in_axis_order, &in_shape, &out_shape, elem_size](size_t i, size_t j, size_t k, size_t l) { - size_t in_indexes[4]; - in_indexes[in_axis_order[0]] = i; - in_indexes[in_axis_order[1]] = j; - in_indexes[in_axis_order[2]] = k; - in_indexes[in_axis_order[3]] = l; - size_t in_off = - ((in_indexes[0] * in_shape[1] + in_indexes[1]) * in_shape[2] + in_indexes[2]) * in_shape[3] + - in_indexes[3]; - size_t out_off = ((i * out_shape[1] + j) * out_shape[2] + k) * out_shape[3] + l; - copy_element(out + out_off * elem_size, in + in_off * elem_size, elem_size); - }); - } -} - -void reshape_in5(const char* in, - char* out, - const Shape& in_shape, - const AxisVector& in_axis_order, - const Shape& out_shape, - size_t elem_size) { - size_t num_elements = shape_size(in_shape); - if (num_elements <= get_threshold()) { - const auto strides = get_strides(5, elem_size, in_axis_order, in_shape); - - size_t off_0 = 0; - for (size_t i = 0; i < out_shape[0]; i++) { - size_t off_1 = off_0; - for (size_t j = 0; j < out_shape[1]; j++) { - size_t off_2 = off_1; - for (size_t k = 0; k < out_shape[2]; k++) { - size_t off_3 = off_2; - for (size_t l = 0; l < out_shape[3]; l++) { - size_t in_off = off_3; - for (size_t m = 0; m < out_shape[4]; m++) { - copy_element(out, in + in_off, elem_size); - out += elem_size; - in_off += strides[4]; - } - off_3 += strides[3]; - } - off_2 += strides[2]; - } - off_1 += strides[1]; - } - off_0 += strides[0]; - } - } else { - ov::parallel_for5d( - out_shape[0], - out_shape[1], - out_shape[2], - out_shape[3], - out_shape[4], - [in, out, in_axis_order, &in_shape, &out_shape, elem_size](size_t i, - size_t j, - size_t k, - size_t l, - size_t m) { - size_t in_indexes[5]; - in_indexes[in_axis_order[0]] = i; - in_indexes[in_axis_order[1]] = j; - in_indexes[in_axis_order[2]] = k; - in_indexes[in_axis_order[3]] = l; - in_indexes[in_axis_order[4]] = m; - size_t in_off = - (((in_indexes[0] * in_shape[1] + in_indexes[1]) * in_shape[2] + in_indexes[2]) * in_shape[3] + - in_indexes[3]) * - in_shape[4] + - in_indexes[4]; - size_t out_off = (((i * out_shape[1] + j) * out_shape[2] + k) * out_shape[3] + l) * out_shape[4] + m; - copy_element(out + out_off * elem_size, in + in_off * elem_size, elem_size); - }); - } -} - -void reshape_in6(const char* in, - char* out, - const Shape& in_shape, - const AxisVector& in_axis_order, - const Shape& out_shape, - size_t elem_size) { - size_t num_elements = shape_size(in_shape); - if (num_elements <= get_threshold()) { - const auto strides = get_strides(6, elem_size, in_axis_order, in_shape); - - size_t off_0 = 0; - for (size_t i = 0; i < out_shape[0]; i++) { - size_t off_1 = off_0; - for (size_t j = 0; j < out_shape[1]; j++) { - size_t off_2 = off_1; - for (size_t k = 0; k < out_shape[2]; k++) { - size_t off_3 = off_2; - for (size_t l = 0; l < out_shape[3]; l++) { - size_t off_4 = off_3; - for (size_t m = 0; m < out_shape[4]; m++) { - size_t in_off = off_4; - for (size_t n = 0; n < out_shape[5]; n++) { - copy_element(out, in + in_off, elem_size); - out += elem_size; - in_off += strides[5]; - } - off_4 += strides[4]; - } - off_3 += strides[3]; - } - off_2 += strides[2]; - } - off_1 += strides[1]; - } - off_0 += strides[0]; - } - } else { - ov::parallel_for6d( - out_shape[0], - out_shape[1], - out_shape[2], - out_shape[3], - out_shape[4], - out_shape[5], - [in, out, 
in_axis_order, &in_shape, &out_shape, elem_size](size_t i, - size_t j, - size_t k, - size_t l, - size_t m, - size_t n) { - size_t in_indexes[6]; - in_indexes[in_axis_order[0]] = i; - in_indexes[in_axis_order[1]] = j; - in_indexes[in_axis_order[2]] = k; - in_indexes[in_axis_order[3]] = l; - in_indexes[in_axis_order[4]] = m; - in_indexes[in_axis_order[5]] = n; - size_t in_off = - ((((in_indexes[0] * in_shape[1] + in_indexes[1]) * in_shape[2] + in_indexes[2]) * in_shape[3] + - in_indexes[3]) * - in_shape[4] + - in_indexes[4]) * - in_shape[5] + - in_indexes[5]; - size_t out_off = ((((i * out_shape[1] + j) * out_shape[2] + k) * out_shape[3] + l) * out_shape[4] + m) * - out_shape[5] + - n; - copy_element(out + out_off * elem_size, in + in_off * elem_size, elem_size); - }); - } -} - -bool no_axis_reordering(const AxisVector& axis_order) { - auto tmp = axis_order; - std::sort(begin(tmp), end(tmp)); - tmp.erase(std::unique(begin(tmp), end(tmp)), end(tmp)); - return tmp == axis_order; -} - -} // namespace -void runtime::opt_kernel::reshape(const char* in, - char* out, - const Shape& in_shape, - const AxisVector& in_axis_order, - const Shape& out_shape, - size_t elem_size) { - if (no_axis_reordering(in_axis_order)) { - std::memcpy(out, in, shape_size(in_shape) * elem_size); - return; - } - - switch (in_shape.size()) { - case 0: - reshape_in0(in, out, in_shape, in_axis_order, out_shape, elem_size); - break; - case 1: - reshape_in1(in, out, in_shape, in_axis_order, out_shape, elem_size); - break; - case 2: - reshape_in2(in, out, in_shape, in_axis_order, out_shape, elem_size); - break; - case 3: - reshape_in3(in, out, in_shape, in_axis_order, out_shape, elem_size); - break; - case 4: - reshape_in4(in, out, in_shape, in_axis_order, out_shape, elem_size); - break; - case 5: - reshape_in5(in, out, in_shape, in_axis_order, out_shape, elem_size); - break; - case 6: - reshape_in6(in, out, in_shape, in_axis_order, out_shape, elem_size); - break; - default: - ov::reference::reshape(in, out, in_shape, in_axis_order, out_shape, elem_size); - break; - } -} diff --git a/src/core/src/op/batch_to_space.cpp b/src/core/src/op/batch_to_space.cpp index e9c4acb2253e69..da2c2c5fa703a1 100644 --- a/src/core/src/op/batch_to_space.cpp +++ b/src/core/src/op/batch_to_space.cpp @@ -16,10 +16,10 @@ #include "ngraph/builder/make_constant.hpp" #include "ngraph/node.hpp" #include "ngraph/opsets/opset3.hpp" -#include "ngraph/runtime/opt_kernel/reshape.hpp" #include "ngraph/shape.hpp" #include "openvino/op/util/precision_sensitive_attribute.hpp" #include "openvino/op/util/slice_plan.hpp" +#include "openvino/reference/reshape.hpp" #include "openvino/reference/strided_slice.hpp" using namespace std; @@ -110,12 +110,12 @@ bool batch_to_space_evaluate(const HostTensorVector& outputs, const HostTensorVe for (size_t block_idx = 1; block_idx < block_values_size; ++block_idx) { dispersed_shape[0] = block_values[block_idx]; dispersed_shape[1] /= block_values[block_idx]; - runtime::opt_kernel::reshape(flat_data, - dispersed_data.data(), - data_shape, - plain_axes_order, - dispersed_shape, - elem_size); + ov::reference::reshape(flat_data, + dispersed_data.data(), + data_shape, + plain_axes_order, + dispersed_shape, + elem_size); size_t val = 1; for (size_t axis_idx = 0; axis_idx <= block_values_size; ++axis_idx) { @@ -130,21 +130,21 @@ bool batch_to_space_evaluate(const HostTensorVector& outputs, const HostTensorVe post_transpose_shape[axis_idx] = dispersed_shape[axes_order[axis_idx]]; } - runtime::opt_kernel::reshape(dispersed_data.data(), - 
post_transpose_data.data(), - dispersed_shape, - axes_order, - post_transpose_shape, - elem_size); + ov::reference::reshape(dispersed_data.data(), + post_transpose_data.data(), + dispersed_shape, + axes_order, + post_transpose_shape, + elem_size); squeezed_shape[0] = dispersed_shape[1]; squeezed_shape[block_idx] *= block_values[block_idx]; dispersed_shape[block_idx + 1] = squeezed_shape[block_idx]; - runtime::opt_kernel::reshape(post_transpose_data.data(), - flat_data, - post_transpose_shape, - plain_axes_order, - squeezed_shape, - elem_size); + ov::reference::reshape(post_transpose_data.data(), + flat_data, + post_transpose_shape, + plain_axes_order, + squeezed_shape, + elem_size); data_shape = squeezed_shape; } diff --git a/src/core/src/op/reshape.cpp b/src/core/src/op/reshape.cpp index a20a4b5d3a06fe..279a06350ee7ea 100644 --- a/src/core/src/op/reshape.cpp +++ b/src/core/src/op/reshape.cpp @@ -10,7 +10,6 @@ #include "bound_evaluate.hpp" #include "compare.hpp" #include "itt.hpp" -#include "ngraph/runtime/opt_kernel/reshape.hpp" #include "ngraph/util.hpp" #include "openvino/core/dimension_tracker.hpp" #include "openvino/core/validation_util.hpp" @@ -177,15 +176,10 @@ bool op::v1::Reshape::evaluate_reshape(ov::TensorVector& outputs, const ov::Tens OPENVINO_ASSERT(ov::PartialShape(output_shape).is_static()); outputs[0].set_shape(ov::PartialShape(output_shape).to_shape()); - OPENVINO_SUPPRESS_DEPRECATED_START - const AxisVector order = ngraph::get_default_order(inputs[0].get_shape()); - OPENVINO_SUPPRESS_DEPRECATED_END - ngraph::runtime::opt_kernel::reshape(static_cast(inputs[0].data()), - static_cast(outputs[0].data()), - inputs[0].get_shape(), - order, - outputs[0].get_shape(), - inputs[0].get_element_type().size()); + ov::reference::reshape(static_cast(inputs[0].data()), + static_cast(outputs[0].data()), + inputs[0].get_shape(), + inputs[0].get_element_type().size()); return true; } diff --git a/src/core/src/op/shuffle_channels.cpp b/src/core/src/op/shuffle_channels.cpp index 30de73691bb470..50ffa228d33dc7 100644 --- a/src/core/src/op/shuffle_channels.cpp +++ b/src/core/src/op/shuffle_channels.cpp @@ -11,7 +11,6 @@ #include "ngraph/attribute_visitor.hpp" #include "ngraph/builder/reshape.hpp" #include "ngraph/runtime/host_tensor.hpp" -#include "ngraph/runtime/opt_kernel/reshape.hpp" #include "ngraph/type/element_type.hpp" #include "ngraph/type/element_type_traits.hpp" #include "openvino/core/validation_util.hpp" diff --git a/src/core/src/op/space_to_batch.cpp b/src/core/src/op/space_to_batch.cpp index 634a0163f088d4..1747b6b615648a 100644 --- a/src/core/src/op/space_to_batch.cpp +++ b/src/core/src/op/space_to_batch.cpp @@ -15,10 +15,10 @@ #include "ngraph/builder/make_constant.hpp" #include "ngraph/node.hpp" #include "ngraph/ops.hpp" -#include "ngraph/runtime/opt_kernel/reshape.hpp" #include "ngraph/shape.hpp" #include "openvino/op/util/precision_sensitive_attribute.hpp" #include "openvino/reference/pad.hpp" +#include "openvino/reference/reshape.hpp" using namespace std; using namespace ngraph; @@ -169,32 +169,32 @@ bool ngraph::op::v1::SpaceToBatch::evaluate_space_to_batch(const HostTensorVecto } } - ngraph::runtime::opt_kernel::reshape(flat_data.data(), - dispersed_data.data(), - data_shape, - plain_axes_order, - dispersed_shape, - elem_size); + ov::reference::reshape(flat_data.data(), + dispersed_data.data(), + data_shape, + plain_axes_order, + dispersed_shape, + elem_size); ov::Shape post_transpose_shape(axes_order.size()); for (size_t i = 0; i < axes_order.size(); ++i) { 
post_transpose_shape[i] = dispersed_shape[axes_order[i]]; } - ngraph::runtime::opt_kernel::reshape(dispersed_data.data(), - post_transpose_data.data(), - dispersed_shape, - axes_order, - post_transpose_shape, - elem_size); + ov::reference::reshape(dispersed_data.data(), + post_transpose_data.data(), + dispersed_shape, + axes_order, + post_transpose_shape, + elem_size); squeezed_shape[0] *= block_values[block_idx]; squeezed_shape[block_idx] /= block_values[block_idx]; - ngraph::runtime::opt_kernel::reshape(post_transpose_data.data(), - flat_data.data(), - post_transpose_shape, - plain_axes_order, - squeezed_shape, - elem_size); + ov::reference::reshape(post_transpose_data.data(), + flat_data.data(), + post_transpose_shape, + plain_axes_order, + squeezed_shape, + elem_size); data_shape = squeezed_shape; } diff --git a/src/core/tests/reshape_opt_kernel.cpp b/src/core/tests/reshape_opt_kernel.cpp index d23b25308a39f7..823f75cc39e123 100644 --- a/src/core/tests/reshape_opt_kernel.cpp +++ b/src/core/tests/reshape_opt_kernel.cpp @@ -8,8 +8,8 @@ #include #include "common_test_utils/ndarray.hpp" -#include "ngraph/runtime/opt_kernel/reshape.hpp" #include "openvino/core/axis_vector.hpp" +#include "openvino/reference/reshape.hpp" using namespace ov; @@ -50,12 +50,12 @@ TEST_P(ReshapeOptKernel, reshape_opt_kernel) { for (size_t i = 0; i < out_shape.size(); i++) out_shape[i] = in_shape[axis_order[i]]; - ngraph::runtime::opt_kernel::reshape((const char*)p.input.data(), - (char*)output_buff.data(), - in_shape, - axis_order, - out_shape, - sizeof(ElementValue)); + ov::reference::reshape(static_cast(p.input.data()), + reinterpret_cast(output_buff.data()), + in_shape, + axis_order, + out_shape, + sizeof(ElementValue)); EXPECT_EQ(p.output.get_vector(), output_buff); } From bf9bdaa671628262eeb6b2e8124c44e4b149c92d Mon Sep 17 00:00:00 2001 From: hyunback kim Date: Tue, 10 Oct 2023 12:47:55 +0900 Subject: [PATCH 108/257] Onednn3.3 (#19299) * [GPU] oneDNN3.3 integration. * Supports new formats from oneDNN3.3 requires. * Fix Perf regression because of the wrong mvn kernel selection issue. modnet_webcam_portrait_matting.int8 person-reidentification-retail-0248.int8 * support undefined onednn tag for using any tag instead. 
Signed-off-by: hyunback --- .../include/intel_gpu/runtime/format.hpp | 11 ++ .../impls/ocl/kernel_selector_helper.cpp | 44 ++++++ .../graph/impls/onednn/convolution_onednn.cpp | 55 +++++--- .../src/graph/impls/onednn/utils.cpp | 27 +--- .../intel_gpu/src/graph/layout_optimizer.cpp | 44 +++--- .../include/batch_headers/fetch_data.cl | 130 ++++++++++++++++++ .../include/batch_headers/fetch_weights.cl | 45 ++++-- .../cl_kernels/reorder_weights.cl | 32 ++++- .../intel_gpu/src/kernel_selector/jitter.cpp | 10 ++ .../kernel_selector_common.cpp | 11 ++ .../kernel_selector/kernel_selector_utils.cpp | 11 +- .../kernels/reorder/reorder_kernel_base.cpp | 4 + .../src/kernel_selector/tensor_type.cpp | 60 ++++++++ .../src/kernel_selector/tensor_type.h | 11 ++ src/plugins/intel_gpu/src/runtime/format.cpp | 11 ++ src/plugins/intel_gpu/src/runtime/layout.cpp | 2 + .../unit/test_cases/hash_key_gpu_test.cpp | 2 +- src/plugins/intel_gpu/thirdparty/onednn_gpu | 2 +- 18 files changed, 435 insertions(+), 77 deletions(-) diff --git a/src/plugins/intel_gpu/include/intel_gpu/runtime/format.hpp b/src/plugins/intel_gpu/include/intel_gpu/runtime/format.hpp index fb6bfaa3fc8224..9e38a8b99c7b5e 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/runtime/format.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/runtime/format.hpp @@ -90,6 +90,8 @@ struct format { b_fs_zyx_fsv2, b_fs_yx_fsv4, ///< format for input for IMAD convolutions b_fs_zyx_fsv4, ///< format for input for IMAD 3D convolutions + b_fs_yx_fsv8, + b_fs_zyx_fsv8, b_fs_yx_fsv16, ///< format used for blocked convolution b_fs_yx_fsv32, ///< format used for blocked int8 convolution b_fs_zyx_fsv16, ///< format used for 3D blocked convolution (features blocked by 16) @@ -107,6 +109,8 @@ struct format { bs_fs_zyx_bsv8_fsv2, ///< format used for 3D blocked convolution (batch and features blocked by 8 and 2) bs_fs_yx_bsv16_fsv2, ///< format used for 2D blocked convolution (batch and features blocked by 16 and 2) bs_fs_zyx_bsv16_fsv2, ///< format used for 3D blocked convolution (batch and features blocked by 16 and 2) + bs_fs_yx_bsv16_fsv8, ///< format used for 2D blocked convolution (batch and features blocked by 16 and 8) + bs_fs_zyx_bsv16_fsv8, ///< format used for 3D blocked convolution (batch and features blocked by 16 and 8) bs_fs_yx_bsv4_fsv2, ///< format used for 2D blocked convolution (batch blocked by 4, features blocked by 2) bs_fs_zyx_bsv4_fsv4, ///< format used for 3D blocked convolution (batch and features blocked by 4) bs_fs_zyx_bsv4_fsv2, ///< format used for 3D blocked convolution (batch blocked by 4, features blocked by 2) @@ -135,7 +139,9 @@ struct format { oyix, oxiy, os_iyx_osv16, ///< format used only for convolution weights + o_is_yx_isv4, ///< format used only for convolution weights o_is_yx_isv16, ///< format used only for convolution weights + o_is_zyx_isv16, ///< format used only for convolution weights os_yxi_osv16, ///< format used only for convolution weights os_is_yx_osv16_isv2, ///< format used only for convolution weights os_is_yx_osv16_isv16, ///< format used for convolution i8 weights @@ -145,6 +151,7 @@ struct format { os_is_yx_isv16_osv16, ///< format used for blocked convolution os_is_zyx_isv16_osv16, ///< format used for weights for blocked 3D convolution is_os_zyx_isv16_osv16, ///< format used for weights for blocked 3D deconvolution + is_os_yx_osv8_isv4, ///< format used for weights for blocked deconvolution is_os_yx_isv16_osv16, ///< format used for weights for blocked deconvolution is_os_yx_isv16_osv8, ///< format used for 
weights for blocked deconvolution is_os_yx_isv16_osv4, ///< format used for weights for blocked deconvolution @@ -232,6 +239,8 @@ struct format { os_zyx_is_osv8_isv4, os_zy_is_x_osv8_isv2, os_zy_is_x_osv8_isv4, + os_is_yx_osv4_isv16, + os_is_yx_osv2_isv16, goiyx, ///< format used for weights for 2D convolution gioyx, ///< format used for weights for 2D deconvolution @@ -241,7 +250,9 @@ struct format { g_os_iyx_osv8, ///< format used for weights for 2D convolution g_os_iyx_osv16, ///< format used for weights for 2D convolution g_os_iyx_osv32, ///< format used for weights for 2D convolution + gs_oiyx_gsv8, ///< format used for weights for 2D convolution gs_oiyx_gsv16, ///< format used for weights for 2D convolution + gs_oizyx_gsv8, ///< format used for weights for 3D convolution gs_oizyx_gsv16, ///< format used for weights for 3D convolution gs_oiyx_gsv32, ///< format used for weights for 2D convolution gs_oizyx_gsv32, ///< format used for weights for 3D convolution diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/kernel_selector_helper.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/kernel_selector_helper.cpp index 12e7d1b28b93ee..3392a7e42b2363 100644 --- a/src/plugins/intel_gpu/src/graph/impls/ocl/kernel_selector_helper.cpp +++ b/src/plugins/intel_gpu/src/graph/impls/ocl/kernel_selector_helper.cpp @@ -219,6 +219,8 @@ kernel_selector::data_layout to_data_layout(format f) { return kernel_selector::data_layout::b_fs_yx_fsv2; case format::b_fs_yx_fsv4: return kernel_selector::data_layout::b_fs_yx_fsv4; + case format::b_fs_yx_fsv8: + return kernel_selector::data_layout::b_fs_yx_fsv8; case format::b_fs_yx_fsv16: return kernel_selector::data_layout::b_fs_yx_fsv16; case format::b_fs_yx_fsv32: @@ -227,6 +229,8 @@ kernel_selector::data_layout to_data_layout(format f) { return kernel_selector::data_layout::b_fs_zyx_fsv2; case format::b_fs_zyx_fsv4: return kernel_selector::data_layout::b_fs_zyx_fsv4; + case format::b_fs_zyx_fsv8: + return kernel_selector::data_layout::b_fs_zyx_fsv8; case format::b_fs_zyx_fsv32: return kernel_selector::data_layout::b_fs_zyx_fsv32; case format::bs_f_bsv16: @@ -277,6 +281,10 @@ kernel_selector::data_layout to_data_layout(format f) { return kernel_selector::data_layout::bs_fs_yx_bsv16_fsv2; case format::bs_fs_zyx_bsv16_fsv2: return kernel_selector::data_layout::bs_fs_zyx_bsv16_fsv2; + case format::bs_fs_yx_bsv16_fsv8: + return kernel_selector::data_layout::bs_fs_yx_bsv16_fsv8; + case format::bs_fs_zyx_bsv16_fsv8: + return kernel_selector::data_layout::bs_fs_zyx_bsv16_fsv8; case format::bs_fs_yx_bsv8_fsv2: return kernel_selector::data_layout::bs_fs_yx_bsv8_fsv2; case format::bs_fs_zyx_bsv8_fsv2: @@ -320,10 +328,14 @@ cldnn::format from_data_layout(kernel_selector::data_layout l) { return cldnn::format::b_fs_yx_fsv2; case kernel_selector::data_layout::b_fs_yx_fsv4: return cldnn::format::b_fs_yx_fsv4; + case kernel_selector::data_layout::b_fs_yx_fsv8: + return cldnn::format::b_fs_yx_fsv8; case kernel_selector::data_layout::b_fs_yx_fsv16: return cldnn::format::b_fs_yx_fsv16; case kernel_selector::data_layout::b_fs_yx_fsv32: return cldnn::format::b_fs_yx_fsv32; + case kernel_selector::data_layout::b_fs_zyx_fsv8: + return cldnn::format::b_fs_zyx_fsv8; case kernel_selector::data_layout::b_fs_zyx_fsv32: return cldnn::format::b_fs_zyx_fsv32; case kernel_selector::data_layout::bs_f_bsv8__af8: @@ -366,6 +378,10 @@ cldnn::format from_data_layout(kernel_selector::data_layout l) { return cldnn::format::bs_fs_yx_bsv16_fsv2; case kernel_selector::data_layout::bs_fs_zyx_bsv16_fsv2: 
return cldnn::format::bs_fs_zyx_bsv16_fsv2; + case kernel_selector::data_layout::bs_fs_yx_bsv16_fsv8: + return cldnn::format::bs_fs_yx_bsv16_fsv8; + case kernel_selector::data_layout::bs_fs_zyx_bsv16_fsv8: + return cldnn::format::bs_fs_zyx_bsv16_fsv8; case kernel_selector::data_layout::bs_fs_yx_bsv8_fsv2: return cldnn::format::bs_fs_yx_bsv8_fsv2; case kernel_selector::data_layout::bs_fs_yx_bsv32_fsv32: @@ -406,8 +422,12 @@ kernel_selector::weights_layout to_weights_layout(format f, bool is_grouped) { return kernel_selector::weights_layout::yxio; case format::os_yxi_osv16: return kernel_selector::weights_layout::os_yxi_osv16; + case format::o_is_yx_isv4: + return kernel_selector::weights_layout::o_is_yx_isv4; case format::o_is_yx_isv16: return kernel_selector::weights_layout::o_is_yx_isv16; + case format::o_is_zyx_isv16: + return kernel_selector::weights_layout::o_is_zyx_isv16; case format::os_iyx_osv16: return kernel_selector::weights_layout::os_iyx_osv16; case format::os_is_yx_osv16_isv2: @@ -474,6 +494,10 @@ kernel_selector::weights_layout to_weights_layout(format f, bool is_grouped) { return kernel_selector::weights_layout::os_is_zyx_osv8_isv2; case format::os_is_yx_osv8_isv4: return kernel_selector::weights_layout::os_is_yx_osv8_isv4; + case format::os_is_yx_osv4_isv16: + return kernel_selector::weights_layout::os_is_yx_osv4_isv16; + case format::os_is_yx_osv2_isv16: + return kernel_selector::weights_layout::os_is_yx_osv2_isv16; case format::os_is_zyx_osv8_isv4: return kernel_selector::weights_layout::os_is_zyx_osv8_isv4; case format::os_is_yx_osa4_isa8_osv8_isv4_swizzled_by_4: @@ -527,6 +551,8 @@ kernel_selector::weights_layout to_weights_layout(format f, bool is_grouped) { return kernel_selector::weights_layout::is_os_zyx_isv16_osv16; case format::os_is_zyx_osv32_isv16: return kernel_selector::weights_layout::os_is_zyx_osv32_isv16; + case format::is_os_yx_osv8_isv4: + return kernel_selector::weights_layout::is_os_yx_osv8_isv4; case format::is_os_yx_isv16_osv16: return kernel_selector::weights_layout::is_os_yx_isv16_osv16; case format::is_os_yx_isv16_osv8: @@ -567,6 +593,10 @@ kernel_selector::weights_layout to_weights_layout(format f, bool is_grouped) { return kernel_selector::weights_layout::g_os_iyx_osv16; case format::g_os_iyx_osv32: return kernel_selector::weights_layout::g_os_iyx_osv32; + case format::gs_oiyx_gsv8: + return kernel_selector::weights_layout::gs_oiyx_gsv8; + case format::gs_oizyx_gsv8: + return kernel_selector::weights_layout::gs_oizyx_gsv8; case format::gs_oiyx_gsv16: return kernel_selector::weights_layout::gs_oiyx_gsv16; case format::gs_oizyx_gsv16: @@ -711,8 +741,12 @@ cldnn::format::type from_weights_layout(kernel_selector::weights_layout l) { return cldnn::format::yxio; case kernel_selector::weights_layout::os_yxi_osv16: return cldnn::format::os_yxi_osv16; + case kernel_selector::weights_layout::o_is_yx_isv4: + return cldnn::format::o_is_yx_isv4; case kernel_selector::weights_layout::o_is_yx_isv16: return cldnn::format::o_is_yx_isv16; + case kernel_selector::weights_layout::o_is_zyx_isv16: + return cldnn::format::o_is_zyx_isv16; case kernel_selector::weights_layout::os_iyx_osv16: return cldnn::format::os_iyx_osv16; case kernel_selector::weights_layout::os_is_yx_isv16_osv16: @@ -821,6 +855,8 @@ cldnn::format::type from_weights_layout(kernel_selector::weights_layout l) { return cldnn::format::os_is_zyx_isv16_osv16; case kernel_selector::weights_layout::is_os_zyx_isv16_osv16: return cldnn::format::is_os_zyx_isv16_osv16; + case 
kernel_selector::weights_layout::is_os_yx_osv8_isv4: + return cldnn::format::is_os_yx_osv8_isv4; case kernel_selector::weights_layout::is_os_yx_isv16_osv16: return cldnn::format::is_os_yx_isv16_osv16; case kernel_selector::weights_layout::is_os_yx_isv16_osv8: @@ -855,6 +891,10 @@ cldnn::format::type from_weights_layout(kernel_selector::weights_layout l) { return cldnn::format::os_is_zyx_osv8_isv2; case kernel_selector::weights_layout::os_is_yx_osv8_isv4: return cldnn::format::os_is_yx_osv8_isv4; + case kernel_selector::weights_layout::os_is_yx_osv4_isv16: + return cldnn::format::os_is_yx_osv4_isv16; + case kernel_selector::weights_layout::os_is_yx_osv2_isv16: + return cldnn::format::os_is_yx_osv2_isv16; case kernel_selector::weights_layout::os_is_zyx_osv8_isv4: return cldnn::format::os_is_zyx_osv8_isv4; case kernel_selector::weights_layout::os_is_zyx_isv8_osv16_isv2: @@ -871,6 +911,10 @@ cldnn::format::type from_weights_layout(kernel_selector::weights_layout l) { return cldnn::format::g_os_iyx_osv16; case kernel_selector::weights_layout::g_os_iyx_osv32: return cldnn::format::g_os_iyx_osv32; + case kernel_selector::weights_layout::gs_oiyx_gsv8: + return cldnn::format::gs_oiyx_gsv8; + case kernel_selector::weights_layout::gs_oizyx_gsv8: + return cldnn::format::gs_oizyx_gsv8; case kernel_selector::weights_layout::gs_oiyx_gsv16: return cldnn::format::gs_oiyx_gsv16; case kernel_selector::weights_layout::gs_oizyx_gsv16: diff --git a/src/plugins/intel_gpu/src/graph/impls/onednn/convolution_onednn.cpp b/src/plugins/intel_gpu/src/graph/impls/onednn/convolution_onednn.cpp index f1d1ab0a6c0745..075929afa765fb 100644 --- a/src/plugins/intel_gpu/src/graph/impls/onednn/convolution_onednn.cpp +++ b/src/plugins/intel_gpu/src/graph/impls/onednn/convolution_onednn.cpp @@ -53,33 +53,40 @@ struct convolution_onednn : typed_primitive_onednn_impl { dnnl::memory::desc desc = onednn::layout_to_memory_desc(a_zp->get_layout(), dnnl::memory::format_tag::a, true); args.insert({DNNL_ARG_ATTR_ZERO_POINTS | DNNL_ARG_SRC, a_zp->get_onednn_memory(desc)}); - auto dnnl_mem = a_zp->get_onednn_memory(desc); - void *mapped_ptr = dnnl_mem.map_data(); - if (mapped_ptr) { - GPU_DEBUG_TRACE_DETAIL << instance.id() << " activations_zero_points: "; - for (size_t i = 0; i < desc.get_size(); ++i) { - GPU_DEBUG_TRACE_DETAIL << static_cast(mapped_ptr)[i] << " "; + GPU_DEBUG_GET_INSTANCE(debug_config); + GPU_DEBUG_IF(debug_config->verbose >= static_cast(ov::intel_gpu::LogLevel::TRACE_DETAIL)) { + auto dnnl_mem = a_zp->get_onednn_memory(desc); + void *mapped_ptr = dnnl_mem.map_data(); + if (mapped_ptr) { + GPU_DEBUG_TRACE_DETAIL << instance.id() << " activations_zero_points: "; + for (size_t i = 0; i < desc.get_size(); ++i) { + GPU_DEBUG_TRACE_DETAIL << static_cast(mapped_ptr)[i] << " "; + } + GPU_DEBUG_TRACE_DETAIL << std::endl; + dnnl_mem.unmap_data(mapped_ptr); } - GPU_DEBUG_TRACE_DETAIL << std::endl; - dnnl_mem.unmap_data(mapped_ptr); } } if (instance.weights_zero_points_term()) { - auto w_zp = instance.weights_zero_points_memory(); - dnnl::memory::desc desc = onednn::layout_to_memory_desc(w_zp->get_layout(), dnnl::memory::format_tag::a, true); - args.insert({DNNL_ARG_ATTR_ZERO_POINTS | DNNL_ARG_WEIGHTS, w_zp->get_onednn_memory(desc)}); - - auto dnnl_mem = w_zp->get_onednn_memory(desc); - void *mapped_ptr = dnnl_mem.map_data(); - if (mapped_ptr) { - GPU_DEBUG_TRACE_DETAIL << instance.id() << " weights_zero_points: "; - for (size_t i = 0; i < desc.get_size(); ++i) { - GPU_DEBUG_TRACE_DETAIL << static_cast(mapped_ptr)[i] << " "; - } - 
GPU_DEBUG_TRACE_DETAIL << std::endl; - dnnl_mem.unmap_data(mapped_ptr); - } + throw std::runtime_error("Convolution oneDNN primitive doesn't support asymmetric weights quantization"); + // auto w_zp = instance.weights_zero_points_memory(); + // dnnl::memory::desc desc = onednn::layout_to_memory_desc(w_zp->get_layout(), dnnl::memory::format_tag::a, true); + // args.insert({DNNL_ARG_ATTR_ZERO_POINTS | DNNL_ARG_WEIGHTS, w_zp->get_onednn_memory(desc)}); + + // GPU_DEBUG_GET_INSTANCE(debug_config); + // GPU_DEBUG_IF(debug_config->verbose >= static_cast(ov::intel_gpu::LogLevel::TRACE_DETAIL)) { + // auto dnnl_mem = w_zp->get_onednn_memory(desc); + // void *mapped_ptr = dnnl_mem.map_data(); + // if (mapped_ptr) { + // GPU_DEBUG_TRACE_DETAIL << instance.id() << " weights_zero_points: "; + // for (size_t i = 0; i < desc.get_size(); ++i) { + // GPU_DEBUG_TRACE_DETAIL << static_cast(mapped_ptr)[i] << " "; + // } + // GPU_DEBUG_TRACE_DETAIL << std::endl; + // dnnl_mem.unmap_data(mapped_ptr); + // } + // } } return args; @@ -255,6 +262,8 @@ attach_convolution_onednn::attach_convolution_onednn() { format::b_fs_zyx_fsv2, format::b_fs_yx_fsv4, format::b_fs_zyx_fsv4, + format::b_fs_yx_fsv8, + format::b_fs_zyx_fsv8, format::b_fs_yx_fsv16, format::b_fs_zyx_fsv16, format::b_fs_zyx_fsv32, @@ -269,9 +278,11 @@ attach_convolution_onednn::attach_convolution_onednn() { format::bs_fs_zyx_bsv32_fsv32, format::bs_fs_yx_bsv4_fsv4, format::bs_fs_yx_bsv8_fsv4, + format::bs_fs_yx_bsv16_fsv8, format::bs_fs_yx_bsv16_fsv4, format::bs_fs_yx_bsv16_fsv2, format::bs_fs_zyx_bsv8_fsv4, + format::bs_fs_zyx_bsv16_fsv8, format::bs_fs_zyx_bsv16_fsv4, format::bs_fs_zyx_bsv16_fsv2, format::bs_fs_yx_bsv8_fsv2, diff --git a/src/plugins/intel_gpu/src/graph/impls/onednn/utils.cpp b/src/plugins/intel_gpu/src/graph/impls/onednn/utils.cpp index 6905c074be8a86..d72aba88cb1177 100644 --- a/src/plugins/intel_gpu/src/graph/impls/onednn/utils.cpp +++ b/src/plugins/intel_gpu/src/graph/impls/onednn/utils.cpp @@ -13,23 +13,6 @@ namespace cldnn { namespace onednn { -namespace { -std::string convert_data_format_string(cldnn::format fmt) { - switch (fmt) { - case cldnn::format::b_fs_yx_fsv2: return "aBcd2b"; - case cldnn::format::b_fs_zyx_fsv2: return "aBcde2b"; - case cldnn::format::bs_fs_yx_bsv16_fsv2: return "ABcd16a2b"; - case cldnn::format::bs_fs_zyx_bsv16_fsv2: return "ABcde16a2b"; - case cldnn::format::bs_fs_yx_bsv16_fsv4: return "ABcd16a4b"; - case cldnn::format::bs_fs_zyx_bsv16_fsv4: return "ABcde16a4b"; - case cldnn::format::bs_fs_yx_bsv16_fsv32: return "ABcd16a32b"; - case cldnn::format::bs_fs_zyx_bsv16_fsv32: return "ABcde16a32b"; - default: throw std::invalid_argument("[clDNN] Unsupported conversion from cldnn to onednn layout string" + fmt_to_str(fmt)); - } -} - -} // namespace - template cldnn::memory::ptr convert_zp_data_to_s32(const memory::ptr zp_memory) { auto engine = zp_memory->get_engine(); @@ -132,9 +115,11 @@ std::vector> format_map = { { cldnn::format::bzyxf, dnnl::memory::format_tag::ndhwc }, { cldnn::format::b_fs_yx_fsv2, dnnl::memory::format_tag::undef }, { cldnn::format::b_fs_yx_fsv4, dnnl::memory::format_tag::aBcd4b }, + { cldnn::format::b_fs_yx_fsv8, dnnl::memory::format_tag::aBcd8b }, { cldnn::format::b_fs_yx_fsv16, dnnl::memory::format_tag::nChw16c }, { cldnn::format::b_fs_yx_fsv32, dnnl::memory::format_tag::aBcd32b }, { cldnn::format::b_fs_zyx_fsv4, dnnl::memory::format_tag::aBcde4b }, + { cldnn::format::b_fs_zyx_fsv8, dnnl::memory::format_tag::aBcde8b }, { cldnn::format::b_fs_zyx_fsv16, 
dnnl::memory::format_tag::nCdhw16c }, { cldnn::format::b_fs_zyx_fsv32, dnnl::memory::format_tag::aBcde32b }, { cldnn::format::bs_fs_yx_bsv16_fsv16, dnnl::memory::format_tag::NChw16n16c }, @@ -157,8 +142,10 @@ dnnl::memory::format_tag convert_data_format(cldnn::format fmt) { auto ret = std::find_if(format_map.begin(), format_map.end(), [fmt](std::pair &e) { return e.first == fmt; }); - if (ret == format_map.end()) - return dnnl::memory::format_tag::undef; + if (ret == format_map.end()) { + GPU_DEBUG_INFO << "[clDNN] Unsupported conversion from "+ fmt.to_string() + " to onednn format_tag. Any tag will be used instead." << std::endl; + return dnnl::memory::format_tag::any; + } return ret->second; } @@ -233,8 +220,6 @@ dnnl::memory::desc layout_to_memory_desc(cldnn::layout l, dnnl::memory::format_t dnnl::memory::data_type dt = convert_data_type(l.data_type); dnnl::memory::format_tag fmt = target_fmt == dnnl::memory::format_tag::undef ? convert_data_format(l.format) : target_fmt; - - OPENVINO_ASSERT(fmt != dnnl::memory::format_tag::undef, "[GPU] Unexpected fmt: ", convert_data_format_string(l.format)); dnnl::memory::desc res(dims, dt, fmt); return res; diff --git a/src/plugins/intel_gpu/src/graph/layout_optimizer.cpp b/src/plugins/intel_gpu/src/graph/layout_optimizer.cpp index 63e1b04e5dbf91..fabb1e53329293 100644 --- a/src/plugins/intel_gpu/src/graph/layout_optimizer.cpp +++ b/src/plugins/intel_gpu/src/graph/layout_optimizer.cpp @@ -1535,28 +1535,32 @@ impl_types layout_optimizer::get_preferred_impl_type(program_node& node, format std::vector onednn_optimized_formats = { format::byxf, format::bzyxf, - format::b_fs_zyx_fsv32, - format::b_fs_yx_fsv32, - format::b_fs_zyx_fsv16, + format::b_fs_yx_fsv8, + format::b_fs_zyx_fsv8, format::b_fs_yx_fsv16, - format::bs_fs_zyx_bsv16_fsv16, + format::b_fs_zyx_fsv16, + format::b_fs_yx_fsv32, + format::b_fs_zyx_fsv32, + format::bs_fs_yx_bsv4_fsv2, + format::bs_fs_yx_bsv4_fsv4, + format::bs_fs_yx_bsv8_fsv2, + format::bs_fs_zyx_bsv8_fsv2, + format::bs_fs_yx_bsv8_fsv4, + format::bs_fs_zyx_bsv8_fsv4, + format::bs_fs_yx_bsv16_fsv2, + format::bs_fs_zyx_bsv16_fsv2, + format::bs_fs_yx_bsv16_fsv4, + format::bs_fs_zyx_bsv16_fsv4, + format::bs_fs_yx_bsv16_fsv8, + format::bs_fs_zyx_bsv16_fsv8, format::bs_fs_yx_bsv16_fsv16, - format::bs_fs_zyx_bsv16_fsv32, + format::bs_fs_zyx_bsv16_fsv16, format::bs_fs_yx_bsv16_fsv32, - format::bs_fs_zyx_bsv32_fsv16, + format::bs_fs_zyx_bsv16_fsv32, format::bs_fs_yx_bsv32_fsv16, - format::bs_fs_zyx_bsv32_fsv32, + format::bs_fs_zyx_bsv32_fsv16, format::bs_fs_yx_bsv32_fsv32, - format::bs_fs_zyx_bsv8_fsv4, - format::bs_fs_yx_bsv8_fsv4, - format::bs_fs_yx_bsv16_fsv4, - format::bs_fs_zyx_bsv16_fsv4, - format::bs_fs_yx_bsv16_fsv2, - format::bs_fs_zyx_bsv16_fsv2, - format::bs_fs_zyx_bsv8_fsv2, - format::bs_fs_yx_bsv8_fsv2, - format::bs_fs_yx_bsv4_fsv4, - format::bs_fs_yx_bsv4_fsv2, + format::bs_fs_zyx_bsv32_fsv32, }; impl_types impl_candidate = impl_types::onednn; @@ -1715,9 +1719,9 @@ format layout_optimizer::get_preferred_format(program_node& node) { expected = get_expected_format(node.as()); } else if (node.is_type()) { auto input_layout = node.get_input_layout(0); - if (input_layout.format.dimension() == 5 && - (input_layout.data_type == data_types::f32 || input_layout.data_type == data_types::f16)) - expected = format::bfzyx; + if (input_layout.data_type == data_types::f32 || input_layout.data_type == data_types::f16) { + expected = format::get_default_format(input_layout.get_rank()); + } } else if (node.is_type()) { // if the resample is 
in the last part of the network and there are no users using blocked format, // it is better to reorder to bfyx before resample is done. diff --git a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/include/batch_headers/fetch_data.cl b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/include/batch_headers/fetch_data.cl index eaf56e090abb77..d2cc9d3cc77a3d 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/include/batch_headers/fetch_data.cl +++ b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/include/batch_headers/fetch_data.cl @@ -293,6 +293,38 @@ inline uint get_b_fs_yx_fsv_index_safe(uint b, uint f, uint y, uint x, CAT(prefix, _PAD_BEFORE_SIZE_X), \ CAT(prefix, _PAD_AFTER_SIZE_X), 4) +#define GET_DATA_B_FS_YX_FSV8_INDEX(prefix, b, f, y, x) \ + get_b_fs_yx_fsv_index( \ + b, f, y, x, \ + CAT(prefix, _SIZE_X ), \ + CAT(prefix, _SIZE_Y), \ + CAT(prefix, _FEATURE_NUM), \ + CAT(prefix, _BATCH_NUM), \ + CAT(prefix, _PAD_BEFORE_BATCH_NUM), \ + CAT(prefix, _PAD_AFTER_BATCH_NUM), \ + CAT(prefix, _PAD_BEFORE_FEATURE_NUM), \ + CAT(prefix, _PAD_AFTER_FEATURE_NUM), \ + CAT(prefix, _PAD_BEFORE_SIZE_Y), \ + CAT(prefix, _PAD_AFTER_SIZE_Y), \ + CAT(prefix, _PAD_BEFORE_SIZE_X), \ + CAT(prefix, _PAD_AFTER_SIZE_X), 8) + +#define GET_DATA_B_FS_YX_FSV8_INDEX_SAFE(prefix, b, f, y, x) \ + get_b_fs_yx_fsv_index_safe( \ + b, f, y, x, \ + CAT(prefix, _SIZE_X ), \ + CAT(prefix, _SIZE_Y), \ + CAT(prefix, _FEATURE_NUM), \ + CAT(prefix, _BATCH_NUM), \ + CAT(prefix, _PAD_BEFORE_BATCH_NUM), \ + CAT(prefix, _PAD_AFTER_BATCH_NUM), \ + CAT(prefix, _PAD_BEFORE_FEATURE_NUM), \ + CAT(prefix, _PAD_AFTER_FEATURE_NUM), \ + CAT(prefix, _PAD_BEFORE_SIZE_Y), \ + CAT(prefix, _PAD_AFTER_SIZE_Y), \ + CAT(prefix, _PAD_BEFORE_SIZE_X), \ + CAT(prefix, _PAD_AFTER_SIZE_X), 8) + #define GET_DATA_B_FS_YX_FSV32_INDEX(prefix, b, f, y, x) \ get_b_fs_yx_fsv_index( \ b, f, y, x, \ @@ -482,6 +514,38 @@ inline uint get_fs_b_yx_fsv32_index_safe(uint b, uint f, uint y, uint x, CAT(prefix, _PAD_BEFORE_SIZE_X), \ CAT(prefix, _PAD_AFTER_SIZE_X), 4) +#define GET_DATA_B_FS_ZYX_FSV8_INDEX(prefix, b, f, z, y, x) \ + get_b_fs_zyx_fsv_index( \ + b, f, z, y, x, \ + CAT(prefix, _SIZE_X ), \ + CAT(prefix, _SIZE_Y), \ + CAT(prefix, _SIZE_Z), \ + CAT(prefix, _FEATURE_NUM), \ + CAT(prefix, _PAD_BEFORE_FEATURE_NUM), \ + CAT(prefix, _PAD_AFTER_FEATURE_NUM), \ + CAT(prefix, _PAD_BEFORE_SIZE_Z), \ + CAT(prefix, _PAD_AFTER_SIZE_Z), \ + CAT(prefix, _PAD_BEFORE_SIZE_Y), \ + CAT(prefix, _PAD_AFTER_SIZE_Y), \ + CAT(prefix, _PAD_BEFORE_SIZE_X), \ + CAT(prefix, _PAD_AFTER_SIZE_X), 8) + +#define GET_DATA_B_FS_ZYX_FSV8_INDEX_SAFE(prefix, b, f, z, y, x) \ + get_b_fs_zyx_fsv_index_safe( \ + b, f, z, y, x, \ + CAT(prefix, _SIZE_X), \ + CAT(prefix, _SIZE_Y), \ + CAT(prefix, _SIZE_Z), \ + CAT(prefix, _FEATURE_NUM), \ + CAT(prefix, _PAD_BEFORE_FEATURE_NUM), \ + CAT(prefix, _PAD_AFTER_FEATURE_NUM), \ + CAT(prefix, _PAD_BEFORE_SIZE_Z), \ + CAT(prefix, _PAD_AFTER_SIZE_Z), \ + CAT(prefix, _PAD_BEFORE_SIZE_Y), \ + CAT(prefix, _PAD_AFTER_SIZE_Y), \ + CAT(prefix, _PAD_BEFORE_SIZE_X), \ + CAT(prefix, _PAD_AFTER_SIZE_X), 8) + #define GET_DATA_B_FS_ZYX_FSV16_INDEX(prefix, b, f, z, y, x) \ get_b_fs_zyx_fsv_index( \ b, f, z, y, x, \ @@ -775,6 +839,38 @@ inline uint get_bs_fs_zyx_bsv_fsv_index(uint b, uint f, uint z, uint y, uint x, CAT(prefix, _PAD_BEFORE_SIZE_X), \ CAT(prefix, _PAD_AFTER_SIZE_X), 16, 4) +#define GET_DATA_BS_FS_ZYX_BSV16_FSV8_INDEX(prefix, b, f, z, y, x) \ + get_bs_fs_zyx_bsv_fsv_index( \ + b, f, z, y, x, \ + CAT(prefix, _SIZE_X), \ + CAT(prefix, 
_SIZE_Y), \ + CAT(prefix, _SIZE_Z), \ + CAT(prefix, _FEATURE_NUM), \ + CAT(prefix, _PAD_BEFORE_FEATURE_NUM), \ + CAT(prefix, _PAD_AFTER_FEATURE_NUM), \ + CAT(prefix, _PAD_BEFORE_SIZE_Z), \ + CAT(prefix, _PAD_AFTER_SIZE_Z), \ + CAT(prefix, _PAD_BEFORE_SIZE_Y), \ + CAT(prefix, _PAD_AFTER_SIZE_Y), \ + CAT(prefix, _PAD_BEFORE_SIZE_X), \ + CAT(prefix, _PAD_AFTER_SIZE_X), 16, 8) + +#define GET_DATA_BS_FS_YX_BSV16_FSV8_INDEX(prefix, b, f, y, x) \ + get_bs_fs_zyx_bsv_fsv_index( \ + b, f, 0, y, x, \ + CAT(prefix, _SIZE_X), \ + CAT(prefix, _SIZE_Y), \ + CAT(prefix, _SIZE_Z), \ + CAT(prefix, _FEATURE_NUM), \ + CAT(prefix, _PAD_BEFORE_FEATURE_NUM), \ + CAT(prefix, _PAD_AFTER_FEATURE_NUM), \ + CAT(prefix, _PAD_BEFORE_SIZE_Z), \ + CAT(prefix, _PAD_AFTER_SIZE_Z), \ + CAT(prefix, _PAD_BEFORE_SIZE_Y), \ + CAT(prefix, _PAD_AFTER_SIZE_Y), \ + CAT(prefix, _PAD_BEFORE_SIZE_X), \ + CAT(prefix, _PAD_AFTER_SIZE_X), 16, 8) + #define GET_DATA_BS_FS_ZYX_BSV8_FSV4_INDEX(prefix, b, f, z, y, x) \ get_bs_fs_zyx_bsv_fsv_index( \ b, f, z, y, x, \ @@ -1053,6 +1149,40 @@ inline uint get_bs_fs_zyx_bsv_fsv_index(uint b, uint f, uint z, uint y, uint x, CAT(prefix, _PAD_BEFORE_SIZE_X), \ CAT(prefix, _PAD_AFTER_SIZE_X), 16, 4) +#define GET_DATA_BS_FS_YX_BSV16_FSV8_INDEX_SAFE(prefix, b, f, y, x) \ + get_bs_fs_zyx_bsv_fsv_index_safe( \ + b, f, 0, y, x, \ + CAT(prefix, _SIZE_X), \ + CAT(prefix, _SIZE_Y), \ + CAT(prefix, _SIZE_Z), \ + CAT(prefix, _FEATURE_NUM), \ + CAT(prefix, _BATCH_NUM), \ + CAT(prefix, _PAD_BEFORE_FEATURE_NUM), \ + CAT(prefix, _PAD_AFTER_FEATURE_NUM), \ + CAT(prefix, _PAD_BEFORE_SIZE_Z), \ + CAT(prefix, _PAD_AFTER_SIZE_Z), \ + CAT(prefix, _PAD_BEFORE_SIZE_Y), \ + CAT(prefix, _PAD_AFTER_SIZE_Y), \ + CAT(prefix, _PAD_BEFORE_SIZE_X), \ + CAT(prefix, _PAD_AFTER_SIZE_X), 16, 8) + +#define GET_DATA_BS_FS_ZYX_BSV16_FSV8_INDEX_SAFE(prefix, b, f, z, y, x) \ + get_bs_fs_zyx_bsv_fsv_index_safe( \ + b, f, z, y, x, \ + CAT(prefix, _SIZE_X), \ + CAT(prefix, _SIZE_Y), \ + CAT(prefix, _SIZE_Z), \ + CAT(prefix, _FEATURE_NUM), \ + CAT(prefix, _BATCH_NUM), \ + CAT(prefix, _PAD_BEFORE_FEATURE_NUM), \ + CAT(prefix, _PAD_AFTER_FEATURE_NUM), \ + CAT(prefix, _PAD_BEFORE_SIZE_Z), \ + CAT(prefix, _PAD_AFTER_SIZE_Z), \ + CAT(prefix, _PAD_BEFORE_SIZE_Y), \ + CAT(prefix, _PAD_AFTER_SIZE_Y), \ + CAT(prefix, _PAD_BEFORE_SIZE_X), \ + CAT(prefix, _PAD_AFTER_SIZE_X), 16, 8) + #define GET_DATA_BS_FS_YX_BSV8_FSV4_INDEX_SAFE(prefix, b, f, y, x) \ get_bs_fs_zyx_bsv_fsv_index_safe( \ b, f, 0, y, x, \ diff --git a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/include/batch_headers/fetch_weights.cl b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/include/batch_headers/fetch_weights.cl index 37512661a91119..939ec0f495d5dd 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/include/batch_headers/fetch_weights.cl +++ b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/include/batch_headers/fetch_weights.cl @@ -16,6 +16,18 @@ isv \ ) +#define GET_FILTER_IS_OS_YX_OSV_ISV_INDEX(prefix, o, i, y, x, osv, isv) \ + get_os_is_zyx_isv_osv_index( \ + i, o, 0, y, x, \ + CAT(prefix, _SIZE_X), \ + CAT(prefix, _SIZE_Y), \ + 1, \ + CAT(prefix, _OFM_NUM), \ + CAT(prefix, _IFM_NUM), \ + isv, \ + osv \ + ) + #define GET_FILTER_IS_OS_YX_ISV_OSV_INDEX(prefix, o, i, y, x, osv, isv) \ get_is_os_zyx_isv_osv_index( \ o, i, 0, y, x, \ @@ -1447,6 +1459,22 @@ inline uint get_g_os_is_yx_osv_isv(uint g, uint o, uint i, uint y, uint x, x_size, y_size, 1, i_size, o_size, osv_size, isv_size); } +#define GET_FILTER_OS_IS_YX_OSV2_ISV16_INDEX(prefix, o, i, y, 
x) \ + get_g_os_is_yx_osv_isv( \ + 0, o, i, y, x, \ + CAT(prefix, _IFM_NUM), \ + CAT(prefix, _OFM_NUM), \ + CAT(prefix, _SIZE_X), \ + CAT(prefix, _SIZE_Y), 2, 16) + +#define GET_FILTER_OS_IS_YX_OSV4_ISV16_INDEX(prefix, o, i, y, x) \ + get_g_os_is_yx_osv_isv( \ + 0, o, i, y, x, \ + CAT(prefix, _IFM_NUM), \ + CAT(prefix, _OFM_NUM), \ + CAT(prefix, _SIZE_X), \ + CAT(prefix, _SIZE_Y), 4, 16) + #define GET_FILTER_OS_IS_YX_OSV8_ISV2_INDEX(prefix, o, i, y, x) \ get_g_os_is_yx_osv_isv( \ 0, o, i, y, x, \ @@ -1826,14 +1854,15 @@ inline uint get_g_os_zyx_is_osv_isv_index(uint g, uint o, uint i, uint z, uint y #define GET_FILTER_G_OS_ZYX_IS_OSV32_ISV16_INDEX(tensor, g, o, i, z, y, x) GET_FILTER_G_OS_ZYX_IS_OSV_ISV_INDEX(tensor, g, o, i, z, y, x, 32, 16) #define GET_FILTER_G_OS_ZYX_IS_OSV32_ISV32_INDEX(tensor, g, o, i, z, y, x) GET_FILTER_G_OS_ZYX_IS_OSV_ISV_INDEX(tensor, g, o, i, z, y, x, 32, 32) -#define GET_FILTER_O_IS_YX_ISV16_INDEX(prefix, o, i, y, x, isv) \ - CAT(prefix, _OFFSET) + \ - ((i) % (isv)) + \ - (o)*CAT(prefix, _OFM_PITCH) + \ - (isv)*( \ - (x)*CAT(prefix, _X_PITCH) + \ - (y)*CAT(prefix, _Y_PITCH) + \ - ((i) / (isv))*CAT(prefix, _IFM_PITCH) \ +#define GET_FILTER_O_IS_ZYX_ISV16_INDEX(prefix, o, i, z, y, x, isv) \ + CAT(prefix, _OFFSET) + \ + ((i) % (isv)) + \ + (o)*CAT(prefix, _OFM_PITCH) + \ + (isv)*( \ + (x)*CAT(prefix, _X_PITCH) + \ + (y)*CAT(prefix, _Y_PITCH) + \ + (z)*CAT(prefix, _Z_PITCH) + \ + ((i) / (isv))*CAT(prefix, _IFM_PITCH) \ ) #define GET_FILTER_OS_YXI_OSV16(prefix, o, i, y, x) \ diff --git a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/reorder_weights.cl b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/reorder_weights.cl index 0dfbd30862437c..9003f23ad1ec8d 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/reorder_weights.cl +++ b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/reorder_weights.cl @@ -280,8 +280,12 @@ inline uint FUNC(get_input_index)(uint g, uint o, uint i, uint z, uint y, uint x return GET_FILTER_OS_IYX_OSV_INDEX(INPUT0, o, i, y, x, 32); #elif defined INPUT0_LAYOUT_OS_IYX_OSV32__AI32 return GET_FILTER_OS_IYX_OSV_INDEX(INPUT0, o, i, y, x, 32); +#elif defined INPUT0_LAYOUT_O_IS_YX_ISV4 + return GET_FILTER_O_IS_ZYX_ISV16_INDEX(INPUT0, o, i, 0, y, x, 4); #elif defined INPUT0_LAYOUT_O_IS_YX_ISV16 - return GET_FILTER_O_IS_YX_ISV16_INDEX(INPUT0, o, i, y, x, 16); + return GET_FILTER_O_IS_ZYX_ISV16_INDEX(INPUT0, o, i, 0, y, x, 16); +#elif defined INPUT0_LAYOUT_O_IS_ZYX_ISV16 + return GET_FILTER_O_IS_ZYX_ISV16_INDEX(INPUT0, o, i, z, y, x, 16); #elif defined INPUT0_LAYOUT_IYX_OSV64 return GET_FILTER_OS_IYX_OSV_INDEX(INPUT0, o, i, y, x, 64); #elif defined INPUT0_LAYOUT_OS_IYX_OSV16_ROTATE_180 @@ -320,6 +324,8 @@ inline uint FUNC(get_input_index)(uint g, uint o, uint i, uint z, uint y, uint x return GET_FILTER_IS_OS_YX_ISV16_OSV16_INDEX(INPUT0, o, i, y, x, SUB_GROUP_SIZE); #elif defined INPUT0_LAYOUT_IS_OS_YX_ISV16_OSV8 return GET_FILTER_IS_OS_YX_ISV16_OSV8_INDEX(INPUT0, o, i, y, x, SUB_GROUP_SIZE); +#elif defined INPUT0_LAYOUT_IS_OS_YX_OSV8_ISV4 + return GET_FILTER_IS_OS_YX_OSV_ISV_INDEX(INPUT0, o, i, y, x, 8, 4); #elif defined INPUT0_LAYOUT_IS_OS_YX_ISV16_OSV4 return GET_FILTER_IS_OS_YX_ISV_OSV_INDEX(INPUT0, o, i, y, x, 16, 4); #elif defined INPUT0_LAYOUT_IS_OS_YX_ISV16_OSV2 @@ -384,6 +390,10 @@ inline uint FUNC(get_input_index)(uint g, uint o, uint i, uint z, uint y, uint x return GET_FILTER_G_OS_IYX_OSV16(INPUT0, g, o, i, y, x, 16); #elif defined INPUT0_LAYOUT_G_OS_IYX_OSV32 return GET_FILTER_G_OS_IYX_OSV16(INPUT0, g, o, i, y, 
x, 32); +#elif defined INPUT0_LAYOUT_GS_OIYX_GSV8 + return GET_FILTER_GS_OIYX_GSV16(INPUT0, g, o, i, y, x, 8); +#elif defined INPUT0_LAYOUT_GS_OIZYX_GSV8 + return GET_FILTER_GS_OIZYX_GSV16(INPUT0, g, o, i, z, y, x, 8); #elif defined INPUT0_LAYOUT_GS_OIYX_GSV16 return GET_FILTER_GS_OIYX_GSV16(INPUT0, g, o, i, y, x, 16); #elif defined INPUT0_LAYOUT_GS_OIZYX_GSV16 @@ -426,6 +436,10 @@ inline uint FUNC(get_input_index)(uint g, uint o, uint i, uint z, uint y, uint x return GET_FILTER_OS_IS_YX_OSV8_ISV4_INDEX(INPUT0, o, i, y, x); #elif defined INPUT0_LAYOUT_OS_IS_ZYX_OSV8_ISV4 return GET_FILTER_OS_IS_ZYX_OSV8_ISV4_INDEX(INPUT0, o, i, z, y, x); +#elif defined INPUT0_LAYOUT_OS_IS_YX_OSV2_ISV16 + return GET_FILTER_OS_IS_YX_OSV2_ISV16_INDEX(INPUT0, o, i, y, x); +#elif defined INPUT0_LAYOUT_OS_IS_YX_OSV4_ISV16 + return GET_FILTER_OS_IS_YX_OSV4_ISV16_INDEX(INPUT0, o, i, y, x); #elif defined INPUT0_LAYOUT_G_OS_IS_ZYX_OSV16_ISV16 return GET_FILTER_G_OS_IS_ZYX_OSV16_ISV16_INDEX(INPUT0, g, o, i, z, y, x); #elif defined INPUT0_LAYOUT_OS_IS_ZYX_OSV32_ISV16 @@ -487,8 +501,12 @@ inline uint FUNC(get_output_index)(uint g, uint o, uint i, uint z, uint y, uint return GET_FILTER_OS_IYX_OSV_INDEX(OUTPUT, o, i, y, x, 32); #elif defined OUTPUT_LAYOUT_OS_IYX_OSV64 return GET_FILTER_OS_IYX_OSV_INDEX(OUTPUT, o, i, y, x, 64); +#elif defined OUTPUT_LAYOUT_O_IS_YX_ISV4 + return GET_FILTER_O_IS_ZYX_ISV16_INDEX(OUTPUT, o, i, 0, y, x, 4); #elif defined OUTPUT_LAYOUT_O_IS_YX_ISV16 - return GET_FILTER_O_IS_YX_ISV16_INDEX(OUTPUT, o, i, y, x, 16); + return GET_FILTER_O_IS_ZYX_ISV16_INDEX(OUTPUT, o, i, 0, y, x, 16); +#elif defined OUTPUT_LAYOUT_O_IS_ZYX_ISV16 + return GET_FILTER_O_IS_ZYX_ISV16_INDEX(OUTPUT, o, i, z, y, x, 16); #elif defined OUTPUT_LAYOUT_OS_IYX_OSV16_ROTATE_180 return GET_FILTER_OS_IYX_OSV_ROTATE_180_INDEX(OUTPUT, o, i, y, x, SUB_GROUP_SIZE); #elif defined OUTPUT_LAYOUT_I_YXS_OS_YXSV2_OSV16 @@ -523,6 +541,10 @@ inline uint FUNC(get_output_index)(uint g, uint o, uint i, uint z, uint y, uint return GET_FILTER_OS_IS_YX_OSV8_ISV4_INDEX(OUTPUT, o, i, y, x); #elif defined OUTPUT_LAYOUT_OS_IS_ZYX_OSV8_ISV4 return GET_FILTER_OS_IS_ZYX_OSV8_ISV4_INDEX(OUTPUT, o, i, z, y, x); +#elif defined OUTPUT_LAYOUT_OS_IS_YX_OSV2_ISV16 + return GET_FILTER_OS_IS_YX_OSV2_ISV16_INDEX(OUTPUT, o, i, y, x); +#elif defined OUTPUT_LAYOUT_OS_IS_YX_OSV4_ISV16 + return GET_FILTER_OS_IS_YX_OSV4_ISV16_INDEX(OUTPUT, o, i, y, x); #elif defined OUTPUT_LAYOUT_OS_IS_YX_OSV32_ISV4_SWIZZLED_BY_2 return GET_FILTER_OS_IS_YX_OSV32_ISV4_SWIZZLED_BY_2_INDEX(OUTPUT, o, i, y, x); #elif defined OUTPUT_LAYOUT_OS_IS_YX_OSV32_ISV4 @@ -583,6 +605,8 @@ inline uint FUNC(get_output_index)(uint g, uint o, uint i, uint z, uint y, uint return GET_FILTER_IS_OS_YX_ISV16_OSV16_INDEX(OUTPUT, o, i, y, x, SUB_GROUP_SIZE); #elif defined OUTPUT_LAYOUT_IS_OS_YX_ISV16_OSV8 return GET_FILTER_IS_OS_YX_ISV16_OSV8_INDEX(OUTPUT, o, i, y, x, SUB_GROUP_SIZE); +#elif defined OUTPUT_LAYOUT_IS_OS_YX_OSV8_ISV4 + return GET_FILTER_IS_OS_YX_OSV_ISV_INDEX(OUTPUT, o, i, y, x, 8, 4); #elif defined OUTPUT_LAYOUT_IS_OS_YX_ISV16_OSV4 return GET_FILTER_IS_OS_YX_ISV_OSV_INDEX(OUTPUT, o, i, y, x, 16, 4); #elif defined OUTPUT_LAYOUT_IS_OS_YX_ISV16_OSV2 @@ -645,6 +669,10 @@ inline uint FUNC(get_output_index)(uint g, uint o, uint i, uint z, uint y, uint return GET_FILTER_G_OS_IYX_OSV16(OUTPUT, g, o, i, y, x, 16); #elif defined OUTPUT_LAYOUT_G_OS_IYX_OSV32 return GET_FILTER_G_OS_IYX_OSV16(OUTPUT, g, o, i, y, x, 32); +#elif defined OUTPUT_LAYOUT_GS_OIYX_GSV8 + return GET_FILTER_GS_OIYX_GSV16(OUTPUT, g, o, i, 
y, x, 8); +#elif defined OUTPUT_LAYOUT_GS_OIZYX_GSV8 + return GET_FILTER_GS_OIZYX_GSV16(OUTPUT, g, o, i, z, y, x, 8); #elif defined OUTPUT_LAYOUT_GS_OIYX_GSV16 return GET_FILTER_GS_OIYX_GSV16(OUTPUT, g, o, i, y, x, 16); #elif defined OUTPUT_LAYOUT_GS_OIZYX_GSV16 diff --git a/src/plugins/intel_gpu/src/kernel_selector/jitter.cpp b/src/plugins/intel_gpu/src/kernel_selector/jitter.cpp index 20d8e63ae29580..67c25b89025eb9 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/jitter.cpp +++ b/src/plugins/intel_gpu/src/kernel_selector/jitter.cpp @@ -440,10 +440,12 @@ JitDefinitions DataTensorJitConstant::GetDefinitions() const { layout == DataLayout::b_fs_yx_fsv32 || layout == DataLayout::b_fs_yx_fsv2 || layout == DataLayout::b_fs_yx_fsv4 || + layout == DataLayout::b_fs_yx_fsv8 || layout == DataLayout::fs_b_yx_fsv32 || layout == DataLayout::bs_fs_yx_bsv16_fsv16 || layout == DataLayout::bs_fs_yx_bsv16_fsv32 || layout == DataLayout::bs_fs_yx_bsv4_fsv4 || + layout == DataLayout::bs_fs_yx_bsv16_fsv8 || layout == DataLayout::bs_fs_yx_bsv16_fsv4 || layout == DataLayout::bs_fs_yx_bsv16_fsv2 || layout == DataLayout::bs_fs_yx_bsv8_fsv4 || @@ -508,6 +510,10 @@ JitDefinitions DataTensorJitConstant::GetDefinitions() const { index_func_val = "GET_DATA_BS_FS_ZYX_BSV16_FSV16_INDEX(" + _name + ", b, f, z, y, x)"; raw_index_func_val = "GET_DATA_BS_FS_ZYX_BSV16_FSV16_INDEX(" + _name + ", b, f, z, y, x)"; safe_index_func_val = "GET_DATA_BS_FS_ZYX_BSV16_FSV16_INDEX_SAFE(" + _name + ", b, f, z, y, x)"; + } else if (layout == DataLayout::bs_fs_zyx_bsv16_fsv8) { + index_func_val = "GET_DATA_BS_FS_ZYX_BSV16_FSV8_INDEX(" + _name + ", b, f, z, y, x)"; + raw_index_func_val = "GET_DATA_BS_FS_ZYX_BSV16_FSV8_INDEX(" + _name + ", b, f, z, y, x)"; + safe_index_func_val = "GET_DATA_BS_FS_ZYX_BSV16_FSV8_INDEX_SAFE(" + _name + ", b, f, z, y, x)"; } else if (layout == DataLayout::b_fs_zyx_fsv32) { index_func_val = "GET_DATA_B_FS_ZYX_FSV32_INDEX(" + _name + ", b, f, z, y, x)"; raw_index_func_val = "GET_DATA_B_FS_ZYX_FSV32_INDEX(" + _name + ", b, f, z, y, x)"; @@ -536,6 +542,10 @@ JitDefinitions DataTensorJitConstant::GetDefinitions() const { index_func_val = "GET_DATA_B_FS_ZYX_FSV4_INDEX(" + _name + ", b, f, z, y, x)"; raw_index_func_val = "GET_DATA_B_FS_ZYX_FSV4_INDEX(" + _name + ", b, f, z, y, x)"; safe_index_func_val = "GET_DATA_B_FS_ZYX_FSV4_INDEX_SAFE(" + _name + ", b, f, z, y, x)"; + } else if (layout == DataLayout::b_fs_zyx_fsv8) { + index_func_val = "GET_DATA_B_FS_ZYX_FSV8_INDEX(" + _name + ", b, f, z, y, x)"; + raw_index_func_val = "GET_DATA_B_FS_ZYX_FSV8_INDEX(" + _name + ", b, f, z, y, x)"; + safe_index_func_val = "GET_DATA_B_FS_ZYX_FSV8_INDEX_SAFE(" + _name + ", b, f, z, y, x)"; } else { index_func_val = "GET_DATA_INDEX_5D_RAW(" + _name + ", b, f, z, y, x)"; safe_index_func_val = "GET_DATA_INDEX_5D_RAW(" + _name + ", b, f, z, y, x)"; diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernel_selector_common.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernel_selector_common.cpp index 49e3e2a1f2edd4..6e192c92bfb808 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/kernel_selector_common.cpp +++ b/src/plugins/intel_gpu/src/kernel_selector/kernel_selector_common.cpp @@ -96,6 +96,7 @@ std::string toString(DataLayout l) { case kernel_selector::DataLayout::fyxb: return "FYXB"; case kernel_selector::DataLayout::b_fs_yx_fsv2: return "B_FS_YX_FSV2"; case kernel_selector::DataLayout::b_fs_yx_fsv4: return "B_FS_YX_FSV4"; + case kernel_selector::DataLayout::b_fs_yx_fsv8: return "B_FS_YX_FSV8"; case 
kernel_selector::DataLayout::b_fs_yx_fsv16: return "B_FS_YX_FSV16"; case kernel_selector::DataLayout::b_fs_yx_fsv32: return "B_FS_YX_FSV32"; case kernel_selector::DataLayout::b_fs_zyx_fsv32: return "B_FS_ZYX_FSV32"; @@ -109,6 +110,7 @@ std::string toString(DataLayout l) { case kernel_selector::DataLayout::bfwzyx: return "BFWZYX"; case kernel_selector::DataLayout::bfuwzyx: return "BFUWZYX"; case kernel_selector::DataLayout::bfvuwzyx: return "BFVUWZYX"; + case kernel_selector::DataLayout::b_fs_zyx_fsv8: return "B_FS_ZYX_FSV8"; case kernel_selector::DataLayout::b_fs_zyx_fsv16: return "B_FS_ZYX_FSV16"; case kernel_selector::DataLayout::bs_fs_yx_bsv16_fsv16: return "BS_FS_YX_BSV16_FSV16"; case kernel_selector::DataLayout::bs_fs_yx_bsv16_fsv32: return "BS_FS_YX_BSV16_FSV32"; @@ -117,6 +119,8 @@ std::string toString(DataLayout l) { case kernel_selector::DataLayout::bs_fs_yx_bsv4_fsv4: return "BS_FS_YX_BSV4_FSV4"; case kernel_selector::DataLayout::bs_fs_yx_bsv8_fsv4: return "BS_FS_YX_BSV8_FSV4"; case kernel_selector::DataLayout::bs_fs_zyx_bsv8_fsv4: return "BS_FS_ZYX_BSV8_FSV4"; + case kernel_selector::DataLayout::bs_fs_yx_bsv16_fsv8: return "BS_FS_YX_BSV16_FSV8"; + case kernel_selector::DataLayout::bs_fs_zyx_bsv16_fsv8: return "BS_FS_ZYX_BSV16_FSV8"; case kernel_selector::DataLayout::bs_fs_yx_bsv16_fsv4: return "BS_FS_YX_BSV16_FSV4"; case kernel_selector::DataLayout::bs_fs_zyx_bsv16_fsv4: return "BS_FS_ZYX_BSV16_FSV4"; case kernel_selector::DataLayout::bs_fs_yx_bsv16_fsv2: return "BS_FS_YX_BSV16_FSV2"; @@ -312,7 +316,9 @@ std::string toString(WeightsLayout layout) { case WeightsLayout::os_is_zyx_osv16_isv16: return "OS_IS_ZYX_OSV16_ISV16"; case WeightsLayout::os_is_zyx_osv32_isv16: return "OS_IS_ZYX_OSV32_ISV16"; case WeightsLayout::os_is_zyx_osv64_isv16: return "OS_IS_ZYX_OSV64_ISV16"; + case WeightsLayout::o_is_yx_isv4: return "O_IS_YX_ISV4"; case WeightsLayout::o_is_yx_isv16: return "O_IS_YX_ISV16"; + case WeightsLayout::o_is_zyx_isv16: return "O_IS_ZYX_ISV16"; case WeightsLayout::os_yxi_osv16: return "OS_YXI_OSV16"; case WeightsLayout::os_iyx_osv16: return "OS_IYX_OSV16"; case WeightsLayout::os_iyx_osv32: return "OS_IYX_OSV32"; @@ -364,6 +370,7 @@ std::string toString(WeightsLayout layout) { case WeightsLayout::os_is_zyx_isa8_osv8_isv2: return "OS_IS_ZYX_ISA8_OSV8_ISV2"; case WeightsLayout::is_os_yx_isa8_osv8_isv2: return "IS_OS_YX_ISA8_OSV8_ISV2"; case WeightsLayout::is_os_yx_isa8_osv8_isv4: return "IS_OS_YX_ISA8_OSV8_ISV4"; + case WeightsLayout::is_os_yx_osv8_isv4: return "IS_OS_YX_OSV8_ISV4"; case WeightsLayout::is_os_yx_osa8_isv16_osv4: return "IS_OS_YX_OSA8_ISV16_OSV4"; case WeightsLayout::os_is_yx_isa8_osv8_isv2: return "OS_IS_YX_ISA8_OSV8_ISV2"; case WeightsLayout::os_is_zyx_isv8_osv16_isv2: return "OS_IS_ZYX_ISV8_OSV16_ISV2"; @@ -374,6 +381,8 @@ std::string toString(WeightsLayout layout) { case WeightsLayout::os_is_yx_osv8_isv4: return "OS_IS_YX_OSV8_ISV4"; case WeightsLayout::os_is_zyx_osv8_isv4: return "OS_IS_ZYX_OSV8_ISV4"; case WeightsLayout::os_is_yx_osv8_isv2: return "OS_IS_YX_OSV8_ISV2"; + case WeightsLayout::os_is_yx_osv2_isv16: return "OS_IS_YX_OSV2_ISV16"; + case WeightsLayout::os_is_yx_osv4_isv16: return "OS_IS_YX_OSV4_ISV16"; case WeightsLayout::os_is_zyx_osv8_isv2: return "OS_IS_ZYX_OSV8_ISV2"; case WeightsLayout::goiyx: return "GOIYX"; case WeightsLayout::gioyx: return "GIOYX"; @@ -383,6 +392,8 @@ std::string toString(WeightsLayout layout) { case WeightsLayout::g_os_iyx_osv8: return "G_OS_IYX_OSV8"; case WeightsLayout::g_os_iyx_osv16: return "G_OS_IYX_OSV16"; case 
WeightsLayout::g_os_iyx_osv32: return "G_OS_IYX_OSV32"; + case WeightsLayout::gs_oiyx_gsv8: return "GS_OIYX_GSV8"; + case WeightsLayout::gs_oizyx_gsv8: return "GS_OIZYX_GSV8"; case WeightsLayout::gs_oiyx_gsv16: return "GS_OIYX_GSV16"; case WeightsLayout::gs_oizyx_gsv16: return "GS_OIZYX_GSV16"; case WeightsLayout::gs_oiyx_gsv32: return "GS_OIYX_GSV32"; diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernel_selector_utils.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernel_selector_utils.cpp index a2e0226367d7e7..bb3eb06e32b3b0 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/kernel_selector_utils.cpp +++ b/src/plugins/intel_gpu/src/kernel_selector/kernel_selector_utils.cpp @@ -252,12 +252,14 @@ std::vector GetOptimalLocalWorkGroupSizes(std::vector gws, const auto blocked_fsv_layout = output_layout == DataLayout::b_fs_yx_fsv2 || output_layout == DataLayout::b_fs_zyx_fsv2 || output_layout == DataLayout::b_fs_yx_fsv4 || output_layout == DataLayout::b_fs_zyx_fsv4 || + output_layout == DataLayout::b_fs_yx_fsv8 || output_layout == DataLayout::b_fs_zyx_fsv8 || output_layout == DataLayout::b_fs_yx_fsv16 || output_layout == DataLayout::b_fs_zyx_fsv16 || output_layout == DataLayout::b_fs_yx_fsv32 || output_layout == DataLayout::b_fs_zyx_fsv32 || output_layout == DataLayout::fs_b_yx_fsv32; auto blocked_bsv_fsv_layout = output_layout == DataLayout::bs_fs_yx_bsv16_fsv2 || output_layout == DataLayout::bs_fs_zyx_bsv16_fsv2 || output_layout == DataLayout::bs_fs_yx_bsv16_fsv4 || output_layout == DataLayout::bs_fs_zyx_bsv16_fsv4 || + output_layout == DataLayout::bs_fs_yx_bsv16_fsv8 || output_layout == DataLayout::bs_fs_zyx_bsv16_fsv8 || output_layout == DataLayout::bs_fs_yx_bsv16_fsv16 || output_layout == DataLayout::bs_fs_yx_bsv16_fsv32 || output_layout == DataLayout::bs_fs_yx_bsv32_fsv16 || output_layout == DataLayout::bs_fs_yx_bsv32_fsv32 || output_layout == DataLayout::bs_fs_zyx_bsv16_fsv16 || output_layout == DataLayout::bs_fs_zyx_bsv16_fsv32 || @@ -318,10 +320,10 @@ std::vector GetOptimalLocalWorkGroupSizes(std::vector gws, const break; } } else if (blocked_fsv_layout) { - if (output_layout == DataLayout::b_fs_yx_fsv2 || output_layout == DataLayout::b_fs_yx_fsv4 || + if (output_layout == DataLayout::b_fs_yx_fsv2 || output_layout == DataLayout::b_fs_yx_fsv4 || output_layout == DataLayout::b_fs_yx_fsv8 || output_layout == DataLayout::b_fs_yx_fsv16 || output_layout == DataLayout::b_fs_yx_fsv32) { layout_order = { f, x, y, b, z, w, u, v }; - } else if (output_layout == DataLayout::b_fs_zyx_fsv2 || output_layout == DataLayout::b_fs_zyx_fsv4 || + } else if (output_layout == DataLayout::b_fs_zyx_fsv2 || output_layout == DataLayout::b_fs_zyx_fsv4 || output_layout == DataLayout::b_fs_zyx_fsv8 || output_layout == DataLayout::b_fs_zyx_fsv16 || output_layout == DataLayout::b_fs_zyx_fsv32) { layout_order = { f, x, y, z, b, w, u, v }; } else { // output_layout == DataLayout::fs_b_yx_fsv32 @@ -453,13 +455,18 @@ bool CheckInputsOutputNoPitchSameDims(const base_params& params) { {DataLayout::b_fs_zyx_fsv16, {1, 16}}, {DataLayout::b_fs_yx_fsv32, {1, 32}}, {DataLayout::b_fs_zyx_fsv32, {1, 32}}, + {DataLayout::bs_fs_yx_bsv16_fsv8, {16, 8}}, {DataLayout::bs_fs_yx_bsv16_fsv16, {16, 16}}, {DataLayout::bs_fs_yx_bsv16_fsv32, {16, 32}}, + {DataLayout::bs_fs_zyx_bsv16_fsv8, {16, 8}}, {DataLayout::bs_fs_zyx_bsv16_fsv16, {16, 16}}, {DataLayout::bs_fs_zyx_bsv16_fsv32, {16, 32}}, {DataLayout::bs_f_bsv8__af8, {8, 8}}, {DataLayout::bs_f_bsv16__af8, {16, 8}}, {DataLayout::b_fs_yx_fsv4, {1, 4}}, + 
{DataLayout::b_fs_zyx_fsv4, {1, 4}}, + {DataLayout::b_fs_yx_fsv8, {1, 8}}, + {DataLayout::b_fs_zyx_fsv8, {1, 8}}, {DataLayout::fs_b_yx_fsv32, {1, 32}}, {DataLayout::b_fs_yx_32fp, {1, 32}}, {DataLayout::bs_fs_yx_bsv32_fsv16, {32, 16}}, diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/reorder/reorder_kernel_base.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/reorder/reorder_kernel_base.cpp index 1653ef019cbaf7..7fecbc14345cd0 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/reorder/reorder_kernel_base.cpp +++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/reorder/reorder_kernel_base.cpp @@ -12,6 +12,8 @@ namespace kernel_selector { inline uint32_t SubGroupSize(WeightsLayout l) { switch (l) { + case WeightsLayout::o_is_yx_isv16: + case WeightsLayout::o_is_zyx_isv16: case WeightsLayout::os_iyx_osv16: case WeightsLayout::os_iyx_osv32: case WeightsLayout::os_iyx_osv64: @@ -50,6 +52,8 @@ inline uint32_t SubGroupSize(WeightsLayout l) { case WeightsLayout::iy_xs_os_xsv2_osv8__ao32: case WeightsLayout::giy_xs_os_xsv2_osv8__ao32: case WeightsLayout::g_os_iyx_osv8: + case WeightsLayout::gs_oiyx_gsv8: + case WeightsLayout::gs_oizyx_gsv8: return 8; default: return 1; diff --git a/src/plugins/intel_gpu/src/kernel_selector/tensor_type.cpp b/src/plugins/intel_gpu/src/kernel_selector/tensor_type.cpp index 545f01c6f3801e..b352059d850dea 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/tensor_type.cpp +++ b/src/plugins/intel_gpu/src/kernel_selector/tensor_type.cpp @@ -27,16 +27,20 @@ DataTensor::DataChannelArray DataTensor::dataChannelArray {{ { DataLayout::fyxb, { 1, 2, -1, -1, -1, -1, 3, 0 } }, { DataLayout::b_fs_yx_fsv2, { 0, 1, -1, -1, -1, -1, 2, 3 } }, { DataLayout::b_fs_yx_fsv4, { 0, 1, -1, -1, -1, -1, 2, 3 } }, + { DataLayout::b_fs_yx_fsv8, { 0, 1, -1, -1, -1, -1, 2, 3 } }, { DataLayout::b_fs_yx_fsv16, { 0, 1, -1, -1, -1, -1, 2, 3 } }, { DataLayout::b_fs_yx_fsv32, { 0, 1, -1, -1, -1, -1, 2, 3 } }, { DataLayout::b_fs_zyx_fsv2, { 0, 1, 2, -1, -1, -1, 3, 4 } }, { DataLayout::b_fs_zyx_fsv4, { 0, 1, 2, -1, -1, -1, 3, 4 } }, + { DataLayout::b_fs_zyx_fsv8, { 0, 1, 2, -1, -1, -1, 3, 4 } }, { DataLayout::b_fs_zyx_fsv16, { 0, 1, 2, -1, -1, -1, 3, 4 } }, { DataLayout::b_fs_zyx_fsv32, { 0, 1, 2, -1, -1, -1, 3, 4 } }, { DataLayout::bs_fs_yx_bsv16_fsv32, { 0, 1, -1, -1, -1, -1, 2, 3 } }, { DataLayout::bs_fs_zyx_bsv16_fsv32, { 0, 1, 2, -1, -1, -1, 3, 4 } }, { DataLayout::bs_fs_zyx_bsv16_fsv16, { 0, 1, 2, -1, -1, -1, 3, 4 } }, { DataLayout::bs_fs_yx_bsv16_fsv16, { 0, 1, -1, -1, -1, -1, 2, 3 } }, + { DataLayout::bs_fs_zyx_bsv16_fsv8, { 0, 1, 2, -1, -1, -1, 3, 4 } }, + { DataLayout::bs_fs_yx_bsv16_fsv8, { 0, 1, -1, -1, -1, -1, 2, 3 } }, { DataLayout::bs_fs_yx_bsv4_fsv4, { 0, 1, -1, -1, -1, -1, 2, 3 } }, { DataLayout::bs_fs_yx_bsv8_fsv4, { 0, 1, -1, -1, -1, -1, 2, 3 } }, { DataLayout::bs_fs_zyx_bsv8_fsv4, { 0, 1, 2, -1, -1, -1, 3, 4 } }, @@ -82,7 +86,9 @@ WeightsTensor::WeightsChannelArray WeightsTensor::weightsChannelArray {{ { WeightsLayout::os_iyx_osv32__ai32, { 0, 1, -1, 2, 3, -1 } }, { WeightsLayout::os_iyx_osv64, { 0, 1, -1, 2, 3, -1 } }, { WeightsLayout::os_iyx_osv16_rotate_180, { 0, 1, -1, 2, 3, -1 } }, + { WeightsLayout::o_is_yx_isv4, { 0, 1, -1, 2, 3, -1 } }, { WeightsLayout::o_is_yx_isv16, { 0, 1, -1, 2, 3, -1 } }, + { WeightsLayout::o_is_zyx_isv16, { 0, 1, 2, 3, 4, -1 } }, { WeightsLayout::os_yxi_osv16, { 1, 2, -1, 0, 3, -1 } }, { WeightsLayout::os_i_osv8__ai8, { -1, -1, -1, 0, 1, -1 } }, { WeightsLayout::os_i_osv16__ai8, { -1, -1, -1, 0, 1, -1 } }, @@ -132,6 
+138,8 @@ WeightsTensor::WeightsChannelArray WeightsTensor::weightsChannelArray {{ { WeightsLayout::is_o32_yx_isv32_swizzled_by_4, { 1, 2, -1, 0, 3, -1 } }, { WeightsLayout::os_is_y_x8_osv8_isv4, { 0, 1, -1, 2, 3, -1 } }, { WeightsLayout::os_is_y_x8_osv8_isv4_swizzled_by_4, { 0, 1, -1, 2, 3, -1 } }, + { WeightsLayout::os_is_yx_osv2_isv16, { 0, 1, -1, 2, 3, -1 } }, + { WeightsLayout::os_is_yx_osv4_isv16, { 0, 1, -1, 2, 3, -1 } }, { WeightsLayout::os_is_yx_osv8_isv4, { 0, 1, -1, 2, 3, -1 } }, { WeightsLayout::os_is_zyx_osv8_isv4, { 0, 1, 2, 3, 4, -1 } }, { WeightsLayout::os_is_yx_osv8_isv2, { 0, 1, -1, 2, 3, -1 } }, @@ -145,6 +153,7 @@ WeightsTensor::WeightsChannelArray WeightsTensor::weightsChannelArray {{ { WeightsLayout::os_is_yx_osv32_isv32p, { 0, 1, -1, 2, 3, -1 } }, { WeightsLayout::os_is_zyx_isv16_osv16, { 0, 1, 2, 3, 4, -1 } }, { WeightsLayout::os_is_yx_isv16_osv16, { 0, 1, -1, 2, 3, -1 } }, + { WeightsLayout::is_os_yx_osv8_isv4, { 0, 1, -1, 3, 2, -1 } }, { WeightsLayout::is_os_zyx_isv16_osv16, { 0, 1, 2, 4, 3, -1 } }, { WeightsLayout::is_os_yx_isv16_osv16, { 0, 1, -1, 3, 2, -1 } }, { WeightsLayout::is_os_yx_isv16_osv8, { 0, 1, -1, 3, 2, -1 } }, @@ -180,6 +189,8 @@ WeightsTensor::WeightsChannelArray WeightsTensor::weightsChannelArray {{ { WeightsLayout::g_os_iyx_osv8, { 0, 1, -1, 2, 3, 4 } }, { WeightsLayout::g_os_iyx_osv16, { 0, 1, -1, 2, 3, 4 } }, { WeightsLayout::g_os_iyx_osv32, { 0, 1, -1, 2, 3, 4 } }, + { WeightsLayout::gs_oiyx_gsv8, { 0, 1, -1, 2, 3, 4 } }, + { WeightsLayout::gs_oizyx_gsv8, { 0, 1, 2, 3, 4, 5 } }, { WeightsLayout::gs_oiyx_gsv16, { 0, 1, -1, 2, 3, 4 } }, { WeightsLayout::gs_oizyx_gsv16, { 0, 1, 2, 3, 4, 5 } }, { WeightsLayout::gs_oiyx_gsv32, { 0, 1, -1, 2, 3, 4 } }, @@ -233,6 +244,10 @@ NDims DataTensor::GetSimpleDims(const std::vector& d, DataLayout l) { newDims[0] = RoundUp(newDims[0], 8); newDims[1] = RoundUp(newDims[1], 16); break; + case b_fs_yx_fsv8: + assert(newDims.size() == 4); + newDims[2] = RoundUp(newDims[2], 8); + break; case b_fs_yx_fsv16: assert(newDims.size() == 4); newDims[2] = RoundUp(newDims[2], 16); @@ -253,10 +268,19 @@ NDims DataTensor::GetSimpleDims(const std::vector& d, DataLayout l) { assert(newDims.size() == 4); newDims[3] = RoundUp(newDims[3], 32); break; + case b_fs_zyx_fsv8: + assert(newDims.size() == 5); + newDims[3] = RoundUp(newDims[3], 8); + break; case b_fs_zyx_fsv16: assert(newDims.size() == 5); newDims[3] = RoundUp(newDims[3], 16); break; + case bs_fs_yx_bsv16_fsv8: + assert(newDims.size() == 4); + newDims[2] = RoundUp(newDims[2], 8); + newDims[3] = RoundUp(newDims[3], 16); + break; case bs_fs_yx_bsv16_fsv16: assert(newDims.size() == 4); newDims[2] = RoundUp(newDims[2], 16); @@ -277,6 +301,11 @@ NDims DataTensor::GetSimpleDims(const std::vector& d, DataLayout l) { newDims[3] = RoundUp(newDims[3], 16); newDims[4] = RoundUp(newDims[4], 16); break; + case bs_fs_zyx_bsv16_fsv8: + assert(newDims.size() == 5); + newDims[3] = RoundUp(newDims[3], 8); + newDims[4] = RoundUp(newDims[4], 16); + break; case bs_fs_yx_bsv4_fsv4: assert(newDims.size() == 4); newDims[2] = RoundUp(newDims[2], 4); @@ -588,10 +617,18 @@ NDims WeightsTensor::GetSimpleDims(const std::vector& d, WeightsLayout l // TODO: It's not the right pitches. 
it's here in order to calculate physical size switch (l) { + case o_is_yx_isv4: + assert(newDims.size() == 4); + newDims[2] = RoundUp(newDims[2], 4); + break; case o_is_yx_isv16: assert(newDims.size() == 4); newDims[2] = RoundUp(newDims[2], 16); break; + case o_is_zyx_isv16: + assert(newDims.size() == 5); + newDims[2] = RoundUp(newDims[3], 16); + break; case os_iyx_osv16: case os_yxi_osv16: case os_iyx_osv16_rotate_180: @@ -772,6 +809,11 @@ NDims WeightsTensor::GetSimpleDims(const std::vector& d, WeightsLayout l newDims[3] = RoundUp(newDims[3], 16); newDims[4] = RoundUp(newDims[4], 16); break; + case is_os_yx_osv8_isv4: + assert(newDims.size() == 4); + newDims[2] = RoundUp(newDims[2], 8); + newDims[3] = RoundUp(newDims[3], 4); + break; case is_os_yx_isv16_osv16: assert(newDims.size() == 4); newDims[2] = RoundUp(newDims[2], 16); @@ -806,6 +848,16 @@ NDims WeightsTensor::GetSimpleDims(const std::vector& d, WeightsLayout l assert(newDims.size() == 5); newDims[3] = RoundUp(newDims[0], 16); break; + case os_is_yx_osv2_isv16: + assert(newDims.size() == 4); + newDims[2] = RoundUp(newDims[2], 16); + newDims[3] = RoundUp(newDims[3], 2); + break; + case os_is_yx_osv4_isv16: + assert(newDims.size() == 4); + newDims[2] = RoundUp(newDims[2], 16); + newDims[3] = RoundUp(newDims[3], 4); + break; case os_is_yx_osv8_isv4: assert(newDims.size() == 4); newDims[2] = RoundUp(newDims[2], 4); @@ -827,6 +879,14 @@ NDims WeightsTensor::GetSimpleDims(const std::vector& d, WeightsLayout l assert(newDims.size() == 5); newDims[3] = RoundUp(newDims[3], 32); break; + case gs_oiyx_gsv8: + assert(newDims.size() == 5); + newDims[4] = RoundUp(newDims[4], 8); + break; + case gs_oizyx_gsv8: + assert(newDims.size() == 6); + newDims[5] = RoundUp(newDims[5], 8); + break; case gs_oiyx_gsv16: assert(newDims.size() == 5); newDims[4] = RoundUp(newDims[4], 16); diff --git a/src/plugins/intel_gpu/src/kernel_selector/tensor_type.h b/src/plugins/intel_gpu/src/kernel_selector/tensor_type.h index 85cec1fe17cf42..3d54dfabade1c0 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/tensor_type.h +++ b/src/plugins/intel_gpu/src/kernel_selector/tensor_type.h @@ -40,6 +40,8 @@ enum DataLayout { b_fs_zyx_fsv2, b_fs_yx_fsv4, // reordering format for swizzled input for convolution using IMAD b_fs_zyx_fsv4, + b_fs_yx_fsv8, + b_fs_zyx_fsv8, b_fs_yx_fsv16, // 3D+batch b_fs_zyx_fsv16, // batch, feature, 3D spatial. Blocks of 16 input channels b_fs_yx_fsv32, // 3D+batch @@ -53,6 +55,8 @@ enum DataLayout { bs_fs_yx_bsv8_fsv2, // batch, feature, 2D spatial. Blocks of 8 batch and 2 channels bs_fs_zyx_bsv8_fsv4, // batch, feature, 3D spatial. Blocks of 8 batch and 4 channels bs_fs_zyx_bsv8_fsv2, // batch, feature, 3D spatial. Blocks of 8 batch and 2 channels + bs_fs_yx_bsv16_fsv8, // batch, feature, 2D spatial. Blocks of 16 batch and 8 channels + bs_fs_zyx_bsv16_fsv8, // batch, feature, 3D spatial. Blocks of 16 batch and 8 channels bs_fs_yx_bsv16_fsv4, // batch, feature, 2D spatial. Blocks of 16 batch and 4 channels bs_fs_zyx_bsv16_fsv4, // batch, feature, 3D spatial. Blocks of 16 batch and 4 channels bs_fs_yx_bsv16_fsv2, // batch, feature, 2D spatial. 
Blocks of 16 batch and 2 channels @@ -90,7 +94,9 @@ enum WeightsLayout { oxiy, iyxo, yxio, + o_is_yx_isv4, o_is_yx_isv16, + o_is_zyx_isv16, os_yxi_osv16, os_iyx_osv16, os_iyx_osv32, @@ -156,6 +162,7 @@ enum WeightsLayout { os_is_yx_isa8_osv8_isv2, is_os_yx_isa8_osv8_isv2, is_os_yx_isa8_osv8_isv4, + is_os_yx_osv8_isv4, is_os_yx_osa8_isv16_osv4, is_os_yx_isa2_osa8_isv8_osv2, g_os_is_yx_osa2_isa8_osv16_isv4, @@ -179,6 +186,8 @@ enum WeightsLayout { os_is_yx_osv32_isv4_swizzled_by_2, // weights for bfyx -> b_fs_yx_fsv32 convolution using IMAD with swizzled ofm (0, 2, 4..), (1, 3, 5...) os_is_yx_osv32_isv4, // weights for bfyx -> b_fs_yx_fsv{32,16} convolution using IMAD os_is_zyx_osv32_isv4, // weights for bfzyx -> b_fs_zyx_fsv16 convolution using IMAD + os_is_yx_osv2_isv16, + os_is_yx_osv4_isv16, oizyx, iozyx, os_is_yx_osv32_isv32p, // 2 blocks: 32 packed binary in channels and 32 output channels @@ -200,6 +209,8 @@ enum WeightsLayout { g_os_iyx_osv8, g_os_iyx_osv16, g_os_iyx_osv32, + gs_oiyx_gsv8, + gs_oizyx_gsv8, gs_oiyx_gsv16, gs_oizyx_gsv16, gs_oiyx_gsv32, diff --git a/src/plugins/intel_gpu/src/runtime/format.cpp b/src/plugins/intel_gpu/src/runtime/format.cpp index af268e94daae76..bd31583493ea71 100644 --- a/src/plugins/intel_gpu/src/runtime/format.cpp +++ b/src/plugins/intel_gpu/src/runtime/format.cpp @@ -31,10 +31,12 @@ static const std::map format_traits_map { FMT_TRAITS(bxfy, 1, 1, 2, 0, {0, 3, 1, 2}, "bxfy", "bfxy", {}), FMT_TRAITS(b_fs_yx_fsv2, 1, 1, 2, 0, {0, 1, 2, 3}, "bfyx", "bfxy", {{1, 2}}), FMT_TRAITS(b_fs_yx_fsv4, 1, 1, 2, 0, {0, 1, 2, 3}, "bfyx", "bfxy", {{1, 4}}), + FMT_TRAITS(b_fs_yx_fsv8, 1, 1, 2, 0, {0, 1, 2, 3}, "bfyx", "bfxy", {{1, 8}}), FMT_TRAITS(b_fs_yx_fsv16, 1, 1, 2, 0, {0, 1, 2, 3}, "bfyx", "bfxy", {{1, 16}}), FMT_TRAITS(b_fs_yx_fsv32, 1, 1, 2, 0, {0, 1, 2, 3}, "bfyx", "bfxy", {{1, 32}}), FMT_TRAITS(b_fs_zyx_fsv2, 1, 1, 3, 0, {0, 1, 2, 3, 4}, "bfzyx", "bfxyz", {{1, 2}}), FMT_TRAITS(b_fs_zyx_fsv4, 1, 1, 3, 0, {0, 1, 2, 3, 4}, "bfzyx", "bfxyz", {{1, 4}}), + FMT_TRAITS(b_fs_zyx_fsv8, 1, 1, 3, 0, {0, 1, 2, 3, 4}, "bfzyx", "bfxyz", {{1, 8}}), FMT_TRAITS(b_fs_zyx_fsv32, 1, 1, 3, 0, {0, 1, 2, 3, 4}, "bfzyx", "bfxyz", {{1, 32}}), FMT_TRAITS(bs_fs_fsv8_bsv8, 1, 1, 0, 0, {0, 1}, "bf", "bf", {{0, 8}, {1, 8}}), FMT_TRAITS(bs_fs_fsv8_bsv16, 1, 1, 0, 0, {0, 1}, "bf", "bf", {{0, 16}, {1, 8}}), @@ -55,6 +57,8 @@ static const std::map format_traits_map { FMT_TRAITS(bs_fs_yx_bsv4_fsv4, 1, 1, 2, 0, {0, 1, 2, 3}, "bfyx", "bfxy", {{0, 4 }, {1, 4}}), FMT_TRAITS(bs_fs_yx_bsv8_fsv4, 1, 1, 2, 0, {0, 1, 2, 3}, "bfyx", "bfxy", {{0, 8 }, {1, 4}}), FMT_TRAITS(bs_fs_zyx_bsv8_fsv4, 1, 1, 3, 0, {0, 1, 2, 3, 4}, "bfzyx", "bfxyz", {{0, 8 }, {1, 4}}), + FMT_TRAITS(bs_fs_yx_bsv16_fsv8, 1, 1, 2, 0, {0, 1, 2, 3}, "bfyx", "bfxy", {{0, 16 }, {1, 8}}), + FMT_TRAITS(bs_fs_zyx_bsv16_fsv8, 1, 1, 3, 0, {0, 1, 2, 3, 4}, "bfzyx", "bfxyz", {{0, 16 }, {1, 8}}), FMT_TRAITS(bs_fs_yx_bsv16_fsv4, 1, 1, 2, 0, {0, 1, 2, 3}, "bfyx", "bfxy", {{0, 16 }, {1, 4}}), FMT_TRAITS(bs_fs_zyx_bsv16_fsv4, 1, 1, 3, 0, {0, 1, 2, 3, 4}, "bfzyx", "bfxyz", {{0, 16 }, {1, 4}}), FMT_TRAITS(bs_fs_yx_bsv16_fsv2, 1, 1, 2, 0, {0, 1, 2, 3}, "bfyx", "bfxy", {{0, 16 }, {1, 2}}), @@ -81,7 +85,9 @@ static const std::map format_traits_map { FMT_TRAITS(oizyx, 1, 1, 3, 0, {0, 1, 2, 3, 4}, "oizyx", "oixyz", {}), FMT_TRAITS(iozyx, 1, 1, 3, 0, {1, 0, 2, 3, 4}, "iozyx", "oixyz", {}), FMT_TRAITS(os_is_yx_isv16_osv16, 1, 1, 2, 0, {0, 1, 2, 3}, "oiyx", "oixy", {{1, 16}, {0, 16}}), + FMT_TRAITS(o_is_yx_isv4, 1, 1, 2, 0, {0, 1, 2, 3}, "oiyx", "oixy", {{1, 
4}}), FMT_TRAITS(o_is_yx_isv16, 1, 1, 2, 0, {0, 1, 2, 3}, "oiyx", "oixy", {{1, 16}}), + FMT_TRAITS(o_is_zyx_isv16, 1, 1, 3, 0, {0, 1, 2, 3, 4}, "oizyx", "oixyz", {{1, 16}}), FMT_TRAITS(os_yxi_osv16, 1, 1, 2, 0, {0, 2, 3, 1}, "oyxi", "oixy", {{0, 16}}), FMT_TRAITS(os_iyx_osv16, 1, 1, 2, 0, {0, 1, 2, 3}, "oiyx", "oixy", {{0, 16}}), FMT_TRAITS(os_iyx_osv32, 1, 1, 2, 0, {0, 1, 2, 3}, "oiyx", "oixy", {{0, 32}}), @@ -114,6 +120,8 @@ static const std::map format_traits_map { FMT_TRAITS(is_o32_yx_isv32_swizzled_by_4, 1, 1, 2, 0, {0, 1, 2, 3}, "oyxi", "oixy", {{0, 32}, {1, 32}}), FMT_TRAITS(os_is_y_x8_osv8_isv4, 1, 1, 2, 0, {0, 1, 2, 3}, "oyxi", "oixy", {{0, 8}, {1, 4}, {2, 8}}), FMT_TRAITS(os_is_y_x8_osv8_isv4_swizzled_by_4, 1, 1, 2, 0, {0, 1, 2, 3}, "oyxi", "oixy", {{0, 8}, {1, 4}, {2, 8}}), + FMT_TRAITS(os_is_yx_osv2_isv16, 1, 1, 2, 0, {0, 1, 2, 3}, "oiyx", "oixy", {{0, 2}, {1, 16}}), + FMT_TRAITS(os_is_yx_osv4_isv16, 1, 1, 2, 0, {0, 1, 2, 3}, "oiyx", "oixy", {{0, 4}, {1, 16}}), FMT_TRAITS(os_is_yx_osv16_isv4, 1, 1, 2, 0, {0, 1, 2, 3}, "oiyx", "oixy", {{0, 16}, {1, 4}}), FMT_TRAITS(os_is_yx_osv8_isv4, 1, 1, 2, 0, {0, 1, 2, 3}, "oiyx", "oixy", {{0, 8}, {1, 4}}), FMT_TRAITS(os_is_zyx_osv8_isv4, 1, 1, 3, 0, {0, 1, 2, 3, 4}, "oizyx", "oixyz", {{0, 8}, {1, 4}}), @@ -126,6 +134,7 @@ static const std::map format_traits_map { FMT_TRAITS(os_is_yx_osv32_isv32p, 1, 1, 1, 0, {0, 1, 2, 3}, "oiyx", "oixy", {{0, 32}, {1, 32}}), FMT_TRAITS(os_is_zyx_isv16_osv16, 1, 1, 3, 0, {0, 1, 2, 3, 4}, "oizyx", "oixyz", {{1, 16}, {0, 16}}), FMT_TRAITS(is_os_zyx_isv16_osv16, 1, 1, 3, 0, {1, 0, 2, 3, 4}, "iozyx", "oixyz", {{1, 16}, {0, 16}}), + FMT_TRAITS(is_os_yx_osv8_isv4, 1, 1, 2, 0, {1, 0, 2, 3}, "ioyx", "oixy", {{0, 8}, {1, 4}}), FMT_TRAITS(is_os_yx_isv16_osv16, 1, 1, 2, 0, {1, 0, 2, 3}, "ioyx", "oixy", {{1, 16}, {0, 16}}), FMT_TRAITS(is_os_yx_isv16_osv8, 1, 1, 2, 0, {1, 0, 2, 3}, "ioyx", "oixy", {{1, 16}, {0, 8}}), FMT_TRAITS(is_os_yx_isv16_osv4, 1, 1, 2, 0, {1, 0, 2, 3}, "ioyx", "oixy", {{1, 16}, {0, 4}}), @@ -174,6 +183,8 @@ static const std::map format_traits_map { FMT_TRAITS(g_os_iyx_osv8, 1, 1, 2, 1, {0, 1, 2, 3, 4}, "goiyx", "oixy????g", {{0, 8}}), FMT_TRAITS(g_os_iyx_osv16, 1, 1, 2, 1, {0, 1, 2, 3, 4}, "goiyx", "oixy????g", {{0, 16}}), FMT_TRAITS(g_os_iyx_osv32, 1, 1, 2, 1, {0, 1, 2, 3, 4}, "goiyx", "oixy????g", {{0, 32}}), + FMT_TRAITS(gs_oiyx_gsv8, 1, 1, 2, 1, {0, 1, 2, 3, 4}, "goiyx", "oixy????g", {{8, 8}}), + FMT_TRAITS(gs_oizyx_gsv8, 1, 1, 3, 1, {0, 1, 2, 3, 4, 5}, "goizyx", "oixyz???g", {{8, 8}}), FMT_TRAITS(gs_oiyx_gsv16, 1, 1, 2, 1, {0, 1, 2, 3, 4}, "goiyx", "oixy????g", {{8, 16}}), FMT_TRAITS(gs_oizyx_gsv16, 1, 1, 3, 1, {0, 1, 2, 3, 4, 5}, "goizyx", "oixyz???g", {{8, 16}}), FMT_TRAITS(gs_oiyx_gsv32, 1, 1, 2, 1, {0, 1, 2, 3, 4}, "goiyx", "oixy????g", {{8, 32}}), diff --git a/src/plugins/intel_gpu/src/runtime/layout.cpp b/src/plugins/intel_gpu/src/runtime/layout.cpp index 99fdadb24cc6bb..a18c5bd73210f1 100644 --- a/src/plugins/intel_gpu/src/runtime/layout.cpp +++ b/src/plugins/intel_gpu/src/runtime/layout.cpp @@ -145,6 +145,8 @@ static format to_weights_format(format f, bool is_grouped) { throw std::runtime_error("Invalid conversion of data format to weights format. 
bfwzyx can't be non-grouped as 4D spatials are not supported"); return format::goizyx; } + case format::b_fs_yx_fsv4: + return format::o_is_yx_isv4; case format::b_fs_yx_fsv16: return format::o_is_yx_isv16; case format::bs_fs_fsv8_bsv8: diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/hash_key_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/hash_key_gpu_test.cpp index b76e45428f9850..a9c1e1262f3aff 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/hash_key_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/hash_key_gpu_test.cpp @@ -176,7 +176,7 @@ class check_hash_value: public ::testing::Test { const auto params_hash = prim_inst->get_impl_params()->hash(); ASSERT_EQ(primitive_hash, 16293979194373117693UL); - ASSERT_EQ(params_hash, 14231564068060955575UL); + ASSERT_EQ(params_hash, 15950979219660866859UL); } void test_reshape_basic(bool is_caching_test) { diff --git a/src/plugins/intel_gpu/thirdparty/onednn_gpu b/src/plugins/intel_gpu/thirdparty/onednn_gpu index 4b82a66ed38eca..284ad4574939fa 160000 --- a/src/plugins/intel_gpu/thirdparty/onednn_gpu +++ b/src/plugins/intel_gpu/thirdparty/onednn_gpu @@ -1 +1 @@ -Subproject commit 4b82a66ed38ecaa993352e5cc6ed7753656b8a26 +Subproject commit 284ad4574939fa784e4ddaa1f4aa577b8eb7a017 From e30f75bb4d8708e821837e1b6c5e85be5409d23a Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Tue, 10 Oct 2023 08:27:26 +0400 Subject: [PATCH 109/257] Rpath story (#20297) --- .../OpenVINODeveloperScriptsConfig.cmake | 8 --- .../developer_package/packaging/archive.cmake | 9 ++++ .../packaging/common-libraries.cmake | 13 +++-- .../packaging/packaging.cmake | 26 ++++++++-- cmake/developer_package/version.cmake | 2 +- ...renceEngineDeveloperPackageConfig.cmake.in | 2 +- .../OpenVINODeveloperPackageConfig.cmake.in | 5 +- samples/cpp/CMakeLists.txt | 4 +- src/bindings/c/src/CMakeLists.txt | 4 ++ .../openvino/inference_engine/CMakeLists.txt | 4 ++ .../src/compatibility/pyngraph/CMakeLists.txt | 2 + .../python/src/pyopenvino/CMakeLists.txt | 9 +++- .../src/pyopenvino/core/async_infer_queue.cpp | 1 - .../python/src/pyopenvino/core/common.hpp | 3 +- .../python/src/pyopenvino/core/core.cpp | 1 - .../src/pyopenvino/core/infer_request.cpp | 1 - .../pyopenvino/frontend/frontend_module.cmake | 7 ++- .../src/pyopenvino/graph/node_factory.cpp | 1 - src/cmake/install_tbb.cmake | 9 ++-- src/cmake/openvino.cmake | 17 +++++-- src/cmake/ov_parallel.cmake | 8 ++- .../offline_transformations/CMakeLists.txt | 6 +-- src/inference/tests/functional/CMakeLists.txt | 4 +- src/inference/tests/unit/CMakeLists.txt | 2 + src/plugins/auto/tests/CMakeLists.txt | 3 +- src/plugins/auto_batch/CMakeLists.txt | 8 +-- src/plugins/auto_batch/tests/CMakeLists.txt | 6 +++ src/plugins/hetero/CMakeLists.txt | 49 ++++++++----------- src/plugins/hetero/tests/CMakeLists.txt | 6 +++ .../hetero/tests/functional/CMakeLists.txt | 1 - src/plugins/hetero/tests/unit/CMakeLists.txt | 1 - .../tests/deprecated/unit/CMakeLists.txt | 3 -- .../intel_gna/tests/unit/CMakeLists.txt | 3 -- src/plugins/intel_gpu/CMakeLists.txt | 2 + 34 files changed, 139 insertions(+), 91 deletions(-) create mode 100644 src/plugins/auto_batch/tests/CMakeLists.txt create mode 100644 src/plugins/hetero/tests/CMakeLists.txt diff --git a/cmake/developer_package/OpenVINODeveloperScriptsConfig.cmake b/cmake/developer_package/OpenVINODeveloperScriptsConfig.cmake index 1dbe8952925f51..3996f373156d89 100644 --- a/cmake/developer_package/OpenVINODeveloperScriptsConfig.cmake +++ 
b/cmake/developer_package/OpenVINODeveloperScriptsConfig.cmake @@ -192,14 +192,6 @@ ov_set_if_not_defined(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FOLDER include(packaging/packaging) if(APPLE) - set(CMAKE_INSTALL_RPATH_USE_LINK_PATH ON) - - if(DEFINED OV_CPACK_LIBRARYDIR) - set(CMAKE_INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/${OV_CPACK_LIBRARYDIR}") - else() - message(FATAL_ERROR "Internal error: OV_CPACK_LIBRARYDIR is not defined, while it's required to initialize RPATH") - endif() - # WA for Xcode generator + object libraries issue: # https://gitlab.kitware.com/cmake/cmake/issues/20260 # http://cmake.3232098.n2.nabble.com/XCODE-DEPEND-HELPER-make-Deletes-Targets-Before-and-While-They-re-Built-td7598277.html diff --git a/cmake/developer_package/packaging/archive.cmake b/cmake/developer_package/packaging/archive.cmake index a3cc7db096754e..5f259a78d72838 100644 --- a/cmake/developer_package/packaging/archive.cmake +++ b/cmake/developer_package/packaging/archive.cmake @@ -4,6 +4,15 @@ include(GNUInstallDirs) +if(APPLE) + # on macOS versions with SIP enabled, we need to use @rpath + # because DYLD_LIBRARY_PATH is ignored + set(CMAKE_SKIP_INSTALL_RPATH OFF) +else() + # we don't need RPATHs, because setupvars.sh is used + set(CMAKE_SKIP_INSTALL_RPATH ON) +endif() + # # ov_archive_cpack_set_dirs() # diff --git a/cmake/developer_package/packaging/common-libraries.cmake b/cmake/developer_package/packaging/common-libraries.cmake index fea1bac83dc7d6..4fbce5b4a58ca7 100644 --- a/cmake/developer_package/packaging/common-libraries.cmake +++ b/cmake/developer_package/packaging/common-libraries.cmake @@ -4,6 +4,14 @@ include(GNUInstallDirs) +if(CPACK_GENERATOR STREQUAL "BREW") + # brew relies on RPATHs + # set(CMAKE_SKIP_INSTALL_RPATH OFF) +else() + # we don't need RPATHs, because libraries are searched by standard paths + set(CMAKE_SKIP_INSTALL_RPATH ON) +endif() + # # ov_common_libraries_cpack_set_dirs() # @@ -107,8 +115,3 @@ macro(ov_define_component_include_rules) endmacro() ov_define_component_include_rules() - -if(CPACK_GENERATOR STREQUAL "BREW") - # brew relies on RPATH - set(CMAKE_SKIP_INSTALL_RPATH OFF) -endif() diff --git a/cmake/developer_package/packaging/packaging.cmake b/cmake/developer_package/packaging/packaging.cmake index 2b6a54473233a8..505565f55da5d7 100644 --- a/cmake/developer_package/packaging/packaging.cmake +++ b/cmake/developer_package/packaging/packaging.cmake @@ -4,9 +4,6 @@ include(CPackComponent) -# we don't need RPATHs, because setupvars.sh is used -set(CMAKE_SKIP_INSTALL_RPATH ON) - # # ov_install_static_lib( ) # @@ -31,6 +28,29 @@ macro(ov_install_static_lib target comp) endif() endmacro() +# +# ov_set_apple_rpath( ...) +# +# Sets LC_RPATH properties for macOS MACH-O binaries to ensure that libraries can find their dependencies +# when macOS system integrity protection (SIP) is enabled (DYLD_LIBRARY_PATH is ignored in this case). +# Note, that this is important when binaries are dynamically loaded at runtime (e.g. via Python). 
+# +function(ov_set_apple_rpath TARGET_NAME lib_install_path) + if(APPLE AND CPACK_GENERATOR MATCHES "^(7Z|TBZ2|TGZ|TXZ|TZ|TZST|ZIP)$") + unset(rpath_list) + foreach(dependency_install_path IN LISTS ARGN) + file(RELATIVE_PATH dependency_rpath "/${lib_install_path}" "/${dependency_install_path}") + set(dependency_rpath "@loader_path/${dependency_rpath}") + list(APPEND rpath_list "${dependency_rpath}") + endforeach() + + set_target_properties(${TARGET_NAME} PROPERTIES + MACOSX_RPATH ON + INSTALL_RPATH "${rpath_list}" + INSTALL_NAME_DIR "@rpath") + endif() +endfunction() + # # ov_get_pyversion() # diff --git a/cmake/developer_package/version.cmake b/cmake/developer_package/version.cmake index f1e12c6531b532..0353e3a52a8617 100644 --- a/cmake/developer_package/version.cmake +++ b/cmake/developer_package/version.cmake @@ -220,7 +220,7 @@ macro (ov_add_version_defines FILE TARGET) $ $) set_target_properties(${TARGET}_version - PROPERTIES INTERPROCEDURAL_OPTIMIZATION_RELEASE + PROPERTIES INTERPROCEDURAL_OPTIMIZATION_RELEASE $) target_sources(${TARGET} PRIVATE $) diff --git a/cmake/templates/InferenceEngineDeveloperPackageConfig.cmake.in b/cmake/templates/InferenceEngineDeveloperPackageConfig.cmake.in index c4a9c49a481936..e197597487b61c 100644 --- a/cmake/templates/InferenceEngineDeveloperPackageConfig.cmake.in +++ b/cmake/templates/InferenceEngineDeveloperPackageConfig.cmake.in @@ -17,7 +17,7 @@ set_and_check(OpenVINO_MAIN_SOURCE_DIR "@OpenVINO_SOURCE_DIR@") # NPU set(ov_options "@OV_OPTIONS@") list(APPEND ov_options CMAKE_CXX_COMPILER_LAUNCHER CMAKE_C_COMPILER_LAUNCHER CMAKE_CXX_LINKER_LAUNCHER CMAKE_C_LINKER_LAUNCHER - CMAKE_SKIP_RPATH CMAKE_INSTALL_PREFIX CPACK_GENERATOR) + CMAKE_INSTALL_PREFIX CPACK_GENERATOR) if(APPLE) list(APPEND ov_options CMAKE_OSX_ARCHITECTURES CMAKE_OSX_DEPLOYMENT_TARGET) diff --git a/cmake/templates/OpenVINODeveloperPackageConfig.cmake.in b/cmake/templates/OpenVINODeveloperPackageConfig.cmake.in index 04cf8a219ae723..f78e31ce635d81 100644 --- a/cmake/templates/OpenVINODeveloperPackageConfig.cmake.in +++ b/cmake/templates/OpenVINODeveloperPackageConfig.cmake.in @@ -13,7 +13,7 @@ set_and_check(OpenVINO_SOURCE_DIR "@OpenVINO_SOURCE_DIR@") set(ov_options "@OV_OPTIONS@") list(APPEND ov_options CMAKE_CXX_COMPILER_LAUNCHER CMAKE_C_COMPILER_LAUNCHER CMAKE_CXX_LINKER_LAUNCHER CMAKE_C_LINKER_LAUNCHER - CMAKE_SKIP_RPATH CMAKE_INSTALL_PREFIX CPACK_GENERATOR) + CMAKE_INSTALL_PREFIX CPACK_GENERATOR) if(APPLE) list(APPEND ov_options CMAKE_OSX_ARCHITECTURES CMAKE_OSX_DEPLOYMENT_TARGET) @@ -42,6 +42,9 @@ foreach(option IN LISTS ov_options) endforeach() message(" ") +# Restore TBB installation directory (required for proper LC_RPATH on macOS with SIP) +load_cache("${cache_path}" READ_WITH_PREFIX "" TBB_INSTALL_DIR) + # activate generation of plugins.xml set(ENABLE_PLUGINS_XML ON) diff --git a/samples/cpp/CMakeLists.txt b/samples/cpp/CMakeLists.txt index 87bf32eb3179c9..5051bc742927e7 100644 --- a/samples/cpp/CMakeLists.txt +++ b/samples/cpp/CMakeLists.txt @@ -223,7 +223,9 @@ macro(ov_add_sample) endif() set_target_properties(${SAMPLE_NAME} PROPERTIES FOLDER ${folder_name} - COMPILE_PDB_NAME ${SAMPLE_NAME}) + COMPILE_PDB_NAME ${SAMPLE_NAME} + # to ensure out of box LC_RPATH on macOS with SIP + INSTALL_RPATH_USE_LINK_PATH ON) if(SAMPLE_INCLUDE_DIRECTORIES) target_include_directories(${SAMPLE_NAME} PRIVATE ${SAMPLE_INCLUDE_DIRECTORIES}) diff --git a/src/bindings/c/src/CMakeLists.txt b/src/bindings/c/src/CMakeLists.txt index e491424cb27afb..8f5eaeac581735 100644 ---
a/src/bindings/c/src/CMakeLists.txt +++ b/src/bindings/c/src/CMakeLists.txt @@ -32,6 +32,10 @@ set_target_properties(${TARGET_NAME} PROPERTIES INTERPROCEDURAL_OPTIMIZATION_REL ov_add_vs_version_file(NAME ${TARGET_NAME} FILEDESCRIPTION "OpenVINO C API Core Runtime library") +ov_set_apple_rpath(${TARGET_NAME} + # openvino_c installed in the same directory as openvino + ${OV_CPACK_RUNTIMEDIR} ${OV_CPACK_RUNTIMEDIR}) + # export set_target_properties(${TARGET_NAME} PROPERTIES EXPORT_NAME runtime::c) diff --git a/src/bindings/python/src/compatibility/openvino/inference_engine/CMakeLists.txt b/src/bindings/python/src/compatibility/openvino/inference_engine/CMakeLists.txt index 92cd19c7c8f3f9..be45fe1281b3d1 100644 --- a/src/bindings/python/src/compatibility/openvino/inference_engine/CMakeLists.txt +++ b/src/bindings/python/src/compatibility/openvino/inference_engine/CMakeLists.txt @@ -84,6 +84,10 @@ add_custom_command(TARGET ${TARGET_NAME} COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/__init__.py ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/__init__.py ) +foreach(target IN LISTS INSTALLED_TARGETS) + ov_set_apple_rpath(${target} ${OV_CPACK_PYTHONDIR}/openvino/inference_engine ${OV_CPACK_RUNTIMEDIR}) +endforeach() + # install install(TARGETS ${INSTALLED_TARGETS} diff --git a/src/bindings/python/src/compatibility/pyngraph/CMakeLists.txt b/src/bindings/python/src/compatibility/pyngraph/CMakeLists.txt index 998c950ff4cc97..ba20fd76055cac 100644 --- a/src/bindings/python/src/compatibility/pyngraph/CMakeLists.txt +++ b/src/bindings/python/src/compatibility/pyngraph/CMakeLists.txt @@ -68,6 +68,8 @@ if(OpenVINO_SOURCE_DIR) ) endif() +ov_set_apple_rpath(_${PROJECT_NAME} ${OV_CPACK_PYTHONDIR} ${OV_CPACK_RUNTIMEDIR}) + # Install if(OpenVINO_SOURCE_DIR OR OpenVINODeveloperPackage_FOUND) diff --git a/src/bindings/python/src/pyopenvino/CMakeLists.txt b/src/bindings/python/src/pyopenvino/CMakeLists.txt index 5d5aeeb40021ac..99ae9983ee82c5 100644 --- a/src/bindings/python/src/pyopenvino/CMakeLists.txt +++ b/src/bindings/python/src/pyopenvino/CMakeLists.txt @@ -72,7 +72,7 @@ endif() target_include_directories(${PROJECT_NAME} PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/..") target_link_libraries(${PROJECT_NAME} PRIVATE - openvino::runtime::dev openvino::runtime ${OFFLINE_TRANSFORMATIONS_LIB}) + openvino::core::dev openvino::runtime ${OFFLINE_TRANSFORMATIONS_LIB}) set_target_properties(${PROJECT_NAME} PROPERTIES INTERPROCEDURAL_OPTIMIZATION_RELEASE ${ENABLE_LTO} OUTPUT_NAME "_pyopenvino") @@ -124,6 +124,13 @@ if(OpenVINO_SOURCE_DIR OR OpenVINODeveloperPackage_FOUND) COMPONENT ${OV_CPACK_COMP_PYTHON_OPENVINO}_${pyversion} ${OV_CPACK_COMP_PYTHON_OPENVINO_EXCLUDE_ALL}) + ov_set_apple_rpath(${PROJECT_NAME} ${OV_CPACK_PYTHONDIR}/openvino + # path to OpenVINO C++ libraries + ${OV_CPACK_RUNTIMEDIR} + # pyopenvino also depends on TBB because of: + # pyopenvino => openvino::offline_transformations => TBB optimized openvino::reference + ${TBB_LIB_INSTALL_DIR}) + ov_cpack_add_component(${OV_CPACK_COMP_OPENVINO_REQ_FILES} HIDDEN) install(FILES ${OpenVINOPython_SOURCE_DIR}/requirements.txt diff --git a/src/bindings/python/src/pyopenvino/core/async_infer_queue.cpp b/src/bindings/python/src/pyopenvino/core/async_infer_queue.cpp index 91da7a810e4951..188cbe263edf30 100644 --- a/src/bindings/python/src/pyopenvino/core/async_infer_queue.cpp +++ b/src/bindings/python/src/pyopenvino/core/async_infer_queue.cpp @@ -3,7 +3,6 @@ #include "pyopenvino/core/async_infer_queue.hpp" -#include #include #include diff --git 
a/src/bindings/python/src/pyopenvino/core/common.hpp b/src/bindings/python/src/pyopenvino/core/common.hpp index 0430470310af61..187f0d87ce5a15 100644 --- a/src/bindings/python/src/pyopenvino/core/common.hpp +++ b/src/bindings/python/src/pyopenvino/core/common.hpp @@ -9,13 +9,12 @@ #include #include -#include -#include #include #include #include #include "Python.h" +#include "openvino/core/type/element_type.hpp" #include "openvino/runtime/compiled_model.hpp" #include "openvino/runtime/infer_request.hpp" #include "openvino/runtime/tensor.hpp" diff --git a/src/bindings/python/src/pyopenvino/core/core.cpp b/src/bindings/python/src/pyopenvino/core/core.cpp index db04ee5bb76d52..734d8ea1ef9ad9 100644 --- a/src/bindings/python/src/pyopenvino/core/core.cpp +++ b/src/bindings/python/src/pyopenvino/core/core.cpp @@ -4,7 +4,6 @@ #include "pyopenvino/core/core.hpp" -#include #include #include diff --git a/src/bindings/python/src/pyopenvino/core/infer_request.cpp b/src/bindings/python/src/pyopenvino/core/infer_request.cpp index 6b087ecaa20cd1..71e73e4dff3720 100644 --- a/src/bindings/python/src/pyopenvino/core/infer_request.cpp +++ b/src/bindings/python/src/pyopenvino/core/infer_request.cpp @@ -3,7 +3,6 @@ #include "pyopenvino/core/infer_request.hpp" -#include #include #include #include diff --git a/src/bindings/python/src/pyopenvino/frontend/frontend_module.cmake b/src/bindings/python/src/pyopenvino/frontend/frontend_module.cmake index f4220bde3aa6ab..5e7a1f2d426288 100644 --- a/src/bindings/python/src/pyopenvino/frontend/frontend_module.cmake +++ b/src/bindings/python/src/pyopenvino/frontend/frontend_module.cmake @@ -25,7 +25,7 @@ function(frontend_module TARGET FRAMEWORK INSTALL_COMPONENT) target_include_directories(${TARGET_NAME} PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}" "${OpenVINOPython_SOURCE_DIR}/src/pyopenvino/utils/") - target_link_libraries(${TARGET_NAME} PRIVATE openvino::runtime openvino::runtime::dev openvino::frontend::${FRAMEWORK}) + target_link_libraries(${TARGET_NAME} PRIVATE openvino::runtime openvino::core::dev openvino::frontend::${FRAMEWORK}) set_target_properties(${TARGET_NAME} PROPERTIES INTERPROCEDURAL_OPTIMIZATION_RELEASE ${ENABLE_LTO}) @@ -39,8 +39,11 @@ function(frontend_module TARGET FRAMEWORK INSTALL_COMPONENT) COMMAND ${CMAKE_COMMAND} -E copy ${OpenVINOPython_SOURCE_DIR}/src/openvino/frontend/${FRAMEWORK}/__init__.py ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/__init__.py) + set(frontend_install_path ${OV_CPACK_PYTHONDIR}/openvino/frontend/${FRAMEWORK}) install(TARGETS ${TARGET_NAME} - DESTINATION ${OV_CPACK_PYTHONDIR}/openvino/frontend/${FRAMEWORK} + DESTINATION ${frontend_install_path} COMPONENT ${INSTALL_COMPONENT} ${OV_CPACK_COMP_PYTHON_OPENVINO_EXCLUDE_ALL}) + + ov_set_apple_rpath(${TARGET_NAME} ${frontend_install_path} ${OV_CPACK_RUNTIMEDIR}) endfunction() diff --git a/src/bindings/python/src/pyopenvino/graph/node_factory.cpp b/src/bindings/python/src/pyopenvino/graph/node_factory.cpp index 5c274d1bf3f6bd..7dde2d53b7b89f 100644 --- a/src/bindings/python/src/pyopenvino/graph/node_factory.cpp +++ b/src/bindings/python/src/pyopenvino/graph/node_factory.cpp @@ -17,7 +17,6 @@ #include #include "dict_attribute_visitor.hpp" -#include "ngraph/check.hpp" #include "openvino/core/except.hpp" #include "openvino/core/node.hpp" #include "openvino/core/op_extension.hpp" diff --git a/src/cmake/install_tbb.cmake b/src/cmake/install_tbb.cmake index 19a30ee624c918..a5f7bd19c5856c 100644 --- a/src/cmake/install_tbb.cmake +++ b/src/cmake/install_tbb.cmake @@ -128,7 +128,7 @@ if(THREADING MATCHES 
"^(TBB|TBB_AUTO)$" AND endforeach() endforeach() - set(pkg_config_tbb_lib_dir "runtime/3rdparty/tbb/lib") + set(TBB_LIB_INSTALL_DIR "runtime/3rdparty/tbb/lib" CACHE PATH "TBB library install directory" FORCE) elseif(tbb_custom) # for custom TBB we need to install it to our package # to simplify life for our customers @@ -183,7 +183,7 @@ if(THREADING MATCHES "^(TBB|TBB_AUTO)$" AND endif() endforeach() - set(pkg_config_tbb_lib_dir "${IE_TBBROOT_INSTALL}/${tbb_libs_dir}") + set(TBB_LIB_INSTALL_DIR "${IE_TBBROOT_INSTALL}/${tbb_libs_dir}" CACHE PATH "TBB library install directory" FORCE) elseif(tbb_downloaded) set(OV_TBB_DIR_INSTALL "runtime/3rdparty/tbb") @@ -234,13 +234,16 @@ if(THREADING MATCHES "^(TBB|TBB_AUTO)$" AND PATTERN "cmake" EXCLUDE) endif() - set(pkg_config_tbb_lib_dir "${OV_TBB_DIR_INSTALL}/lib") + set(TBB_LIB_INSTALL_DIR "${OV_TBB_DIR_INSTALL}/lib" CACHE PATH "TBB library install directory" FORCE) else() + unset(TBB_LIB_INSTALL_DIR CACHE) message(WARNING "TBB of unknown origin. TBB files are not installed") endif() unset(tbb_downloaded) unset(tbb_custom) +else() + unset(TBB_LIB_INSTALL_DIR CACHE) endif() # install tbbbind for static OpenVINO case diff --git a/src/cmake/openvino.cmake b/src/cmake/openvino.cmake index 068ae2b0cd9816..ba3786cd697a75 100644 --- a/src/cmake/openvino.cmake +++ b/src/cmake/openvino.cmake @@ -72,6 +72,14 @@ endif() ov_set_threading_interface_for(${TARGET_NAME}) ov_mark_target_as_cc(${TARGET_NAME}) +if(TBB_FOUND) + if(NOT TBB_LIB_INSTALL_DIR) + message(FATAL_ERROR "Internal error: variable 'TBB_LIB_INSTALL_DIR' is not defined") + endif() + # set LC_RPATH to TBB library directory + ov_set_apple_rpath(${TARGET_NAME} ${OV_CPACK_RUNTIMEDIR} ${TBB_LIB_INSTALL_DIR}) +endif() + # must be called after all target_link_libraries ov_add_api_validator_post_build_step(TARGET ${TARGET_NAME} EXTRA ${TBB_IMPORTED_TARGETS}) @@ -96,7 +104,7 @@ install(TARGETS ${TARGET_NAME} EXPORT OpenVINOTargets # OpenVINO runtime library dev # -# Add openvin::dev target +# Add openvino::runtine::dev target # add_library(${TARGET_NAME}_dev INTERFACE) @@ -112,6 +120,7 @@ target_compile_definitions(${TARGET_NAME}_dev INTERFACE target_link_libraries(${TARGET_NAME}_dev INTERFACE ${TARGET_NAME} openvino::core::dev) +# TODO: remove once NPU will use explicltly `ov_set_threading_interface_for` ov_set_threading_interface_for(${TARGET_NAME}_dev) set_target_properties(${TARGET_NAME}_dev PROPERTIES EXPORT_NAME runtime::dev) @@ -253,11 +262,11 @@ if(ENABLE_PKGCONFIG_GEN) if(ENABLE_SYSTEM_TBB) set(PKGCONFIG_OpenVINO_PRIVATE_DEPS "-ltbb") elseif(TBB_FOUND) - if(NOT pkg_config_tbb_lib_dir) - message(FATAL_ERROR "Internal error: variable 'pkg_config_tbb_lib_dir' is not defined") + if(NOT TBB_LIB_INSTALL_DIR) + message(FATAL_ERROR "Internal error: variable 'TBB_LIB_INSTALL_DIR' is not defined") endif() - set(PKGCONFIG_OpenVINO_PRIVATE_DEPS "-L\${prefix}/${pkg_config_tbb_lib_dir} -ltbb") + set(PKGCONFIG_OpenVINO_PRIVATE_DEPS "-L\${prefix}/${TBB_LIB_INSTALL_DIR} -ltbb") endif() if(ENABLE_SYSTEM_PUGIXML) diff --git a/src/cmake/ov_parallel.cmake b/src/cmake/ov_parallel.cmake index a9d4d391e4543f..510b207689add7 100644 --- a/src/cmake/ov_parallel.cmake +++ b/src/cmake/ov_parallel.cmake @@ -261,18 +261,16 @@ function(ov_set_threading_interface_for TARGET_NAME) if(target_type STREQUAL "INTERFACE_LIBRARY") set(LINK_TYPE "INTERFACE") set(COMPILE_DEF_TYPE "INTERFACE") - elseif(target_type STREQUAL "EXECUTABLE" OR target_type STREQUAL "OBJECT_LIBRARY" OR - target_type STREQUAL "MODULE_LIBRARY") + elseif(target_type 
MATCHES "^(EXECUTABLE|OBJECT_LIBRARY|MODULE_LIBRARY)$") set(LINK_TYPE "PRIVATE") - set(COMPILE_DEF_TYPE "PUBLIC") + set(COMPILE_DEF_TYPE "PRIVATE") elseif(target_type STREQUAL "STATIC_LIBRARY") # Affected libraries: inference_engine_s, openvino_gapi_preproc_s # they don't have TBB in public headers => PRIVATE set(LINK_TYPE "PRIVATE") set(COMPILE_DEF_TYPE "PUBLIC") elseif(target_type STREQUAL "SHARED_LIBRARY") - # Affected libraries: inference_engine only - # TODO: why TBB propogates its headers to inference_engine? + # Affected libraries: 'openvino' only set(LINK_TYPE "PRIVATE") set(COMPILE_DEF_TYPE "PUBLIC") else() diff --git a/src/common/offline_transformations/CMakeLists.txt b/src/common/offline_transformations/CMakeLists.txt index ccd11c7eb3b51d..6712f2f28586e3 100644 --- a/src/common/offline_transformations/CMakeLists.txt +++ b/src/common/offline_transformations/CMakeLists.txt @@ -19,11 +19,7 @@ source_group("include" FILES ${PUBLIC_HEADERS}) add_library(${TARGET_NAME} STATIC ${LIBRARY_SRC} ${PUBLIC_HEADERS}) -target_link_libraries(${TARGET_NAME} PRIVATE openvino::runtime::dev openvino::itt openvino::pugixml openvino::reference - openvino::runtime) - -set_source_files_properties(INCLUDE_DIRECTORIES - $) +target_link_libraries(${TARGET_NAME} PRIVATE openvino::core::dev openvino::reference openvino::runtime) target_include_directories(${TARGET_NAME} PUBLIC ${PUBLIC_HEADERS_DIR} PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/src") diff --git a/src/inference/tests/functional/CMakeLists.txt b/src/inference/tests/functional/CMakeLists.txt index 16bcc08d4b95c8..4a42e9eb5bd5b4 100644 --- a/src/inference/tests/functional/CMakeLists.txt +++ b/src/inference/tests/functional/CMakeLists.txt @@ -44,6 +44,8 @@ ov_add_test_target( LINK_LIBRARIES gmock func_test_utils + DEFINES + ${COMPILE_DEFINITIONS} INCLUDES $/src ${CMAKE_CURRENT_SOURCE_DIR} @@ -52,4 +54,4 @@ ov_add_test_target( OV UNIT RUNTIME ) -add_compile_definitions(${TARGET_NAME} ${COMPILE_DEFINITIONS}) +ov_set_threading_interface_for(${TARGET_NAME}) diff --git a/src/inference/tests/unit/CMakeLists.txt b/src/inference/tests/unit/CMakeLists.txt index ef8e346aaf486b..8030fe0ddb7a15 100644 --- a/src/inference/tests/unit/CMakeLists.txt +++ b/src/inference/tests/unit/CMakeLists.txt @@ -19,3 +19,5 @@ ov_add_test_target( LABELS OV UNIT RUNTIME ) + +ov_set_threading_interface_for(${TARGET_NAME}) diff --git a/src/plugins/auto/tests/CMakeLists.txt b/src/plugins/auto/tests/CMakeLists.txt index bce0f68667ca23..59ce3e84bc86cf 100644 --- a/src/plugins/auto/tests/CMakeLists.txt +++ b/src/plugins/auto/tests/CMakeLists.txt @@ -1,8 +1,7 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # -# RPATH is always enabled for unit tests -set(CMAKE_SKIP_RPATH OFF) + # because unit tests use plugins object files compiled with LTO if(CMAKE_COMPILER_IS_GNUCXX AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 9.0) set(CMAKE_INTERPROCEDURAL_OPTIMIZATION_RELEASE ${ENABLE_LTO}) diff --git a/src/plugins/auto_batch/CMakeLists.txt b/src/plugins/auto_batch/CMakeLists.txt index ffc873e5c6c833..9ff585740abc98 100644 --- a/src/plugins/auto_batch/CMakeLists.txt +++ b/src/plugins/auto_batch/CMakeLists.txt @@ -18,17 +18,13 @@ ov_add_plugin(NAME ${TARGET_NAME} SOURCES ${SOURCES} ${HEADERS} VERSION_DEFINES_FOR src/plugin.cpp ADD_CLANG_FORMAT) -target_link_libraries(${TARGET_NAME} PRIVATE Threads::Threads) +ov_set_threading_interface_for(${TARGET_NAME}) # must be called after all target_link_libraries ov_add_api_validator_post_build_step(TARGET ${TARGET_NAME}) 
set_target_properties(${TARGET_NAME} PROPERTIES INTERPROCEDURAL_OPTIMIZATION_RELEASE ${ENABLE_LTO}) -if(ENABLE_FUNCTIONAL_TESTS) - add_subdirectory(tests/functional) -endif() - if(ENABLE_TESTS) - add_subdirectory(tests/unit) + add_subdirectory(tests) endif() diff --git a/src/plugins/auto_batch/tests/CMakeLists.txt b/src/plugins/auto_batch/tests/CMakeLists.txt new file mode 100644 index 00000000000000..9d4bb95917dcc9 --- /dev/null +++ b/src/plugins/auto_batch/tests/CMakeLists.txt @@ -0,0 +1,6 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +add_subdirectory(unit) +add_subdirectory(functional) diff --git a/src/plugins/hetero/CMakeLists.txt b/src/plugins/hetero/CMakeLists.txt index fdb972a7b427b4..8d20a7d6c5b03a 100644 --- a/src/plugins/hetero/CMakeLists.txt +++ b/src/plugins/hetero/CMakeLists.txt @@ -29,38 +29,31 @@ ov_add_api_validator_post_build_step(TARGET ${TARGET_NAME}) set_target_properties(${TARGET_NAME} PROPERTIES INTERPROCEDURAL_OPTIMIZATION_RELEASE ${ENABLE_LTO}) -if(BUILD_SHARED_LIBS) - set(OBJ_NAME ${TARGET_NAME}_obj) - - add_library(${OBJ_NAME} OBJECT ${SOURCES} ${HEADERS}) - ov_link_system_libraries(${OBJ_NAME} PUBLIC openvino::pugixml) - - ov_add_version_defines(src/version.cpp ${OBJ_NAME}) +if(ENABLE_TESTS) + if(BUILD_SHARED_LIBS) + set(OBJ_NAME ${TARGET_NAME}_obj) - target_include_directories(${OBJ_NAME} - PRIVATE - $ - $ - PUBLIC - ${CMAKE_CURRENT_SOURCE_DIR}/src - $) + add_library(${OBJ_NAME} OBJECT ${SOURCES} ${HEADERS}) + ov_link_system_libraries(${OBJ_NAME} PUBLIC openvino::pugixml) - ov_set_threading_interface_for(${OBJ_NAME}) + ov_add_version_defines(src/version.cpp ${OBJ_NAME}) - target_compile_definitions(${OBJ_NAME} - PRIVATE - USE_STATIC_IE IMPLEMENT_INFERENCE_ENGINE_PLUGIN IMPLEMENT_INFERENCE_EXTENSION_API - $ - $) + target_include_directories(${OBJ_NAME} + PRIVATE + $ + $ + PUBLIC + ${CMAKE_CURRENT_SOURCE_DIR}/src + $) - set_target_properties(${TARGET_NAME}_obj PROPERTIES EXCLUDE_FROM_ALL ON) - set_target_properties(${TARGET_NAME}_obj PROPERTIES INTERPROCEDURAL_OPTIMIZATION_RELEASE ${ENABLE_LTO}) -endif() + ov_set_threading_interface_for(${OBJ_NAME}) -if(ENABLE_TESTS) - add_subdirectory(tests/unit) -endif() + target_compile_definitions(${OBJ_NAME} + PRIVATE + USE_STATIC_IE IMPLEMENT_INFERENCE_ENGINE_PLUGIN IMPLEMENT_INFERENCE_EXTENSION_API + $ + $) + endif() -if(ENABLE_FUNCTIONAL_TESTS) - add_subdirectory(tests/functional) + add_subdirectory(tests) endif() diff --git a/src/plugins/hetero/tests/CMakeLists.txt b/src/plugins/hetero/tests/CMakeLists.txt new file mode 100644 index 00000000000000..9d4bb95917dcc9 --- /dev/null +++ b/src/plugins/hetero/tests/CMakeLists.txt @@ -0,0 +1,6 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +add_subdirectory(unit) +add_subdirectory(functional) diff --git a/src/plugins/hetero/tests/functional/CMakeLists.txt b/src/plugins/hetero/tests/functional/CMakeLists.txt index a1726e31c5a413..550bfa0f075795 100644 --- a/src/plugins/hetero/tests/functional/CMakeLists.txt +++ b/src/plugins/hetero/tests/functional/CMakeLists.txt @@ -8,7 +8,6 @@ ov_add_test_target( NAME ${TARGET_NAME} ROOT ${CMAKE_CURRENT_SOURCE_DIR} DEPENDENCIES - openvino::runtime mock_engine openvino_hetero_plugin LINK_LIBRARIES diff --git a/src/plugins/hetero/tests/unit/CMakeLists.txt b/src/plugins/hetero/tests/unit/CMakeLists.txt index 939229e4ee9ffb..ad2d38e156b5a9 100644 --- a/src/plugins/hetero/tests/unit/CMakeLists.txt +++ b/src/plugins/hetero/tests/unit/CMakeLists.txt @@ -24,7 
+24,6 @@ ov_add_test_target( ov_models DEPENDENCIES mock_engine - ov_models ADD_CLANG_FORMAT LABELS OV UNIT HETERO diff --git a/src/plugins/intel_gna/tests/deprecated/unit/CMakeLists.txt b/src/plugins/intel_gna/tests/deprecated/unit/CMakeLists.txt index fd85b52a313767..035e05de64516b 100644 --- a/src/plugins/intel_gna/tests/deprecated/unit/CMakeLists.txt +++ b/src/plugins/intel_gna/tests/deprecated/unit/CMakeLists.txt @@ -8,9 +8,6 @@ set(TARGET_NAME InferenceEngineUnitTests) ov_disable_deprecated_warnings() -# rpath enabled for unit tests only -SET (CMAKE_SKIP_RPATH OFF) - # collect sources file(GLOB diff --git a/src/plugins/intel_gna/tests/unit/CMakeLists.txt b/src/plugins/intel_gna/tests/unit/CMakeLists.txt index 87f4223a643f79..cb77a5e190ca74 100644 --- a/src/plugins/intel_gna/tests/unit/CMakeLists.txt +++ b/src/plugins/intel_gna/tests/unit/CMakeLists.txt @@ -4,9 +4,6 @@ set(TARGET_NAME ov_gna_unit_tests) -# RPATH is always enabled for unit tests -set(CMAKE_SKIP_RPATH OFF) - # because unit tests use plugins object files compiled with LTO if(CMAKE_COMPILER_IS_GNUCXX AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 9.0) set(CMAKE_INTERPROCEDURAL_OPTIMIZATION_RELEASE ${ENABLE_LTO}) diff --git a/src/plugins/intel_gpu/CMakeLists.txt b/src/plugins/intel_gpu/CMakeLists.txt index 2561407b6faaa7..b0c66a435d6470 100644 --- a/src/plugins/intel_gpu/CMakeLists.txt +++ b/src/plugins/intel_gpu/CMakeLists.txt @@ -65,6 +65,8 @@ target_link_libraries(${TARGET_NAME} PRIVATE openvino_intel_gpu_graph openvino:: target_include_directories(${TARGET_NAME} PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include/) +ov_set_threading_interface_for(${TARGET_NAME}) + set_target_properties(${TARGET_NAME} PROPERTIES INTERPROCEDURAL_OPTIMIZATION_RELEASE ${ENABLE_LTO}) # Workaround to avoid warnings during LTO build From cf83750867bb17ff0ff7f508ac5415c8b24ba040 Mon Sep 17 00:00:00 2001 From: Ilya Churaev Date: Tue, 10 Oct 2023 09:32:18 +0400 Subject: [PATCH 110/257] Move ConvEltwiseFuse to new API (#20331) --- .../subgraph_tests/conv_eltwise_fusion.cpp | 2 +- .../include/subgraph_tests/conv_eltwise_fusion.hpp | 8 +++++--- .../include/shared_test_classes/base/ov_subgraph.hpp | 11 +++++++++++ .../subgraph/conv_eltwise_fusion.hpp | 9 ++++++--- .../src/subgraph/conv_eltwise_fusion.cpp | 9 +++++---- 5 files changed, 28 insertions(+), 11 deletions(-) diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/conv_eltwise_fusion.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/conv_eltwise_fusion.cpp index 2e23919cfd6c4c..e92f457f369aa9 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/conv_eltwise_fusion.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/conv_eltwise_fusion.cpp @@ -8,7 +8,7 @@ #include "common_test_utils/test_constants.hpp" -using namespace SubgraphTestsDefinitions; +using namespace ov::test; namespace { const std::vector types{ov::element::f32, ov::element::f16}; diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/conv_eltwise_fusion.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/conv_eltwise_fusion.hpp index f41991680ab4f6..088716c36ab130 100644 --- a/src/tests/functional/plugin/shared/include/subgraph_tests/conv_eltwise_fusion.hpp +++ b/src/tests/functional/plugin/shared/include/subgraph_tests/conv_eltwise_fusion.hpp @@ -6,10 +6,12 @@ #include "shared_test_classes/subgraph/conv_eltwise_fusion.hpp" -namespace SubgraphTestsDefinitions { 
+namespace ov { +namespace test { TEST_P(ConvEltwiseFusion, CompareWithRefs) { - Run(); + run(); } -} // namespace SubgraphTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/base/ov_subgraph.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/base/ov_subgraph.hpp index 42d021072a49a2..05867b81d67c8c 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/base/ov_subgraph.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/base/ov_subgraph.hpp @@ -106,5 +106,16 @@ inline std::vector static_shapes_to_test_representation(const std::v } return result; } + +class SubgraphBaseStaticTest : public ov::test::SubgraphBaseTest { +public: + void run() override { + std::vector input_shapes; + for (const auto& param : function->get_parameters()) + input_shapes.emplace_back(param->get_shape()); + init_input_shapes(ov::test::static_shapes_to_test_representation(input_shapes)); + ov::test::SubgraphBaseTest::run(); + } +}; } // namespace test } // namespace ov diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/conv_eltwise_fusion.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/conv_eltwise_fusion.hpp index d3dea8eb01b080..eff28f7d7f2574 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/conv_eltwise_fusion.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/conv_eltwise_fusion.hpp @@ -10,8 +10,10 @@ #include "ov_models/builders.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" -namespace SubgraphTestsDefinitions { +namespace ov { +namespace test { typedef std::tuple, - virtual public LayerTestsUtils::LayerTestsCommon { + virtual public ov::test::SubgraphBaseStaticTest { public: static std::string getTestCaseName(const testing::TestParamInfo& obj); @@ -35,4 +37,5 @@ class ConvEltwiseFusion : public testing::WithParamInterface& obj) { std::tuple conv_params; @@ -205,4 +205,5 @@ void ConvEltwiseFusion::SetUp() { auto res = compare_functions(cloned_function, function_ref); ASSERT_TRUE(res.first) << res.second; } -} // namespace SubgraphTestsDefinitions +} // namespace test +} // namespace ov From 60b82372d1a95c2e661744291c0f5a61b80ee504 Mon Sep 17 00:00:00 2001 From: Wanglei Shen Date: Tue, 10 Oct 2023 13:59:27 +0800 Subject: [PATCH 111/257] Support SRF in MT 2.0 on Linux (#20301) * add test data for SRF on Linux * update cpu map detection for Ecore only platform * update test data for smoke test of streams generation * update test data --- src/inference/src/os/lin/lin_system_conf.cpp | 8 +- .../cpu_map_parser/cache_parser_linux.cpp | 87 +++++++++++ .../unit/streams_info/streams_e2e_test.cpp | 138 +++++++++++++++++- 3 files changed, 228 insertions(+), 5 deletions(-) diff --git a/src/inference/src/os/lin/lin_system_conf.cpp b/src/inference/src/os/lin/lin_system_conf.cpp index 02fbe263e15902..6d8bb4afad2b3b 100644 --- a/src/inference/src/os/lin/lin_system_conf.cpp +++ b/src/inference/src/os/lin/lin_system_conf.cpp @@ -201,7 +201,9 @@ CPU::CPU() { _cpu_mapping_table); } - if ((_proc_type_table.size() == 0) || (_proc_type_table[0][MAIN_CORE_PROC] == 0)) { + if ((_proc_type_table.size() == 0) || + ((_proc_type_table[0][MAIN_CORE_PROC] == 0) && (_proc_type_table[0][ALL_PROC] > 0) && + (_proc_type_table[0][ALL_PROC] != 
_proc_type_table[0][EFFICIENT_CORE_PROC]))) { if (!get_freq_info_linux()) { parse_freq_info_linux(system_info_table, node_info_table, @@ -214,7 +216,9 @@ CPU::CPU() { } } - if ((_proc_type_table.size() == 0) || (_proc_type_table[0][MAIN_CORE_PROC] == 0)) { + if ((_proc_type_table.size() == 0) || + ((_proc_type_table[0][MAIN_CORE_PROC] == 0) && (_proc_type_table[0][ALL_PROC] > 0) && + (_proc_type_table[0][ALL_PROC] != _proc_type_table[0][EFFICIENT_CORE_PROC]))) { /*Previous CPU resource based on calculation*/ std::ifstream cpuinfo("/proc/cpuinfo"); std::vector processors; diff --git a/src/inference/tests/unit/cpu_map_parser/cache_parser_linux.cpp b/src/inference/tests/unit/cpu_map_parser/cache_parser_linux.cpp index b96a86d62f72cf..7dab5dc907ff0b 100644 --- a/src/inference/tests/unit/cpu_map_parser/cache_parser_linux.cpp +++ b/src/inference/tests/unit/cpu_map_parser/cache_parser_linux.cpp @@ -300,6 +300,92 @@ LinuxCpuMapTestCase cache_2sockets_104cores_hyperthreading = { }, // param[in]: The CPU cache information table of this simulated platform {{"0-51,104-155"}, {"52-103,156-207"}}, // param[in]: The numa node information table of this simulated platform }; +LinuxCpuMapTestCase cache_1sockets_96cores = { + 96, + 1, + 1, + 96, + {{96, 0, 96, 0, 0, 0}}, + { + {0, 0, 0, 0, EFFICIENT_CORE_PROC, 0, -1}, {1, 0, 0, 1, EFFICIENT_CORE_PROC, 0, -1}, + {2, 0, 0, 2, EFFICIENT_CORE_PROC, 0, -1}, {3, 0, 0, 3, EFFICIENT_CORE_PROC, 0, -1}, + {4, 0, 0, 4, EFFICIENT_CORE_PROC, 1, -1}, {5, 0, 0, 5, EFFICIENT_CORE_PROC, 1, -1}, + {6, 0, 0, 6, EFFICIENT_CORE_PROC, 1, -1}, {7, 0, 0, 7, EFFICIENT_CORE_PROC, 1, -1}, + {8, 0, 0, 8, EFFICIENT_CORE_PROC, 2, -1}, {9, 0, 0, 9, EFFICIENT_CORE_PROC, 2, -1}, + {10, 0, 0, 10, EFFICIENT_CORE_PROC, 2, -1}, {11, 0, 0, 11, EFFICIENT_CORE_PROC, 2, -1}, + {12, 0, 0, 12, EFFICIENT_CORE_PROC, 3, -1}, {13, 0, 0, 13, EFFICIENT_CORE_PROC, 3, -1}, + {14, 0, 0, 14, EFFICIENT_CORE_PROC, 3, -1}, {15, 0, 0, 15, EFFICIENT_CORE_PROC, 3, -1}, + {16, 0, 0, 16, EFFICIENT_CORE_PROC, 4, -1}, {17, 0, 0, 17, EFFICIENT_CORE_PROC, 4, -1}, + {18, 0, 0, 18, EFFICIENT_CORE_PROC, 4, -1}, {19, 0, 0, 19, EFFICIENT_CORE_PROC, 4, -1}, + {20, 0, 0, 20, EFFICIENT_CORE_PROC, 5, -1}, {21, 0, 0, 21, EFFICIENT_CORE_PROC, 5, -1}, + {22, 0, 0, 22, EFFICIENT_CORE_PROC, 5, -1}, {23, 0, 0, 23, EFFICIENT_CORE_PROC, 5, -1}, + {24, 0, 0, 24, EFFICIENT_CORE_PROC, 6, -1}, {25, 0, 0, 25, EFFICIENT_CORE_PROC, 6, -1}, + {26, 0, 0, 26, EFFICIENT_CORE_PROC, 6, -1}, {27, 0, 0, 27, EFFICIENT_CORE_PROC, 6, -1}, + {28, 0, 0, 28, EFFICIENT_CORE_PROC, 7, -1}, {29, 0, 0, 29, EFFICIENT_CORE_PROC, 7, -1}, + {30, 0, 0, 30, EFFICIENT_CORE_PROC, 7, -1}, {31, 0, 0, 31, EFFICIENT_CORE_PROC, 7, -1}, + {32, 0, 0, 32, EFFICIENT_CORE_PROC, 8, -1}, {33, 0, 0, 33, EFFICIENT_CORE_PROC, 8, -1}, + {34, 0, 0, 34, EFFICIENT_CORE_PROC, 8, -1}, {35, 0, 0, 35, EFFICIENT_CORE_PROC, 8, -1}, + {36, 0, 0, 36, EFFICIENT_CORE_PROC, 9, -1}, {37, 0, 0, 37, EFFICIENT_CORE_PROC, 9, -1}, + {38, 0, 0, 38, EFFICIENT_CORE_PROC, 9, -1}, {39, 0, 0, 39, EFFICIENT_CORE_PROC, 9, -1}, + {40, 0, 0, 40, EFFICIENT_CORE_PROC, 10, -1}, {41, 0, 0, 41, EFFICIENT_CORE_PROC, 10, -1}, + {42, 0, 0, 42, EFFICIENT_CORE_PROC, 10, -1}, {43, 0, 0, 43, EFFICIENT_CORE_PROC, 10, -1}, + {44, 0, 0, 44, EFFICIENT_CORE_PROC, 11, -1}, {45, 0, 0, 45, EFFICIENT_CORE_PROC, 11, -1}, + {46, 0, 0, 46, EFFICIENT_CORE_PROC, 11, -1}, {47, 0, 0, 47, EFFICIENT_CORE_PROC, 11, -1}, + {48, 0, 0, 48, EFFICIENT_CORE_PROC, 12, -1}, {49, 0, 0, 49, EFFICIENT_CORE_PROC, 12, -1}, + {50, 0, 0, 50, EFFICIENT_CORE_PROC, 
12, -1}, {51, 0, 0, 51, EFFICIENT_CORE_PROC, 12, -1}, + {52, 0, 0, 52, EFFICIENT_CORE_PROC, 13, -1}, {53, 0, 0, 53, EFFICIENT_CORE_PROC, 13, -1}, + {54, 0, 0, 54, EFFICIENT_CORE_PROC, 13, -1}, {55, 0, 0, 55, EFFICIENT_CORE_PROC, 13, -1}, + {56, 0, 0, 56, EFFICIENT_CORE_PROC, 14, -1}, {57, 0, 0, 57, EFFICIENT_CORE_PROC, 14, -1}, + {58, 0, 0, 58, EFFICIENT_CORE_PROC, 14, -1}, {59, 0, 0, 59, EFFICIENT_CORE_PROC, 14, -1}, + {60, 0, 0, 60, EFFICIENT_CORE_PROC, 15, -1}, {61, 0, 0, 61, EFFICIENT_CORE_PROC, 15, -1}, + {62, 0, 0, 62, EFFICIENT_CORE_PROC, 15, -1}, {63, 0, 0, 63, EFFICIENT_CORE_PROC, 15, -1}, + {64, 0, 0, 64, EFFICIENT_CORE_PROC, 16, -1}, {65, 0, 0, 65, EFFICIENT_CORE_PROC, 16, -1}, + {66, 0, 0, 66, EFFICIENT_CORE_PROC, 16, -1}, {67, 0, 0, 67, EFFICIENT_CORE_PROC, 16, -1}, + {68, 0, 0, 68, EFFICIENT_CORE_PROC, 17, -1}, {69, 0, 0, 69, EFFICIENT_CORE_PROC, 17, -1}, + {70, 0, 0, 70, EFFICIENT_CORE_PROC, 17, -1}, {71, 0, 0, 71, EFFICIENT_CORE_PROC, 17, -1}, + {72, 0, 0, 72, EFFICIENT_CORE_PROC, 18, -1}, {73, 0, 0, 73, EFFICIENT_CORE_PROC, 18, -1}, + {74, 0, 0, 74, EFFICIENT_CORE_PROC, 18, -1}, {75, 0, 0, 75, EFFICIENT_CORE_PROC, 18, -1}, + {76, 0, 0, 76, EFFICIENT_CORE_PROC, 19, -1}, {77, 0, 0, 77, EFFICIENT_CORE_PROC, 19, -1}, + {78, 0, 0, 78, EFFICIENT_CORE_PROC, 19, -1}, {79, 0, 0, 79, EFFICIENT_CORE_PROC, 19, -1}, + {80, 0, 0, 80, EFFICIENT_CORE_PROC, 20, -1}, {81, 0, 0, 81, EFFICIENT_CORE_PROC, 20, -1}, + {82, 0, 0, 82, EFFICIENT_CORE_PROC, 20, -1}, {83, 0, 0, 83, EFFICIENT_CORE_PROC, 20, -1}, + {84, 0, 0, 84, EFFICIENT_CORE_PROC, 21, -1}, {85, 0, 0, 85, EFFICIENT_CORE_PROC, 21, -1}, + {86, 0, 0, 86, EFFICIENT_CORE_PROC, 21, -1}, {87, 0, 0, 87, EFFICIENT_CORE_PROC, 21, -1}, + {88, 0, 0, 88, EFFICIENT_CORE_PROC, 22, -1}, {89, 0, 0, 89, EFFICIENT_CORE_PROC, 22, -1}, + {90, 0, 0, 90, EFFICIENT_CORE_PROC, 22, -1}, {91, 0, 0, 91, EFFICIENT_CORE_PROC, 22, -1}, + {92, 0, 0, 92, EFFICIENT_CORE_PROC, 23, -1}, {93, 0, 0, 93, EFFICIENT_CORE_PROC, 23, -1}, + {94, 0, 0, 94, EFFICIENT_CORE_PROC, 23, -1}, {95, 0, 0, 95, EFFICIENT_CORE_PROC, 23, -1}, + }, + { + {"0", "0-3", "0-95"}, {"1", "0-3", "0-95"}, {"2", "0-3", "0-95"}, {"3", "0-3", "0-95"}, + {"4", "4-7", "0-95"}, {"5", "4-7", "0-95"}, {"6", "4-7", "0-95"}, {"7", "4-7", "0-95"}, + {"8", "8-11", "0-95"}, {"9", "8-11", "0-95"}, {"10", "8-11", "0-95"}, {"11", "8-11", "0-95"}, + {"12", "12-15", "0-95"}, {"13", "12-15", "0-95"}, {"14", "12-15", "0-95"}, {"15", "12-15", "0-95"}, + {"16", "16-19", "0-95"}, {"17", "16-19", "0-95"}, {"18", "16-19", "0-95"}, {"19", "16-19", "0-95"}, + {"20", "20-23", "0-95"}, {"21", "20-23", "0-95"}, {"22", "20-23", "0-95"}, {"23", "20-23", "0-95"}, + {"24", "24-27", "0-95"}, {"25", "24-27", "0-95"}, {"26", "24-27", "0-95"}, {"27", "24-27", "0-95"}, + {"28", "28-31", "0-95"}, {"29", "28-31", "0-95"}, {"30", "28-31", "0-95"}, {"31", "28-31", "0-95"}, + {"32", "32-35", "0-95"}, {"33", "32-35", "0-95"}, {"34", "32-35", "0-95"}, {"35", "32-35", "0-95"}, + {"36", "36-39", "0-95"}, {"37", "36-39", "0-95"}, {"38", "36-39", "0-95"}, {"39", "36-39", "0-95"}, + {"40", "40-43", "0-95"}, {"41", "40-43", "0-95"}, {"42", "40-43", "0-95"}, {"43", "40-43", "0-95"}, + {"44", "44-47", "0-95"}, {"45", "44-47", "0-95"}, {"46", "44-47", "0-95"}, {"47", "44-47", "0-95"}, + {"48", "48-51", "0-95"}, {"49", "48-51", "0-95"}, {"50", "48-51", "0-95"}, {"51", "48-51", "0-95"}, + {"52", "52-55", "0-95"}, {"53", "52-55", "0-95"}, {"54", "52-55", "0-95"}, {"55", "52-55", "0-95"}, + {"56", "56-59", "0-95"}, {"57", "56-59", "0-95"}, {"58", "56-59", 
"0-95"}, {"59", "56-59", "0-95"}, + {"60", "60-63", "0-95"}, {"61", "60-63", "0-95"}, {"62", "60-63", "0-95"}, {"63", "60-63", "0-95"}, + {"64", "64-67", "0-95"}, {"65", "64-67", "0-95"}, {"66", "64-67", "0-95"}, {"67", "64-67", "0-95"}, + {"68", "68-71", "0-95"}, {"69", "68-71", "0-95"}, {"70", "68-71", "0-95"}, {"71", "68-71", "0-95"}, + {"72", "72-75", "0-95"}, {"73", "72-75", "0-95"}, {"74", "72-75", "0-95"}, {"75", "72-75", "0-95"}, + {"76", "76-79", "0-95"}, {"77", "76-79", "0-95"}, {"78", "76-79", "0-95"}, {"79", "76-79", "0-95"}, + {"80", "80-83", "0-95"}, {"81", "80-83", "0-95"}, {"82", "80-83", "0-95"}, {"83", "80-83", "0-95"}, + {"84", "84-87", "0-95"}, {"85", "84-87", "0-95"}, {"86", "84-87", "0-95"}, {"87", "84-87", "0-95"}, + {"88", "88-91", "0-95"}, {"89", "88-91", "0-95"}, {"90", "88-91", "0-95"}, {"91", "88-91", "0-95"}, + {"92", "92-95", "0-95"}, {"93", "92-95", "0-95"}, {"94", "92-95", "0-95"}, {"95", "92-95", "0-95"}, + }, + { + {"0-95"}, + }, +}; LinuxCpuMapTestCase cache_2sockets_48cores_hyperthreading = { 96, 2, @@ -1092,6 +1178,7 @@ TEST_P(LinuxCpuMapCacheParserTests, LinuxCache) {} INSTANTIATE_TEST_SUITE_P(CPUMap, LinuxCpuMapCacheParserTests, testing::Values(cache_2sockets_104cores_hyperthreading, + cache_1sockets_96cores, cache_2sockets_48cores_hyperthreading, cache_2sockets_48cores_hyperthreading_1, cache_2sockets_24cores_hyperthreading, diff --git a/src/plugins/intel_cpu/tests/unit/streams_info/streams_e2e_test.cpp b/src/plugins/intel_cpu/tests/unit/streams_info/streams_e2e_test.cpp index 7174b616377456..6836e6e75fb8de 100644 --- a/src/plugins/intel_cpu/tests/unit/streams_info/streams_e2e_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/streams_info/streams_e2e_test.cpp @@ -553,8 +553,134 @@ StreamGenerateionTestCase generation_tput_2sockets_48cores_9 = { {{48, 48, 0, 0, -1, -1}, {24, 24, 0, 0, 0, 0}, {24, 24, 0, 0, 1, 1}}, {{24, MAIN_CORE_PROC, 1, 0, 0}, {24, MAIN_CORE_PROC, 1, 1, 1}}, }; +StreamGenerateionTestCase generation_latency_1sockets_96cores_pinning = { + 1, + false, + 0, + 0, + 0, + ov::hint::SchedulingCoreType::ANY_CORE, + false, + false, + true, + true, + ov::hint::PerformanceMode::LATENCY, + ov::intel_cpu::Config::LatencyThreadingMode::PER_SOCKET, + {{96, 0, 96, 0, 0, 0}}, + ov::hint::SchedulingCoreType::ANY_CORE, + false, + true, + ov::hint::PerformanceMode::LATENCY, + {{96, 0, 96, 0, 0, 0}}, + {{1, EFFICIENT_CORE_PROC, 96, 0, 0}}, +}; +StreamGenerateionTestCase generation_tput_1sockets_96cores_pinning = { + 1, + false, + 0, + 0, + 0, + ov::hint::SchedulingCoreType::ANY_CORE, + false, + false, + true, + true, + ov::hint::PerformanceMode::THROUGHPUT, + ov::intel_cpu::Config::LatencyThreadingMode::PER_SOCKET, + {{96, 0, 96, 0, 0, 0}}, + ov::hint::SchedulingCoreType::ANY_CORE, + false, + true, + ov::hint::PerformanceMode::THROUGHPUT, + {{96, 0, 96, 0, 0, 0}}, + {{24, EFFICIENT_CORE_PROC, 4, 0, 0}}, +}; +StreamGenerateionTestCase generation_tput_1sockets_96cores_2_pinning = { + 1, + false, + 0, + 0, + 0, + ov::hint::SchedulingCoreType::PCORE_ONLY, + true, + true, + true, + true, + ov::hint::PerformanceMode::THROUGHPUT, + ov::intel_cpu::Config::LatencyThreadingMode::PER_SOCKET, + {{96, 0, 96, 0, 0, 0}}, + ov::hint::SchedulingCoreType::ANY_CORE, + false, + true, + ov::hint::PerformanceMode::THROUGHPUT, + {{96, 0, 96, 0, 0, 0}}, + {{24, EFFICIENT_CORE_PROC, 4, 0, 0}}, +}; +StreamGenerateionTestCase generation_latency_1sockets_96cores_unpinning = { + 1, + false, + 0, + 0, + 0, + ov::hint::SchedulingCoreType::ANY_CORE, + false, + false, + true, + true, + 
ov::hint::PerformanceMode::LATENCY, + ov::intel_cpu::Config::LatencyThreadingMode::PER_SOCKET, + {{96, 0, 96, 0, 0, 0}}, + ov::hint::SchedulingCoreType::ANY_CORE, + false, + false, + ov::hint::PerformanceMode::LATENCY, + {{96, 0, 96, 0, 0, 0}}, + {{1, EFFICIENT_CORE_PROC, 96, 0, 0}}, +}; +StreamGenerateionTestCase generation_tput_1sockets_96cores_unpinning = { + 1, + false, + 0, + 0, + 0, + ov::hint::SchedulingCoreType::ANY_CORE, + false, + false, + false, + false, + ov::hint::PerformanceMode::THROUGHPUT, + ov::intel_cpu::Config::LatencyThreadingMode::PER_SOCKET, + {{96, 0, 96, 0, 0, 0}}, + ov::hint::SchedulingCoreType::ANY_CORE, + false, + false, + ov::hint::PerformanceMode::THROUGHPUT, + {{96, 0, 96, 0, 0, 0}}, + {{24, EFFICIENT_CORE_PROC, 4, 0, 0}}, +}; +StreamGenerateionTestCase generation_tput_1sockets_96cores_2_unpinning = { + 1, + false, + 0, + 0, + 0, + ov::hint::SchedulingCoreType::PCORE_ONLY, + true, + true, + false, + true, + ov::hint::PerformanceMode::THROUGHPUT, + ov::intel_cpu::Config::LatencyThreadingMode::PER_SOCKET, + {{96, 0, 96, 0, 0, 0}}, + ov::hint::SchedulingCoreType::ANY_CORE, + false, + false, + ov::hint::PerformanceMode::THROUGHPUT, + {{96, 0, 96, 0, 0, 0}}, + {{24, EFFICIENT_CORE_PROC, 4, 0, 0}}, +}; -#if defined (__linux__) || defined(_WIN32) +#if defined(__linux__) || defined(_WIN32) INSTANTIATE_TEST_SUITE_P(smoke_StreamsGeneration, StreamGenerationTests, ::testing::Values(generation_latency_1sockets_14cores_3, @@ -574,7 +700,10 @@ INSTANTIATE_TEST_SUITE_P(smoke_StreamsGeneration, generation_tput_2sockets_48cores_6, generation_tput_2sockets_48cores_7, generation_tput_2sockets_48cores_8, - generation_tput_2sockets_48cores_9)); + generation_tput_2sockets_48cores_9, + generation_latency_1sockets_96cores_pinning, + generation_tput_1sockets_96cores_pinning, + generation_tput_1sockets_96cores_2_pinning)); #else INSTANTIATE_TEST_SUITE_P(smoke_StreamsGeneration, StreamGenerationTests, @@ -595,7 +724,10 @@ INSTANTIATE_TEST_SUITE_P(smoke_StreamsGeneration, generation_tput_2sockets_48cores_6, generation_tput_2sockets_48cores_7, generation_tput_2sockets_48cores_8, - generation_tput_2sockets_48cores_9)); + generation_tput_2sockets_48cores_9, + generation_latency_1sockets_96cores_unpinning, + generation_tput_1sockets_96cores_unpinning, + generation_tput_1sockets_96cores_2_unpinning)); #endif } // namespace \ No newline at end of file From d07b2ccc5f44a70ab00625084a28965b9a1e6c94 Mon Sep 17 00:00:00 2001 From: Maxim Vafin Date: Tue, 10 Oct 2023 07:59:46 +0200 Subject: [PATCH 112/257] [PT FE] Return dynamic shape when static shape exist in graph (#20289) --- .../python/src/openvino/frontend/pytorch/ts_decoder.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/bindings/python/src/openvino/frontend/pytorch/ts_decoder.py b/src/bindings/python/src/openvino/frontend/pytorch/ts_decoder.py index 32e62084e89e41..b6caf22cfc7b68 100644 --- a/src/bindings/python/src/openvino/frontend/pytorch/ts_decoder.py +++ b/src/bindings/python/src/openvino/frontend/pytorch/ts_decoder.py @@ -169,7 +169,8 @@ def _get_known_type_for_value(self, pt_type): def get_shape_for_value(self, value: torch.Value): if value.isCompleteTensor(): - ps = PartialShape(value.type().sizes()) + # We avoid static shapes, they don't generalize on other inputs + ps = PartialShape([-1] * len(value.type().sizes())) return ps else: # TODO: Recognize types that we can represent as a nested constructs with objects from DecoderType From feaf05cc5f64af6d82898a851bd84710722456c6 Mon Sep 17 00:00:00 2001 From: 
Maxim Vafin Date: Tue, 10 Oct 2023 08:00:02 +0200 Subject: [PATCH 113/257] [PT FE] Support aten::max_poolnd_with_indices (#20322) --- src/frontends/pytorch/src/op/max_poolnd.cpp | 19 +++++- src/frontends/pytorch/src/op_table.cpp | 3 + .../layer_tests/pytorch_tests/test_pooling.py | 59 ++++++++++++++++++- 3 files changed, 77 insertions(+), 4 deletions(-) diff --git a/src/frontends/pytorch/src/op/max_poolnd.cpp b/src/frontends/pytorch/src/op/max_poolnd.cpp index 0d685f03e16019..30bf1546ec87e8 100644 --- a/src/frontends/pytorch/src/op/max_poolnd.cpp +++ b/src/frontends/pytorch/src/op/max_poolnd.cpp @@ -100,8 +100,23 @@ OutputVector translate_max_poolnd(const NodeContext& context) { std::fill_n(pads.begin(), pads.size(), 0); } - return { - context.mark_node(std::make_shared(input, strides, dilations, pads, pads, kernel, rounding_type))}; + auto res = context.mark_node(std::make_shared(input, + strides, + dilations, + pads, + pads, + kernel, + rounding_type, + PadType::EXPLICIT, + element::i64, + 2)); + if (context.get_output_size() == 2) { + auto out1 = res->output(0); + auto out2 = res->output(1); + return {out1, out2}; + } else { + return {res}; + } }; OutputVector translate_max_poolnd_fx(const NodeContext& context) { diff --git a/src/frontends/pytorch/src/op_table.cpp b/src/frontends/pytorch/src/op_table.cpp index 41a790d1ef2079..f9ab3d13ab9696 100644 --- a/src/frontends/pytorch/src/op_table.cpp +++ b/src/frontends/pytorch/src/op_table.cpp @@ -367,8 +367,11 @@ const std::map get_supported_ops_ts() { {"aten::max", op::translate_max}, {"aten::maximum", op::translate_maximum}, {"aten::max_pool1d", op::quantizable_op}, + {"aten::max_pool1d_with_indices", op::quantizable_op}, {"aten::max_pool2d", op::quantizable_op}, + {"aten::max_pool2d_with_indices", op::quantizable_op}, {"aten::max_pool3d", op::quantizable_op}, + {"aten::max_pool3d_with_indices", op::quantizable_op}, {"aten::mean", op::quantizable_op}, {"aten::meshgrid", op::translate_meshgrid}, {"aten::min", op::translate_min}, diff --git a/tests/layer_tests/pytorch_tests/test_pooling.py b/tests/layer_tests/pytorch_tests/test_pooling.py index bd5e7b5345396c..3f4c94db6d45d0 100644 --- a/tests/layer_tests/pytorch_tests/test_pooling.py +++ b/tests/layer_tests/pytorch_tests/test_pooling.py @@ -4,6 +4,7 @@ import pytest from pytorch_layer_test_class import PytorchLayerTest +import numpy as np d2_params = [{'kernel_size': [3, 3], 'stride': 1, 'padding': 0}, {'kernel_size': [3, 3], 'stride': [1, 1], 'padding': 1}, @@ -95,13 +96,31 @@ def forward(self, x): return torch.nn.functional.max_pool1d(x, self.kernel_size, self.stride, self.padding, self.dilation, self.ceil_mode) + class aten_max_pool2d_indices(aten_max_pooling_base): + def forward(self, x): + return torch.nn.functional.max_pool2d(x, self.kernel_size, self.stride, self.padding, self.dilation, + self.ceil_mode, return_indices=True) + + class aten_max_pool3d_indices(aten_max_pooling_base): + def forward(self, x): + return torch.nn.functional.max_pool3d(x, self.kernel_size, self.stride, self.padding, self.dilation, + self.ceil_mode, return_indices=True) + + class aten_max_pool1d_indices(aten_max_pooling_base): + def forward(self, x): + return torch.nn.functional.max_pool1d(x, self.kernel_size, self.stride, self.padding, self.dilation, + self.ceil_mode, return_indices=True) + ops = { "max_pool1d": aten_max_pool1d, "max_pool2d": aten_max_pool2d, "max_pool3d": aten_max_pool3d, "avg_pool1d": aten_avg_pool1d, "avg_pool2d": aten_avg_pool2d, - "avg_pool3d": aten_avg_pool3d + "avg_pool3d": 
aten_avg_pool3d, + "max_pool1d_with_indices": aten_max_pool1d_indices, + "max_pool2d_with_indices": aten_max_pool2d_indices, + "max_pool3d_with_indices": aten_max_pool3d_indices, } ref_net = None @@ -160,7 +179,7 @@ def test_max_pool1d(self, params, ceil_mode, dilation, ie_device, precision, ir_ @pytest.mark.parametrize("dilation", [1, 2]) @pytest.mark.nightly @pytest.mark.precommit - def test_max_pool2d(self, params, ceil_mode, dilation, ie_device, precision, ir_version): + def test_max_pool2d(self, params, ceil_mode, dilation, ie_device, precision, ir_version): to_trace = False if params["stride"] == []: to_trace = True @@ -175,3 +194,39 @@ def test_max_pool2d(self, params, ceil_mode, dilation, ie_device, precision, ir_ def test_max_pool3d(self, params, ceil_mode, dilation, ie_device, precision, ir_version): self._test(*self.create_model("max_pool3d", **params, ceil_mode=ceil_mode, dilation=dilation), ie_device, precision, ir_version, kwargs_to_prepare_input={'ndim': 5}, dynamic_shapes=False) + + @pytest.mark.parametrize("params", d1_params) + @pytest.mark.parametrize("ceil_mode", [True, False]) + @pytest.mark.parametrize("dilation", [1, 2]) + @pytest.mark.nightly + @pytest.mark.precommit + def test_max_pool1d_indices(self, params, ceil_mode, dilation, ie_device, precision, ir_version): + if ceil_mode and (np.array(params["padding"]).any() != 0): + pytest.skip("ticket 122418") + self._test(*self.create_model("max_pool1d_with_indices", **params, ceil_mode=ceil_mode, dilation=dilation), + ie_device, precision, ir_version, kwargs_to_prepare_input={'ndim': 3}, dynamic_shapes=False) + + @pytest.mark.parametrize("params", d2_params + d2_params_corner_case) + @pytest.mark.parametrize("ceil_mode", [True, False]) + @pytest.mark.parametrize("dilation", [1, 2]) + @pytest.mark.nightly + @pytest.mark.precommit + def test_max_pool2d_indices(self, params, ceil_mode, dilation, ie_device, precision, ir_version): + if ceil_mode and (np.array(params["padding"]).any() != 0): + pytest.skip("ticket 122418") + to_trace = False + if params["stride"] == []: + to_trace = True + self._test(*self.create_model("max_pool2d_with_indices", **params, ceil_mode=ceil_mode, dilation=dilation), + ie_device, precision, ir_version, dynamic_shapes=False, trace_model=to_trace) + + @pytest.mark.parametrize("params", d3_params) + @pytest.mark.parametrize("ceil_mode", [True, False]) + @pytest.mark.parametrize("dilation", [1, 2]) + @pytest.mark.nightly + @pytest.mark.precommit + def test_max_pool3d_indices(self, params, ceil_mode, dilation, ie_device, precision, ir_version): + if ceil_mode and (np.array(params["padding"]).any() != 0): + pytest.skip("ticket 122418") + self._test(*self.create_model("max_pool3d_with_indices", **params, ceil_mode=ceil_mode, dilation=dilation), + ie_device, precision, ir_version, kwargs_to_prepare_input={'ndim': 5}, dynamic_shapes=False) From 67a62186ee5ec33a1e3b061104a3d3f3675faefd Mon Sep 17 00:00:00 2001 From: Ekaterina Aidova Date: Tue, 10 Oct 2023 10:16:26 +0400 Subject: [PATCH 114/257] support aten::channel_shuffle (#20240) * support aten::channel_shuffle * remove getting rank --- .../pytorch/src/op/pixel_shuffle.cpp | 31 +++++++++++++ src/frontends/pytorch/src/op_table.cpp | 2 + .../pytorch_tests/test_pixel_shuffle.py | 43 ++++++++++++++++--- 3 files changed, 71 insertions(+), 5 deletions(-) diff --git a/src/frontends/pytorch/src/op/pixel_shuffle.cpp b/src/frontends/pytorch/src/op/pixel_shuffle.cpp index e2d3e8c6390c55..dec771fe6a4f57 100644 --- a/src/frontends/pytorch/src/op/pixel_shuffle.cpp 
+++ b/src/frontends/pytorch/src/op/pixel_shuffle.cpp @@ -6,6 +6,7 @@ #include "openvino/op/add.hpp" #include "openvino/op/concat.hpp" #include "openvino/op/constant.hpp" +#include "openvino/op/divide.hpp" #include "openvino/op/gather.hpp" #include "openvino/op/multiply.hpp" #include "openvino/op/range.hpp" @@ -15,6 +16,7 @@ #include "openvino/op/split.hpp" #include "openvino/op/squeeze.hpp" #include "openvino/op/transpose.hpp" +#include "openvino/op/unsqueeze.hpp" #include "utils.hpp" namespace ov { @@ -67,6 +69,35 @@ OutputVector translate_pixel_shuffle(const NodeContext& context) { return {context.mark_node(std::make_shared(transpose, shape_after, false))}; }; +OutputVector translate_channel_shuffle(const NodeContext& context) { + // aten::channel_shuffle(Tensor self, int groups) -> Tensor + num_inputs_check(context, 2, 2); + auto x = context.get_input(0); + auto groups = context.get_input(1); + auto neg_1 = context.mark_node(v0::Constant::create(element::i32, Shape{1}, {-1})); + auto zero = context.mark_node(v0::Constant::create(element::i32, Shape{}, {0})); + auto one = context.mark_node(v0::Constant::create(element::i32, Shape{}, {1})); + auto shape = context.mark_node(std::make_shared(x, element::i32)); + // PyTorch realization uses assumption that channels dim is always 1 + auto indices = context.mark_node(v0::Constant::create(element::i32, Shape{2}, {0, 1})); + auto dims = context.mark_node(std::make_shared(shape, indices, zero)); + auto dims_splitted = context.mark_node(std::make_shared(dims, zero, 2)); + auto c = dims_splitted->output(1); + auto n = dims_splitted->output(0); + groups = context.mark_node(std::make_shared(groups, element::i32)); + auto k = context.mark_node(std::make_shared(c, groups, true)); + auto g = context.mark_node(std::make_shared(groups, zero)); + // 1. Reshape input [N, G, K=C/G, -1] + auto reshape_indices = context.mark_node(std::make_shared(OutputVector{n, g, k, neg_1}, 0)); + x = context.mark_node(std::make_shared(x, reshape_indices, false)); + // 2. Transpose to [N, K, G, -1] + auto permute_indices = context.mark_node(v0::Constant::create(element::i32, Shape{4}, {0, 2, 1, 3})); + auto y = context.mark_node(std::make_shared(x, permute_indices)); + // 3. 
Reshape back to original shape + auto result = context.mark_node(std::make_shared(y, shape, false)); + return {result}; +}; + } // namespace op } // namespace pytorch } // namespace frontend diff --git a/src/frontends/pytorch/src/op_table.cpp b/src/frontends/pytorch/src/op_table.cpp index f9ab3d13ab9696..c420a1b16e10f4 100644 --- a/src/frontends/pytorch/src/op_table.cpp +++ b/src/frontends/pytorch/src/op_table.cpp @@ -37,6 +37,7 @@ OP_CONVERTER(translate_bitwise_not); OP_CONVERTER(translate_bitwise_or); OP_CONVERTER(translate_cat); OP_CONVERTER(translate_cdist); +OP_CONVERTER(translate_channel_shuffle); OP_CONVERTER(translate_clamp); OP_CONVERTER(translate_constant); OP_CONVERTER(translate_conv_transposend); @@ -263,6 +264,7 @@ const std::map get_supported_ops_ts() { {"aten::cdist", op::translate_cdist}, {"aten::ceil", op::translate_1to1_match_1_inputs}, {"aten::ceil_", op::inplace_op>}, + {"aten::channel_shuffle", op::translate_channel_shuffle}, {"aten::clamp", op::translate_clamp}, {"aten::clamp_max", op::translate_1to1_match_2_inputs}, {"aten::clamp_min", op::translate_1to1_match_2_inputs}, diff --git a/tests/layer_tests/pytorch_tests/test_pixel_shuffle.py b/tests/layer_tests/pytorch_tests/test_pixel_shuffle.py index 8b71c9f55cc824..3a47d1abd397e8 100644 --- a/tests/layer_tests/pytorch_tests/test_pixel_shuffle.py +++ b/tests/layer_tests/pytorch_tests/test_pixel_shuffle.py @@ -7,7 +7,7 @@ from pytorch_layer_test_class import PytorchLayerTest -class TestOneHot(PytorchLayerTest): +class TestPixelShuffle(PytorchLayerTest): def _prepare_input(self): return (np.random.randn(*self.shape).astype(np.float32),) @@ -15,21 +15,54 @@ def create_model(self, upscale_factor): import torch import torch.nn.functional as F - class aten_one_hot(torch.nn.Module): + class aten_pixel_shuffle(torch.nn.Module): def __init__(self, upscale_factor): - super(aten_one_hot, self).__init__() + super(aten_pixel_shuffle, self).__init__() self.upscale_factor = upscale_factor def forward(self, x): return F.pixel_shuffle(x, self.upscale_factor) - return aten_one_hot(upscale_factor), None, "aten::pixel_shuffle" + return aten_pixel_shuffle(upscale_factor), None, "aten::pixel_shuffle" @pytest.mark.parametrize(("upscale_factor,shape"), [(3, [1, 9, 4, 4]), (2, [1, 2, 3, 8, 4, 4]),]) @pytest.mark.nightly @pytest.mark.precommit - def test_one_hot(self, upscale_factor, shape, ie_device, precision, ir_version): + def test_pixel_shuffle(self, upscale_factor, shape, ie_device, precision, ir_version): self.shape = shape self._test(*self.create_model(upscale_factor), ie_device, precision, ir_version) + + +class TestChannelShuffle(PytorchLayerTest): + def _prepare_input(self): + return (np.random.randn(*self.shape).astype(np.float32),) + + def create_model(self, groups): + import torch + import torch.nn.functional as F + + class aten_channel_shuffle(torch.nn.Module): + def __init__(self, upscale_factor): + super(aten_channel_shuffle, self).__init__() + self.upscale_factor = upscale_factor + + def forward(self, x): + return F.channel_shuffle(x, self.upscale_factor) + + return aten_channel_shuffle(groups), None, "aten::channel_shuffle" + + @pytest.mark.parametrize(("groups,shape"), [ + (3, [1, 9, 4, 4]), + (2, [1, 8, 8, 4, 4]), + (4, [4, 4, 2]), + (5, [4, 10, 2, 10, 1, 1]), + (1, [2, 3, 4]) + ]) + @pytest.mark.nightly + @pytest.mark.precommit + def test_channel_shuffle(self, groups, shape, ie_device, precision, ir_version): + self.shape = shape + self._test(*self.create_model(groups), + ie_device, precision, ir_version) \ No newline at 
end of file From c417d154322d6ebb28e4c5e1a89f24f5a699d0f1 Mon Sep 17 00:00:00 2001 From: Anton Voronov Date: Tue, 10 Oct 2023 10:24:38 +0400 Subject: [PATCH 115/257] [CPU] FullyConnected: sparse weights fix (#20117) --- .../intel_cpu/src/nodes/fullyconnected.cpp | 25 +++++++++++++------ 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/src/plugins/intel_cpu/src/nodes/fullyconnected.cpp b/src/plugins/intel_cpu/src/nodes/fullyconnected.cpp index fcb112c368131c..117290ebcb0e52 100644 --- a/src/plugins/intel_cpu/src/nodes/fullyconnected.cpp +++ b/src/plugins/intel_cpu/src/nodes/fullyconnected.cpp @@ -52,6 +52,7 @@ struct FCKey { dnnl::primitive_attr attr; impl_desc_type implType; bool useConv1x1; + bool useSparseWeights; size_t hash() const; bool operator==(const FCKey& rhs) const; @@ -72,6 +73,7 @@ size_t FCKey::hash() const { seed = hash_combine(seed, get_attr_hash(*attr.get())); seed = hash_combine(seed, implType); seed = hash_combine(seed, useConv1x1); + seed = hash_combine(seed, useSparseWeights); return seed; } @@ -90,7 +92,7 @@ bool FCKey::operator==(const FCKey &rhs) const { retVal = retVal && out && rhs.out && out->getDnnlDesc() == rhs.out->getDnnlDesc(); } retVal = retVal && *attr.get() == *rhs.attr.get() && - implType == rhs.implType && useConv1x1 == rhs.useConv1x1; + implType == rhs.implType && useConv1x1 == rhs.useConv1x1 && useSparseWeights == rhs.useSparseWeights; return retVal; } @@ -416,15 +418,20 @@ static dnnl::primitive_desc createPrimitiveDesc(const FCKey& key, const dnnl::en auto normalizedOutDims = { outDims[0] * outDims[1], outDims[2] }; outDesc = outDesc.reshape(normalizedOutDims); } - auto wghDescAny = dnnl::memory::desc(DnnlExtensionUtils::convertToDnnlDims(key.inp1->getShape().getStaticDims()), - key.inp1->getDataType(), memory::format_tag::any); + dnnl::memory::desc weiDesc; + if (key.useSparseWeights) { + weiDesc = key.inp1->getDnnlDesc(); + } else { + weiDesc = dnnl::memory::desc(DnnlExtensionUtils::convertToDnnlDims(key.inp1->getShape().getStaticDims()), + key.inp1->getDataType(), memory::format_tag::any); + } dnnl::inner_product_forward::primitive_desc prim_desc; if (key.bias) { prim_desc = dnnl::inner_product_forward::primitive_desc( engine, dnnl::prop_kind::forward_inference, inDesc, - wghDescAny, + weiDesc, key.bias->getDnnlDesc(), outDesc, key.attr); @@ -433,7 +440,7 @@ static dnnl::primitive_desc createPrimitiveDesc(const FCKey& key, const dnnl::en engine, dnnl::prop_kind::forward_inference, inDesc, - wghDescAny, + weiDesc, outDesc, key.attr); } @@ -542,7 +549,8 @@ void FullyConnected::prepareParams() { outDesc, attr, implementationTypeIP, - useConv1x1}; + useConv1x1, + useSparseWeights}; auto& engine = getEngine(); @@ -597,7 +605,8 @@ void FullyConnected::prepareParams() { // changed shapes may also cause the kernel type changed selected_pd->setImplementationType(execPtr->getImplementationType()); // WA: We update implType to know whether weights decompression was used inside the kernel - if (selected_pd->getImplementationType() == ov::intel_cpu::brgemm_avx512_amx && useSparseWeights) { + if (selected_pd->getImplementationType() == ov::intel_cpu::brgemm_avx512_amx && + execPtr->getDnnlWeightDesc().get_format_kind() == memory::format_kind::sparsed) { selected_pd->setImplementationType(ov::intel_cpu::brgemm_sparse_avx512_amx); } // maybe expected 1x1 conv is not created, update the flag depends on the real type @@ -960,7 +969,7 @@ std::shared_ptr FullyConnected::getSrcMemDesc(const dnnl::primitive_ if (getInputShapeAtPort(idx).getRank() == 3 // 
report original plain layout for weight since it needs to be reordered dynamically at runtime - || idx == 1) { + || (idx == 1 && !useSparseWeights)) { return std::make_shared( DnnlExtensionUtils::DataTypeToIEPrecision(desc.get_data_type()), getInputShapeAtPort(idx)); } From 1e05949888a93aa215a2246311d059957f2d110f Mon Sep 17 00:00:00 2001 From: Sebastian Golebiewski Date: Tue, 10 Oct 2023 08:34:53 +0200 Subject: [PATCH 116/257] Changing file structure of Learn Openvino section (#20337) --- docs/{Documentation => articles_en}/learn_openvino.md | 0 .../learn_openvino/openvino_samples.md} | 0 .../openvino_samples/c_sample_hello_classification.md | 0 .../c_sample_hello_nv12_input_classification.md | 0 .../learn_openvino/openvino_samples/cpp_benchmark_tool.md | 0 .../openvino_samples/cpp_sample_automatic_speech_recognition.md | 0 .../openvino_samples/cpp_sample_hello_classification.md | 0 .../cpp_sample_hello_nv12_input_classification.md | 0 .../openvino_samples/cpp_sample_hello_query_device.md | 0 .../openvino_samples/cpp_sample_hello_reshape_ssd.md | 0 .../openvino_samples/cpp_sample_image_classification_async.md | 0 .../openvino_samples/cpp_sample_model_creation.md | 0 .../openvino_samples/cpp_sample_sync_benchmark.md | 0 .../openvino_samples/cpp_sample_throughput_benchmark.md | 0 .../learn_openvino/openvino_samples}/get_started_demos.md | 0 .../learn_openvino/openvino_samples/python_benchmark_tool.md | 0 .../python_sample_automatic_speech_recognition.md | 0 .../openvino_samples/python_sample_bert_benchmark.md | 0 .../openvino_samples/python_sample_hello_classification.md | 0 .../openvino_samples/python_sample_hello_query_device.md | 0 .../openvino_samples/python_sample_hello_reshape_ssd.md | 0 .../python_sample_image_classification_async.md | 0 .../openvino_samples/python_sample_model_creation.md | 0 .../openvino_samples/python_sample_sync_benchmark.md | 0 .../openvino_samples/python_sample_throughput_benchmark.md | 0 docs/{ => articles_en/learn_openvino}/tutorials.md | 0 .../learn_openvino/tutorials}/notebooks-installation.md | 0 docs/nbdoc/nbdoc.py | 2 +- docs/notebooks/247-code-language-id-with-output.rst | 2 +- 29 files changed, 2 insertions(+), 2 deletions(-) rename docs/{Documentation => articles_en}/learn_openvino.md (100%) rename docs/{OV_Runtime_UG/Samples_Overview.md => articles_en/learn_openvino/openvino_samples.md} (100%) rename samples/c/hello_classification/README.md => docs/articles_en/learn_openvino/openvino_samples/c_sample_hello_classification.md (100%) rename samples/c/hello_nv12_input_classification/README.md => docs/articles_en/learn_openvino/openvino_samples/c_sample_hello_nv12_input_classification.md (100%) rename samples/cpp/benchmark_app/README.md => docs/articles_en/learn_openvino/openvino_samples/cpp_benchmark_tool.md (100%) rename samples/cpp/speech_sample/README.md => docs/articles_en/learn_openvino/openvino_samples/cpp_sample_automatic_speech_recognition.md (100%) rename samples/cpp/hello_classification/README.md => docs/articles_en/learn_openvino/openvino_samples/cpp_sample_hello_classification.md (100%) rename samples/cpp/hello_nv12_input_classification/README.md => docs/articles_en/learn_openvino/openvino_samples/cpp_sample_hello_nv12_input_classification.md (100%) rename samples/cpp/hello_query_device/README.md => docs/articles_en/learn_openvino/openvino_samples/cpp_sample_hello_query_device.md (100%) rename samples/cpp/hello_reshape_ssd/README.md => docs/articles_en/learn_openvino/openvino_samples/cpp_sample_hello_reshape_ssd.md (100%) rename 
samples/cpp/classification_sample_async/README.md => docs/articles_en/learn_openvino/openvino_samples/cpp_sample_image_classification_async.md (100%) rename samples/cpp/model_creation_sample/README.md => docs/articles_en/learn_openvino/openvino_samples/cpp_sample_model_creation.md (100%) rename samples/cpp/benchmark/sync_benchmark/README.md => docs/articles_en/learn_openvino/openvino_samples/cpp_sample_sync_benchmark.md (100%) rename samples/cpp/benchmark/throughput_benchmark/README.md => docs/articles_en/learn_openvino/openvino_samples/cpp_sample_throughput_benchmark.md (100%) rename docs/{get_started => articles_en/learn_openvino/openvino_samples}/get_started_demos.md (100%) rename tools/benchmark_tool/README.md => docs/articles_en/learn_openvino/openvino_samples/python_benchmark_tool.md (100%) rename samples/python/speech_sample/README.md => docs/articles_en/learn_openvino/openvino_samples/python_sample_automatic_speech_recognition.md (100%) rename samples/python/benchmark/bert_benchmark/README.md => docs/articles_en/learn_openvino/openvino_samples/python_sample_bert_benchmark.md (100%) rename samples/python/hello_classification/README.md => docs/articles_en/learn_openvino/openvino_samples/python_sample_hello_classification.md (100%) rename samples/python/hello_query_device/README.md => docs/articles_en/learn_openvino/openvino_samples/python_sample_hello_query_device.md (100%) rename samples/python/hello_reshape_ssd/README.md => docs/articles_en/learn_openvino/openvino_samples/python_sample_hello_reshape_ssd.md (100%) rename samples/python/classification_sample_async/README.md => docs/articles_en/learn_openvino/openvino_samples/python_sample_image_classification_async.md (100%) rename samples/python/model_creation_sample/README.md => docs/articles_en/learn_openvino/openvino_samples/python_sample_model_creation.md (100%) rename samples/python/benchmark/sync_benchmark/README.md => docs/articles_en/learn_openvino/openvino_samples/python_sample_sync_benchmark.md (100%) rename samples/python/benchmark/throughput_benchmark/README.md => docs/articles_en/learn_openvino/openvino_samples/python_sample_throughput_benchmark.md (100%) rename docs/{ => articles_en/learn_openvino}/tutorials.md (100%) rename docs/{ => articles_en/learn_openvino/tutorials}/notebooks-installation.md (100%) diff --git a/docs/Documentation/learn_openvino.md b/docs/articles_en/learn_openvino.md similarity index 100% rename from docs/Documentation/learn_openvino.md rename to docs/articles_en/learn_openvino.md diff --git a/docs/OV_Runtime_UG/Samples_Overview.md b/docs/articles_en/learn_openvino/openvino_samples.md similarity index 100% rename from docs/OV_Runtime_UG/Samples_Overview.md rename to docs/articles_en/learn_openvino/openvino_samples.md diff --git a/samples/c/hello_classification/README.md b/docs/articles_en/learn_openvino/openvino_samples/c_sample_hello_classification.md similarity index 100% rename from samples/c/hello_classification/README.md rename to docs/articles_en/learn_openvino/openvino_samples/c_sample_hello_classification.md diff --git a/samples/c/hello_nv12_input_classification/README.md b/docs/articles_en/learn_openvino/openvino_samples/c_sample_hello_nv12_input_classification.md similarity index 100% rename from samples/c/hello_nv12_input_classification/README.md rename to docs/articles_en/learn_openvino/openvino_samples/c_sample_hello_nv12_input_classification.md diff --git a/samples/cpp/benchmark_app/README.md b/docs/articles_en/learn_openvino/openvino_samples/cpp_benchmark_tool.md similarity index 
100% rename from samples/cpp/benchmark_app/README.md rename to docs/articles_en/learn_openvino/openvino_samples/cpp_benchmark_tool.md diff --git a/samples/cpp/speech_sample/README.md b/docs/articles_en/learn_openvino/openvino_samples/cpp_sample_automatic_speech_recognition.md similarity index 100% rename from samples/cpp/speech_sample/README.md rename to docs/articles_en/learn_openvino/openvino_samples/cpp_sample_automatic_speech_recognition.md diff --git a/samples/cpp/hello_classification/README.md b/docs/articles_en/learn_openvino/openvino_samples/cpp_sample_hello_classification.md similarity index 100% rename from samples/cpp/hello_classification/README.md rename to docs/articles_en/learn_openvino/openvino_samples/cpp_sample_hello_classification.md diff --git a/samples/cpp/hello_nv12_input_classification/README.md b/docs/articles_en/learn_openvino/openvino_samples/cpp_sample_hello_nv12_input_classification.md similarity index 100% rename from samples/cpp/hello_nv12_input_classification/README.md rename to docs/articles_en/learn_openvino/openvino_samples/cpp_sample_hello_nv12_input_classification.md diff --git a/samples/cpp/hello_query_device/README.md b/docs/articles_en/learn_openvino/openvino_samples/cpp_sample_hello_query_device.md similarity index 100% rename from samples/cpp/hello_query_device/README.md rename to docs/articles_en/learn_openvino/openvino_samples/cpp_sample_hello_query_device.md diff --git a/samples/cpp/hello_reshape_ssd/README.md b/docs/articles_en/learn_openvino/openvino_samples/cpp_sample_hello_reshape_ssd.md similarity index 100% rename from samples/cpp/hello_reshape_ssd/README.md rename to docs/articles_en/learn_openvino/openvino_samples/cpp_sample_hello_reshape_ssd.md diff --git a/samples/cpp/classification_sample_async/README.md b/docs/articles_en/learn_openvino/openvino_samples/cpp_sample_image_classification_async.md similarity index 100% rename from samples/cpp/classification_sample_async/README.md rename to docs/articles_en/learn_openvino/openvino_samples/cpp_sample_image_classification_async.md diff --git a/samples/cpp/model_creation_sample/README.md b/docs/articles_en/learn_openvino/openvino_samples/cpp_sample_model_creation.md similarity index 100% rename from samples/cpp/model_creation_sample/README.md rename to docs/articles_en/learn_openvino/openvino_samples/cpp_sample_model_creation.md diff --git a/samples/cpp/benchmark/sync_benchmark/README.md b/docs/articles_en/learn_openvino/openvino_samples/cpp_sample_sync_benchmark.md similarity index 100% rename from samples/cpp/benchmark/sync_benchmark/README.md rename to docs/articles_en/learn_openvino/openvino_samples/cpp_sample_sync_benchmark.md diff --git a/samples/cpp/benchmark/throughput_benchmark/README.md b/docs/articles_en/learn_openvino/openvino_samples/cpp_sample_throughput_benchmark.md similarity index 100% rename from samples/cpp/benchmark/throughput_benchmark/README.md rename to docs/articles_en/learn_openvino/openvino_samples/cpp_sample_throughput_benchmark.md diff --git a/docs/get_started/get_started_demos.md b/docs/articles_en/learn_openvino/openvino_samples/get_started_demos.md similarity index 100% rename from docs/get_started/get_started_demos.md rename to docs/articles_en/learn_openvino/openvino_samples/get_started_demos.md diff --git a/tools/benchmark_tool/README.md b/docs/articles_en/learn_openvino/openvino_samples/python_benchmark_tool.md similarity index 100% rename from tools/benchmark_tool/README.md rename to docs/articles_en/learn_openvino/openvino_samples/python_benchmark_tool.md 
diff --git a/samples/python/speech_sample/README.md b/docs/articles_en/learn_openvino/openvino_samples/python_sample_automatic_speech_recognition.md similarity index 100% rename from samples/python/speech_sample/README.md rename to docs/articles_en/learn_openvino/openvino_samples/python_sample_automatic_speech_recognition.md diff --git a/samples/python/benchmark/bert_benchmark/README.md b/docs/articles_en/learn_openvino/openvino_samples/python_sample_bert_benchmark.md similarity index 100% rename from samples/python/benchmark/bert_benchmark/README.md rename to docs/articles_en/learn_openvino/openvino_samples/python_sample_bert_benchmark.md diff --git a/samples/python/hello_classification/README.md b/docs/articles_en/learn_openvino/openvino_samples/python_sample_hello_classification.md similarity index 100% rename from samples/python/hello_classification/README.md rename to docs/articles_en/learn_openvino/openvino_samples/python_sample_hello_classification.md diff --git a/samples/python/hello_query_device/README.md b/docs/articles_en/learn_openvino/openvino_samples/python_sample_hello_query_device.md similarity index 100% rename from samples/python/hello_query_device/README.md rename to docs/articles_en/learn_openvino/openvino_samples/python_sample_hello_query_device.md diff --git a/samples/python/hello_reshape_ssd/README.md b/docs/articles_en/learn_openvino/openvino_samples/python_sample_hello_reshape_ssd.md similarity index 100% rename from samples/python/hello_reshape_ssd/README.md rename to docs/articles_en/learn_openvino/openvino_samples/python_sample_hello_reshape_ssd.md diff --git a/samples/python/classification_sample_async/README.md b/docs/articles_en/learn_openvino/openvino_samples/python_sample_image_classification_async.md similarity index 100% rename from samples/python/classification_sample_async/README.md rename to docs/articles_en/learn_openvino/openvino_samples/python_sample_image_classification_async.md diff --git a/samples/python/model_creation_sample/README.md b/docs/articles_en/learn_openvino/openvino_samples/python_sample_model_creation.md similarity index 100% rename from samples/python/model_creation_sample/README.md rename to docs/articles_en/learn_openvino/openvino_samples/python_sample_model_creation.md diff --git a/samples/python/benchmark/sync_benchmark/README.md b/docs/articles_en/learn_openvino/openvino_samples/python_sample_sync_benchmark.md similarity index 100% rename from samples/python/benchmark/sync_benchmark/README.md rename to docs/articles_en/learn_openvino/openvino_samples/python_sample_sync_benchmark.md diff --git a/samples/python/benchmark/throughput_benchmark/README.md b/docs/articles_en/learn_openvino/openvino_samples/python_sample_throughput_benchmark.md similarity index 100% rename from samples/python/benchmark/throughput_benchmark/README.md rename to docs/articles_en/learn_openvino/openvino_samples/python_sample_throughput_benchmark.md diff --git a/docs/tutorials.md b/docs/articles_en/learn_openvino/tutorials.md similarity index 100% rename from docs/tutorials.md rename to docs/articles_en/learn_openvino/tutorials.md diff --git a/docs/notebooks-installation.md b/docs/articles_en/learn_openvino/tutorials/notebooks-installation.md similarity index 100% rename from docs/notebooks-installation.md rename to docs/articles_en/learn_openvino/tutorials/notebooks-installation.md diff --git a/docs/nbdoc/nbdoc.py b/docs/nbdoc/nbdoc.py index bf6c6040b809ab..a890c13c0f2f55 100644 --- a/docs/nbdoc/nbdoc.py +++ b/docs/nbdoc/nbdoc.py @@ -166,7 +166,7 @@ def 
main(): sourcedir = args.sourcedir outdir = args.outdir - main_tutorials_file = Path('../../docs/tutorials.md').resolve(strict=True) + main_tutorials_file = Path('../../docs/articles_en/learn_openvino/tutorials.md').resolve(strict=True) add_glob_directive(main_tutorials_file) if args.download: diff --git a/docs/notebooks/247-code-language-id-with-output.rst b/docs/notebooks/247-code-language-id-with-output.rst index 2d0c9d3019b418..61eab5b121da3b 100644 --- a/docs/notebooks/247-code-language-id-with-output.rst +++ b/docs/notebooks/247-code-language-id-with-output.rst @@ -69,7 +69,7 @@ will allow to automatically convert models to the OpenVINO™ IR format. Install prerequisites ~~~~~~~~~~~~~~~~~~~~~ -First, complete the `repository installation steps <../notebooks_installation.html>`__. +First, complete the :doc:`repository installation steps `. Then, the following cell will install: - HuggingFace Optimum with OpenVINO support - HuggingFace Evaluate to benchmark results From ef0c10df6c9501873d3ee7467bcb3295a2a2502e Mon Sep 17 00:00:00 2001 From: Mateusz Mikolajczyk Date: Tue, 10 Oct 2023 09:04:06 +0200 Subject: [PATCH 117/257] [Opset13][pyAPI] Python API binary BitwiseAnd-13, BitwiseOr-13, BitwiseXor-13 (#20261) * add xor * Add and and or * Add ch srequested in review --- .../src/openvino/runtime/opset13/__init__.py | 3 + .../src/openvino/runtime/opset13/ops.py | 72 ++++++++++++++++++- .../tests/test_graph/test_ops_binary.py | 52 +++++++++++++- 3 files changed, 125 insertions(+), 2 deletions(-) diff --git a/src/bindings/python/src/openvino/runtime/opset13/__init__.py b/src/bindings/python/src/openvino/runtime/opset13/__init__.py index 66d2b3e9d46096..bddc3e1aebbb40 100644 --- a/src/bindings/python/src/openvino/runtime/opset13/__init__.py +++ b/src/bindings/python/src/openvino/runtime/opset13/__init__.py @@ -18,6 +18,9 @@ from openvino.runtime.opset5.ops import batch_norm_inference from openvino.runtime.opset2.ops import batch_to_space from openvino.runtime.opset1.ops import binary_convolution +from openvino.runtime.opset13.ops import bitwise_and +from openvino.runtime.opset13.ops import bitwise_or +from openvino.runtime.opset13.ops import bitwise_xor from openvino.runtime.opset3.ops import broadcast from openvino.runtime.opset3.ops import bucketize from openvino.runtime.opset1.ops import ceiling diff --git a/src/bindings/python/src/openvino/runtime/opset13/ops.py b/src/bindings/python/src/openvino/runtime/opset13/ops.py index f50d3cd91edaad..4840beb69127bc 100644 --- a/src/bindings/python/src/openvino/runtime/opset13/ops.py +++ b/src/bindings/python/src/openvino/runtime/opset13/ops.py @@ -8,7 +8,7 @@ from openvino.runtime import Node from openvino.runtime.opset_utils import _get_node_factory -from openvino.runtime.utils.decorators import nameable_op +from openvino.runtime.utils.decorators import binary_op, nameable_op from openvino.runtime.utils.types import ( NodeInput, as_nodes, @@ -19,6 +19,76 @@ # -------------------------------------------- ops ------------------------------------------------ +@binary_op +def bitwise_and( + left_node: NodeInput, + right_node: NodeInput, + auto_broadcast: str = "NUMPY", + name: Optional[str] = None, +) -> Node: + """Return node which performs bitwise AND operation on input nodes element-wise. + + For boolean input tensors, operator is equivalent to logical_and. + + :param left_node: Tensor of integer or boolean datatype providing data. + :param right_node: Tensor of integer or boolean datatype providing data. 
+ :param auto_broadcast: The type of broadcasting specifies rules used for auto-broadcasting of input tensors. Defaults to “NUMPY”. + :param name: The optional new name for output node. + :return: The node performing bitwise AND operation on input nodes corresponding elements. + """ + return _get_node_factory_opset13().create( + "BitwiseAnd", + [left_node, right_node], + {"auto_broadcast": auto_broadcast.upper()}, + ) + + +@binary_op +def bitwise_or( + left_node: NodeInput, + right_node: NodeInput, + auto_broadcast: str = "NUMPY", + name: Optional[str] = None, +) -> Node: + """Return node which performs bitwise OR operation on input nodes element-wise. + + For boolean input tensors, operator is equivalent to logical_or. + + :param left_node: Tensor of integer or boolean datatype providing data. + :param right_node: Tensor of integer or boolean datatype providing data. + :param auto_broadcast: The type of broadcasting specifies rules used for auto-broadcasting of input tensors. Defaults to “NUMPY”. + :param name: The optional new name for output node. + :return: The node performing bitwise OR operation on input nodes corresponding elements. + """ + return _get_node_factory_opset13().create( + "BitwiseOr", + [left_node, right_node], + {"auto_broadcast": auto_broadcast.upper()}, + ) + + +@binary_op +def bitwise_xor( + left_node: NodeInput, + right_node: NodeInput, + auto_broadcast: str = "NUMPY", + name: Optional[str] = None, +) -> Node: + """Return node which performs bitwise XOR operation on input nodes element-wise. + + For boolean input tensors, operator is equivalent to logical_xor. + + :param left_node: Tensor of integer or boolean datatype providing data. + :param right_node: Tensor of integer or boolean datatype providing data. + :param auto_broadcast: The type of broadcasting specifies rules used for auto-broadcasting of input tensors. Defaults to “NUMPY”. + :param name: The optional new name for output node. + :return: The node performing bitwise XOR operation on input nodes corresponding elements. 
+ """ + return _get_node_factory_opset13().create( + "BitwiseXor", + [left_node, right_node], + {"auto_broadcast": auto_broadcast.upper()}, + ) @nameable_op diff --git a/src/bindings/python/tests/test_graph/test_ops_binary.py b/src/bindings/python/tests/test_graph/test_ops_binary.py index c4c55da4731010..a7b7ff2bff63c4 100644 --- a/src/bindings/python/tests/test_graph/test_ops_binary.py +++ b/src/bindings/python/tests/test_graph/test_ops_binary.py @@ -8,7 +8,7 @@ import pytest from openvino.runtime import Type -import openvino.runtime.opset8 as ov +import openvino.runtime.opset13 as ov @pytest.mark.parametrize( @@ -183,3 +183,53 @@ def test_power_v1(): assert node.get_output_size() == 1 assert list(node.get_output_shape(0)) == [8, 4, 6, 5] assert node.get_output_element_type(0) == Type.f32 + + +@pytest.mark.parametrize( + "graph_api_helper", + [ov.bitwise_and, ov.bitwise_or, ov.bitwise_xor], +) +@pytest.mark.parametrize( + "dtype", + [bool, np.int32], +) +@pytest.mark.parametrize( + ("shape_a", "shape_b", "broadcast", "shape_out"), + [ + ([2, 2], [2, 2], "NONE", [2, 2]), + ([2, 1, 5], [1, 4, 5], "NUMPY", [2, 4, 5]), + ([3, 2, 1, 4], [5, 4], "NUMPY", [3, 2, 5, 4]), + ([2, 3, 4, 5], [], "PDPD", [2, 3, 4, 5]), + ([2, 3, 4, 5], [2, 3, 1, 5], "PDPD", [2, 3, 4, 5]), + ], +) +def test_binary_bitwise_op(graph_api_helper, dtype, shape_a, shape_b, broadcast, shape_out): + parameter_a = ov.parameter(shape_a, name="A", dtype=dtype) + parameter_b = ov.parameter(shape_b, name="B", dtype=dtype) + + model = graph_api_helper(parameter_a, parameter_b, broadcast) + + assert model.get_output_size() == 1 + assert list(model.get_output_shape(0)) == shape_out + assert model.get_output_element_type(0) == Type(dtype) + + +@pytest.mark.parametrize( + "graph_api_helper", + [ov.bitwise_and, ov.bitwise_or, ov.bitwise_xor], +) +@pytest.mark.parametrize( + "dtype", + [bool, np.int32], +) +def test_binary_bitwise_op_with_constant(graph_api_helper, dtype): + value_b = np.array([[3, 0], [-7, 21]], dtype=dtype) + + shape = [2, 2] + parameter_a = ov.parameter(shape, name="A", dtype=dtype) + + model = graph_api_helper(parameter_a, value_b) + + assert model.get_output_size() == 1 + assert list(model.get_output_shape(0)) == shape + assert model.get_output_element_type(0) == Type(dtype) From aa6adcd2612116958f3098b1e3b975a7e321ad2f Mon Sep 17 00:00:00 2001 From: Anton Voronov Date: Tue, 10 Oct 2023 11:17:33 +0400 Subject: [PATCH 118/257] Gather: removed indices normalization pass and added some checks for Gather-8 -> Gather-7 conversion (#19666) * Do not normalize negative indices for Gather v8 * code style fix * added transformation test with accuracy check for Gather-v8 * removed GatherNegativeConstIndicesNormalize transformation at all * ConvertGather8ToGather7 conversion: added more checks * Introduced shared Gather8withIndicesDataLayerTest: added CPU, GPU instances * code style fix * small fix * review fixes * do negative indices normalization if possible * code style fix * refactor cpu test instances * code style fix --- .../gather_normalize_negative_indices.hpp | 29 -- .../common_optimizations.cpp | 2 - .../convert_gather_downgrade.cpp | 53 +++- .../gather_normalize_negative_indices.cpp | 86 ------ .../convert_gather_downgrade_test.cpp | 89 ++++++- ...gather_normalize_negative_indices_test.cpp | 252 ------------------ .../single_layer_tests/gather.cpp | 26 ++ .../skip_tests_config.cpp | 2 - .../single_layer_tests/gather.cpp | 28 ++ .../include/single_layer_tests/gather.hpp | 4 + 
.../shared/include/single_op_tests/gather.hpp | 4 + .../single_layer/gather.hpp | 14 + .../shared_test_classes/single_op/gather.hpp | 15 ++ .../src/single_layer/gather.cpp | 52 ++++ .../src/single_op/gather.cpp | 71 +++++ 15 files changed, 352 insertions(+), 375 deletions(-) delete mode 100644 src/common/transformations/include/transformations/op_conversions/gather_normalize_negative_indices.hpp delete mode 100644 src/common/transformations/src/transformations/op_conversions/gather_normalize_negative_indices.cpp delete mode 100644 src/common/transformations/tests/op_conversions/gather_normalize_negative_indices_test.cpp diff --git a/src/common/transformations/include/transformations/op_conversions/gather_normalize_negative_indices.hpp b/src/common/transformations/include/transformations/op_conversions/gather_normalize_negative_indices.hpp deleted file mode 100644 index bb912f5a980f88..00000000000000 --- a/src/common/transformations/include/transformations/op_conversions/gather_normalize_negative_indices.hpp +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "openvino/pass/graph_rewrite.hpp" -#include "transformations_visibility.hpp" - -namespace ov { -namespace pass { - -class TRANSFORMATIONS_API GatherNegativeConstIndicesNormalize; - -} // namespace pass -} // namespace ov - -/** - * @ingroup ie_transformation_common_api - * @brief GatherNegativeConstIndicesNormalize checks if indices value is negative scalar and - * normalizes it using ShapeOf->Add->Cast subgraph. - * We need to remove this transformation after adding support of negative indices in - * future version of Gather operation. - */ -class ov::pass::GatherNegativeConstIndicesNormalize : public ov::pass::MatcherPass { -public: - OPENVINO_RTTI("GatherNegativeConstIndicesNormalize", "0"); - GatherNegativeConstIndicesNormalize(); -}; diff --git a/src/common/transformations/src/transformations/common_optimizations/common_optimizations.cpp b/src/common/transformations/src/transformations/common_optimizations/common_optimizations.cpp index 8ad09f95dede3e..4d4cebe5c62222 100644 --- a/src/common/transformations/src/transformations/common_optimizations/common_optimizations.cpp +++ b/src/common/transformations/src/transformations/common_optimizations/common_optimizations.cpp @@ -97,7 +97,6 @@ #include "transformations/op_conversions/detection_output_upgrade.hpp" #include "transformations/op_conversions/einsum_decomposition.hpp" #include "transformations/op_conversions/eye_decomposition.hpp" -#include "transformations/op_conversions/gather_normalize_negative_indices.hpp" #include "transformations/op_conversions/gelu7_downgrade.hpp" #include "transformations/op_conversions/group_normalization_decomposition.hpp" #include "transformations/op_conversions/hsigmoid_decomposition.hpp" @@ -170,7 +169,6 @@ bool ov::pass::CommonOptimizations::run_on_model(const std::shared_ptradd_matcher(); ADD_MATCHER(decomp, SoftSignDecomposition) - ADD_MATCHER(decomp, GatherNegativeConstIndicesNormalize) ADD_MATCHER(decomp, DropoutWithRandomUniformReplacer) ADD_MATCHER(decomp, TransposeReshapeEliminationForMatmul) ADD_MATCHER(decomp, EyeDecomposition) diff --git a/src/common/transformations/src/transformations/op_conversions/convert_gather_downgrade.cpp b/src/common/transformations/src/transformations/op_conversions/convert_gather_downgrade.cpp index ae0aeb4d99ae8e..4c525f7d3ddbee 100644 --- 
a/src/common/transformations/src/transformations/op_conversions/convert_gather_downgrade.cpp +++ b/src/common/transformations/src/transformations/op_conversions/convert_gather_downgrade.cpp @@ -48,8 +48,59 @@ pass::ConvertGather8ToGather7::ConvertGather8ToGather7() { if (!gather_v8_node) return false; + auto data = gather_v8_node->input_value(0); + auto indices_constant = + std::dynamic_pointer_cast(gather_v8_node->input_value(1).get_node_shared_ptr()); + auto axis_constant = + std::dynamic_pointer_cast(gather_v8_node->input_value(2).get_node_shared_ptr()); + if (!indices_constant || !axis_constant) + return false; + + auto axis = axis_constant->cast_vector(); + if (axis.size() != 1) { + return false; + } + auto axis_value = axis[0]; + // normalize `axis` value if it is negative + if (axis_value < 0) { + if (!data.get_partial_shape().rank().is_static()) { + return false; + } + axis_value = axis_value + data.get_partial_shape().rank().get_length(); + } + if (data.get_partial_shape().rank().get_length() < axis_value) { + return false; + } + // check `axis` dimension of data tensor is static + if (!data.get_partial_shape()[axis_value].is_static()) { + return false; + } + auto axis_dim = data.get_partial_shape()[axis_value].get_length(); + + auto indices = indices_constant->cast_vector(); + // Check all the indices are not out of bound and check whether normalization is possible for negative values + bool do_indices_normalization = false; + for (size_t i = 0; i < indices.size(); i++) { + if (indices[i] < -axis_dim || indices[i] >= axis_dim) { + return false; + } + if (indices[i] < 0) { + do_indices_normalization = true; + indices[i] = indices[i] + axis_dim; + } + } + + std::shared_ptr new_indices_constant; + if (do_indices_normalization) { + new_indices_constant = std::make_shared(indices_constant->get_element_type(), + indices_constant->get_shape(), + indices); + } else { + new_indices_constant = indices_constant; + } + auto gather_v7_node = make_shared(gather_v8_node->input_value(0), - gather_v8_node->input_value(1), + new_indices_constant, gather_v8_node->input_value(2), gather_v8_node->get_batch_dims()); diff --git a/src/common/transformations/src/transformations/op_conversions/gather_normalize_negative_indices.cpp b/src/common/transformations/src/transformations/op_conversions/gather_normalize_negative_indices.cpp deleted file mode 100644 index 47ff221d82fa86..00000000000000 --- a/src/common/transformations/src/transformations/op_conversions/gather_normalize_negative_indices.cpp +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "transformations/op_conversions/gather_normalize_negative_indices.hpp" - -#include - -#include "itt.hpp" -#include "openvino/core/rt_info.hpp" -#include "openvino/core/validation_util.hpp" -#include "openvino/op/add.hpp" -#include "openvino/op/constant.hpp" -#include "openvino/op/gather.hpp" -#include "openvino/op/shape_of.hpp" -#include "openvino/pass/pattern/op/wrap_type.hpp" - -ov::pass::GatherNegativeConstIndicesNormalize::GatherNegativeConstIndicesNormalize() { - MATCHER_SCOPE(GatherNegativeConstIndicesNormalize); - auto data_input = pattern::any_input(pattern::has_static_rank()); - auto axis_input = ov::pass::pattern::wrap_type(); - auto indices_input = ov::pass::pattern::wrap_type(); - auto gather_node = ov::pass::pattern::wrap_type({data_input, indices_input, axis_input}); - - matcher_pass_callback callback = [=](ov::pass::pattern::Matcher& m) { - auto& pattern_to_output = 
m.get_pattern_value_map(); - auto gather = pattern_to_output.at(gather_node).get_node_shared_ptr(); - auto data = pattern_to_output.at(data_input); - auto axis_constant = - std::dynamic_pointer_cast(pattern_to_output.at(axis_input).get_node_shared_ptr()); - auto indices_constant = - std::dynamic_pointer_cast(pattern_to_output.at(indices_input).get_node_shared_ptr()); - - if (!gather || !axis_constant || !indices_constant) { - return false; - } - - auto indices = indices_constant->cast_vector(); - if (indices.size() != 1 || indices[0] >= 0) { - return false; - } - - auto axis = axis_constant->cast_vector(); - if (axis.size() != 1) { - return false; - } - - auto axis_value = axis[0]; - - // normalize `axis` value if it is negative - if (axis_value < 0) { - axis_value = axis_value + data.get_partial_shape().rank().get_length(); - } - - if (data.get_partial_shape().rank().get_length() < axis_value) { - return false; - } - - // check `axis` dimension of data tensor is static - if (!data.get_partial_shape()[axis_value].is_static()) { - return false; - } - - auto input_type = indices_constant->get_element_type(); - auto shape_of = std::make_shared(data, input_type); - auto input_gather = - std::make_shared(shape_of, - ov::op::v0::Constant::create(input_type, Shape{}, {axis_value}), - ov::op::v0::Constant::create(input_type, Shape{}, {0})); - - std::shared_ptr add = std::make_shared(input_gather, indices_constant); - OPENVINO_SUPPRESS_DEPRECATED_START - if (auto folded_const = ov::get_constant_from_source(add)) { - OPENVINO_SUPPRESS_DEPRECATED_END - add = folded_const; - } - gather->input(1).replace_source_output(add); - - ov::copy_runtime_info(gather, {shape_of, input_gather, add}); - - return true; - }; - - auto m = std::make_shared(gather_node, matcher_name); - register_matcher(m, callback); -} diff --git a/src/common/transformations/tests/op_conversions/convert_gather_downgrade_test.cpp b/src/common/transformations/tests/op_conversions/convert_gather_downgrade_test.cpp index 9f8447b9c77302..b38ff9239c5228 100644 --- a/src/common/transformations/tests/op_conversions/convert_gather_downgrade_test.cpp +++ b/src/common/transformations/tests/op_conversions/convert_gather_downgrade_test.cpp @@ -55,7 +55,7 @@ TEST_F(TransformationTestsF, ConvertGather7toGather1_nonzero_batch_dims) { } } -TEST_F(TransformationTestsF, ConvertGather8toGather7) { +TEST_F(TransformationTestsF, ConvertGather8toGather7_param_indices) { { auto data = std::make_shared(element::f32, Shape{2, 3}); auto indices = std::make_shared(element::i32, Shape{2, 2}); @@ -68,15 +68,98 @@ TEST_F(TransformationTestsF, ConvertGather8toGather7) { manager.register_pass(); } +} +TEST_F(TransformationTestsF, ConvertGather8toGather7_const_indices) { { auto data = std::make_shared(element::f32, Shape{2, 3}); - auto indices = std::make_shared(element::i32, Shape{2, 2}); + auto indices = opset8::Constant::create(element::i32, Shape{2, 2}, {0, 1, 2, 0}); auto axis = opset1::Constant::create(element::i32, Shape{1}, {1}); int64_t batch_dims = 1; + auto gather_v8 = std::make_shared(data, indices, axis, batch_dims); + + model = std::make_shared(NodeVector{gather_v8}, ParameterVector{data}); + + manager.register_pass(); + } + + { + auto data = std::make_shared(element::f32, Shape{2, 3}); + auto indices = opset8::Constant::create(element::i32, Shape{2, 2}, {0, 1, 2, 0}); + auto axis = opset1::Constant::create(element::i32, Shape{1}, {1}); + int64_t batch_dims = 1; + + auto gather_v7 = std::make_shared(data, indices, axis, batch_dims); + + model_ref = 
std::make_shared(NodeVector{gather_v7}, ParameterVector{data}); + } +} + +TEST_F(TransformationTestsF, ConvertGather8toGather7_negative_indices) { + { + auto data = std::make_shared(element::f32, Shape{2, 3}); + auto indices = opset8::Constant::create(element::i32, Shape{2, 2}, {2, 1, 0, -1}); + auto axis = opset1::Constant::create(element::i32, Shape{1}, {1}); + int64_t batch_dims = 1; + + auto gather_v8 = std::make_shared(data, indices, axis, batch_dims); + + model = std::make_shared(NodeVector{gather_v8}, ParameterVector{data}); + + manager.register_pass(); + comparator.enable(FunctionsComparator::CONST_VALUES); + } + + { + auto data = std::make_shared(element::f32, Shape{2, 3}); + auto indices = opset8::Constant::create(element::i32, Shape{2, 2}, {2, 1, 0, 2}); + auto axis = opset1::Constant::create(element::i32, Shape{1}, {1}); + int64_t batch_dims = 1; + + auto gather_v7 = std::make_shared(data, indices, axis, batch_dims); + + model_ref = std::make_shared(NodeVector{gather_v7}, ParameterVector{data}); + } +} + +TEST_F(TransformationTestsF, ConvertGather8toGather7_out_of_bound_indices) { + { + auto data = std::make_shared(element::f32, Shape{2, 3}); + auto indices = opset8::Constant::create(element::i32, Shape{2, 2}, {0, 1, 2, 3}); + auto axis = opset1::Constant::create(element::i32, Shape{1}, {1}); + int64_t batch_dims = 1; + + auto gather_v8 = std::make_shared(data, indices, axis, batch_dims); + + model = std::make_shared(NodeVector{gather_v8}, ParameterVector{data}); + + manager.register_pass(); + } +} + +TEST_F(TransformationTestsF, ConvertGather8toGather7_negative_axis) { + { + auto data = std::make_shared(element::f32, Shape{2, 3}); + auto indices = opset8::Constant::create(element::i32, Shape{2, 2}, {0, 1, 2, 0}); + auto axis = opset1::Constant::create(element::i32, Shape{1}, {-1}); + int64_t batch_dims = 1; + + auto gather_v8 = std::make_shared(data, indices, axis, batch_dims); + + model = std::make_shared(NodeVector{gather_v8}, ParameterVector{data}); + + manager.register_pass(); + } + + { + auto data = std::make_shared(element::f32, Shape{2, 3}); + auto indices = opset8::Constant::create(element::i32, Shape{2, 2}, {0, 1, 2, 0}); + auto axis = opset1::Constant::create(element::i32, Shape{1}, {-1}); + int64_t batch_dims = 1; + auto gather_v7 = std::make_shared(data, indices, axis, batch_dims); - model_ref = std::make_shared(NodeVector{gather_v7}, ParameterVector{data, indices}); + model_ref = std::make_shared(NodeVector{gather_v7}, ParameterVector{data}); } } diff --git a/src/common/transformations/tests/op_conversions/gather_normalize_negative_indices_test.cpp b/src/common/transformations/tests/op_conversions/gather_normalize_negative_indices_test.cpp deleted file mode 100644 index 8f71da2a3361c7..00000000000000 --- a/src/common/transformations/tests/op_conversions/gather_normalize_negative_indices_test.cpp +++ /dev/null @@ -1,252 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "transformations/op_conversions/gather_normalize_negative_indices.hpp" - -#include - -#include -#include - -#include "common_test_utils/ov_test_utils.hpp" -#include "openvino/core/model.hpp" -#include "openvino/core/validation_util.hpp" -#include "openvino/opsets/opset7.hpp" -#include "openvino/pass/manager.hpp" -#include "transformations/init_node_info.hpp" -using namespace ov; -using namespace testing; - -TEST_F(TransformationTestsF, GatherNegativeIndicesNormalize) { - { - auto data = std::make_shared(element::f32, Shape{1, 15, 128}); - 
auto indices = opset7::Constant::create(element::i32, Shape{}, {-1}); - auto axis = opset7::Constant::create(element::i32, Shape{}, {1}); - - auto gather = std::make_shared(data, indices, axis, 0); - - model = std::make_shared(NodeVector{gather}, ParameterVector{data}); - - manager.register_pass(); - } - - { - auto indices_type = element::i32; - - auto data = std::make_shared(element::f32, Shape{1, 15, 128}); - auto indices = opset7::Constant::create(indices_type, Shape{}, {-1}); - auto axis = opset7::Constant::create(element::i32, Shape{}, {1}); - - auto shape_of = std::make_shared(data, indices_type); - auto input_gather = std::make_shared(shape_of, - opset7::Constant::create(indices_type, Shape{}, {1}), - opset7::Constant::create(indices_type, Shape{}, {0})); - auto add = std::make_shared(input_gather, indices); - OPENVINO_SUPPRESS_DEPRECATED_START - auto const_add = get_constant_from_source(add); - OPENVINO_SUPPRESS_DEPRECATED_END - if (const_add == nullptr) - OPENVINO_THROW("indices should've been constant folded"); - auto gather = std::make_shared(data, const_add, axis); - - model_ref = std::make_shared(NodeVector{gather}, ParameterVector{data}); - } -} - -TEST_F(TransformationTestsF, GatherNegativeIndicesNormalize_neg_axis) { - { - auto data = std::make_shared(element::f32, Shape{1, 15, 128}); - auto indices = opset7::Constant::create(element::i32, Shape{}, {-1}); - auto axis = opset7::Constant::create(element::i32, Shape{}, {-2}); - - auto gather = std::make_shared(data, indices, axis, 0); - - model = std::make_shared(NodeVector{gather}, ParameterVector{data}); - - manager.register_pass(); - } - - { - auto indices_type = element::i32; - - auto data = std::make_shared(element::f32, Shape{1, 15, 128}); - auto indices = opset7::Constant::create(indices_type, Shape{}, {-1}); - auto axis = opset7::Constant::create(element::i32, Shape{}, {-2}); - - auto shape_of = std::make_shared(data, indices_type); - auto input_gather = std::make_shared(shape_of, - opset7::Constant::create(indices_type, Shape{}, {1}), - opset7::Constant::create(indices_type, Shape{}, {0})); - auto add = std::make_shared(input_gather, indices); - OPENVINO_SUPPRESS_DEPRECATED_START - auto const_add = get_constant_from_source(add); - OPENVINO_SUPPRESS_DEPRECATED_END - if (const_add == nullptr) - OPENVINO_THROW("indices should've been constant folded"); - auto gather = std::make_shared(data, const_add, axis); - - model_ref = std::make_shared(NodeVector{gather}, ParameterVector{data}); - } -} - -TEST_F(TransformationTestsF, GatherNegativeIndicesNormalize_dif_input_types) { - { - auto data = std::make_shared(element::f32, Shape{1, 15, 128}); - auto indices = opset7::Constant::create(element::i32, Shape{}, {-1}); - auto axis = opset7::Constant::create(element::i64, Shape{}, {1}); - - auto gather = std::make_shared(data, indices, axis, 0); - - model = std::make_shared(NodeVector{gather}, ParameterVector{data}); - - manager.register_pass(); - } - - { - auto indices_type = element::i32; - - auto data = std::make_shared(element::f32, Shape{1, 15, 128}); - auto indices = opset7::Constant::create(indices_type, Shape{}, {-1}); - auto axis = opset7::Constant::create(element::i64, Shape{}, {1}); - - auto shape_of = std::make_shared(data, indices_type); - auto input_gather = std::make_shared(shape_of, - opset7::Constant::create(indices_type, Shape{}, {1}), - opset7::Constant::create(indices_type, Shape{}, {0})); - auto add = std::make_shared(input_gather, indices); - OPENVINO_SUPPRESS_DEPRECATED_START - auto const_add = 
get_constant_from_source(add); - OPENVINO_SUPPRESS_DEPRECATED_END - if (const_add == nullptr) - OPENVINO_THROW("indices should've been constant folded"); - auto gather = std::make_shared(data, const_add, axis); - - model_ref = std::make_shared(NodeVector{gather}, ParameterVector{data}); - } -} - -TEST_F(TransformationTestsF, GatherNegativeIndicesNormalize_static_axis_dim) { - { - auto data = std::make_shared(element::f32, PartialShape{DYN, 15, DYN}); - auto indices = opset7::Constant::create(element::i32, Shape{}, {-1}); - auto axis = opset7::Constant::create(element::i32, Shape{}, {1}); - - auto gather = std::make_shared(data, indices, axis, 0); - - model = std::make_shared(NodeVector{gather}, ParameterVector{data}); - - manager.register_pass(); - } - - { - auto indices_type = element::i32; - - auto data = std::make_shared(element::f32, PartialShape{DYN, 15, DYN}); - auto indices = opset7::Constant::create(indices_type, Shape{}, {2}); - auto axis = opset7::Constant::create(element::i32, Shape{}, {1}); - - auto gather = std::make_shared(data, indices, axis); - model_ref = std::make_shared(NodeVector{gather}, ParameterVector{data}); - } -} - -TEST_F(TransformationTestsF, GatherNegativeIndicesNormalize_static_axis_dim_neg_axis) { - { - auto data = std::make_shared(element::f32, PartialShape{DYN, 15, DYN}); - auto indices = opset7::Constant::create(element::i32, Shape{}, {-1}); - auto axis = opset7::Constant::create(element::i32, Shape{}, {-2}); - - auto gather = std::make_shared(data, indices, axis, 0); - - model = std::make_shared(NodeVector{gather}, ParameterVector{data}); - - manager.register_pass(); - } - - { - auto indices_type = element::i32; - - auto data = std::make_shared(element::f32, PartialShape{DYN, 15, DYN}); - auto indices = opset7::Constant::create(indices_type, Shape{}, {2}); - auto axis = opset7::Constant::create(element::i32, Shape{}, {-2}); - - auto gather = std::make_shared(data, indices, axis); - - model_ref = std::make_shared(NodeVector{gather}, ParameterVector{data}); - } -} - -TEST_F(TransformationTestsF, GatherNegativeIndicesNormalize_non_static_axis_dim) { - { - auto data = std::make_shared(element::f32, PartialShape{DYN, DYN, DYN}); - auto indices = opset7::Constant::create(element::i32, Shape{}, {-1}); - auto axis = opset7::Constant::create(element::i32, Shape{}, {1}); - - auto gather = std::make_shared(data, indices, axis, 0); - - model = std::make_shared(NodeVector{gather}, ParameterVector{data}); - - manager.register_pass(); - } - - { - auto indices_type = element::i32; - - auto data = std::make_shared(element::f32, PartialShape{DYN, DYN, DYN}); - auto indices = opset7::Constant::create(indices_type, Shape{}, {-1}); - auto axis = opset7::Constant::create(element::i32, Shape{}, {1}); - - auto gather = std::make_shared(data, indices, axis); - - model_ref = std::make_shared(NodeVector{gather}, ParameterVector{data}); - } -} - -TEST_F(TransformationTestsF, GatherNegativeIndicesNormalize_positive_ind) { - { - auto data = std::make_shared(element::f32, Shape{2, 3}); - auto indices = opset7::Constant::create(element::i32, Shape{}, {1}); - auto axis = opset7::Constant::create(element::i32, Shape{}, {0}); - - auto gather = std::make_shared(data, indices, axis, 0); - - model = std::make_shared(NodeVector{gather}, ParameterVector{data}); - - manager.register_pass(); - } - - { - auto data = std::make_shared(element::f32, Shape{2, 3}); - auto indices = opset7::Constant::create(element::i32, Shape{}, {1}); - auto axis = opset7::Constant::create(element::i32, Shape{}, {0}); 
- - auto gather = std::make_shared(data, indices, axis); - - model_ref = std::make_shared(NodeVector{gather}, ParameterVector{data}); - } -} - -TEST_F(TransformationTestsF, GatherNegativeIndicesNormalize_non_static_rank) { - { - auto data = std::make_shared(element::f32, PartialShape::dynamic(Rank::dynamic())); - auto indices = opset7::Constant::create(element::i32, Shape{}, {-1}); - auto axis = opset7::Constant::create(element::i32, Shape{}, {0}); - - auto gather = std::make_shared(data, indices, axis, 0); - - model = std::make_shared(NodeVector{gather}, ParameterVector{data}); - - manager.register_pass(); - } - - { - auto data = std::make_shared(element::f32, PartialShape::dynamic()); - auto indices = opset7::Constant::create(element::i32, Shape{}, {-1}); - auto axis = opset7::Constant::create(element::i32, Shape{}, {0}); - - auto gather = std::make_shared(data, indices, axis); - - model_ref = std::make_shared(NodeVector{gather}, ParameterVector{data}); - } -} diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/gather.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/gather.cpp index b82cadb87d9d80..68a16954a19a85 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/gather.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/gather.cpp @@ -9,6 +9,7 @@ namespace { using ov::test::Gather7LayerTest; using ov::test::Gather8LayerTest; +using ov::test::Gather8withIndicesDataLayerTest; const std::vector model_types = { ov::element::f32, @@ -203,4 +204,29 @@ const auto gatherParamsVec3 = testing::Combine( INSTANTIATE_TEST_CASE_P(smoke_Vec3, Gather8LayerTest, gatherParamsVec3, Gather8LayerTest::getTestCaseName); + +const ov::test::gather7ParamsTuple dummyParams = { + ov::test::static_shapes_to_test_representation(std::vector{{2, 3}}), // input shape + ov::Shape{2, 2}, // indices shape + std::tuple{1, 1}, // axis, batch + ov::element::f32, // model type + ov::test::utils::DEVICE_CPU // device +}; + +const std::vector> indicesData = { + {0, 1, 2, 0}, // positive in bound + {-1, -2, -3, -1}, // negative in bound + {-1, 0, 1, 2}, // positive and negative in bound + {0, 1, 2, 3}, // positive out of bound + {-1, -2, -3, -4}, // negative out of bound + {0, 4, -4, 0}, // positive and negative out of bound +}; + +const auto gatherWithIndicesParams = testing::Combine( + testing::Values(dummyParams), + testing::ValuesIn(indicesData) +); + +INSTANTIATE_TEST_CASE_P(smoke, Gather8withIndicesDataLayerTest, gatherWithIndicesParams, Gather8withIndicesDataLayerTest::getTestCaseName); + } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp index 127002565e2a1f..bc4b9786d1511f 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp @@ -177,8 +177,6 @@ std::vector disabledTestPatterns() { R"(.*(Hetero).*InferRequestPreprocessTest.*SetPreProcessToInferRequest.*)", // TODO: for 22.2 (Issue 68949) R"(.*smoke_AutoBatching_CPU/AutoBatching_Test_DetectionOutput.*)", - // Issue: 117837 - R"(.*smoke_4D_out_of_range/GatherInPlaceLayerTestCPU.*_indices=\(\-15\).*)", // Issue: 120222 R"(.*smoke_TopK/TopKLayerTest.Inference.*_k=1_axis=3_.*_modelType=f16_trgDev=CPU.*)", 
R"(.*smoke_TopK/TopKLayerTest.Inference.*_k=7_axis=3_.*_modelType=f16_trgDev=CPU.*)", diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/gather.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/gather.cpp index 4ab3baadfabdad..04b501af6d6a51 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/gather.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/gather.cpp @@ -578,4 +578,32 @@ INSTANTIATE_TEST_SUITE_P( Gather8IndiceScalarLayerTest::getTestCaseName ); +gather7ParamsTuple dummyParams = { + std::vector{2, 3}, + std::vector{2, 2}, + std::tuple{1, 1}, + InferenceEngine::Precision::FP32, + InferenceEngine::Precision::UNSPECIFIED, + InferenceEngine::Precision::UNSPECIFIED, + InferenceEngine::Layout::ANY, + InferenceEngine::Layout::ANY, + ov::test::utils::DEVICE_GPU, +}; + +std::vector> indicesData = { + {0, 1, 2, 0}, // positive in bound + {-1, -2, -3, -1}, // negative in bound + {-1, 0, 1, 2}, // positive and negative in bound + {0, 1, 2, 3}, // positive out of bound + {-1, -2, -3, -4}, // negative out of bound + {0, 4, -4, 0}, // positive and negative out of bound +}; + +const auto gatherWithIndicesParams = testing::Combine( + testing::Values(dummyParams), + testing::ValuesIn(indicesData) +); + +INSTANTIATE_TEST_CASE_P(smoke, Gather8withIndicesDataLayerTest, gatherWithIndicesParams, Gather8withIndicesDataLayerTest::getTestCaseName); + } // namespace diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/gather.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/gather.hpp index f6b8f091bd8125..e909283ff31336 100644 --- a/src/tests/functional/plugin/shared/include/single_layer_tests/gather.hpp +++ b/src/tests/functional/plugin/shared/include/single_layer_tests/gather.hpp @@ -24,4 +24,8 @@ TEST_P(Gather8IndiceScalarLayerTest, CompareWithRefs) { Run(); }; +TEST_P(Gather8withIndicesDataLayerTest, CompareWithRefs) { + Run(); +}; + } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/single_op_tests/gather.hpp b/src/tests/functional/plugin/shared/include/single_op_tests/gather.hpp index 6c27459fc18712..be9aa74077694e 100644 --- a/src/tests/functional/plugin/shared/include/single_op_tests/gather.hpp +++ b/src/tests/functional/plugin/shared/include/single_op_tests/gather.hpp @@ -23,5 +23,9 @@ TEST_P(Gather8LayerTest, Inference) { TEST_P(Gather8IndiceScalarLayerTest, Inference) { run(); }; + +TEST_P(Gather8withIndicesDataLayerTest, Inference) { + run(); +}; } // namespace test } // namespace ov diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/gather.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/gather.hpp index 5f9e9473708969..c2f354f39200a6 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/gather.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/gather.hpp @@ -83,4 +83,18 @@ class Gather8IndiceScalarLayerTest : public testing::WithParamInterface // indices data +> gather8withIndicesDataParamsTuple; + +class Gather8withIndicesDataLayerTest : public testing::WithParamInterface, + virtual public LayerTestsUtils::LayerTestsCommon { +public: + static std::string getTestCaseName(const testing::TestParamInfo& obj); + +protected: + void SetUp() override; +}; + } // namespace LayerTestsDefinitions diff 
--git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/gather.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/gather.hpp index 36d6ba48dfd25a..a41a13ce962eb7 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/gather.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/gather.hpp @@ -63,5 +63,20 @@ class Gather8IndiceScalarLayerTest : public testing::WithParamInterface // indices data +> gather8withIndicesDataParamsTuple; + +class Gather8withIndicesDataLayerTest : public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseTest { +public: + static std::string getTestCaseName(const testing::TestParamInfo& obj); + +protected: + void SetUp() override; +}; + } // namespace test } // namespace ov diff --git a/src/tests/functional/shared_test_classes/src/single_layer/gather.cpp b/src/tests/functional/shared_test_classes/src/single_layer/gather.cpp index a9b58ecea05e37..97538823dc52ae 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/gather.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/gather.cpp @@ -178,4 +178,56 @@ void Gather8IndiceScalarLayerTest::SetUp() { function = std::make_shared(results, functionParams, "gather"); } +std::string Gather8withIndicesDataLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { + gather7ParamsTuple basicParams; + std::vector indicesData; + std::tie(basicParams, indicesData) = obj.param; + + std::tuple axis_batchIdx; + std::vector indices; + std::vector indicesShape, inputShape; + InferenceEngine::Precision netPrecision; + InferenceEngine::Precision inPrc, outPrc; + InferenceEngine::Layout inLayout, outLayout; + std::string targetName; + std::tie(inputShape, indicesShape, axis_batchIdx, netPrecision, inPrc, outPrc, inLayout, outLayout, targetName) = basicParams; + std::ostringstream result; + result << "IS=" << ov::test::utils::vec2str(inputShape) << "_"; + result << "indicesShape=" << ov::test::utils::vec2str(indicesShape) << "_"; + result << "axis=" << std::get<0>(axis_batchIdx) << "_"; + result << "batchIdx=" << std::get<1>(axis_batchIdx) << "_"; + result << "netPRC=" << netPrecision.name() << "_"; + result << "inPRC=" << inPrc.name() << "_"; + result << "outPRC=" << outPrc.name() << "_"; + result << "inL=" << inLayout << "_"; + result << "outL=" << outLayout << "_"; + result << "trgDev=" << targetName << "_"; + + result << "indicesData=" << ov::test::utils::vec2str(indicesData) << "_"; + + return result.str(); +} + +void Gather8withIndicesDataLayerTest::SetUp() { + gather7ParamsTuple basicParams; + std::vector indicesData; + std::tie(basicParams, indicesData) = GetParam(); + + std::tuple axis_batchIdx; + std::vector indicesShape; + std::vector inputShape; + InferenceEngine::Precision netPrecision; + std::tie(inputShape, indicesShape, axis_batchIdx, netPrecision, inPrc, outPrc, inLayout, outLayout, targetDevice) = basicParams; + int axis = std::get<0>(axis_batchIdx); + int batchIdx = std::get<1>(axis_batchIdx); + auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); + ov::ParameterVector functionParams {std::make_shared(ngPrc, ov::Shape(inputShape))}; + auto paramOuts = ngraph::helpers::convert2OutputVector(ngraph::helpers::castOps2Nodes(functionParams)); + auto indicesNode = ngraph::builder::makeConstant(ngraph::element::i64, indicesShape, indicesData); + auto axisNode = ngraph::opset8::Constant::create(ngraph::element::i64, 
ngraph::Shape({}), { axis }); + auto gather = std::make_shared(paramOuts[0], indicesNode, axisNode, batchIdx); + ngraph::ResultVector results{ std::make_shared(gather) }; + function = std::make_shared(results, functionParams, "gather"); +} + } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_op/gather.cpp b/src/tests/functional/shared_test_classes/src/single_op/gather.cpp index 0dad09af952720..f4c91752190e19 100644 --- a/src/tests/functional/shared_test_classes/src/single_op/gather.cpp +++ b/src/tests/functional/shared_test_classes/src/single_op/gather.cpp @@ -167,5 +167,76 @@ void Gather8IndiceScalarLayerTest::SetUp() { auto result = std::make_shared(gather); function = std::make_shared(result, ov::ParameterVector{param}, "gather"); } + +std::string Gather8withIndicesDataLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { + gather7ParamsTuple basicParams; + std::vector indicesData; + std::tie(basicParams, indicesData) = obj.param; + + std::tuple axis_batch_idx; + std::vector indices; + ov::Shape indices_shape; + std::vector shapes; + ov::element::Type model_type; + std::string device_name; + std::tie(shapes, indices_shape, axis_batch_idx, model_type, device_name) = basicParams; + + std::ostringstream result; + result << "IS=("; + for (size_t i = 0lu; i < shapes.size(); i++) { + result << ov::test::utils::partialShape2str({shapes[i].first}) << (i < shapes.size() - 1lu ? "_" : ""); + } + result << ")_TS="; + for (size_t i = 0lu; i < shapes.front().second.size(); i++) { + result << "{"; + for (size_t j = 0lu; j < shapes.size(); j++) { + result << ov::test::utils::vec2str(shapes[j].second[i]) << (j < shapes.size() - 1lu ? "_" : ""); + } + result << "}_"; + } + result << "axis=" << std::get<0>(axis_batch_idx) << "_"; + result << "batch_idx=" << std::get<1>(axis_batch_idx) << "_"; + result << "indices_shape=" << ov::test::utils::vec2str(indices_shape) << "_"; + result << "netPRC=" << model_type.get_type_name() << "_"; + result << "trgDev=" << device_name << "_"; + + result << "indicesData=" << ov::test::utils::vec2str(indicesData) << "_"; + + return result.str(); +} + +void Gather8withIndicesDataLayerTest::SetUp() { + gather7ParamsTuple basicParams; + std::vector indicesData; + std::tie(basicParams, indicesData) = GetParam(); + + std::tuple axis_batch_idx; + ov::Shape indices_shape; + std::vector shapes; + ov::element::Type model_type; + std::tie(shapes, indices_shape, axis_batch_idx, model_type, targetDevice) = basicParams; + init_input_shapes(shapes); + + int axis = std::get<0>(axis_batch_idx); + int batch_idx = std::get<1>(axis_batch_idx); + + auto param = std::make_shared(model_type, inputDynamicShapes.front()); + + // create indices tensor and fill data + ov::Tensor indices_node_tensor{ov::element::i64, indices_shape}; + auto indices_tensor_data = indices_node_tensor.data(); + for (size_t i = 0; i < shape_size(indices_shape); i++) { + indices_tensor_data[i] = indicesData[i]; + } + + auto indices_node = std::make_shared(indices_node_tensor); + auto axis_node = ov::op::v0::Constant::create(ov::element::i64, ov::Shape(), {axis}); + + auto gather = std::make_shared(param, indices_node, axis_node, batch_idx); + + auto result = std::make_shared(gather); + function = std::make_shared(result, ov::ParameterVector{param}, "gather"); +} + } // namespace test } // namespace ov From d205684aff0f0db42f229b970b72a688d22daddf Mon Sep 17 00:00:00 2001 From: Xuejun Zhai Date: Tue, 10 Oct 2023 15:21:31 +0800 Subject: [PATCH 119/257] [C API] Fix 
coverity scan issue about GET_INTEL_GPU_PROPERTY_FROM_ARGS_LIST (#20330) Signed-off-by: Zhai, Xuejun --- src/bindings/c/src/ov_remote_context.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/bindings/c/src/ov_remote_context.cpp b/src/bindings/c/src/ov_remote_context.cpp index c484ea60b635cc..68e802e256973e 100644 --- a/src/bindings/c/src/ov_remote_context.cpp +++ b/src/bindings/c/src/ov_remote_context.cpp @@ -37,11 +37,11 @@ inline bool check_intel_gpu_property_value_is_ptr(std::string& key) { std::string property_key = va_arg(args_ptr, char*); \ if (check_intel_gpu_property_value_is_ptr(property_key)) { \ ov::Any value = va_arg(args_ptr, void*); \ - property[property_key] = value; \ + property[property_key] = std::move(value); \ } else { \ std::string _value = va_arg(args_ptr, char*); \ ov::Any value = _value; \ - property[property_key] = value; \ + property[property_key] = std::move(value); \ } \ } From ed45a92e30a0014f4e825d5adef208302ce2eea8 Mon Sep 17 00:00:00 2001 From: Pratham Ingawale <94799826+PRATHAM-SPS@users.noreply.github.com> Date: Tue, 10 Oct 2023 14:12:49 +0530 Subject: [PATCH 120/257] Remove test-generator dependency (#18512) * changed from generator to unittest * common_test.py tested for pylint 7.96/10 * ChangeRandomUniformOutputType_test pylint 10/10 * replaced generator functionality from compress... * replaced generator functionality in MatMulNormal.. * replaced generator functionality in ShuffleChan... * replaced generator functionality in import_from_mo_test.py * replaced generator functionality in meta_data_test.py * replaced generator functionality in extractor_test.py * replaced generator functionality in interpolate_reshape_test.py * replaced generator functionality in Pack_test.py * replaced generator functionality in rank_decomposer_test.py * replaced generator functionality in size_replacer_test.py * replaced generator functionality in utils_test.py * replaced generator functionality in eltwise_test.py * replaced generator functionality in concat_test.py * replaced generator functionality in tdnn_component_replacer_test.py * replaced generator functionality in MXFFTToDFT_test.py * replaced generator functionality in activation_ext_test.py * replaced generator functionality in AttributedSliceToSlice_test * replaced generator functionality in squeeze_ext_test.py * replaced generator functionality in transpose_ext_test.py * replaced generator functionality in unsqueeze_ext_test.py * replaced generator functionality in ObjectDetectionAPI_test.py * replaced generator functionality in RFFTRealImagToRFFTSplit_test.py * replaced generator functionality in TFFFTToDFT_test.py * replaced generator functionality in WhereDecomposition_test.py * replaced generator functionality in graph_test.py * replaced generator functionality in ConvertGroupedStridedSlice_test.py * replaced generator functionality in dequantize_linear_resolver_test.py * replaced generator functionality in FusedBatchNormTraining_test.py * replaced generator functionality in L2NormFusing_test.py * replaced generator functionality in PreserveRuntimeInfo_test.py * replaced generator functionality in quantize_linear_resolver_test.py * replaced generator functionality in UpsampleToResample_test.py * replaced generator functionality in broadcast_test.py * replaced generator functionality in loader_test.py * replaced generator functionality in cast_test.py * replaced generator functionality in Complex_test.py * replaced generator functionality in 
dft_signal_size_canonicalization_test.py * replaced generator functionality in div_value_propagation_test.py * replaced generator functionality in einsum_test.py * replaced generator functionality in expand_dims_test.py * replaced generator functionality in ExtractImagePatches_test.py * replaced generator functionality in eye_test.py * replaced generator functionality in gatherelements_test.py * replaced generator functionality in If_test.py * replaced generator functionality in interpolate_test.py * replaced generator functionality in MatMul_test.py * replaced generator functionality in MatMul_value_propagation_test.py * replaced generator functionality in one_hot_test.py * replaced generator functionality in ONNXResize11_test.py * replaced generator functionality in ReduceOps_test.py * replaced generator functionality in reshape_test.py * replaced generator functionality in scatter_test.py * replaced generator functionality in slice_test.py * replaced generator functionality in conversion_with_layout_test.py * replaced generator functionality in conversion_incorrect_models_test.py * replaced generator functionality in conversion_basic_models_test.py * replaced generator functionality in split_test.py * replaced generator functionality in squeeze_test.py * replaced generator functionality in mo_fallback_test_actual.py * replaced generator functionality in layer_to_class_test.py * replaced generator functionality in ir_engine_test.py * replaced generator functionality in mo_fallback_test_tf_fe.py * replaced generator functionality in freeze_placeholder_test.py * replaced generator functionality in broadcasting_test.py * replaced generator functionality in broadcasting_test.py * replaced generator functionality in transpose_test.py * replaced generator functionality in custom_replacement_config_test.py * replaced generator functionality in unsqueeze_test.py * replaced generator functionality in upsample_test.py * replaced generator functionality in upsample_test.py * Removed test-generator dependency from openvino/tools/constraints.txt * replaced generator functionality in freeze_placeholder_test.py * replaced generator functionality in conversion_incorrect_models_test.py * removed test-generator from requirements_dev,constraints.txt,requirements.txt * removed import generator from CorrectPaddingsForPadAfterComplex_test.py * adding test_generator dep.. 
* revert back constraints.txt * revert back requirements_dev * pytest:- MatMulNormalizer_test.py * pytest:- ShuffleChannelPatternOptimization_test.py * pytest:- import_from_mo_test.py * generator_to_pytest interpolate_reshape_test.py * pytest:- rank_decomposer_test.py * pytest:- size_replacer_test.py * pytest:- concat_test.py * pytest:- eltwise_test.py * pytest:- utils_test.py * pytest:- tdnn_component_replacer_test.py * pytest:- MXFFTToDFT_test.py * pytest:- activation_ext_test.py * pytest:- AttributedSliceToSlice_test.py * pytest:- squeeze_ext_test.py * pytest:- transpose_ext_test.py * pytest:- unsqueeze_ext_test.py * pytest:- ObjectDetectionAPI_test.py * pytest:- RFFTRealImagToRFFTSplit_test.py * pytest:- TFFFTToDFT_test.py * pytest:- WhereDecomposition_test.py * pytest:- graph_test.py * pytest:- ConvertGroupedStridedSlice_test.py * dequantize_linear_resolver_test.py * pytest:- FusedBatchNormTraining_test.py * pytest:- L2NormFusing_test.py * pytest:- PreserveRuntimeInfo_test.py * pytest:- quantize_linear_resolver_test.py * pytest:- UpsampleToResample_test.py * pytest:- broadcast_test.py * pytest:- cast_test.py * pytest:- Complex_test.py * pytest:- dft_signal_size_canonicalization_test.py * pytest:- div_value_propagation_test.py * pytest:- einsum_test.py * pytest:- expand_dims_test.py * pytest:- ExtractImagePatches_test.py * pytest:- eye_test.py * pytest:- gatherelements_test.py * pytest:- If_test.py * pytest:- interpolate_test.py * pytest:- MatMul_test.py * pytest:- MatMul_value_propagation_test.py * pytest:- one_hot_test.py * pytest:- ONNXResize11_test.py * pytest:- ReduceOps_test.py * pytest:- reshape_test.py * scatter_test.py * pytest:- slice_test.py * pytest:- split_test.py * pytest:- squeeze_test.py * pytest:- transpose_test.py * pytest:- unsqueeze_test.py * pytest:- upsample_test.py * pytest:- common_test.py * pytest:- broadcasting_test.py * revert back ir_engine_test.py * revertback :- custom_replacement_config_test.py * revertback:- mo_fallback_test_actual.py * revertback:- mo_fallback_test_tf_fe.py * pytest:- layer_to_class_test.py * revertback:- conversion_basic_models_test.py * revertback:- conversion_incorrect_models_test.py * revertback:- conversion_with_layout_test * revertback:- constraints.txt * revertback:- loader_test.py * pytest:- Pack_test.py * revertback:- freeze_placeholder_test.py --------- Co-authored-by: Andrei Kochin --- .../mo/back/MatMulNormalizer_test.py | 15 ++--- .../ShuffleChannelPatternOptimization_test.py | 34 +++++----- .../mo/convert/import_from_mo_test.py | 8 +-- .../unit_tests/mo/convert/meta_data_test.py | 2 - tools/mo/unit_tests/mo/front/Pack_test.py | 11 ++-- .../front/common/partial_infer/concat_test.py | 20 +++--- .../common/partial_infer/eltwise_test.py | 18 +++--- .../front/common/partial_infer/utils_test.py | 55 ++++++++-------- .../mo/front/interpolate_reshape_test.py | 18 +++--- .../kaldi/tdnn_component_replacer_test.py | 10 ++- .../mo/front/mxnet/MXFFTToDFT_test.py | 16 ++--- .../front/onnx/AttributedSliceToSlice_test.py | 10 ++- .../mo/front/onnx/activation_ext_test.py | 18 ++---- .../mo/front/onnx/squeeze_ext_test.py | 13 ++-- .../mo/front/onnx/transpose_ext_test.py | 14 ++-- .../mo/front/onnx/unsqueeze_ext_test.py | 12 ++-- .../mo/front/rank_decomposer_test.py | 17 +++-- .../unit_tests/mo/front/size_replacer_test.py | 17 +++-- .../CorrectPaddingsForPadAfterComplex_test.py | 1 - .../mo/front/tf/ObjectDetectionAPI_test.py | 9 ++- .../front/tf/RFFTRealImagToRFFTSplit_test.py | 12 ++-- .../unit_tests/mo/front/tf/TFFFTToDFT_test.py | 15 
++--- .../mo/front/tf/WhereDecomposition_test.py | 11 ++-- tools/mo/unit_tests/mo/graph/graph_test.py | 12 ++-- .../middle/ConvertGroupedStridedSlice_test.py | 33 +++++----- .../mo/middle/FusedBatchNormTraining_test.py | 12 ++-- .../unit_tests/mo/middle/L2NormFusing_test.py | 18 +++--- .../mo/middle/PreserveRuntimeInfo_test.py | 36 +++++------ .../mo/middle/UpsampleToResample_test.py | 14 ++-- .../middle/dequantize_linear_resolver_test.py | 10 +-- .../middle/quantize_linear_resolver_test.py | 10 +-- tools/mo/unit_tests/mo/ops/Complex_test.py | 12 ++-- .../mo/ops/ExtractImagePatches_test.py | 11 ++-- tools/mo/unit_tests/mo/ops/If_test.py | 12 ++-- tools/mo/unit_tests/mo/ops/MatMul_test.py | 17 +++-- .../mo/ops/MatMul_value_propagation_test.py | 10 ++- .../mo/unit_tests/mo/ops/ONNXResize11_test.py | 33 +++++----- tools/mo/unit_tests/mo/ops/ReduceOps_test.py | 14 ++-- tools/mo/unit_tests/mo/ops/broadcast_test.py | 64 +++++++++---------- tools/mo/unit_tests/mo/ops/cast_test.py | 10 ++- .../dft_signal_size_canonicalization_test.py | 10 ++- .../mo/ops/div_value_propagation_test.py | 10 ++- tools/mo/unit_tests/mo/ops/einsum_test.py | 39 ++++++----- .../mo/unit_tests/mo/ops/expand_dims_test.py | 26 ++++---- tools/mo/unit_tests/mo/ops/eye_test.py | 22 +++---- .../unit_tests/mo/ops/gatherelements_test.py | 26 ++++---- .../mo/unit_tests/mo/ops/interpolate_test.py | 34 +++++----- tools/mo/unit_tests/mo/ops/one_hot_test.py | 22 +++---- tools/mo/unit_tests/mo/ops/reshape_test.py | 12 ++-- tools/mo/unit_tests/mo/ops/scatter_test.py | 21 +++--- tools/mo/unit_tests/mo/ops/slice_test.py | 21 +++--- tools/mo/unit_tests/mo/ops/split_test.py | 21 +++--- tools/mo/unit_tests/mo/ops/squeeze_test.py | 14 ++-- tools/mo/unit_tests/mo/ops/transpose_test.py | 20 +++--- tools/mo/unit_tests/mo/ops/unsqueeze_test.py | 15 ++--- tools/mo/unit_tests/mo/ops/upsample_test.py | 14 ++-- .../mo/unit_tests/mo/pipeline/common_test.py | 15 ++--- .../unit_tests/mo/utils/broadcasting_test.py | 28 ++++---- .../mo/utils/ir_reader/layer_to_class_test.py | 16 ++--- 59 files changed, 484 insertions(+), 586 deletions(-) diff --git a/tools/mo/unit_tests/mo/back/MatMulNormalizer_test.py b/tools/mo/unit_tests/mo/back/MatMulNormalizer_test.py index 5187ca71c2bd84..c896f8b0ae2b38 100644 --- a/tools/mo/unit_tests/mo/back/MatMulNormalizer_test.py +++ b/tools/mo/unit_tests/mo/back/MatMulNormalizer_test.py @@ -5,7 +5,7 @@ from argparse import Namespace import numpy as np -from generator import generate, generator +import pytest from openvino.tools.mo.back.MatMulNormalizer import SmartReshape_HC_Reshape_MatMul, PullTransposeThroughFQUp from openvino.tools.mo.ops.MatMul import MatMul @@ -19,10 +19,9 @@ from unit_tests.utils.graph import regular_op_with_empty_data as op_with_empty_data -@generator -class SmartReshape_HC_Reshape_MatMulTest(unittest.TestCase): - @generate( - *[ +class TestSmartReshape_HC_Reshape_MatMulTest(): + @pytest.mark.parametrize("in1_shape, in2_shape, reshape_pattern, transpose_a, transpose_b, updated_pattern", + [ ([1, 20, 30], [30, 40], [20, -1], False, False, [-1, 30]), ([1, 20, 30], [40, 30], [20, -1], False, True, [-1, 30]), ([1, 30, 20], [30, 40], [-1, 20], True, False, [30, -1]), @@ -59,9 +58,9 @@ def test_reshape_on_the_A_input(self, graph_ref.clean_up() (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - self.assertTrue(flag, resp) + assert flag, resp - @generate(*[ + @pytest.mark.parametrize("in1_shape, in2_shape, reshape_pattern, transpose_a, transpose_b, updated_pattern",[ ([20, 30], [1, 
30, 40], [-1, 40], False, False, [30, -1]), ([20, 30], [1, 40, 30], [40, -1], False, True, [-1, 30]), ([30, 20], [1, 30, 40], [-1, 40], True, False, [30, -1]), @@ -97,7 +96,7 @@ def test_reshape_on_the_B_input(self, graph_ref.clean_up() (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - self.assertTrue(flag, resp) + assert flag, resp class FQTransposePullerTest(unittest.TestCase): diff --git a/tools/mo/unit_tests/mo/back/ShuffleChannelPatternOptimization_test.py b/tools/mo/unit_tests/mo/back/ShuffleChannelPatternOptimization_test.py index 681ea349c8de65..ff4aaf8b1897ad 100644 --- a/tools/mo/unit_tests/mo/back/ShuffleChannelPatternOptimization_test.py +++ b/tools/mo/unit_tests/mo/back/ShuffleChannelPatternOptimization_test.py @@ -1,10 +1,8 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -import unittest from argparse import Namespace - -from generator import generate, generator +import pytest from openvino.tools.mo.back.ShuffleChannelPatternOptimization import ShuffleChannelFusion, DepthToSpaceFusion from openvino.tools.mo.ops.depth_to_space import DepthToSpaceOp @@ -18,8 +16,7 @@ valued_const_with_data, connect, regular_op_with_empty_data -@generator -class ShuffleChannelFusionTest(unittest.TestCase): +class TestShuffleChannelFusionTest(): @staticmethod def get_graphs(input_shape, reshape_0_pattern, order, reshape_1_pattern, group): nodes = { @@ -67,7 +64,7 @@ def get_graphs(input_shape, reshape_0_pattern, order, reshape_1_pattern, group): return graph, graph_ref - @generate(*[ + @pytest.mark.parametrize("input_shape, reshape_0_pattern, order, reshape_1_pattern, group",[ ([1, 512, 7, 6], [1, 2, 256, 7, 6], [0, 2, 1, 3, 4], [1, 512, 7, 6], 2), ([2, 512, 7, 6], [2, 2, 256, 7, 6], [0, 2, 1, 3, 4], [2, 512, 7, 6], 2), ([1, 200, 200, 200], [1, 50, 4, 200, 200], [0, 2, 1, 3, 4], [1, 200, 200, 200], 50), @@ -77,11 +74,11 @@ def test_fusion(self, input_shape, reshape_0_pattern, order, reshape_1_pattern, ShuffleChannelFusion().find_and_replace_pattern(graph) graph.clean_up() (flag, resp) = compare_graphs(graph, graph_ref, 'output') - self.assertTrue(flag, resp) - self.assertTrue(len(graph.get_op_nodes(name='final_reshape')) == 1 and - graph.get_op_nodes(name='final_reshape')[0].op == 'ShuffleChannels') + assert flag, resp + assert len(graph.get_op_nodes(name='final_reshape')) == 1 and \ + graph.get_op_nodes(name='final_reshape')[0].op == 'ShuffleChannels' - @generate(*[ + @pytest.mark.parametrize("input_shape, reshape_0_pattern, order, reshape_1_pattern, group",[ ([1, 512, 7, 6], [0, 2, 256, 7, 6], [0, 2, 1, 3, 4], [1, 512, 7, 6], 2), ([1, 512, 7, 6], [1, 2, 256, 7, 6], [0, 2, 1, 4, 3], [1, 512, 7, 6], 2), ([1, 512, 7, 6], [1, 2, 256, 7, 6], [0, 2, 1, 3, 4], [-1, 512, 7, 6], 2), @@ -91,11 +88,10 @@ def test_negative(self, input_shape, reshape_0_pattern, order, reshape_1_pattern graph_ref = graph.copy() ShuffleChannelFusion().find_and_replace_pattern(graph) (flag, resp) = compare_graphs(graph, graph_ref, 'output') - self.assertTrue(flag, resp) + assert flag, resp -@generator -class DepthToSpaceFusionTest(unittest.TestCase): +class TestDepthToSpaceFusionTest(): @staticmethod def get_graphs(input_shape, reshape_0_pattern, order, reshape_1_pattern, block_size): nodes = { @@ -145,7 +141,7 @@ def get_graphs(input_shape, reshape_0_pattern, order, reshape_1_pattern, block_s return graph, graph_ref - @generate(*[ + @pytest.mark.parametrize("input_shape, reshape_0_pattern, order, reshape_1_pattern, block_size",[ ([1, 512, 7, 6], [1, 2, 2, 
128, 7, 6], [0, 1, 4, 2, 5, 3], [1, 128, 14, 12], 2), ([2, 512, 7, 6], [2, 2, 2, 128, 7, 6], [0, 1, 4, 2, 5, 3], [2, 128, 14, 12], 2), ([1, 200, 200, 200], [1, 2, 2, 50, 200, 200], [0, 1, 4, 2, 5, 3], [1, 50, 400, 400], 2), @@ -155,11 +151,11 @@ def test_fusion(self, input_shape, reshape_0_pattern, order, reshape_1_pattern, DepthToSpaceFusion().find_and_replace_pattern(graph) graph.clean_up() (flag, resp) = compare_graphs(graph, graph_ref, 'output') - self.assertTrue(flag, resp) - self.assertTrue(len(graph.get_op_nodes(name='final_reshape')) == 1 and - graph.get_op_nodes(name='final_reshape')[0].op == 'DepthToSpace') + assert flag, resp + assert len(graph.get_op_nodes(name='final_reshape')) == 1 and \ + graph.get_op_nodes(name='final_reshape')[0].op == 'DepthToSpace' - @generate(*[ + @pytest.mark.parametrize("input_shape, reshape_0_pattern, order, reshape_1_pattern, group",[ ([1, 512, 7, 6], [0, 2, 2, 128, 7, 6], [0, 1, 4, 2, 5, 3], [1, 128, 14, 12], 2), ([2, 512, 7, 6], [2, 2, 2, 128, 7, 6], [0, 1, 4, 2, 5, 3], [-1, 128, 14, 12], 2), ([1, 200, 200, 200], [1, 2, 2, 50, 200, 200], [0, 1, 4, 2, 3, 5], [1, 50, 400, 400], 2), @@ -169,4 +165,4 @@ def test_negative(self, input_shape, reshape_0_pattern, order, reshape_1_pattern graph_ref = graph.copy() DepthToSpaceFusion().find_and_replace_pattern(graph) (flag, resp) = compare_graphs(graph, graph_ref, 'output') - self.assertTrue(flag, resp) + assert flag, resp diff --git a/tools/mo/unit_tests/mo/convert/import_from_mo_test.py b/tools/mo/unit_tests/mo/convert/import_from_mo_test.py index 603ff6eeb4b06d..a0565fbb0f7132 100644 --- a/tools/mo/unit_tests/mo/convert/import_from_mo_test.py +++ b/tools/mo/unit_tests/mo/convert/import_from_mo_test.py @@ -5,18 +5,16 @@ import tempfile from pathlib import Path -from generator import generator, generate +import pytest from openvino.runtime import serialize from openvino.tools.mo import InputCutInfo, LayoutMap from openvino.tools.mo.utils.ir_engine.ir_engine import IREngine -from unit_tests.mo.unit_test_with_mocked_telemetry import UnitTestWithMockedTelemetry from unit_tests.utils.graph import build_graph from utils import create_onnx_model, save_to_onnx -@generator -class ConvertImportMOTest(UnitTestWithMockedTelemetry): +class TestConvertImportMOTest(): test_directory = os.path.dirname(os.path.realpath(__file__)) @staticmethod @@ -79,7 +77,7 @@ def create_model_ref(): ]) return ref_graph - @generate(*[ + @pytest.mark.parametrize("params",[ ({}), ({'input': InputCutInfo(name='LeakyRelu_out', shape=None, type=None, value=None)}), ({'layout': {'input': LayoutMap(source_layout='NCHW', target_layout='NHWC')}}), diff --git a/tools/mo/unit_tests/mo/convert/meta_data_test.py b/tools/mo/unit_tests/mo/convert/meta_data_test.py index b5e78a15b0f67d..515dfd28ea50b2 100644 --- a/tools/mo/unit_tests/mo/convert/meta_data_test.py +++ b/tools/mo/unit_tests/mo/convert/meta_data_test.py @@ -5,7 +5,6 @@ import tempfile from pathlib import Path -from generator import generator from openvino.runtime import get_version as get_rt_version from openvino.runtime import serialize @@ -18,7 +17,6 @@ from openvino.tools.mo.utils.ir_reader.restore_graph import restore_graph_from_ir, save_restored_graph -@generator class MetaDataTest(UnitTestWithMockedTelemetry): test_directory = os.path.dirname(os.path.realpath(__file__)) diff --git a/tools/mo/unit_tests/mo/front/Pack_test.py b/tools/mo/unit_tests/mo/front/Pack_test.py index efa9a45b5179bb..73ab96db5fc80e 100644 --- a/tools/mo/unit_tests/mo/front/Pack_test.py +++ 
b/tools/mo/unit_tests/mo/front/Pack_test.py @@ -1,10 +1,9 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -import unittest import numpy as np -from generator import generator, generate +import pytest from openvino.tools.mo.front.Pack import Pack from openvino.tools.mo.front.common.partial_infer.utils import int64_array @@ -34,10 +33,10 @@ } -@generator -class PackTest(unittest.TestCase): +class TestPackTest(): - @generate(*[(2, 2, 0), (3, 3, 0), (4, 4, 0), (4, 4, 1), (4, 1, 0), (4, 1, 1)]) + @pytest.mark.parametrize("num_inputs, num_placeholders, axis", [(2, 2, 0), (3, 3, 0), (4, 4, 0), + (4, 4, 1), (4, 1, 0), (4, 1, 1)]) def test_pack_test_all(self, num_inputs: int, num_placeholders: int, axis: list): graph_edges = [] @@ -79,4 +78,4 @@ def test_pack_test_all(self, num_inputs: int, num_placeholders: int, axis: list) replacer.find_and_replace_pattern(graph) (flag, resp) = compare_graphs(graph, graph_ref, 'last', check_op_attrs=True) - self.assertTrue(flag, resp) + assert flag, resp diff --git a/tools/mo/unit_tests/mo/front/common/partial_infer/concat_test.py b/tools/mo/unit_tests/mo/front/common/partial_infer/concat_test.py index a8d8347a050eb3..20edc563d213b7 100644 --- a/tools/mo/unit_tests/mo/front/common/partial_infer/concat_test.py +++ b/tools/mo/unit_tests/mo/front/common/partial_infer/concat_test.py @@ -1,10 +1,9 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -import unittest +import pytest import numpy as np -from generator import generate, generator from openvino.tools.mo.front.common.partial_infer.concat import concat_infer from openvino.tools.mo.front.common.partial_infer.utils import shape_array, dynamic_dimension_value, strict_compare_tensors @@ -20,9 +19,9 @@ } -@generator -class TestConcatPartialInfer(unittest.TestCase): - @generate(*[([1, 3, 227, 227], [1, 3, 220, 227], [1, 3, 447, 227], 2), +class TestConcatPartialInfer(): + @pytest.mark.parametrize("shape1, shape2, output_shape, axis",[([1, 3, 227, 227], [1, 3, 220, 227], + [1, 3, 447, 227], 2), ([1, 3, 227, 227], [1, 3, 227, 220], [1, 3, 227, 447], -1), ([1, 3, dynamic_dimension_value, 227], [1, dynamic_dimension_value, 227, 220], [1, 3, 227, 447], -1), ([1, 3, 10, 227], [1, 3, 10, dynamic_dimension_value], [1, 3, 10, dynamic_dimension_value], -1), @@ -43,9 +42,10 @@ def test_concat_infer(self, shape1, shape2, output_shape, axis): concat_node = Node(graph, 'concat') concat_infer(concat_node) res_shape = graph.node['node_3']['shape'] - self.assertTrue(strict_compare_tensors(output_shape, res_shape)) + assert strict_compare_tensors(output_shape, res_shape) - @generate(*[(shape_array([1]), shape_array([4]), shape_array([1, 4]), 0), + @pytest.mark.parametrize("value1, value2, output_value, axis",[(shape_array([1]), + shape_array([4]), shape_array([1, 4]), 0), (shape_array([dynamic_dimension_value]), shape_array([4]), shape_array([dynamic_dimension_value, 4]), -1), ]) @@ -65,7 +65,7 @@ def test_concat_value_infer(self, value1, value2, output_value, axis): concat_node = Node(graph, 'concat') concat_infer(concat_node) res_value = graph.node['node_3']['value'] - self.assertTrue(strict_compare_tensors(output_value, res_value)) + assert strict_compare_tensors(output_value, res_value) def test_concat_infer_not_match(self): graph = build_graph(nodes_attributes, @@ -81,7 +81,7 @@ def test_concat_infer_not_match(self): }) concat_node = Node(graph, 'concat') - with self.assertRaisesRegex(Error, "Concat input shapes do not match for node*"): + with 
pytest.raises(Error, match="Concat input shapes do not match for node*"): concat_infer(concat_node) def test_concat_infer_no_shape(self): @@ -98,5 +98,5 @@ def test_concat_infer_no_shape(self): }) concat_node = Node(graph, 'concat') - with self.assertRaisesRegex(Error, "One of the input shapes is not defined for node *"): + with pytest.raises(Error, match="One of the input shapes is not defined for node *"): concat_infer(concat_node) diff --git a/tools/mo/unit_tests/mo/front/common/partial_infer/eltwise_test.py b/tools/mo/unit_tests/mo/front/common/partial_infer/eltwise_test.py index 7e4bedf454e2e0..1a352dfb19c206 100644 --- a/tools/mo/unit_tests/mo/front/common/partial_infer/eltwise_test.py +++ b/tools/mo/unit_tests/mo/front/common/partial_infer/eltwise_test.py @@ -4,7 +4,7 @@ import unittest import numpy as np -from generator import generator, generate +import pytest from openvino.tools.mo.front.common.partial_infer.eltwise import eltwise_infer, eltwise_reverse_infer from openvino.tools.mo.front.common.partial_infer.utils import shape_array, strict_compare_tensors, \ @@ -24,9 +24,9 @@ } -@generator -class TestEltwiseInfer(unittest.TestCase): - @generate(*[ + +class TestEltwiseInfer(): + @pytest.mark.parametrize("value1, shape1, value2, shape2, shape_infer, exp_value, exp_shape",[ (np.array(2), [], np.array(3), [], lambda a, b: np.multiply(a, b), np.array(6), []), (np.array(2), [], np.array(3), [], lambda a, b: np.maximum(a, b), np.array(3), []), (np.array(2), [], np.array(3), [], lambda a, b: np.add(a, b), np.array(5), []), @@ -67,8 +67,8 @@ def test_eltwise_infer(self, value1, shape1, value2, shape2, shape_infer, exp_va res_shape = graph.node['node_3']['shape'] res_value = eltwise_node.out_node().value if exp_value is not None: - self.assertTrue(strict_compare_tensors(res_value, shape_array(exp_value))) - self.assertTrue(strict_compare_tensors(res_shape, shape_array(exp_shape))) + assert strict_compare_tensors(res_value, shape_array(exp_value)) + assert strict_compare_tensors(res_shape, shape_array(exp_shape)) def test_eltwise_infer_none_val(self): graph = build_graph(nodes_attributes, @@ -89,9 +89,9 @@ def test_eltwise_infer_none_val(self): res_shape = graph.node['node_3']['shape'] res_value = eltwise_node.out_node().value for i in range(0, len(exp_shape)): - self.assertEqual(exp_shape[i], res_shape[i]) + assert exp_shape[i] == res_shape[i] - self.assertIsNone(res_value) + assert res_value is None def test_eltwise_infer_none_min_max(self): graph = build_graph(nodes_attributes, @@ -107,7 +107,7 @@ def test_eltwise_infer_none_min_max(self): graph.graph['layout'] = 'NCHW' eltwise_node = Node(graph, 'eltw_1') - with self.assertRaisesRegex(Error, 'Input shapes mismatch*'): + with pytest.raises(Error, match='Input shapes mismatch*'): eltwise_infer(eltwise_node) diff --git a/tools/mo/unit_tests/mo/front/common/partial_infer/utils_test.py b/tools/mo/unit_tests/mo/front/common/partial_infer/utils_test.py index b6fc90204ac42c..e535d7f8f20603 100644 --- a/tools/mo/unit_tests/mo/front/common/partial_infer/utils_test.py +++ b/tools/mo/unit_tests/mo/front/common/partial_infer/utils_test.py @@ -4,7 +4,7 @@ import unittest import numpy as np -from generator import generator, generate +import pytest from openvino.tools.mo.front.common.partial_infer.utils import int64_array, mo_array, is_fully_defined, \ dynamic_dimension_value, dynamic_dimension, shape_array, compatible_shapes, shape_delete, shape_insert, \ @@ -26,9 +26,8 @@ def gen_masked_array(array, masked_indices): return res -@generator -class 
IsFullyDefinedTest(unittest.TestCase): - @generate(*[(None, False), +class TestIsFullyDefinedTest(): + @pytest.mark.parametrize("data, result",[(None, False), (int64_array([2, 3, 5, 7]), True), # int64 array with valid values (np.array([2, 3, 5, 7]), True), # any numpy array with valid values (np.array([2, dynamic_dimension_value]), True), # array with dynamic dimension value is fully defined! @@ -42,12 +41,11 @@ class IsFullyDefinedTest(unittest.TestCase): ([dynamic_dimension, 1], False), # list with dynamic dimension is not fully defined ]) def test_is_fully_defined(self, data, result): - self.assertEqual(is_fully_defined(data), result) + assert is_fully_defined(data) == result -@generator -class ShapeArrayTest(unittest.TestCase): - @generate(*[([1], shape_array([1]), True), +class TestShapeArrayTest(): + @pytest.mark.parametrize("data, ref, result",[([1], shape_array([1]), True), # if we provide a list with dynamic_dimension_value then it is converted to dynamic dimension ([dynamic_dimension_value, 5], gen_masked_array([1, 5], [0]), True), # if we provide a list with dynamic_dimension then the generated shape array still have it @@ -56,12 +54,12 @@ class ShapeArrayTest(unittest.TestCase): ([2], gen_masked_array([1], []), False), ]) def test_shape_array(self, data, ref, result): - self.assertEqual(strict_compare_tensors(shape_array(data), ref), result) + assert strict_compare_tensors(shape_array(data), ref) == result -@generator -class CompareShapesTest(unittest.TestCase): - @generate(*[(gen_masked_array([1, 2, 3], []), gen_masked_array([1, 2, 3], []), True), +class TestCompareShapesTest(): + @pytest.mark.parametrize("input1, input2, result",[(gen_masked_array([1, 2, 3], []), + gen_masked_array([1, 2, 3], []), True), (gen_masked_array([4, 2, 3], []), gen_masked_array([1, 2, 3], []), False), (gen_masked_array([1, 2], []), gen_masked_array([1, 2, 3], []), False), (gen_masked_array([1, 2, 3], []), gen_masked_array([1, 2], []), False), @@ -75,12 +73,12 @@ class CompareShapesTest(unittest.TestCase): (np.array([1, 2]), np.array([3, 2]), False), ]) def test_compare_shapes(self, input1, input2, result): - self.assertEqual(compatible_shapes(input1, input2), result) + assert compatible_shapes(input1, input2) == result -@generator -class ShapeDeleteTest(unittest.TestCase): - @generate(*[(gen_masked_array([1, 2, 3], []), [], gen_masked_array([1, 2, 3], [])), +class TestShapeDeleteTest(): + @pytest.mark.parametrize("shape, indices, result",[(gen_masked_array([1, 2, 3], []), [], + gen_masked_array([1, 2, 3], [])), # [1, d, 3] -> [d, 3]. Indices input is a list (gen_masked_array([1, 2, 3], [1]), [0], gen_masked_array([2, 3], [0])), # [1, d, 3] -> [d, 3]. Indices input is a numpy array @@ -103,16 +101,16 @@ class ShapeDeleteTest(unittest.TestCase): (np.array([1, 2, 3, 4]), -2, [1, 2, 4]), # [1, 2, 3, 4] -> [1, 2, 4]. 
Negative index ]) def test_shape_delete(self, shape, indices, result): - self.assertTrue(strict_compare_tensors(shape_delete(shape, indices), result)) + assert strict_compare_tensors(shape_delete(shape, indices), result) def test_shape_delete_raise_exception(self): - with self.assertRaisesRegex(Error, '.*Incorrect parameter type.*'): + with pytest.raises(Error, match ='.*Incorrect parameter type.*'): shape_delete(gen_masked_array([1, 2, 3], []), {}) -@generator -class ShapeInsertTest(unittest.TestCase): - @generate(*[(gen_masked_array([1, 2, 3], []), 1, [5], gen_masked_array([1, 5, 2, 3], [])), +class TestShapeInsertTest(): + @pytest.mark.parametrize("shape, pos, values, result",[(gen_masked_array([1, 2, 3], []), 1, [5], + gen_masked_array([1, 5, 2, 3], [])), (gen_masked_array([1, 2, 3], [1]), 1, [5], gen_masked_array([1, 5, 2, 3], [2])), (gen_masked_array([1, 2, 3], [1]), 1, [dynamic_dimension], gen_masked_array([1, 5, 2, 3], [1, 2])), (gen_masked_array([1, 2, 3], [1]), 0, [dynamic_dimension], gen_masked_array([5, 1, 2, 3], [0, 2])), @@ -124,26 +122,25 @@ class ShapeInsertTest(unittest.TestCase): (gen_masked_array([1], [0]), 0, [7, dynamic_dimension], gen_masked_array([7, 5, 2], [1, 2])), ]) def test_shape_insert(self, shape, pos, values, result): - self.assertTrue(strict_compare_tensors(shape_insert(shape, pos, values), result)) + assert strict_compare_tensors(shape_insert(shape, pos, values), result) def test_shape_insert_raise_exception(self): - with self.assertRaisesRegex(Error, '.*Incorrect parameter type.*'): + with pytest.raises(Error, match='.*Incorrect parameter type.*'): shape_insert(gen_masked_array([1, 2, 3], []), 2, {}) -@generator -class mo_array_test(unittest.TestCase): - @generate(*[(mo_array([2, 3, 5, 7]), np.array([2, 3, 5, 7])), +class Testmo_array_test(): + @pytest.mark.parametrize("data, result",[(mo_array([2, 3, 5, 7]), np.array([2, 3, 5, 7])), (mo_array([2., 3., 5., 7.], dtype=np.float64), np.array([2., 3., 5., 7.])), (mo_array([2., 3., 5., 7.]), np.array([2., 3., 5., 7.], dtype=np.float32)), ]) def test_mo_array_positive(self, data, result): - self.assertEqual(data.dtype, result.dtype) + assert data.dtype == result.dtype - @generate(*[(mo_array([2., 3., 5., 7.]), np.array([2., 3., 5., 7.])), + @pytest.mark.parametrize("data, result",[(mo_array([2., 3., 5., 7.]), np.array([2., 3., 5., 7.])), ]) def test_mo_array_negative(self, data, result): - self.assertNotEqual(data.dtype, result.dtype) + assert data.dtype != result.dtype class clarify_partial_shape_test(unittest.TestCase): diff --git a/tools/mo/unit_tests/mo/front/interpolate_reshape_test.py b/tools/mo/unit_tests/mo/front/interpolate_reshape_test.py index da55cd415bf8a4..7d62cff4b70d96 100644 --- a/tools/mo/unit_tests/mo/front/interpolate_reshape_test.py +++ b/tools/mo/unit_tests/mo/front/interpolate_reshape_test.py @@ -1,10 +1,9 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -import unittest import numpy as np -from generator import generator, generate +import pytest from openvino.tools.mo.front.interpolate_reshape import InterpolateWithConcat from openvino.tools.mo.front.common.partial_infer.utils import int64_array @@ -39,8 +38,7 @@ } -@generator -class TestInterpolateConcat(unittest.TestCase): +class TestInterpolateConcat(): def test_interpolate_concat_reshape_graph_comparison(self): graph = build_graph(nodes, [ *connect('placeholder', '0:interpolate'), @@ -64,7 +62,7 @@ def test_interpolate_concat_reshape_graph_comparison(self): *connect('concat', 'output'), ], 
nodes_with_edges_only=True) (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - self.assertTrue(flag, resp) + assert flag, resp def test_interpolate_identity_concat_reshape_graph_comparison(self): graph = build_graph(nodes, [ @@ -97,7 +95,7 @@ def test_interpolate_identity_concat_reshape_graph_comparison(self): *connect('concat', 'output'), ], nodes_with_edges_only=True) (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - self.assertTrue(flag, resp) + assert flag, resp def test_interpolate_concat_negate(self): graph = build_graph(nodes, [ @@ -120,9 +118,9 @@ def test_interpolate_concat_negate(self): *connect('identity_01', 'output_1'), ], nodes_with_edges_only=True) (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - self.assertTrue(flag, resp) + assert flag, resp - @generate(*[ + @pytest.mark.parametrize("update_attrs",[ {'concat': {'axis': None}}, {'concat': {'axis': -1}}, @@ -148,7 +146,7 @@ def test_negative_axes_conditions(self, update_attrs): ], update_attributes=update_attrs, nodes_with_edges_only=True) (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - self.assertTrue(flag, resp) + assert flag, resp def test_interpolate_tf_style_concat(self): graph = build_graph(nodes, [ @@ -161,4 +159,4 @@ def test_interpolate_tf_style_concat(self): graph_ref = graph.copy() InterpolateWithConcat().find_and_replace_pattern(graph) (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - self.assertTrue(flag, resp) + assert flag, resp diff --git a/tools/mo/unit_tests/mo/front/kaldi/tdnn_component_replacer_test.py b/tools/mo/unit_tests/mo/front/kaldi/tdnn_component_replacer_test.py index 0b71abb4be8a1b..14385f66b7930b 100644 --- a/tools/mo/unit_tests/mo/front/kaldi/tdnn_component_replacer_test.py +++ b/tools/mo/unit_tests/mo/front/kaldi/tdnn_component_replacer_test.py @@ -1,20 +1,18 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -import unittest import numpy as np -from generator import generator, generate +import pytest from openvino.tools.mo.front.kaldi.tdnn_component_replacer import TdnnComponentReplacer from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs from unit_tests.utils.graph import build_graph, regular_op, result, connect_front, const -@generator -class TdnnComponentReplacerTest(unittest.TestCase): +class TestTdnnComponentReplacerTest(): - @generate(*[ + @pytest.mark.parametrize("weights, biases, time_offsets",[ ([[1, 1, 1], [4, 4, 4]], [1, 2], [-1, 1],), ([[1, 1, 1], [4, 4, 4]], [1, 2], [-1, 1, 2, 10, 1000],), ([[1, 1, 1], [4, 4, 4]], [1, 2], [-1, 0]), @@ -72,4 +70,4 @@ def generate_offsets(): TdnnComponentReplacer().find_and_replace_pattern(graph) (flag, resp) = compare_graphs(graph, ref_graph, 'result', check_op_attrs=True) - self.assertTrue(flag, resp) + assert flag, resp diff --git a/tools/mo/unit_tests/mo/front/mxnet/MXFFTToDFT_test.py b/tools/mo/unit_tests/mo/front/mxnet/MXFFTToDFT_test.py index 31b3f9eb97d89b..d1eb31dc1af674 100644 --- a/tools/mo/unit_tests/mo/front/mxnet/MXFFTToDFT_test.py +++ b/tools/mo/unit_tests/mo/front/mxnet/MXFFTToDFT_test.py @@ -2,9 +2,7 @@ # SPDX-License-Identifier: Apache-2.0 -import unittest - -from generator import generator, generate +import pytest from openvino.tools.mo.front.mxnet.MXFFTToDFT import MXFFTToDFT from openvino.tools.mo.front.common.partial_infer.utils import int64_array @@ -152,10 +150,8 @@ ('abs', 'output'), ] - -@generator -class 
MXFFTToDFTTest(unittest.TestCase): - @generate(*[int64_array([3, 100, 100, 8]), int64_array([5, 60])]) +class TestMXFFTToDFTTest(): + @pytest.mark.parametrize("input_shape",[int64_array([3, 100, 100, 8]), int64_array([5, 60])]) def test_fft_replacement(self, input_shape): graph = build_graph(nodes_attrs=fft_graph_node_attrs, edges=fft_graph_edges, @@ -170,9 +166,9 @@ def test_fft_replacement(self, input_shape): 'placeholder': {'shape': input_shape} }) (flag, resp) = compare_graphs(graph, ref_graph, 'output') - self.assertTrue(flag, resp) + assert flag, resp - @generate(*[int64_array([3, 100, 100, 8]), int64_array([5, 60])]) + @pytest.mark.parametrize("input_shape",[int64_array([3, 100, 100, 8]), int64_array([5, 60])]) def test_ifft_replacement(self, input_shape): graph = build_graph(nodes_attrs=fft_graph_node_attrs, edges=fft_graph_edges, @@ -188,4 +184,4 @@ def test_ifft_replacement(self, input_shape): 'placeholder': {'shape': input_shape} }) (flag, resp) = compare_graphs(graph, ref_graph, 'output') - self.assertTrue(flag, resp) + assert flag, resp diff --git a/tools/mo/unit_tests/mo/front/onnx/AttributedSliceToSlice_test.py b/tools/mo/unit_tests/mo/front/onnx/AttributedSliceToSlice_test.py index edf66a916ed330..e6bbd636b6bc2e 100644 --- a/tools/mo/unit_tests/mo/front/onnx/AttributedSliceToSlice_test.py +++ b/tools/mo/unit_tests/mo/front/onnx/AttributedSliceToSlice_test.py @@ -1,19 +1,17 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -import unittest +import pytest import numpy as np -from generator import generator, generate from openvino.tools.mo.front.onnx.AttributedSliceToSlice import AttributedSliceToSliceReplacer from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs from unit_tests.utils.graph import build_graph, regular_op_with_empty_data, result, const, connect_front -@generator -class SliceReplacerTest(unittest.TestCase): - @generate(*[ +class TestSliceReplacerTest(): + @pytest.mark.parametrize("attributed_slice_attrs",[ {'op': 'AttributedSlice', 'type': None, 'starts': np.array([0, 0]), 'ends': np.array([1, -1]), 'axes': np.array([0, 1])} ]) def test_attributed_slice_replacer(self, attributed_slice_attrs): @@ -46,4 +44,4 @@ def test_attributed_slice_replacer(self, attributed_slice_attrs): ], nodes_with_edges_only=True) (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - self.assertTrue(flag, resp) + assert flag, resp diff --git a/tools/mo/unit_tests/mo/front/onnx/activation_ext_test.py b/tools/mo/unit_tests/mo/front/onnx/activation_ext_test.py index ff510b7573c94b..282a92c7629e36 100644 --- a/tools/mo/unit_tests/mo/front/onnx/activation_ext_test.py +++ b/tools/mo/unit_tests/mo/front/onnx/activation_ext_test.py @@ -1,11 +1,9 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -import unittest - import numpy as np import onnx -from generator import generator, generate +import pytest import openvino.tools.mo.front.onnx.activation_ext as extractors from openvino.tools.mo.ops.activation_ops import Elu @@ -15,8 +13,7 @@ from unit_tests.utils.graph import build_graph -@generator -class ActivationOpsONNXExtractorTest(unittest.TestCase): +class TestActivationOpsONNXExtractorTest(): @staticmethod def _create_node(op_name: str): pb = onnx.helper.make_node(op_name, ["X"], ["Y"]) @@ -37,7 +34,7 @@ def _match(self, out, ref): status = out[key] == ref[key] if type(status) in [list, np.ndarray]: status = np.all(status) - self.assertTrue(status, 'Mismatch for field {}, 
observed: {}, expected: {}'.format(key, out[key], ref[key])) + assert status, f"Mismatch for field {key}, observed: {out[key]}, expected: {ref[key]}" @staticmethod def _extract(op_name): @@ -45,7 +42,7 @@ def _extract(op_name): getattr(extractors, op_name + 'Extractor').extract(node) return node.graph.node[node.id] - @generate(*['Abs', 'Acos', 'Asin', 'Atan', 'Acosh', 'Asinh', 'Atanh', 'Cos', 'Cosh', 'Erf', 'Exp', 'Floor', 'Log', 'Not', 'Sigmoid', 'Sin', + @pytest.mark.parametrize("op_name",['Abs', 'Acos', 'Asin', 'Atan', 'Acosh', 'Asinh', 'Atanh', 'Cos', 'Cosh', 'Erf', 'Exp', 'Floor', 'Log', 'Not', 'Sigmoid', 'Sin', 'Sinh', 'Tan', 'Tanh']) def test_default(self, op_name): ref = self._base_attrs(op_name) @@ -55,8 +52,7 @@ def test_default(self, op_name): self._match(out, ref) -@generator -class TestEluONNXExt(unittest.TestCase): +class TestEluONNXExt(): @staticmethod def _create_elu_node(alpha=1.0): pb = onnx.helper.make_node( @@ -72,7 +68,7 @@ def _create_elu_node(alpha=1.0): def setUpClass(cls): Op.registered_ops['Elu'] = Elu - @generate(*[1.0, 2.0, 3.0]) + @pytest.mark.parametrize("alpha",[1.0, 2.0, 3.0]) def test_elu_ext(self, alpha): node = self._create_elu_node(alpha) extractors.EluExtractor.extract(node) @@ -84,4 +80,4 @@ def test_elu_ext(self, alpha): } for key in exp_res.keys(): - self.assertEqual(node[key], exp_res[key]) + assert node[key] == exp_res[key] diff --git a/tools/mo/unit_tests/mo/front/onnx/squeeze_ext_test.py b/tools/mo/unit_tests/mo/front/onnx/squeeze_ext_test.py index 8dd62f9b1ac86c..a1659794832ebe 100644 --- a/tools/mo/unit_tests/mo/front/onnx/squeeze_ext_test.py +++ b/tools/mo/unit_tests/mo/front/onnx/squeeze_ext_test.py @@ -1,11 +1,9 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -import unittest - import numpy as np import onnx -from generator import generator, generate +import pytest from openvino.tools.mo.front.onnx.squeeze_ext import SqueezeFrontExtractor from openvino.tools.mo.ops.op import Op @@ -13,8 +11,7 @@ from unit_tests.utils.extractors import PB -@generator -class TestSqueezeONNXExt(unittest.TestCase): +class TestSqueezeONNXExt(): @staticmethod def _create_squeeze_node(axes): if axes is None: @@ -38,7 +35,7 @@ def _create_squeeze_node(axes): def setUpClass(cls): Op.registered_ops['Squeeze'] = Squeeze - @generate(*[[0, 1, 2, 3], [1], None]) + @pytest.mark.parametrize("axes",[[0, 1, 2, 3], [1], None]) def test_squeeze_ext(self, axes): node = self._create_squeeze_node(axes) SqueezeFrontExtractor.extract(node) @@ -50,6 +47,6 @@ def test_squeeze_ext(self, axes): for key in exp_res.keys(): if type(node[key]) in [list, np.ndarray]: - self.assertTrue(np.array_equal(np.array(node[key]), np.array(exp_res[key]))) + assert np.array_equal(np.array(node[key]), np.array(exp_res[key])) else: - self.assertEqual(node[key], exp_res[key]) + assert node[key] == exp_res[key] diff --git a/tools/mo/unit_tests/mo/front/onnx/transpose_ext_test.py b/tools/mo/unit_tests/mo/front/onnx/transpose_ext_test.py index c7fdd8c24e9747..c58e4871cb80ec 100644 --- a/tools/mo/unit_tests/mo/front/onnx/transpose_ext_test.py +++ b/tools/mo/unit_tests/mo/front/onnx/transpose_ext_test.py @@ -2,11 +2,10 @@ # SPDX-License-Identifier: Apache-2.0 import itertools -import unittest +import pytest import numpy as np import onnx -from generator import generator, generate from openvino.tools.mo.front.onnx.transpose_ext import TransposeFrontExtractor from openvino.tools.mo.ops.transpose import Transpose @@ -14,8 +13,7 @@ from unit_tests.utils.extractors import PB 
-@generator -class TestTransposeONNXExt(unittest.TestCase): +class TestTransposeONNXExt(): @staticmethod def _create_transpose_node(order: list): if order is None: @@ -42,7 +40,7 @@ def setUpClass(cls): pass # This generator generates all permutations for [0,1,2,3] and [0,1,2] orders - @generate(*[list(order) for order in list(itertools.permutations(np.arange(4)))] + + @pytest.mark.parametrize("order",[list(order) for order in list(itertools.permutations(np.arange(4)))] + [list(order) for order in list(itertools.permutations(np.arange(3)))] + [None]) def test_transpose_ext(self, order): node = self._create_transpose_node(order) @@ -56,7 +54,7 @@ def test_transpose_ext(self, order): for key in exp_res.keys(): if isinstance(exp_res[key], list): - self.assertTrue(np.array_equal(node[key], exp_res[key]), - "Orders are not the same: {} and {}".format(node[key], exp_res[key])) + assert np.array_equal(node[key], exp_res[key]),\ + "Orders are not the same: {} and {}".format(node[key], exp_res[key]) else: - self.assertEqual(node[key], exp_res[key]) + assert node[key] == exp_res[key] diff --git a/tools/mo/unit_tests/mo/front/onnx/unsqueeze_ext_test.py b/tools/mo/unit_tests/mo/front/onnx/unsqueeze_ext_test.py index 852121cecab936..07a38883d2adeb 100644 --- a/tools/mo/unit_tests/mo/front/onnx/unsqueeze_ext_test.py +++ b/tools/mo/unit_tests/mo/front/onnx/unsqueeze_ext_test.py @@ -1,11 +1,10 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -import unittest +import pytest import numpy as np import onnx -from generator import generator, generate from openvino.tools.mo.front.onnx.unsqueeze_ext import UnsqueezeFrontExtractor from openvino.tools.mo.ops.op import Op @@ -13,8 +12,7 @@ from unit_tests.utils.extractors import PB -@generator -class TestUnsqueezeONNXExt(unittest.TestCase): +class TestUnsqueezeONNXExt(): @staticmethod def _create_unsqueeze_node(axes): if axes is None: @@ -38,7 +36,7 @@ def _create_unsqueeze_node(axes): def setUpClass(cls): Op.registered_ops['Unsqueeze'] = Unsqueeze - @generate(*[[0, 1, 2, 3], [1], []]) + @pytest.mark.parametrize("axes",[[0, 1, 2, 3], [1], []]) def test_unsqueeze_ext(self, axes): node = self._create_unsqueeze_node(axes) UnsqueezeFrontExtractor.extract(node) @@ -49,6 +47,6 @@ def test_unsqueeze_ext(self, axes): for key in exp_res.keys(): if type(node[key]) in [list, np.ndarray]: - self.assertTrue(np.array_equal(np.array(node[key]), np.array(exp_res[key]))) + assert np.array_equal(np.array(node[key]), np.array(exp_res[key])) else: - self.assertEqual(node[key], exp_res[key]) + assert node[key] == exp_res[key] diff --git a/tools/mo/unit_tests/mo/front/rank_decomposer_test.py b/tools/mo/unit_tests/mo/front/rank_decomposer_test.py index b068dfcfead9f0..2b7fb690f33080 100644 --- a/tools/mo/unit_tests/mo/front/rank_decomposer_test.py +++ b/tools/mo/unit_tests/mo/front/rank_decomposer_test.py @@ -1,10 +1,9 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -import unittest +import pytest import numpy as np -from generator import generator, generate from openvino.tools.mo.front.rank_decomposer import RankDecomposer from openvino.tools.mo.front.common.partial_infer.utils import int64_array @@ -24,10 +23,9 @@ } -@generator -class RankDecomposerTest(unittest.TestCase): +class TestRankDecomposerTest(): - @generate(np.int32, np.int64) + @pytest.mark.parametrize("output_type", [np.int32, np.int64]) def test_rank_decomposer(self, output_type): graph = build_graph(nodes_attrs=nodes(output_type), edges=[ 
*connect('input', 'rank'), @@ -44,9 +42,9 @@ def test_rank_decomposer(self, output_type): ], nodes_with_edges_only=True) (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - self.assertTrue(flag, resp) - self.assertEqual(graph.get_op_nodes(type='Squeeze')[0]['name'], 'my_rank', - 'Name is not inherited from original node for RankDecomposer') + assert flag, resp + assert graph.get_op_nodes(type='Squeeze')[0]['name'] == 'my_rank',\ + 'Name is not inherited from original node for RankDecomposer' print(output_type) def test_rank_decomposer_assertion(self): @@ -54,4 +52,5 @@ def test_rank_decomposer_assertion(self): *connect('input', 'rank'), *connect('rank', 'output'), ], nodes_with_edges_only=True) - self.assertRaises(AssertionError, RankDecomposer().find_and_replace_pattern, graph) + with pytest.raises(AssertionError): + RankDecomposer().find_and_replace_pattern (graph) diff --git a/tools/mo/unit_tests/mo/front/size_replacer_test.py b/tools/mo/unit_tests/mo/front/size_replacer_test.py index 0ad4dbdcfd5803..75288d4b580cb1 100644 --- a/tools/mo/unit_tests/mo/front/size_replacer_test.py +++ b/tools/mo/unit_tests/mo/front/size_replacer_test.py @@ -1,10 +1,9 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -import unittest +import pytest import numpy as np -from generator import generator, generate from openvino.tools.mo.front.SizeReplacer import SizeFrontReplacer from openvino.tools.mo.front.common.partial_infer.utils import int64_array @@ -23,10 +22,9 @@ } -@generator -class SizeReplacerTest(unittest.TestCase): +class TestSizeReplacerTest(): - @generate(np.int32, np.int64) + @pytest.mark.parametrize("output_type" ,[np.int32, np.int64]) def test_size_replacer(self, output_type): graph = build_graph(nodes_attrs=nodes(output_type), edges=[ *connect('input', 'size'), @@ -42,9 +40,9 @@ def test_size_replacer(self, output_type): ], nodes_with_edges_only=True) (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - self.assertTrue(flag, resp) - self.assertEqual(graph.get_op_nodes(type='ReduceProd')[0]['name'], 'my_size', - 'Name is not inherited from original node for SizeReplacer') + assert flag, resp + assert graph.get_op_nodes(type='ReduceProd')[0]['name'] == 'my_size',\ + 'Name is not inherited from original node for SizeReplacer' print(output_type) def test_size_replacer_assertion(self): @@ -52,4 +50,5 @@ def test_size_replacer_assertion(self): *connect('input', 'size'), *connect('size', 'output'), ], nodes_with_edges_only=True) - self.assertRaises(AssertionError, SizeFrontReplacer().find_and_replace_pattern, graph) + with pytest.raises(AssertionError): + SizeFrontReplacer().find_and_replace_pattern (graph) diff --git a/tools/mo/unit_tests/mo/front/tf/CorrectPaddingsForPadAfterComplex_test.py b/tools/mo/unit_tests/mo/front/tf/CorrectPaddingsForPadAfterComplex_test.py index 744f5585b8e573..687cb9401026b8 100644 --- a/tools/mo/unit_tests/mo/front/tf/CorrectPaddingsForPadAfterComplex_test.py +++ b/tools/mo/unit_tests/mo/front/tf/CorrectPaddingsForPadAfterComplex_test.py @@ -5,7 +5,6 @@ import unittest import numpy as np -from generator import generator, generate from openvino.tools.mo.front.tf.CorrectPaddingsForPadAfterComplex import CorrectPaddingsForPadAfterComplex from openvino.tools.mo.front.common.partial_infer.utils import int64_array diff --git a/tools/mo/unit_tests/mo/front/tf/ObjectDetectionAPI_test.py b/tools/mo/unit_tests/mo/front/tf/ObjectDetectionAPI_test.py index 44ecbe6b265ca7..235a43b540655f 
100644 --- a/tools/mo/unit_tests/mo/front/tf/ObjectDetectionAPI_test.py +++ b/tools/mo/unit_tests/mo/front/tf/ObjectDetectionAPI_test.py @@ -6,7 +6,7 @@ from unittest.mock import patch import os -from generator import generator, generate +import pytest from openvino.tools.mo.front.tf.ObjectDetectionAPI import calculate_shape_keeping_aspect_ratio, \ calculate_placeholder_spatial_shape, ObjectDetectionAPIPreprocessor2Replacement @@ -31,12 +31,11 @@ def get_param(self, param: str): return self._model_params[param] -@generator -class TestCalculateShape(unittest.TestCase): +class TestCalculateShape(): min_size = 600 max_size = 1024 - @generate(*[(100, 300, 341, 1024), + @pytest.mark.parametrize("h, w, th, tw",[(100, 300, 341, 1024), (100, 600, 171, 1024), (100, 3000, 34, 1024), (300, 300, 600, 600), @@ -53,7 +52,7 @@ class TestCalculateShape(unittest.TestCase): (2000, 1800, 667, 600), ]) def test_calculate_shape(self, h, w, th, tw): - self.assertTupleEqual(calculate_shape_keeping_aspect_ratio(h, w, self.min_size, self.max_size), (th, tw)) + assert calculate_shape_keeping_aspect_ratio(h, w, self.min_size, self.max_size) == (th, tw) class TestCalculatePlaceholderSpatialShape(unittest.TestCase): diff --git a/tools/mo/unit_tests/mo/front/tf/RFFTRealImagToRFFTSplit_test.py b/tools/mo/unit_tests/mo/front/tf/RFFTRealImagToRFFTSplit_test.py index 15ec580b118d26..1dfb957c747ce2 100644 --- a/tools/mo/unit_tests/mo/front/tf/RFFTRealImagToRFFTSplit_test.py +++ b/tools/mo/unit_tests/mo/front/tf/RFFTRealImagToRFFTSplit_test.py @@ -1,10 +1,7 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 - -import unittest - -from generator import generator, generate +import pytest from openvino.tools.mo.front.common.partial_infer.utils import int64_array from openvino.tools.mo.front.tf.RFFTRealImagToRFFTSplit import RFFTRealImagToRDFTSplit @@ -85,9 +82,8 @@ ] -@generator -class RFFTRealImagToRFFTSplitTest(unittest.TestCase): - @generate(*[1, 2, 3]) +class TestRFFTRealImagToRFFTSplitTest(): + @pytest.mark.parametrize("num_of_dims",[1, 2, 3]) def test_replacement(self, num_of_dims): graph = build_graph(nodes_attrs=graph_node_attrs, edges=graph_edges, @@ -102,4 +98,4 @@ def test_replacement(self, num_of_dims): 'rfft': {'num_of_dimensions': num_of_dims} }) (flag, resp) = compare_graphs(graph, ref_graph, 'output', check_op_attrs=True) - self.assertTrue(flag, resp) + assert flag, resp diff --git a/tools/mo/unit_tests/mo/front/tf/TFFFTToDFT_test.py b/tools/mo/unit_tests/mo/front/tf/TFFFTToDFT_test.py index b5e5d234a0cf50..2464b8679d8117 100644 --- a/tools/mo/unit_tests/mo/front/tf/TFFFTToDFT_test.py +++ b/tools/mo/unit_tests/mo/front/tf/TFFFTToDFT_test.py @@ -2,9 +2,7 @@ # SPDX-License-Identifier: Apache-2.0 -import unittest - -from generator import generator, generate +import pytest from openvino.tools.mo.front.common.partial_infer.utils import int64_array from openvino.tools.mo.front.tf.TFFFTToDFT import TFFFTToDFT @@ -83,9 +81,8 @@ ] -@generator -class TFFFTToDFTTest(unittest.TestCase): - @generate(*[(2, 'DFT', int64_array([-2, -1])), +class TestTFFFTToDFTTest(): + @pytest.mark.parametrize("num_of_dimensions, dft_type, fft_axes",[(2, 'DFT', int64_array([-2, -1])), (2, 'IDFT', int64_array([-2, -1])), (1, 'DFT', int64_array([-1])), (1, 'IDFT', int64_array([-1])), @@ -113,9 +110,9 @@ def test_replacement(self, num_of_dimensions, dft_type, fft_axes): 'fft_axes': {'value': fft_axes, 'shape': int64_array(fft_axes.shape)}, }) (flag, resp) = compare_graphs(graph, ref_graph, 'output', 
check_op_attrs=True) - self.assertTrue(flag, resp) + assert flag, resp - @generate(*[ + @pytest.mark.parametrize("num_of_dims, fft_kind, fft_axes, input_shape, signal_size",[ (2, 'RDFT', int64_array([-2, -1]), int64_array([3, 100, 100]), int64_array([100, -1])), (2, 'IRDFT', int64_array([-2, -1]), int64_array([3, 100, 100, 2]), int64_array([100, -1])), (2, 'RDFT', int64_array([-2, -1]), int64_array([3, 100, 100]), int64_array([95, 116])), @@ -159,4 +156,4 @@ def test_replacement_for_signal_size(self, num_of_dims, fft_kind, fft_axes, inpu 'fft_axes': {'value': fft_axes, 'shape': int64_array(fft_axes.shape)}, }) (flag, resp) = compare_graphs(graph, ref_graph, 'output', check_op_attrs=True) - self.assertTrue(flag, resp) + assert flag, resp diff --git a/tools/mo/unit_tests/mo/front/tf/WhereDecomposition_test.py b/tools/mo/unit_tests/mo/front/tf/WhereDecomposition_test.py index 0247ab46e5530f..22dc6a4d44e2a8 100644 --- a/tools/mo/unit_tests/mo/front/tf/WhereDecomposition_test.py +++ b/tools/mo/unit_tests/mo/front/tf/WhereDecomposition_test.py @@ -1,12 +1,10 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -import unittest +import pytest import numpy as np -from generator import generator, generate - from openvino.tools.mo.front.tf.WhereDecomposition import WhereDecomposition from openvino.tools.mo.front.common.partial_infer.utils import int64_array from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs @@ -64,9 +62,8 @@ ] -@generator -class TFWhereDecompositionTest(unittest.TestCase): - @generate(*[[1, 100, 120, 150], [16, 125, 14]]) +class TestTFWhereDecompositionTest(): + @pytest.mark.parametrize("input_shape",[[1, 100, 120, 150], [16, 125, 14]]) def test_1(self, input_shape): in_shape = int64_array(input_shape) graph = build_graph(graph_node_attrs, @@ -81,4 +78,4 @@ def test_1(self, input_shape): 'placeholder_data': {'shape': in_shape} }) (flag, resp) = compare_graphs(graph, ref_graph, 'output') - self.assertTrue(flag, resp) + assert flag,resp diff --git a/tools/mo/unit_tests/mo/graph/graph_test.py b/tools/mo/unit_tests/mo/graph/graph_test.py index e106c3ee8065c6..3611c9c4c5faf3 100644 --- a/tools/mo/unit_tests/mo/graph/graph_test.py +++ b/tools/mo/unit_tests/mo/graph/graph_test.py @@ -4,7 +4,7 @@ import unittest import numpy as np -from generator import generator, generate +import pytest from openvino.tools.mo.graph.graph import Node, Graph, add_opoutput, dict_includes_compare_attrs, get_edge_attribute_between_nodes, \ set_edge_attribute_between_nodes @@ -364,8 +364,7 @@ def test_check_shape_consistency_2(self): graph.check_shapes_consistency() -@generator -class TestGraphPortsChecker(unittest.TestCase): +class TestGraphPortsChecker(): nodes = { '0': {'type': 'Parameter', 'value': None, 'kind': 'op', 'op': 'Parameter'}, '0_data': {'value': None, 'shape': None, 'kind': 'data'}, @@ -380,7 +379,7 @@ class TestGraphPortsChecker(unittest.TestCase): '3_data': {'value': None, 'shape': None, 'kind': 'data'}, } - @generate(*[('0', 'in', 1), ('0', 'out', 2), ('1', 'in', 2), ('3', 'out', 2)]) + @pytest.mark.parametrize("node_id, port_type, port_idx",[('0', 'in', 1), ('0', 'out', 2), ('1', 'in', 2), ('3', 'out', 2)]) def test_check_shape_consistency_1(self, node_id: str, port_type: str, port_idx: int): # # ,->2-->2_data---,->3-->3_data @@ -404,7 +403,7 @@ def test_check_shape_consistency_1(self, node_id: str, port_type: str, port_idx: else: node.add_output_port(idx=port_idx) - with self.assertRaisesRegex(Error, "Node {} has not consecutive {} 
ports indexes:.*".format(node_id, + with pytest.raises (Error, match= "Node {} has not consecutive {} ports indexes:.*".format(node_id, port_type)): graph.check_nodes_ports_are_consecutive() @@ -1864,4 +1863,5 @@ def test_sort_with_start_node(self): stat_node = Node(graph, "E") nodes_names = [node.name for node in graph.pseudo_topological_sort_with_start_node(start_node=stat_node, reverse=True)] - assert nodes_names == ['E'] \ No newline at end of file + assert nodes_names == ['E'] + \ No newline at end of file diff --git a/tools/mo/unit_tests/mo/middle/ConvertGroupedStridedSlice_test.py b/tools/mo/unit_tests/mo/middle/ConvertGroupedStridedSlice_test.py index 4eb09947a85f36..edaebbf1ee1554 100644 --- a/tools/mo/unit_tests/mo/middle/ConvertGroupedStridedSlice_test.py +++ b/tools/mo/unit_tests/mo/middle/ConvertGroupedStridedSlice_test.py @@ -4,7 +4,7 @@ import unittest import numpy as np -from generator import generator, generate +import pytest from openvino.tools.mo.middle.ConvertGroupedStridedSlice import ConvertGroupedStridedSlice from openvino.tools.mo.front.common.partial_infer.utils import int64_array, shape_array, dynamic_dimension_value @@ -108,8 +108,7 @@ ] -@generator -class ConvertGroupedStridedSliceTests(unittest.TestCase): +class TestConvertGroupedStridedSliceTests(): def test_1(self): graph = build_graph(nodes_attributes, [('placeholder_1', 'placeholder_1_data'), @@ -172,7 +171,7 @@ def test_1(self): ConvertGroupedStridedSlice().find_and_replace_pattern(graph) (flag, resp) = compare_graphs(graph, graph_ref, 'concat_1_data', check_op_attrs=True) - self.assertTrue(flag, resp) + assert flag, resp def test_2(self): graph = build_graph(nodes_attributes, @@ -236,7 +235,7 @@ def test_2(self): pattern.find_and_replace_pattern(graph) (flag, resp) = compare_graphs(graph, graph_ref, 'concat_1_data', check_op_attrs=True) - self.assertTrue(flag, resp) + assert flag, resp # Intersection of split ranges in feature dimension def test_3_neg(self): @@ -307,7 +306,7 @@ def test_3_neg(self): pattern.find_and_replace_pattern(graph) (flag, resp) = compare_graphs(graph, graph_ref, 'concat_1_data', check_op_attrs=True) - self.assertTrue(flag, resp) + assert flag, resp # Split range overflow in feature dimension def test_4_neg(self): @@ -377,7 +376,7 @@ def test_4_neg(self): ConvertGroupedStridedSlice().find_and_replace_pattern(graph) (flag, resp) = compare_graphs(graph, graph_ref, 'concat_1_data', check_op_attrs=True) - self.assertTrue(flag, resp) + assert flag, resp # Split(1,H,W,54)--->Fake_data (1,H,W,1) # |`---->Sslice1_out (1,H,W,18) @@ -447,7 +446,7 @@ def test_5(self): ConvertGroupedStridedSlice().find_and_replace_pattern(graph) (flag, resp) = compare_graphs(graph, graph_ref, 'concat_1_data', check_op_attrs=True) - self.assertTrue(flag, resp) + assert flag, resp # Split(1,H,W,54) # |`---->Sslice1_out (1,H,W,(0,18)) @@ -511,7 +510,7 @@ def test_6(self): ConvertGroupedStridedSlice().find_and_replace_pattern(graph) (flag, resp) = compare_graphs(graph, graph_ref, 'concat_1_data', check_op_attrs=True) - self.assertTrue(flag, resp) + assert flag, resp def test_7_neg(self): graph = build_graph(nodes_attributes, @@ -567,7 +566,7 @@ def test_7_neg(self): pattern.find_and_replace_pattern(graph) (flag, resp) = compare_graphs(graph, graph_ref, 'concat_1_data', check_op_attrs=True) - self.assertTrue(flag, resp) + assert flag, resp # Split(1,54,W,C) # |`---->Sslice1_out (1,(0,18),W,C) @@ -628,10 +627,10 @@ def test_8(self): pattern.find_and_replace_pattern(graph) (flag, resp) = compare_graphs(graph, 
graph_ref, 'concat_1_data', check_op_attrs=True) - self.assertTrue(flag, resp) + assert flag, resp # Test for the case when there is only 1 StridedSlice. - @generate(*[(np.array([1, 227, 227, 54]), + @pytest.mark.parametrize("input_shape, slices, output_shape",[(np.array([1, 227, 227, 54]), np.array([slice(0, 1, 1), slice(0, 227, 1), slice(0, 227, 1), slice(0, 18, 1)]), np.array([1, 227, 227, 18])), (np.array([57, 16, 100, 23]), @@ -659,7 +658,7 @@ def test_9(self, input_shape, slices, output_shape): pattern = ConvertGroupedStridedSlice() pattern.find_and_replace_pattern(graph) (flag, resp) = compare_graphs(graph, graph_ref, 'op_output', check_op_attrs=True) - self.assertTrue(flag, resp) + assert flag, resp # Test for case when # 1) There are 4 StridedSlice operations. @@ -763,7 +762,7 @@ def test_10(self): pattern = ConvertGroupedStridedSlice() pattern.find_and_replace_pattern(graph) (flag, resp) = compare_graphs(graph, graph_ref, 'concat_1_data', check_op_attrs=True) - self.assertTrue(flag, resp) + assert flag, resp # dynamic slice def test_11(self): @@ -804,7 +803,7 @@ def test_11(self): pattern.find_and_replace_pattern(graph) (flag, resp) = compare_graphs(graph, graph_ref, 'concat_1_data', check_op_attrs=True) - self.assertTrue(flag, resp) + assert flag, resp # one unuque StridedSlice def test_12(self): @@ -841,9 +840,9 @@ def test_12(self): pattern.find_and_replace_pattern(graph) (flag, resp) = compare_graphs(graph, graph_ref, 'sslice_1_data', check_op_attrs=True) - self.assertTrue(flag, resp) + assert flag, resp (flag, resp) = compare_graphs(graph, graph_ref, 'sslice_2_data', check_op_attrs=True) - self.assertTrue(flag, resp) + assert flag, resp class AddReshapeAfterStridedSliceTests(unittest.TestCase): diff --git a/tools/mo/unit_tests/mo/middle/FusedBatchNormTraining_test.py b/tools/mo/unit_tests/mo/middle/FusedBatchNormTraining_test.py index a58cfcecf679ca..e997e57b61471e 100644 --- a/tools/mo/unit_tests/mo/middle/FusedBatchNormTraining_test.py +++ b/tools/mo/unit_tests/mo/middle/FusedBatchNormTraining_test.py @@ -1,10 +1,9 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -import unittest +import pytest import numpy as np -from generator import generator, generate from openvino.tools.mo.middle.FusedBatchNormTraining import FusedBatchNormTraining from openvino.tools.mo.front.common.partial_infer.utils import int64_array @@ -71,9 +70,8 @@ } -@generator -class FusedBatchNormTrainingTest(unittest.TestCase): - @generate(*[ +class TestFusedBatchNormTrainingTest(): + @pytest.mark.parametrize("op",[ 'FusedBatchNorm', 'FusedBatchNormV2', 'FusedBatchNormV3', ]) def test_transformation(self, op: str): @@ -137,7 +135,7 @@ def test_transformation(self, op: str): graph_ref.nodes['batchnorm']['op'] = op (flag, resp) = compare_graphs(graph, graph_ref, 'result', check_op_attrs=True) - self.assertTrue(flag, resp) + assert flag, resp def test_non_training(self): graph = build_graph(nodes_attributes, @@ -161,4 +159,4 @@ def test_non_training(self): shape_inference(graph) (flag, resp) = compare_graphs(graph, graph_ref, 'result', check_op_attrs=True) - self.assertTrue(flag, resp) + assert flag, resp diff --git a/tools/mo/unit_tests/mo/middle/L2NormFusing_test.py b/tools/mo/unit_tests/mo/middle/L2NormFusing_test.py index f952d60daacfe5..b5620a71b582ca 100644 --- a/tools/mo/unit_tests/mo/middle/L2NormFusing_test.py +++ b/tools/mo/unit_tests/mo/middle/L2NormFusing_test.py @@ -1,10 +1,9 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 
-import unittest +import pytest import numpy as np -from generator import generator, generate from openvino.tools.mo.middle.L2NormFusing import L2NormToNorm from openvino.tools.mo.front.common.partial_infer.utils import int64_array @@ -67,9 +66,9 @@ ] -@generator -class L2NormToNormTest(unittest.TestCase): - @generate(*[(int64_array([2, 3]), int64_array([1]), 'NCHW'), # NC layout, normalize C dimension +class TestL2NormToNormTest(): + @pytest.mark.parametrize("input_shape, axes, layout", + [(int64_array([2, 3]), int64_array([1]), 'NCHW'), # NC layout, normalize C dimension (int64_array([2, 3]), int64_array([1]), 'NHWC'), # NC layout, normalize C dimension (int64_array([2, 3, 5]), int64_array([1]), 'NCHW'), # NCH layout, normalize C dimension (int64_array([2, 3, 5]), int64_array([1]), 'NHWC'), # NCH layout, normalize C dimension @@ -102,10 +101,11 @@ def test_positive(self, input_shape, axes, layout): ], edges_after_replacement, nodes_with_edges_only=True) (flag, resp) = compare_graphs(graph, graph_ref, 'result', check_op_attrs=True) - self.assertTrue(graph.node[graph.get_nodes_with_attributes(type='NormalizeL2')[0]]['name'] == 'l2_norm_name') - self.assertTrue(flag, resp) + assert (graph.node[graph.get_nodes_with_attributes(type='NormalizeL2')[0]]['name'] == 'l2_norm_name') + assert flag, resp - @generate(*[(int64_array([2]), int64_array([0]), 'NCHW'), + @pytest.mark.parametrize("input_shape, axes, layout", + [(int64_array([2]), int64_array([0]), 'NCHW'), (int64_array([2, 3]), int64_array([0]), 'NCHW'), (int64_array([2, 3]), int64_array([0]), 'NHWC'), (int64_array([2, 3]), int64_array([0, 1]), 'NCHW'), @@ -161,4 +161,4 @@ def test_negative(self, input_shape, axes, layout): ], edges, nodes_with_edges_only=True) (flag, resp) = compare_graphs(graph, graph_ref, 'result', check_op_attrs=True) - self.assertTrue(flag, resp) + assert flag, resp diff --git a/tools/mo/unit_tests/mo/middle/PreserveRuntimeInfo_test.py b/tools/mo/unit_tests/mo/middle/PreserveRuntimeInfo_test.py index 11ec3cc69ae941..393b4f7bac6c85 100644 --- a/tools/mo/unit_tests/mo/middle/PreserveRuntimeInfo_test.py +++ b/tools/mo/unit_tests/mo/middle/PreserveRuntimeInfo_test.py @@ -1,10 +1,9 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -import unittest +import pytest import numpy as np -from generator import generator, generate from openvino.tools.mo.front.common.partial_infer.elemental import copy_shape_infer from openvino.tools.mo.front.common.partial_infer.utils import int64_array @@ -94,9 +93,8 @@ ] -@generator -class PreserveRuntimeInfoTest(unittest.TestCase): - @generate(*[ +class TestPreserveRuntimeInfoTest(): + @pytest.mark.parametrize("nhwc_to_nchw_order, nchw_to_nhwc_order, add_permutation_attrs",[ ([0, 3, 1, 2], [0, 2, 3, 1], True), ([0, 4, 1, 2, 3], [0, 2, 3, 4, 1], True), (None, None, False), @@ -135,19 +133,19 @@ def test_transpose_insert(self, nhwc_to_nchw_order, nchw_to_nhwc_order, add_perm PreserveRuntimeInfo().find_and_replace_pattern(graph) (flag, resp) = compare_graphs(graph, graph_ref, 'result') - self.assertTrue(flag, resp) + assert flag, resp - self.assertFalse(param_node.has_valid('permute_attrs')) - self.assertFalse(param_node.out_node(0).has_valid('permutation')) + assert not param_node.has_valid('permute_attrs') + assert not param_node.out_node(0).has_valid('permutation') if add_permutation_attrs: rt_info = param_node.rt_info.info old_api_map = rt_info[('old_api_map_order', 0)].info - self.assertTrue(np.array_equal(old_api_map['inverse_order'], nchw_to_nhwc_order)) + 
assert np.array_equal(old_api_map['inverse_order'], nchw_to_nhwc_order) rt_info = result_node.rt_info.info old_api_map = rt_info[('old_api_map_order', 0)].info - self.assertTrue(np.array_equal(old_api_map['order'], nhwc_to_nchw_order)) + assert np.array_equal(old_api_map['order'], nhwc_to_nchw_order) def test_auto_disable_nhwc_to_nchw(self): shape_len = 4 @@ -173,18 +171,18 @@ def test_auto_disable_nhwc_to_nchw(self): PreserveRuntimeInfo().find_and_replace_pattern(graph) (flag, resp) = compare_graphs(graph, graph_ref, 'result') - self.assertTrue(flag, resp) + assert flag, resp rt_info = param_node.rt_info.info old_api_map = rt_info[('old_api_map_order', 0)].info - self.assertTrue(np.array_equal(old_api_map['inverse_order'], [0, 2, 3, 1])) + assert np.array_equal(old_api_map['inverse_order'], [0, 2, 3, 1]) rt_info = result_node.rt_info.info old_api_map = rt_info[('old_api_map_order', 0)].info - self.assertTrue(np.array_equal(old_api_map['order'], [0, 3, 1, 2])) + assert np.array_equal(old_api_map['order'], [0, 3, 1, 2]) - @generate(*[ - ([0, 3, 1, 2], [0, 2, 3, 1], True, 'DFT'), + @pytest.mark.parametrize("nhwc_to_nchw_order, nchw_to_nhwc_order,add_permutation_attrs, fft_kind", + [([0, 3, 1, 2], [0, 2, 3, 1], True, 'DFT'), ([0, 3, 1, 2], [0, 2, 3, 1], True, 'IDFT'), (None, None, False, 'DFT'), (None, None, False, 'IDFT'), @@ -235,12 +233,12 @@ def test_transpose_insert_with_two_result_nodes(self, nhwc_to_nchw_order, nchw_t PreserveRuntimeInfo().find_and_replace_pattern(graph) (flag, resp) = compare_graphs(graph, graph_ref, 'result1') - self.assertTrue(flag, resp) + assert flag, resp - self.assertFalse(param1_node.has_valid('permute_attrs')) - self.assertFalse(param1_node.out_node(0).has_valid('permutation')) + assert not param1_node.has_valid('permute_attrs') + assert not param1_node.out_node(0).has_valid('permutation') if add_permutation_attrs: rt_info = param1_node.rt_info.info old_api_map = rt_info[('old_api_map_order', 0)].info - self.assertTrue(np.array_equal(old_api_map['inverse_order'], nchw_to_nhwc_order)) + assert np.array_equal(old_api_map['inverse_order'], nchw_to_nhwc_order) diff --git a/tools/mo/unit_tests/mo/middle/UpsampleToResample_test.py b/tools/mo/unit_tests/mo/middle/UpsampleToResample_test.py index 6eff641ccdda1a..e65eef945a1705 100644 --- a/tools/mo/unit_tests/mo/middle/UpsampleToResample_test.py +++ b/tools/mo/unit_tests/mo/middle/UpsampleToResample_test.py @@ -1,10 +1,9 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -import unittest +import pytest import numpy as np -from generator import generator, generate from openvino.tools.mo.middle.UpsampleToResample import UpsampleToResample from openvino.tools.mo.front.common.partial_infer.utils import int64_array, float32_array @@ -143,9 +142,8 @@ ] -@generator -class UpsampleToResampleTest(unittest.TestCase): - @generate(*[([2, 10, 20, 30], [1, 1, 5, 5], [2, 3]), +class TestUpsampleToResampleTest(): + @pytest.mark.parametrize("input_shape, scales, axes",[([2, 10, 20, 30], [1, 1, 5, 5], [2, 3]), ([2, 20, 30, 40], [1, 1, 3, 3], [2, 3]), ([2, 10, 20, 30], [1, 1, 6, 5], [2, 3]), ([2, 20, 30, 40], [1, 1, 3, 4], [2, 3]), @@ -193,9 +191,9 @@ def test_conversion(self, input_shape, scales, axes): }) UpsampleToResample().find_and_replace_pattern(graph) (flag, resp) = compare_graphs(graph, ref_graph, 'output') - self.assertTrue(flag, resp) + assert flag, resp - @generate(*[([2, 10, 20, 30], [1, 2, 5, 5],), + @pytest.mark.parametrize("input_shape, scales",[([2, 10, 20, 30], [1, 2, 5, 5],), ([2, 3, 
20, 30, 40], [1, 2, 3, 3, 3],), ]) def test_pattern_does_not_satisfy(self, input_shape, scales): @@ -214,4 +212,4 @@ def test_pattern_does_not_satisfy(self, input_shape, scales): UpsampleToResample().find_and_replace_pattern(graph) (flag, resp) = compare_graphs(graph, ref_graph, 'output') - self.assertTrue(flag, resp) + assert flag, resp diff --git a/tools/mo/unit_tests/mo/middle/dequantize_linear_resolver_test.py b/tools/mo/unit_tests/mo/middle/dequantize_linear_resolver_test.py index 065419fe6d475c..de0c363fb9ad49 100644 --- a/tools/mo/unit_tests/mo/middle/dequantize_linear_resolver_test.py +++ b/tools/mo/unit_tests/mo/middle/dequantize_linear_resolver_test.py @@ -9,7 +9,7 @@ from openvino.tools.mo.front.common.partial_infer.utils import int64_array from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs from unit_tests.utils.graph import build_graph -from generator import generator, generate +import pytest nodes1_attributes = { 'input': {'kind': 'op', 'op': 'AnyOp'}, @@ -145,9 +145,9 @@ def test_dequantize_no_zerop(self): (flag, resp) = compare_graphs(graph, graph_ref, 'out', check_op_attrs=True) self.assertTrue(flag, resp) -@generator -class TestDequantizeWithAxis(unittest.TestCase): - @generate(*[(int64_array([1, 3, 4, 4]), np.array([2, 3, 4, 5], dtype=np.float32), +class TestDequantizeWithAxis(): + @pytest.mark.parametrize("input_shape, scale_param_value, zero_param_value, target_shape, axis", + [(int64_array([1, 3, 4, 4]), np.array([2, 3, 4, 5], dtype=np.float32), np.array([2, 3, 4, 5], dtype=np.uint8), int64_array([1, 1, 4, 1]), 2), (int64_array([1, 3, 4, 4]), int64_array([2, 3, 4, 5]), np.array([2, 3, 4, 5], dtype=np.uint8), int64_array([1, 3, 1, 1]), 1), @@ -234,4 +234,4 @@ def test_dequantize_with_axis(self, input_shape, scale_param_value, zero_param_v DequantizeLinearResolver().find_and_replace_pattern(graph) (flag, resp) = compare_graphs(graph, graph_ref, 'out', check_op_attrs=True) - self.assertTrue(flag, resp) + assert flag, resp diff --git a/tools/mo/unit_tests/mo/middle/quantize_linear_resolver_test.py b/tools/mo/unit_tests/mo/middle/quantize_linear_resolver_test.py index a6b2aceb871ed1..eaacfe796946d8 100644 --- a/tools/mo/unit_tests/mo/middle/quantize_linear_resolver_test.py +++ b/tools/mo/unit_tests/mo/middle/quantize_linear_resolver_test.py @@ -9,7 +9,7 @@ from openvino.tools.mo.front.common.partial_infer.utils import int64_array from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs from unit_tests.utils.graph import build_graph -from generator import generator, generate +import pytest nodes1_attributes = { 'input': {'kind': 'op', 'op': 'AnyOp'}, @@ -247,9 +247,9 @@ def test_quantize_no_zerop(self): self.assertTrue(flag, resp) -@generator -class TestQuantizeWithAxis(unittest.TestCase): - @generate(*[(int64_array([1, 3, 4, 4]), np.array([2, 3, 4, 5], dtype=np.float32), +class TestQuantizeWithAxis(): + @pytest.mark.parametrize("input_shape, scale_param_value, zero_param_value,target_shape, in_low, in_high, out_low, out_high, axis", + [(int64_array([1, 3, 4, 4]), np.array([2, 3, 4, 5], dtype=np.float32), np.array([2, 3, 4, 5], dtype=np.uint8), int64_array([1, 1, 4, 1]), np.array([-2., -3., -4., -5.]), np.array([253., 252., 251., 250.]), 0, 255, 2), @@ -366,4 +366,4 @@ def test_quantize_with_axis(self, input_shape, scale_param_value, zero_param_val QuantizeLinearResolver().find_and_replace_pattern(graph) (flag, resp) = compare_graphs(graph, graph_ref, 'result', check_op_attrs=True) - self.assertTrue(flag, resp) + assert flag, resp 
diff --git a/tools/mo/unit_tests/mo/ops/Complex_test.py b/tools/mo/unit_tests/mo/ops/Complex_test.py index 48e37c04c39c78..4fcd8a61e610c3 100644 --- a/tools/mo/unit_tests/mo/ops/Complex_test.py +++ b/tools/mo/unit_tests/mo/ops/Complex_test.py @@ -1,10 +1,9 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -import unittest +import pytest import numpy as np -from generator import generator, generate from openvino.tools.mo.front.common.partial_infer.utils import int64_array from openvino.tools.mo.graph.graph import Node @@ -31,9 +30,8 @@ ] -@generator -class TestComplexOp(unittest.TestCase): - @generate(*[ +class TestComplexOp(): + @pytest.mark.parametrize("input_shape, output_shape",[ ([1, 260, 100, 150], [1, 260, 100, 150, 2]), ([1, 260, 100], [1, 260, 100, 2]), ([5, 14, 300, 40], [5, 14, 300, 40, 2]), @@ -52,5 +50,5 @@ def test_complex_op_shape_inference(self, input_shape, output_shape): msg = "Complex operation infer failed for case: expected_shape={}, actual_shape={}" - self.assertTrue(np.array_equal(graph.node['complex_data']['shape'], int64_array(output_shape)), - msg.format(output_shape, graph.node['complex_data']['shape'])) + assert np.array_equal(graph.node['complex_data']['shape'], int64_array(output_shape)),\ + msg.format(output_shape, graph.node['complex_data']['shape']) diff --git a/tools/mo/unit_tests/mo/ops/ExtractImagePatches_test.py b/tools/mo/unit_tests/mo/ops/ExtractImagePatches_test.py index 91ea30aa25f5be..89632480a0f809 100644 --- a/tools/mo/unit_tests/mo/ops/ExtractImagePatches_test.py +++ b/tools/mo/unit_tests/mo/ops/ExtractImagePatches_test.py @@ -1,10 +1,8 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 - -import unittest +import pytest import numpy as np -from generator import generator, generate from openvino.tools.mo.ops.ExtractImagePatches import ExtractImagePatches from openvino.tools.mo.front.common.partial_infer.utils import int64_array @@ -26,9 +24,8 @@ ('EIP_data', 'output'), ] -@generator -class TestExtractImagePatchesPartialInfer(unittest.TestCase): - @generate(*[ +class TestExtractImagePatchesPartialInfer(): + @pytest.mark.parametrize("input_shape, sizes, strides, rates, auto_pad, layout, output_shape",[ ([1, 10, 10, 3], [1, 3, 3, 1], [1, 5, 5, 1], [1, 1, 1, 1], 'valid', 'NHWC', [1, 2, 2, 27]), ([1, 10, 10, 3], [1, 3, 3, 1], [1, 5, 5, 1], [1, 2, 2, 1], 'valid', 'NHWC', [1, 2, 2, 27]), ([1, 10, 10, 3], [1, 4, 4, 1], [1, 8, 8, 1], [1, 1, 1, 1], 'valid', 'NHWC', [1, 1, 1, 48]), @@ -65,4 +62,4 @@ def test_eip_infer(self, input_shape, sizes, strides, rates, auto_pad, layout, o eip_node = Node(graph, 'EIP') ExtractImagePatches.infer(eip_node) - self.assertTrue(np.array_equal(eip_node.out_port(0).data.get_shape(), output_shape)) + assert np.array_equal(eip_node.out_port(0).data.get_shape(), output_shape) diff --git a/tools/mo/unit_tests/mo/ops/If_test.py b/tools/mo/unit_tests/mo/ops/If_test.py index 4ced43aec82f3d..fcc457831bd230 100644 --- a/tools/mo/unit_tests/mo/ops/If_test.py +++ b/tools/mo/unit_tests/mo/ops/If_test.py @@ -1,11 +1,10 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -import unittest +import pytest import numpy as np import numpy.testing as npt -from generator import generate, generator from openvino.tools.mo.ops.If import If from openvino.tools.mo.ops.elementwise import Add, Mul @@ -22,9 +21,8 @@ empty_data -@generator -class TestIf(unittest.TestCase): - @generate(*[ +class TestIf(): + @pytest.mark.parametrize("cond, 
output_port_0_shape, output_port_1_shape",[ (np.array([True], dtype=bool), shape_array([3]), shape_array([3])), (np.array([False], dtype=bool), shape_array([3]), shape_array([2])), (shape_array(dynamic_dimension_value), shape_array([3]), shape_array([dynamic_dimension_value])), @@ -94,9 +92,9 @@ def test_simple_shape_inf(self, cond, output_port_0_shape, output_port_1_shape): graph.stage = 'middle' partial_infer(graph) if_node = Node(graph, 'if') - self.assertTrue(strict_compare_tensors(if_node.out_port(0).data.get_shape(), output_port_0_shape)) + assert strict_compare_tensors(if_node.out_port(0).data.get_shape(), output_port_0_shape) # shape of the "then" branch is [3] and shape of the "else" branch is [2], so the output shape is "[dynamic]" - self.assertTrue(strict_compare_tensors(if_node.out_port(1).data.get_shape(), output_port_1_shape)) + assert strict_compare_tensors(if_node.out_port(1).data.get_shape(), output_port_1_shape) def test_fake_results(self): then_graph_nodes = {**valued_const_with_data('fake_const', int64_array(0)), diff --git a/tools/mo/unit_tests/mo/ops/MatMul_test.py b/tools/mo/unit_tests/mo/ops/MatMul_test.py index f1e8c29fe91aeb..319250d89a9a05 100644 --- a/tools/mo/unit_tests/mo/ops/MatMul_test.py +++ b/tools/mo/unit_tests/mo/ops/MatMul_test.py @@ -1,10 +1,9 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -import unittest +import pytest import numpy as np -from generator import generator, generate from openvino.tools.mo.ops.MatMul import MatMul from openvino.tools.mo.front.common.partial_infer.utils import int64_array, shape_array, dynamic_dimension_value @@ -12,8 +11,7 @@ from unit_tests.utils.graph import build_graph_with_attrs -@generator -class TestMatMul(unittest.TestCase): +class TestMatMul(): nodes = [ ('A', {'type': 'Parameter', 'kind': 'op'}), ('A_d', {'kind': 'data'}), @@ -32,7 +30,7 @@ class TestMatMul(unittest.TestCase): ('mat_mul_d', 'op_output'), ] - @generate(*[ + @pytest.mark.parametrize("A_shape, B_shape, C_shape, transpose_a, transpose_b",[ ([1024], [1024, 1000], [1000], False, False), ([dynamic_dimension_value], [1024, 1000], [1000], False, False), ([1024], [dynamic_dimension_value, 1000], [1000], False, False), @@ -65,11 +63,11 @@ def test_positive_matmul_infer(self, A_shape, B_shape, C_shape, transpose_a, tra msg = "MatMul infer failed for case: A_shape={}, B_shape={}, transpose_a={}, transpose_b={} " \ "expected_shape={}, actual_shape={}" - self.assertTrue(np.array_equal(graph.node['mat_mul_d']['shape'], shape_array(C_shape)), + assert np.array_equal(graph.node['mat_mul_d']['shape'], shape_array(C_shape)),\ msg.format(A_shape, B_shape, transpose_a, transpose_b, C_shape, - graph.node['mat_mul_d']['shape'])) + graph.node['mat_mul_d']['shape']) - @generate(*[ + @pytest.mark.parametrize("A_shape, B_shape",[ (None, [1024, 1000]), (1, [1024, 1000]), ([], [1024, 1000]), @@ -84,4 +82,5 @@ def test_negative_matmul_infer(self, A_shape, B_shape): ]) node = Node(graph, 'mat_mul') - self.assertRaises(AssertionError, MatMul.infer, node) + with pytest.raises(AssertionError): + MatMul.infer(node) diff --git a/tools/mo/unit_tests/mo/ops/MatMul_value_propagation_test.py b/tools/mo/unit_tests/mo/ops/MatMul_value_propagation_test.py index f71a84d966c737..e845fe5f20a374 100644 --- a/tools/mo/unit_tests/mo/ops/MatMul_value_propagation_test.py +++ b/tools/mo/unit_tests/mo/ops/MatMul_value_propagation_test.py @@ -1,10 +1,9 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -import unittest +import 
pytest import numpy as np -from generator import generator, generate from openvino.tools.mo.ops.MatMul import MatMul, transpose from openvino.tools.mo.front.common.partial_infer.utils import int64_array @@ -32,9 +31,8 @@ ] -@generator -class TestMatMulValuePropagation(unittest.TestCase): - @generate(*[ +class TestMatMulValuePropagation(): + @pytest.mark.parametrize("a_shape, a_value, b_shape, b_value, transpose_a, transpose_b",[ ([16, 3], np.arange(-5, -5 + 16 * 3).reshape((16, 3)), [3, 5], np.arange(0, 3 * 5).reshape((3, 5)), False, False), @@ -91,4 +89,4 @@ def test_value_propagation(self, a_shape, a_value, b_shape, b_value, transpose_a node_data_shape = node_data.shape ref_data_shape = ref_data.shape msg = "Value propagation for 'matmul' node is not correct." - self.assertTrue(node_data_shape == ref_data_shape and np.all(node_data == ref_data), msg) + assert node_data_shape == ref_data_shape and np.all(node_data == ref_data), msg diff --git a/tools/mo/unit_tests/mo/ops/ONNXResize11_test.py b/tools/mo/unit_tests/mo/ops/ONNXResize11_test.py index 319ccc69dbc233..bf9a42a6b9c0bf 100644 --- a/tools/mo/unit_tests/mo/ops/ONNXResize11_test.py +++ b/tools/mo/unit_tests/mo/ops/ONNXResize11_test.py @@ -1,10 +1,9 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -import unittest +import pytest import numpy as np -from generator import generator, generate from openvino.tools.mo.ops.ONNXResize11 import ONNXResize11Op from openvino.tools.mo.front.common.partial_infer.utils import int64_array @@ -68,9 +67,8 @@ ] -@generator -class TestONNXResize11Op(unittest.TestCase): - @generate(*[([1, 260, 100, 150], [1, 260, 200, 350], [1, 260, 200, 350], [1.0, 1.0, 1.0, 1.0]), +class TestONNXResize11Op(): + @pytest.mark.parametrize("input_shape, output_shape, sizes, scales",[([1, 260, 100, 150], [1, 260, 200, 350], [1, 260, 200, 350], [1.0, 1.0, 1.0, 1.0]), ([1, 260, 100, 150], [1, 260, 200, 350], [1, 1, 200, 350], [1.0, 1.0, 1.0, 1.0]), ([5, 14, 300, 40], [5, 14, 140, 280], [1, 1, 140, 280], [1.0, 1.0, 1.0, 1.0]), ([5, 14, 300, 40], [5, 14, 140, 280], [5, 14, 140, 280], [1.0, 1.0, 1.0, 1.0]), @@ -95,10 +93,11 @@ def test_onnx_resize11_using_sizes(self, input_shape, output_shape, sizes, scale msg = "ONNXResize11 infer failed for case: sizes={}, scales={}, expected_shape={}, actual_shape={}" - self.assertTrue(np.array_equal(graph.node['onnx_resize11_data']['shape'], int64_array(output_shape)), - msg.format(sizes, scales, output_shape, graph.node['onnx_resize11_data']['shape'])) + assert np.array_equal(graph.node['onnx_resize11_data']['shape'], int64_array(output_shape)),\ + msg.format(sizes, scales, output_shape, graph.node['onnx_resize11_data']['shape']) - @generate(*[([1, 260, 100, 150], [1, 260, 200, 350], [1.0, 1.0, 2.0, 350 / 150]), + @pytest.mark.parametrize("input_shape, output_shape, scales", + [([1, 260, 100, 150], [1, 260, 200, 350], [1.0, 1.0, 2.0, 350 / 150]), ([1, 3, 100, 200], [1, 3, 350, 150], [1.0, 1.0, 3.5, 150 / 200]), ([5, 14, 300, 40], [5, 14, 140, 280], [1.0, 1.0, 140 / 300, 7.0]), ([5, 14, 300, 40], [5, 14, 140, 560], [1.0, 1.0, 140 / 300, 14.0]), @@ -121,10 +120,11 @@ def test_onnx_resize_using_scales(self, input_shape, output_shape, scales): msg = "ONNXResize11 infer failed for case: scales={}, expected_shape={}, actual_shape={}" - self.assertTrue(np.array_equal(graph.node['onnx_resize11_data']['shape'], int64_array(output_shape)), - msg.format(scales, output_shape, graph.node['onnx_resize11_data']['shape'])) + assert 
np.array_equal(graph.node['onnx_resize11_data']['shape'], int64_array(output_shape)),\ + msg.format(scales, output_shape, graph.node['onnx_resize11_data']['shape']) - @generate(*[([1, 260, 100, 150], [1, 260, 200, 350], [1, 260, 200, 350], [1.0, 1.0, 1.0, 1.0]), + @pytest.mark.parametrize("input_shape, output_shape, sizes, scales", + [([1, 260, 100, 150], [1, 260, 200, 350], [1, 260, 200, 350], [1.0, 1.0, 1.0, 1.0]), ([1, 260, 100, 150], [1, 260, 200, 350], [1, 1, 200, 350], [1.0, 1.0, 1.0, 1.0]), ([5, 14, 300, 40], [5, 14, 140, 280], [1, 1, 140, 280], [1.0, 1.0, 1.0, 1.0]), ([5, 14, 300, 40], [5, 14, 140, 280], [5, 14, 140, 280], [1.0, 1.0, 1.0, 1.0]), @@ -155,10 +155,11 @@ def test_onnx_resize11_using_sizes_without_roi_input(self, input_shape, output_s msg = "ONNXResize11 infer failed for case: sizes={}, scales={}, expected_shape={}, actual_shape={}" - self.assertTrue(np.array_equal(graph.node['onnx_resize11_data']['shape'], int64_array(output_shape)), - msg.format(sizes, scales, output_shape, graph.node['onnx_resize11_data']['shape'])) + assert np.array_equal(graph.node['onnx_resize11_data']['shape'], int64_array(output_shape)),\ + msg.format(sizes, scales, output_shape, graph.node['onnx_resize11_data']['shape']) - @generate(*[([1, 260, 100, 150], [1, 260, 200, 350], [1.0, 1.0, 2.0, 350 / 150]), + @pytest.mark.parametrize("input_shape, output_shape, scales", + [([1, 260, 100, 150], [1, 260, 200, 350], [1.0, 1.0, 2.0, 350 / 150]), ([1, 3, 100, 200], [1, 3, 350, 150], [1.0, 1.0, 3.5, 150 / 200]), ([5, 14, 300, 40], [5, 14, 140, 280], [1.0, 1.0, 140 / 300, 7.0]), ([5, 14, 300, 40], [5, 14, 140, 560], [1.0, 1.0, 140 / 300, 14.0]), @@ -187,5 +188,5 @@ def test_onnx_resize_using_scales_without_roi(self, input_shape, output_shape, s msg = "ONNXResize11 infer failed for case: scales={}, expected_shape={}, actual_shape={}" - self.assertTrue(np.array_equal(graph.node['onnx_resize11_data']['shape'], int64_array(output_shape)), - msg.format(scales, output_shape, graph.node['onnx_resize11_data']['shape'])) + assert np.array_equal(graph.node['onnx_resize11_data']['shape'], int64_array(output_shape)),\ + msg.format(scales, output_shape, graph.node['onnx_resize11_data']['shape']) diff --git a/tools/mo/unit_tests/mo/ops/ReduceOps_test.py b/tools/mo/unit_tests/mo/ops/ReduceOps_test.py index 8db3a7ab75cd5c..3cfa78c544e910 100644 --- a/tools/mo/unit_tests/mo/ops/ReduceOps_test.py +++ b/tools/mo/unit_tests/mo/ops/ReduceOps_test.py @@ -1,10 +1,9 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -import unittest +import pytest import numpy as np -from generator import generate, generator from openvino.tools.mo.ops.ReduceOps import reduce_infer from openvino.tools.mo.front.common.partial_infer.utils import int64_array, strict_compare_tensors, is_fully_defined @@ -21,9 +20,8 @@ } -@generator -class ReduceLpTest(unittest.TestCase): - @generate(*[ +class TestReduceLpTest(): + @pytest.mark.parametrize("shape, axes, keepdims, p",[ ([3, 2, 2], [0], True, 1), ([3, 2, 2], [0], True, 2), ([3, 2, 2], [1], True, 2), @@ -53,9 +51,9 @@ def test_reduce_lp(self, shape, axes, keepdims, p): reduce_node = Node(graph, 'reduce_lp') reduce_node.op = reduce_node.type = 'ReduceL' + str(p) reduce_infer(reduce_node) - self.assertTrue(np.array_equal(reduce_node.out_port(0).data.get_value(), reduced)) + assert np.array_equal(reduce_node.out_port(0).data.get_value(), reduced) - @generate(*[ + @pytest.mark.parametrize("shape, axes, keepdims, p",[ ([3, 2, 2], [0], True, 1), ([3, 2, 2], [2], False, 2), 
([3, 2, 2], [0, 2], False, 2), @@ -86,4 +84,4 @@ def test_reduce_dynamic(self, shape, axes, keepdims, p): reduce_node = Node(graph, 'reduce_lp') reduce_node.op = reduce_node.type = 'ReduceL' + str(p) reduce_infer(reduce_node) - self.assertTrue(strict_compare_tensors(reduce_node.out_port(0).data.get_value(), fully_undefined)) + assert strict_compare_tensors(reduce_node.out_port(0).data.get_value(), fully_undefined) diff --git a/tools/mo/unit_tests/mo/ops/broadcast_test.py b/tools/mo/unit_tests/mo/ops/broadcast_test.py index 7da252317a7e3c..72ee2cf5c23d47 100644 --- a/tools/mo/unit_tests/mo/ops/broadcast_test.py +++ b/tools/mo/unit_tests/mo/ops/broadcast_test.py @@ -1,10 +1,9 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -import unittest +import pytest import numpy as np -from generator import generator, generate from openvino.tools.mo.front.common.partial_infer.utils import int64_array, undefined_shape_of_rank from openvino.tools.mo.graph.graph import Node @@ -13,31 +12,30 @@ shaped_data -@generator -class BroadcastTest(unittest.TestCase): - @generate(*[ - ([1], [3, 3], None, 'numpy', [[1, 1, 1], [1, 1, 1], [1, 1, 1]]), - ([1], [3, 3], None, 'numpy'), +class TestBroadcastTest(): + @pytest.mark.parametrize("data, target_shape, axes_mapping, mode, ref_out, test_raising",[ + ([1], [3, 3], None, 'numpy', [[1, 1, 1], [1, 1, 1], [1, 1, 1]], False), + ([1], [3, 3], None, 'numpy', None, False), # shape broadcasting - ([1], [1, 2], [0], 'explicit'), - ([1], [1, 2], [-2], 'explicit'), - ([1, 7], [5, 1, 7, 3], [1, 2], 'explicit'), - ([2, 1, 3], [2, 1, 3, 3], [0, 1, 2], 'explicit'), - ([2, 1, 3], [5, 2, 1, 3], [1, 2, 3], 'explicit'), + ([1], [1, 2], [0], 'explicit', None, False), + ([1], [1, 2], [-2], 'explicit', None, False), + ([1, 7], [5, 1, 7, 3], [1, 2], 'explicit', None, False), + ([2, 1, 3], [2, 1, 3, 3], [0, 1, 2], 'explicit', None, False), + ([2, 1, 3], [5, 2, 1, 3], [1, 2, 3], 'explicit', None, False), # value broadcasting - ([1], [1, 2], [0], 'explicit', [[1, 1]]), + ([1], [1, 2], [0], 'explicit', [[1, 1]], False), - ([[3, 1]], [2, 1, 2], [1, 2], 'explicit', [[[3, 1]], [[3, 1]]]), # ref_shape (2, 1, 2) + ([[3, 1]], [2, 1, 2], [1, 2], 'explicit', [[[3, 1]], [[3, 1]]], False), # ref_shape (2, 1, 2) - ([[3, 1]], [2, 1, 2], [-2, -1], 'explicit', [[[3, 1]], [[3, 1]]]), # ref_shape (2, 1, 2) + ([[3, 1]], [2, 1, 2], [-2, -1], 'explicit', [[[3, 1]], [[3, 1]]], False), # ref_shape (2, 1, 2) ([[[9, 5, 7]], [[9, 5, 7]]], [2, 2, 1, 3], [1, 2, 3], 'explicit', # in_shape (2, 1, 3) - [[[[9, 5, 7]], [[9, 5, 7]]], [[[9, 5, 7]], [[9, 5, 7]]]]), # ref_out_shape (2, 2, 1, 3) + [[[[9, 5, 7]], [[9, 5, 7]]], [[[9, 5, 7]], [[9, 5, 7]]]], False), # ref_out_shape (2, 2, 1, 3) ([[[9, 5, 7]], [[3, 4, 8]]], [2, 1, 3, 3], [0, 1, 2], 'explicit', # in_shape (2, 1, 3) - [[[[9, 9, 9], [5, 5, 5], [7, 7, 7]]], [[[3, 3, 3], [4, 4, 4], [8, 8, 8]]]]), # ref_out_shape (2, 1, 3, 3) + [[[[9, 9, 9], [5, 5, 5], [7, 7, 7]]], [[[3, 3, 3], [4, 4, 4], [8, 8, 8]]]], False), # ref_out_shape (2, 1, 3, 3) # negative tests ([1], [2, 2], [0], 'explicit', None, True), @@ -45,7 +43,7 @@ class BroadcastTest(unittest.TestCase): ([1, 7], [5, 2, 7, 3], [2, 1], 'explicit', None, True), ([1, 7], [5, 2, 7, 3], [-3, -2], 'explicit', None, True), ]) - def test_broadcast(self, data, target_shape, axes_mapping=None, mode='numpy', ref_out=None, test_raising=False): + def test_broadcast(self, data, target_shape, axes_mapping, mode, ref_out, test_raising): if ref_out is not None: input = valued_const_with_data('data', 
int64_array(data)) else: @@ -68,25 +66,26 @@ def test_broadcast(self, data, target_shape, axes_mapping=None, mode='numpy', re broadcast_node = Node(graph, 'broadcast') if test_raising: - self.assertRaises(AssertionError, Broadcast.infer, broadcast_node) + with pytest.raises(AssertionError): + Broadcast.infer(broadcast_node) return Broadcast.infer(broadcast_node) if ref_out is not None: - self.assertTrue(np.array_equal(broadcast_node.out_node().value, np.array(ref_out))) + assert np.array_equal(broadcast_node.out_node().value, np.array(ref_out)) else: - self.assertTrue(np.array_equal(broadcast_node.out_node().shape, np.array(target_shape))) - - @generate(*[ - ([1], [3], [0], 'explicit', undefined_shape_of_rank(3)), - ([1], [3], None, 'numpy', undefined_shape_of_rank(3)), - ([1], [3], None, 'bidirectional', undefined_shape_of_rank(3)), - ([1, 7], [4], [1, 2], 'explicit', undefined_shape_of_rank(4)), - ([1, 2], [3], None, 'numpy', undefined_shape_of_rank(3)), - ([1, 1], [2], None, 'bidirectional', undefined_shape_of_rank(2)), + assert np.array_equal(broadcast_node.out_node().shape, np.array(target_shape)) + + @pytest.mark.parametrize("data, target_shape_shape, axes_mapping, mode, ref_out_shape, test_raising",[ + ([1], [3], [0], 'explicit', undefined_shape_of_rank(3), False), + ([1], [3], None, 'numpy', undefined_shape_of_rank(3), False), + ([1], [3], None, 'bidirectional', undefined_shape_of_rank(3),False), + ([1, 7], [4], [1, 2], 'explicit', undefined_shape_of_rank(4), False), + ([1, 2], [3], None, 'numpy', undefined_shape_of_rank(3),False), + ([1, 1], [2], None, 'bidirectional', undefined_shape_of_rank(2), False), ([1, 1], [2, 1], None, 'numpy', None, True), ]) - def test_broadcast_dynamic(self, data, target_shape_shape, axes_mapping=None, mode='numpy', ref_out_shape=None, test_raising=False): + def test_broadcast_dynamic(self, data, target_shape_shape, axes_mapping, mode, ref_out_shape, test_raising): nodes = { **shaped_data('data', int64_array(data)), **shaped_data('target_shape', int64_array(target_shape_shape)), @@ -105,8 +104,9 @@ def test_broadcast_dynamic(self, data, target_shape_shape, axes_mapping=None, mo broadcast_node = Node(graph, 'broadcast') if test_raising: - self.assertRaises(AssertionError, Broadcast.infer, broadcast_node) + with pytest.raises(AssertionError): + Broadcast.infer(broadcast_node) return Broadcast.infer(broadcast_node) - self.assertTrue(np.array_equal(broadcast_node.out_node().shape, ref_out_shape)) + assert np.array_equal(broadcast_node.out_node().shape, ref_out_shape) diff --git a/tools/mo/unit_tests/mo/ops/cast_test.py b/tools/mo/unit_tests/mo/ops/cast_test.py index efdf33a4e90db0..985a7276514235 100644 --- a/tools/mo/unit_tests/mo/ops/cast_test.py +++ b/tools/mo/unit_tests/mo/ops/cast_test.py @@ -1,10 +1,9 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -import unittest +import pytest import numpy as np -from generator import generator, generate from openvino.tools.mo.ops.Cast import Cast from openvino.tools.mo.middle.passes.convert_data_type import packed_U4, packed_I4 @@ -19,8 +18,7 @@ } -@generator -class CastTest(unittest.TestCase): +class TestCastTest(): """ Example of checking: 7 == 0111, padded to 0111 0000, results in 112 @@ -29,7 +27,7 @@ class CastTest(unittest.TestCase): -8 == 1000, padded to 1000 0000, results in 128 """ - @generate(*[ + @pytest.mark.parametrize("value, expected, custom_dtype",[ ([0], [0], packed_U4), ([1], [16], packed_U4), ([2], [32], packed_U4), @@ -110,4 +108,4 @@ def 
test_custom_value_propagation(self, value, expected, custom_dtype): 'value': expected}}) (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - self.assertTrue(flag, resp) + assert flag, resp diff --git a/tools/mo/unit_tests/mo/ops/dft_signal_size_canonicalization_test.py b/tools/mo/unit_tests/mo/ops/dft_signal_size_canonicalization_test.py index 23c893bd215ad3..7843b20337140b 100644 --- a/tools/mo/unit_tests/mo/ops/dft_signal_size_canonicalization_test.py +++ b/tools/mo/unit_tests/mo/ops/dft_signal_size_canonicalization_test.py @@ -1,18 +1,16 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -import unittest +import pytest import numpy as np -from generator import generator, generate from openvino.tools.mo.ops.dft import FFTBase from openvino.tools.mo.front.common.partial_infer.utils import int64_array -@generator -class DFTSignalSizeCanonicalizationTest(unittest.TestCase): - @generate(*[ +class TestDFTSignalSizeCanonicalizationTest(): + @pytest.mark.parametrize("signal_size, axes, input_shape, expected_result",[ (int64_array([-1, 77]), int64_array([1, 2]), int64_array([2, 180, 180, 2]), int64_array([180, 77])), (int64_array([390, 87]), int64_array([2, 0]), int64_array([2, 180, 180, 2]), int64_array([390, 87])), (int64_array([600, -1, 40]), @@ -38,4 +36,4 @@ class DFTSignalSizeCanonicalizationTest(unittest.TestCase): ]) def test_canonicalization(self, signal_size, axes, input_shape, expected_result): canonicalized_signal_size = FFTBase.canonicalize_signal_size(signal_size, axes, input_shape) - self.assertTrue(np.array_equal(canonicalized_signal_size, expected_result)) + assert np.array_equal(canonicalized_signal_size, expected_result) diff --git a/tools/mo/unit_tests/mo/ops/div_value_propagation_test.py b/tools/mo/unit_tests/mo/ops/div_value_propagation_test.py index 9202aa08c98d2a..5d68f196e62fc6 100644 --- a/tools/mo/unit_tests/mo/ops/div_value_propagation_test.py +++ b/tools/mo/unit_tests/mo/ops/div_value_propagation_test.py @@ -1,10 +1,9 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -import unittest +import pytest import numpy as np -from generator import generator, generate from openvino.tools.mo.ops.elementwise import Div from openvino.tools.mo.front.common.partial_infer.utils import int64_array @@ -32,9 +31,8 @@ ] -@generator -class TestDivValuePropagation(unittest.TestCase): - @generate(*[ +class TestDivValuePropagation(): + @pytest.mark.parametrize("a_shape, a_value, b_shape, b_value, elem_type",[ ([2, 3], np.array([[1, 4, -6], [0, -16, 45]], dtype=np.int64), [2, 3], np.array([[1, 2, -4], [1, -8, -5]], dtype=np.int64), np.int64), @@ -80,4 +78,4 @@ def func_for_ref(): node_data_shape = node_data.shape ref_data_shape = ref_data.shape msg = "Value propagation for 'div' node is not correct." 
- self.assertTrue(node_data_shape == ref_data_shape and np.all(node_data == ref_data), msg) + assert node_data_shape == ref_data_shape and np.all(node_data == ref_data), msg diff --git a/tools/mo/unit_tests/mo/ops/einsum_test.py b/tools/mo/unit_tests/mo/ops/einsum_test.py index ad7e81a2c117fa..884f6b5e10c8b6 100644 --- a/tools/mo/unit_tests/mo/ops/einsum_test.py +++ b/tools/mo/unit_tests/mo/ops/einsum_test.py @@ -1,10 +1,9 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -import unittest +import pytest import numpy as np -from generator import generator, generate from openvino.tools.mo.ops.einsum import Einsum from openvino.tools.mo.front.common.partial_infer.utils import int64_array @@ -35,9 +34,8 @@ def create_einsum_graph(input_shapes: list, equation: str) -> Graph: return graph -@generator -class TestEinsum(unittest.TestCase): - @generate(*[ +class TestEinsum(): + @pytest.mark.parametrize("input_shapes, equation, ref_output_shape",[ # dot product ([int64_array([10]), int64_array([10])], "i,i->", int64_array([])), # matrix multiplication @@ -74,22 +72,23 @@ def test_einsum(self, input_shapes, equation, ref_output_shape): # get the result res_output_shape = graph.node['einsum_node_d']['shape'] - self.assertTrue(np.array_equal(ref_output_shape, res_output_shape), - 'shape does not match expected: {} and given: {}'.format(ref_output_shape, res_output_shape)) + assert np.array_equal(ref_output_shape, res_output_shape),\ + 'shape does not match expected: {} and given: {}'.format(ref_output_shape, res_output_shape) - @generate(*[ - # incorrect subscript numbers or inputs - ([int64_array([3, 11]), int64_array([11, 4])], "ab,bc,cd->ac", None), - # invalid labels - ([int64_array([3, 11]), int64_array([11, 4])], "a$,Bc->ac", None), - # incompatible shapes - ([int64_array([3, 11]), int64_array([12, 4])], "ab,bc->ac", None), - # not broadcastable shapes - ([int64_array([11, 1, 4, 3]), int64_array([3, 11, 7, 5])], "a...b,b...->a...", None), - # missed ellipsis - ([int64_array([11, 1, 4, 3]), int64_array([3, 11, 7, 4])], "a...b,b...->a", None), - ]) + @pytest.mark.parametrize("input_shapes, equation, ref_output_shape", [ + # incorrect subscript numbers or inputs + ([int64_array([3, 11]), int64_array([11, 4])], "ab,bc,cd->ac", None), + # invalid labels + ([int64_array([3, 11]), int64_array([11, 4])], "a$,Bc->ac", None), + # incompatible shapes + ([int64_array([3, 11]), int64_array([12, 4])], "ab,bc->ac", None), + # not broadcastable shapes + ([int64_array([11, 1, 4, 3]), int64_array([3, 11, 7, 5])], "a...b,b...->a...", None), + # missed ellipsis + ([int64_array([11, 1, 4, 3]), int64_array([3, 11, 7, 4])], "a...b,b...->a", None), +]) def test_invalid_cases(self, input_shapes, equation, ref_output_shape): graph = create_einsum_graph(input_shapes, equation) einsum_node = Node(graph, 'einsum_node') - self.assertRaises(AssertionError, Einsum.infer, einsum_node) + with pytest.raises(AssertionError): + Einsum.infer(einsum_node) diff --git a/tools/mo/unit_tests/mo/ops/expand_dims_test.py b/tools/mo/unit_tests/mo/ops/expand_dims_test.py index be36d55bd21d97..768fe4077f4a81 100644 --- a/tools/mo/unit_tests/mo/ops/expand_dims_test.py +++ b/tools/mo/unit_tests/mo/ops/expand_dims_test.py @@ -1,10 +1,9 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -import unittest +import pytest import numpy as np -from generator import generator, generate from openvino.tools.mo.front.common.partial_infer.utils import shape_array, dynamic_dimension_value, 
strict_compare_tensors from openvino.tools.mo.graph.graph import Node @@ -28,9 +27,8 @@ } } -@generator -class ExpandDimsOp(unittest.TestCase): - @generate(*[(0, [1, 2, 3, 224, 224]), +class TestExpandDimsOp(): + @pytest.mark.parametrize("axis, ref_out_shape",[(0, [1, 2, 3, 224, 224]), (1, [2, 1, 3, 224, 224]), (2, [2, 3, 1, 224, 224]), (3, [2, 3, 224, 1, 224]), @@ -45,12 +43,11 @@ def test_expand_dims_infer(self, axis, ref_out_shape): ExpandDims.infer(expand_dims_node) - self.assertTrue(np.array_equal(expand_dims_node.out_node().shape, np.array(ref_out_shape))) + assert np.array_equal(expand_dims_node.out_node().shape, np.array(ref_out_shape)) -@generator -class ExpandDimsOpDynamicDims(unittest.TestCase): - @generate(*[(0, [1, 2, 3, dynamic_dimension_value, 224]), +class TestExpandDimsOpDynamicDims(): + @pytest.mark.parametrize("axis, ref_out_shape",[(0, [1, 2, 3, dynamic_dimension_value, 224]), (1, [2, 1, 3, dynamic_dimension_value, 224]), (2, [2, 3, 1, dynamic_dimension_value, 224]), (3, [2, 3, dynamic_dimension_value, 1, 224]), @@ -66,12 +63,11 @@ def test_expand_dims_infer(self, axis, ref_out_shape): ExpandDims.infer(expand_dims_node) - self.assertTrue(strict_compare_tensors(expand_dims_node.out_node().shape, shape_array(ref_out_shape))) + assert strict_compare_tensors(expand_dims_node.out_node().shape, shape_array(ref_out_shape)) -@generator -class ExpandDimsOpValueInfer(unittest.TestCase): - @generate(*[(0, [2, 3, 224, 224], [1, 2, 3, 224, 224]), +class TestExpandDimsOpValueInfer(): + @pytest.mark.parametrize("axis, in_shape, ref_out_shape",[(0, [2, 3, 224, 224], [1, 2, 3, 224, 224]), (1, [2, 3, 224, 224], [2, 1, 3, 224, 224]), (2, [2, 3, 224, 224], [2, 3, 1, 224, 224]), (3, [2, 3, 224, 224], [2, 3, 224, 1, 224]), @@ -88,5 +84,5 @@ def test_expand_dims_infer_value(self, axis, in_shape, ref_out_shape): ExpandDims.infer(expand_dims_node) - self.assertTrue(np.array_equal(expand_dims_node.out_node().shape, np.array(ref_out_shape))) - self.assertTrue(np.array_equal(expand_dims_node.out_node().value, np.array(in_value.reshape(ref_out_shape)))) + assert np.array_equal(expand_dims_node.out_node().shape, np.array(ref_out_shape)) + assert np.array_equal(expand_dims_node.out_node().value, np.array(in_value.reshape(ref_out_shape))) diff --git a/tools/mo/unit_tests/mo/ops/eye_test.py b/tools/mo/unit_tests/mo/ops/eye_test.py index c005200be961a1..1689f161fdd40b 100644 --- a/tools/mo/unit_tests/mo/ops/eye_test.py +++ b/tools/mo/unit_tests/mo/ops/eye_test.py @@ -1,10 +1,9 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -import unittest +import pytest import numpy as np -from generator import generator, generate from openvino.tools.mo.ops.eye import Eye from openvino.tools.mo.front.common.partial_infer.utils import int64_array @@ -50,17 +49,16 @@ ] -@generator -class TestComplexOp(unittest.TestCase): - @generate(*[ - ([], [dynamic_dimension_value, dynamic_dimension_value]), - ([1], [dynamic_dimension_value, dynamic_dimension_value]), +class TestComplexOp(): + @pytest.mark.parametrize("input_shape, output_shape, num_rows, num_cols, batch_shape",[ + ([], [dynamic_dimension_value, dynamic_dimension_value],None,None,[]), + ([1], [dynamic_dimension_value, dynamic_dimension_value],None,None,[]), ([1], [2, dynamic_dimension_value, dynamic_dimension_value], None, None, [2]), ([1], [2, 3, dynamic_dimension_value], 3, None, [2]), ([1], [2, dynamic_dimension_value, 4], None, 4, [2]), ([1], [2, 3, 4], [3], [4], [2]) ]) - def test_complex_op_shape_inference(self, input_shape, 
output_shape, num_rows=None, num_cols=None, batch_shape=[]): + def test_complex_op_shape_inference(self, input_shape, output_shape, num_rows, num_cols, batch_shape): graph = build_graph_with_attrs(nodes_with_attrs=graph_node_attrs_sizes, edges_with_attrs=graph_edges_sizes, update_nodes_attributes=[ @@ -75,8 +73,8 @@ def test_complex_op_shape_inference(self, input_shape, output_shape, num_rows=No msg = "Eye operation infer failed for case: expected_shape={}, actual_shape={}" - self.assertTrue(np.array_equal(graph.node['eye_op_data']['shape'], output_shape), - msg.format(output_shape, graph.node['eye_op_data']['shape'])) + assert np.array_equal(graph.node['eye_op_data']['shape'], output_shape),\ + msg.format(output_shape, graph.node['eye_op_data']['shape']) def test_value_inference(self): graph_node_attrs_sizes = { @@ -103,5 +101,5 @@ def test_value_inference(self): msg = "Eye operation infer failed for case: expected_value={}, actual_value={}" - self.assertTrue(np.array_equal(graph.node['eye_op_d']['value'], output_value), - msg.format(output_value, graph.node['eye_op_d']['value'])) + assert np.array_equal(graph.node['eye_op_d']['value'], output_value),\ + msg.format(output_value, graph.node['eye_op_d']['value']) diff --git a/tools/mo/unit_tests/mo/ops/gatherelements_test.py b/tools/mo/unit_tests/mo/ops/gatherelements_test.py index 90ee62fd4b0f41..feb1807c5cbb5d 100644 --- a/tools/mo/unit_tests/mo/ops/gatherelements_test.py +++ b/tools/mo/unit_tests/mo/ops/gatherelements_test.py @@ -1,10 +1,9 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -import unittest +import pytest import numpy as np -from generator import generator, generate from openvino.tools.mo.ops.gatherelements import GatherElements from openvino.tools.mo.front.common.partial_infer.utils import int64_array, strict_compare_tensors, dynamic_dimension @@ -15,9 +14,8 @@ dyn = dynamic_dimension -@generator -class GatherElementsInferTest(unittest.TestCase): - @generate(*[ +class TestGatherElementsInferTest(): + @pytest.mark.parametrize("data, indices, axis, ref_res",[ ([[1, 2], [3, 4]], [[0, 1], @@ -96,11 +94,11 @@ def test_gatherelements_value_infer(self, data, indices, axis, ref_res): GatherElements.infer(gather_el_node) res_output_shape = gather_el_node.out_node().shape - self.assertTrue(np.array_equal(int64_array(ref_res).shape, res_output_shape)) + assert np.array_equal(int64_array(ref_res).shape, res_output_shape) res_output_value = gather_el_node.out_node().value if res_output_value is not None: - self.assertTrue(np.array_equal(int64_array(ref_res), res_output_value)) + assert np.array_equal(int64_array(ref_res), res_output_value) def check_shape_infer(self, data_shape, indices_shape, axis, ref): nodes = { @@ -121,7 +119,7 @@ def check_shape_infer(self, data_shape, indices_shape, axis, ref): GatherElements.infer(gather_el_node) res_output_shape = gather_el_node.out_node().shape - self.assertTrue(strict_compare_tensors(res_output_shape, ref)) + assert strict_compare_tensors(res_output_shape, ref) def test_shape_infer_1(self): self.check_shape_infer(data_shape=[3], indices_shape=[100], ref=[100], axis=0) @@ -165,13 +163,13 @@ def test_shape_infer_13(self): # negative tests def test_negative_shape_infer_ranks_differ(self): - self.assertRaises(AssertionError, self.check_shape_infer, - data_shape=[1, 3, 64], indices_shape=[1, 3], ref=[1, 3, 1024], axis=2) + with pytest.raises(AssertionError): + self.check_shape_infer(data_shape=[1, 3, 64], indices_shape=[1, 3], ref=[1, 3, 1024], axis=2) def 
test_negative_shape_infer_axis_out_of_bound(self): - self.assertRaises(AssertionError, self.check_shape_infer, - data_shape=[1, 4, 64], indices_shape=[1, 3, 64], ref=[1, 3, 1024], axis=20) + with pytest.raises(AssertionError): + self.check_shape_infer(data_shape=[1, 4, 64], indices_shape=[1, 3, 64], ref=[1, 3, 1024], axis=20) def test_negative_shape_infer_inconsistent_shapes(self): - self.assertRaises(Error, self.check_shape_infer, - data_shape=[1, 4, 64], indices_shape=[1, 3, 64], ref=[1, 3, 1024], axis=2) + with pytest.raises(Error): + self.check_shape_infer(data_shape=[1, 4, 64], indices_shape=[1, 3, 64], ref=[1, 3, 1024], axis=2) diff --git a/tools/mo/unit_tests/mo/ops/interpolate_test.py b/tools/mo/unit_tests/mo/ops/interpolate_test.py index 3d773ea8177c3e..72b954509365b4 100644 --- a/tools/mo/unit_tests/mo/ops/interpolate_test.py +++ b/tools/mo/unit_tests/mo/ops/interpolate_test.py @@ -1,10 +1,9 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -import unittest +import pytest import numpy as np -from generator import generator, generate from openvino.tools.mo.ops.interpolate import Interpolate from openvino.tools.mo.front.common.partial_infer.utils import int64_array @@ -72,9 +71,9 @@ ] -@generator -class TestInterpolateOp(unittest.TestCase): - @generate(*[([0], [0], [1, 3, 100, 200], [1, 3, 350, 150], [350, 150], [3.5, 150 / 200], [2, 3]), +class TestInterpolateOp(): + @pytest.mark.parametrize("pads_begin, pads_end, input_shape, output_shape, sizes, scales, axes", + [([0], [0], [1, 3, 100, 200], [1, 3, 350, 150], [350, 150], [3.5, 150 / 200], [2, 3]), ([0, 3, 10, 10], [0], [16, 7, 190, 400], [8, 10, 390, 600], [8, 390, 600], [0.5, 390 / 200, 600 / 410], [0, 2, 3]), ([10, 5, 0, 10], [0, 4, 16, 18], [4, 33, 1024, 8000], [56, 42, 520, 8028], @@ -114,11 +113,12 @@ def test_interpolate4_using_sizes(self, pads_begin, pads_end, input_shape, outpu msg = "Interpolate-4 infer failed for case: sizes={}, scales={}, pads_begin={}, pads_end={}, axes={}," \ " expected_shape={}, actual_shape={}" - self.assertTrue(np.array_equal(graph.node['interpolate_data']['shape'], int64_array(output_shape)), + assert np.array_equal(graph.node['interpolate_data']['shape'], int64_array(output_shape)),\ msg.format(sizes, scales, pads_begin, pads_end, axes, output_shape, - graph.node['interpolate_data']['shape'])) + graph.node['interpolate_data']['shape']) - @generate(*[([0], [0], [1, 3, 100, 200], [1, 3, 350, 150], [350, 150], [3.5, 150 / 200], [2, 3]), + @pytest.mark.parametrize("pads_begin, pads_end, input_shape, output_shape, sizes, scales, axes", + [([0], [0], [1, 3, 100, 200], [1, 3, 350, 150], [350, 150], [3.5, 150 / 200], [2, 3]), ([0, 3, 10, 10], [0], [16, 7, 190, 400], [8, 10, 390, 600], [8, 390, 600], [0.5, 390 / 200, 600 / 410], [0, 2, 3]), ([10, 5, 0, 10], [0, 4, 16, 18], [4, 33, 1024, 8000], [56, 42, 520, 8028], @@ -165,11 +165,12 @@ def test_interpolate4_using_scales(self, pads_begin, pads_end, input_shape, outp msg = "Interpolate-4 infer failed for case: sizes={}, scales={}, pads_begin={}, pads_end={}, axes={}," \ " expected_shape={}, actual_shape={}" - self.assertTrue(np.array_equal(graph.node['interpolate_data']['shape'], int64_array(output_shape)), + assert np.array_equal(graph.node['interpolate_data']['shape'], int64_array(output_shape)),\ msg.format(sizes, scales, pads_begin, pads_end, axes, output_shape, - graph.node['interpolate_data']['shape'])) + graph.node['interpolate_data']['shape']) - @generate(*[([0], [0], [1, 3, 100, 200], [1, 3, 350, 150], [1, 3, 
350, 150], [1.0, 1.0, 3.5, 150 / 200]), + @pytest.mark.parametrize("pads_begin, pads_end, input_shape, output_shape, sizes, scales", + [([0], [0], [1, 3, 100, 200], [1, 3, 350, 150], [1, 3, 350, 150], [1.0, 1.0, 3.5, 150 / 200]), ([0, 3, 10, 10], [0], [16, 7, 190, 400], [8, 10, 390, 600], [8, 10, 390, 600], [0.5, 1.0, 390 / 200, 600 / 410]), ([10, 5, 0, 10], [0, 4, 16, 18], [4, 33, 1024, 8000], [56, 42, 520, 8028], @@ -212,11 +213,12 @@ def test_interpolate4_using_sizes_without_axes(self, pads_begin, pads_end, input msg = "Interpolate-4 infer failed for case: sizes={}, scales={}, pads_begin={}, pads_end={}," \ " expected_shape={}, actual_shape={}" - self.assertTrue(np.array_equal(graph.node['interpolate_data']['shape'], int64_array(output_shape)), + assert np.array_equal(graph.node['interpolate_data']['shape'], int64_array(output_shape)),\ msg.format(sizes, scales, pads_begin, pads_end, output_shape, - graph.node['interpolate_data']['shape'])) + graph.node['interpolate_data']['shape']) - @generate(*[([0], [0], [1, 3, 100, 200], [1, 3, 350, 150], [1, 3, 350, 150], [1.0, 1.0, 3.5, 150 / 200]), + @pytest.mark.parametrize("pads_begin, pads_end, input_shape, output_shape, sizes, scales", + [([0], [0], [1, 3, 100, 200], [1, 3, 350, 150], [1, 3, 350, 150], [1.0, 1.0, 3.5, 150 / 200]), ([0, 3, 10, 10], [0], [16, 7, 190, 400], [8, 10, 390, 600], [8, 10, 390, 600], [0.5, 1.0, 390 / 200, 600 / 410]), ([10, 5, 0, 10], [0, 4, 16, 18], [4, 33, 1024, 8000], [56, 42, 520, 8028], @@ -262,6 +264,6 @@ def test_interpolate4_using_scales_without_axes(self, pads_begin, pads_end, inpu msg = "Interpolate-4 infer failed for case: sizes={}, scales={}, pads_begin={}, pads_end={}," \ " expected_shape={}, actual_shape={}" - self.assertTrue(np.array_equal(graph.node['interpolate_data']['shape'], int64_array(output_shape)), + assert np.array_equal(graph.node['interpolate_data']['shape'], int64_array(output_shape)),\ msg.format(sizes, scales, pads_begin, pads_end, output_shape, - graph.node['interpolate_data']['shape'])) + graph.node['interpolate_data']['shape']) diff --git a/tools/mo/unit_tests/mo/ops/one_hot_test.py b/tools/mo/unit_tests/mo/ops/one_hot_test.py index be5248177987d5..0c4d839dc620cb 100644 --- a/tools/mo/unit_tests/mo/ops/one_hot_test.py +++ b/tools/mo/unit_tests/mo/ops/one_hot_test.py @@ -1,10 +1,9 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -import unittest +import pytest import numpy as np -from generator import generator, generate from openvino.tools.mo.ops.one_hot import OneHot from openvino.tools.mo.front.common.partial_infer.utils import int64_array, float_array @@ -32,29 +31,28 @@ def generate_nodes(data, axis=-1, depth=4, on_value=1., off_value=0.): ] -@generator -class TestOneHotInfer(unittest.TestCase): - @generate(*[ +class TestOneHotInfer(): + @pytest.mark.parametrize("input_value, exp_value, axis",[ # 0d input - (1, [0, 1, 0, 0]), + (1, [0, 1, 0, 0], -1), # 1d input - ([1, 2], [[0, 1, 0, 0], [0, 0, 1, 0]]), + ([1, 2], [[0, 1, 0, 0], [0, 0, 1, 0]], -1), # 2D input ([[1, 2], [3, 4]], [[[0, 1, 0, 0], [0, 0, 1, 0]], - [[0, 0, 0, 1], [0, 0, 0, 0]]]), + [[0, 0, 0, 1], [0, 0, 0, 0]]], -1), # 3d input ([[[0, 2], [1, 2]], [[2, 1], [3, 0]]], [[[[1, 0, 0, 0], [0, 0, 1, 0]], [[0, 1, 0, 0], [0, 0, 1, 0]]], - [[[0, 0, 1, 0], [0, 1, 0, 0]], [[0, 0, 0, 1], [1, 0, 0, 0]]]]), + [[[0, 0, 1, 0], [0, 1, 0, 0]], [[0, 0, 0, 1], [1, 0, 0, 0]]]], -1), # 1d input with negative indices - ([-2, 2], [[0, 0, 1, 0], [0, 0, 1, 0]]), + ([-2, 2], [[0, 0, 1, 0], [0, 0, 1, 0]], -1), # 
check if axis is neither 0 nor -1 ([[1, 2], [3, 4]], [[[0, 0], [1, 0], [0, 1], [0, 0]], [[0, 0], [0, 0], [0, 0], [1, 0]]], 1) ]) - def test_infer(self, input_value, exp_value, axis=-1): + def test_infer(self, input_value, exp_value, axis): graph = build_graph(generate_nodes(int64_array(input_value), axis), edges) onehot_node = Node(graph, 'one_hot') OneHot.infer(onehot_node) res_value = graph.node['one_hot_d']['value'] - self.assertTrue(np.array_equal(exp_value, int64_array(res_value))) + assert np.array_equal(exp_value, int64_array(res_value)) diff --git a/tools/mo/unit_tests/mo/ops/reshape_test.py b/tools/mo/unit_tests/mo/ops/reshape_test.py index 7b0bb26676b6aa..86fd2eb85b24f7 100644 --- a/tools/mo/unit_tests/mo/ops/reshape_test.py +++ b/tools/mo/unit_tests/mo/ops/reshape_test.py @@ -1,10 +1,9 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -import unittest +import pytest import numpy as np -from generator import generate, generator from openvino.tools.mo.front.common.partial_infer.utils import shape_array, dynamic_dimension_value, strict_compare_tensors from openvino.tools.mo.graph.graph import Node @@ -47,9 +46,8 @@ } -@generator -class TestReshapeShapeInfer(unittest.TestCase): - @generate(*[ +class TestReshapeShapeInfer(): + @pytest.mark.parametrize("input_value, input_shape, output_shape, ref_value, ref_shape",[ (None, shape_array([1, 100, 4]), shape_array([-1, 25]), None, [16, 25]), (None, shape_array([5, 100, 4]), shape_array([0, -1, 25]), None, [5, 16, 25]), (None, shape_array([5, dynamic_dimension_value, 4]), shape_array([4, -1, 5]), None, @@ -89,5 +87,5 @@ def test_reshape_infer(self, input_value, input_shape, output_shape, ref_value, node = Node(graph, 'reshape') Reshape.infer(node) if ref_value is not None: - self.assertTrue(strict_compare_tensors(node.out_port(0).data.get_value(), shape_array(ref_value))) - self.assertTrue(strict_compare_tensors(node.out_port(0).data.get_shape(), shape_array(ref_shape))) + assert strict_compare_tensors(node.out_port(0).data.get_value(), shape_array(ref_value)) + assert strict_compare_tensors(node.out_port(0).data.get_shape(), shape_array(ref_shape)) diff --git a/tools/mo/unit_tests/mo/ops/scatter_test.py b/tools/mo/unit_tests/mo/ops/scatter_test.py index 79b91cec1ceb68..1066ea8b1c5c43 100644 --- a/tools/mo/unit_tests/mo/ops/scatter_test.py +++ b/tools/mo/unit_tests/mo/ops/scatter_test.py @@ -1,10 +1,9 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -import unittest +import pytest import numpy as np -from generator import generator, generate from openvino.tools.mo.ops.scatter import ScatterElementsUpdate, ScatterUpdate from openvino.tools.mo.front.common.partial_infer.utils import int64_array, shape_array, dynamic_dimension_value @@ -12,9 +11,8 @@ from unit_tests.utils.graph import build_graph, regular_op_with_empty_data, result, connect, valued_const_with_data -@generator -class ScatterElementsInferTest(unittest.TestCase): - @generate(*[ +class TestScatterElementsInferTest(): + @pytest.mark.parametrize("data, indices, updates, axis, ref_res",[ ([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], @@ -96,15 +94,14 @@ def test_scatterelements_value_infer(self, data, indices, updates, axis, ref_res ScatterElementsUpdate.infer(scatter_el_node) res_output_shape = scatter_el_node.out_node().shape - self.assertTrue(np.array_equal(int64_array(ref_res).shape, res_output_shape)) + assert np.array_equal(int64_array(ref_res).shape, res_output_shape) res_output_value = 
scatter_el_node.out_node().value - self.assertTrue(np.array_equal(ref_res, res_output_value)) + assert np.array_equal(ref_res, res_output_value) -@generator -class ScatterUpdateInferTest(unittest.TestCase): - @generate(*[ +class TestScatterUpdateInferTest(): + @pytest.mark.parametrize("data, indices, updates, axis, ref_res",[ ([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], @@ -188,7 +185,7 @@ def test_scatter_update_value_infer(self, data, indices, updates, axis, ref_res) ScatterUpdate.infer(scatter_update_node) res_output_shape = scatter_update_node.out_node().shape - self.assertTrue(np.array_equal(int64_array(ref_res).shape, res_output_shape)) + assert np.array_equal(int64_array(ref_res).shape, res_output_shape) res_output_value = scatter_update_node.out_node().value - self.assertTrue(np.array_equal(ref_res, res_output_value)) + assert np.array_equal(ref_res, res_output_value) diff --git a/tools/mo/unit_tests/mo/ops/slice_test.py b/tools/mo/unit_tests/mo/ops/slice_test.py index a07514728ef7eb..c69a8fec7ec3d1 100644 --- a/tools/mo/unit_tests/mo/ops/slice_test.py +++ b/tools/mo/unit_tests/mo/ops/slice_test.py @@ -1,10 +1,9 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -import unittest +import pytest import numpy as np -from generator import generator, generate from openvino.tools.mo.front.common.partial_infer.utils import int64_array, dynamic_dimension_value, shape_array, \ strict_compare_tensors @@ -14,9 +13,8 @@ connect, shaped_data, shaped_const_with_data -@generator -class TestSliceOp(unittest.TestCase): - @generate(*[ +class TestSliceOp(): + @pytest.mark.parametrize("inp_value, inp_shape, starts, ends, axes, steps, expected_value, expected_shape",[ # standard case ([[4, 5, 6, 7], [2, 3, 5, 6], [5, 6, 8, 9], [5, 6, 8, 9]], [4, 4], [0, 1], [3, 2], [0, 1], [1, 1], [[5], [3], [6]], [3, 1]), @@ -107,13 +105,12 @@ def convert_args(val, name=''): Slice.infer(slice_node) if expected_value is not None: - self.assertTrue(strict_compare_tensors(slice_node.out_node().value, expected_value)) - self.assertTrue(strict_compare_tensors(slice_node.out_node().shape, expected_shape)) + assert strict_compare_tensors(slice_node.out_node().value, expected_value) + assert strict_compare_tensors(slice_node.out_node().shape, expected_shape) -@generator -class TestOvSliceOp(unittest.TestCase): - @generate(*[ +class TestOvSliceOp(): + @pytest.mark.parametrize("inp_value, inp_shape, starts, ends, axes, steps, expected_value, expected_shape",[ # standard case ([[4, 5, 6, 7], [2, 3, 5, 6], [5, 6, 8, 9], [5, 6, 8, 9]], [4, 4], [0, 1], [3, 2], [0, 1], [1, 1], [[5], [3], [6]], [3, 1]), @@ -204,5 +201,5 @@ def convert_args(val, name=''): OvSlice.infer(slice_node) if expected_value is not None: - self.assertTrue(strict_compare_tensors(slice_node.out_node().value, expected_value)) - self.assertTrue(strict_compare_tensors(slice_node.out_node().shape, expected_shape)) + assert strict_compare_tensors(slice_node.out_node().value, expected_value) + assert strict_compare_tensors(slice_node.out_node().shape, expected_shape) diff --git a/tools/mo/unit_tests/mo/ops/split_test.py b/tools/mo/unit_tests/mo/ops/split_test.py index 962a476489cc50..de7e5c71c2cd48 100644 --- a/tools/mo/unit_tests/mo/ops/split_test.py +++ b/tools/mo/unit_tests/mo/ops/split_test.py @@ -4,7 +4,7 @@ import unittest import numpy as np -from generator import generator, generate +import pytest from openvino.tools.mo.front.common.partial_infer.utils import int64_array, shape_array, \ dynamic_dimension_value, 
dynamic_dimension, strict_compare_tensors, mo_array @@ -248,8 +248,7 @@ def test_splitv_2_zero_not_last(self): self.assertTrue(np.all(node.split_lengths == np.array([2, 13, 10]))) -@generator -class TestVariadicSplitOp(unittest.TestCase): +class TestVariadicSplitOp(): nodes = { 'input': {'kind': 'op'}, 'split_input_data': {'kind': 'data', 'shape': None, 'value': None}, @@ -280,7 +279,7 @@ class TestVariadicSplitOp(unittest.TestCase): ('split_output_2_data', 'output_2'), ] - @generate(*[int64_array(2), + @pytest.mark.parametrize("axis",[int64_array(2), int64_array([2])]) def test_variadic_split_axis(self, axis): lengths = int64_array([2, 13, 10]) @@ -299,9 +298,9 @@ def test_variadic_split_axis(self, axis): VariadicSplit.infer(node) ont_nodes_count = len(node.out_edges()) - self.assertTrue(ont_nodes_count == 3) + assert ont_nodes_count == 3 for out in range(ont_nodes_count): - self.assertTrue(np.all(node.out_node(out).shape == int64_array([2, 12, lengths[out], 30]))) + assert np.all(node.out_node(out).shape == int64_array([2, 12, lengths[out], 30])) def test_variadic_split_value_inference_with_uint32(self): axis = int64_array(2) @@ -329,11 +328,11 @@ def test_variadic_split_value_inference_with_uint32(self): VariadicSplit.infer(node) ont_nodes_count = len(node.out_edges()) - self.assertTrue(ont_nodes_count == 3) + assert ont_nodes_count == 3 for out in range(ont_nodes_count): - self.assertTrue(np.all(node.out_node(out).shape == int64_array([2, 12, lengths[out], 30]))) + assert np.all(node.out_node(out).shape == int64_array([2, 12, lengths[out], 30])) - @generate(*[int64_array([[2], [2]]), + @pytest.mark.parametrize("axis",[int64_array([[2], [2]]), int64_array([2, 2])]) def test_negative_variadic_split_axis(self, axis): lengths = int64_array([2, 13, 10]) @@ -352,8 +351,8 @@ def test_negative_variadic_split_axis(self, axis): try: VariadicSplit.infer(node) except AssertionError as e: - self.assertTrue(e.args[0] == 'VariadicSplit `axis` should be scalar or tensor with shape [1], ' - 'but it`s not for node split_op') + assert e.args[0] == 'VariadicSplit `axis` should be scalar or tensor with shape [1], '\ + 'but it`s not for node split_op' class TestSplitReverseInfer(unittest.TestCase): diff --git a/tools/mo/unit_tests/mo/ops/squeeze_test.py b/tools/mo/unit_tests/mo/ops/squeeze_test.py index 714e90928b538f..42118250477e8d 100644 --- a/tools/mo/unit_tests/mo/ops/squeeze_test.py +++ b/tools/mo/unit_tests/mo/ops/squeeze_test.py @@ -1,10 +1,9 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -import unittest +import pytest import numpy as np -from generator import generate, generator from openvino.tools.mo.front.common.partial_infer.utils import shape_array, dynamic_dimension_value, strict_compare_tensors from openvino.tools.mo.graph.graph import Node @@ -41,9 +40,8 @@ } -@generator -class TestSqueezeInfer(unittest.TestCase): - @generate(*[ +class TestSqueezeInfer(): + @pytest.mark.parametrize("input_value, input_shape, squeeze_dims, ref_value, ref_shape",[ (None, shape_array([1, 2, 1, 4]), shape_array([2]), None, [1, 2, 4]), # allow squeezing dynamic dimensions (None, shape_array([1, 2, dynamic_dimension_value, 4]), shape_array([2]), None, [1, 2, 4]), @@ -67,10 +65,10 @@ def test_squeeze_squeeze_dims(self, input_value, input_shape, squeeze_dims, ref_ }) node = Node(graph, 'squeeze') if ref_shape is None: # the test should fail - with self.assertRaises(Error): + with pytest.raises(Error): Squeeze.infer(node) else: Squeeze.infer(node) if ref_value is not None: - 
self.assertTrue(strict_compare_tensors(node.out_port(0).data.get_value(), ref_value)) - self.assertTrue(strict_compare_tensors(node.out_port(0).data.get_shape(), ref_shape)) + assert strict_compare_tensors(node.out_port(0).data.get_value(), ref_value) + assert strict_compare_tensors(node.out_port(0).data.get_shape(), ref_shape) diff --git a/tools/mo/unit_tests/mo/ops/transpose_test.py b/tools/mo/unit_tests/mo/ops/transpose_test.py index da3968cb3785d0..b28e0d381d1ae4 100644 --- a/tools/mo/unit_tests/mo/ops/transpose_test.py +++ b/tools/mo/unit_tests/mo/ops/transpose_test.py @@ -3,9 +3,8 @@ import itertools import unittest - +import pytest import numpy as np -from generator import generator, generate from openvino.tools.mo.front.common.partial_infer.utils import int64_array, shape_array, strict_compare_tensors, \ dynamic_dimension_value @@ -19,8 +18,7 @@ input_shape = np.array([1, 3, 224, 224]) -@generator -class TestTransposeOp(unittest.TestCase): +class TestTransposeOp(): nodes_attributes = { 'parameter': { 'kind': 'op', @@ -71,7 +69,7 @@ def _create_graph_with_transpose(self, order): graph.graph['layout'] = 'NCHW' return graph - @generate(*[list(order) for order in list(itertools.permutations(np.arange(4)))]) + @pytest.mark.parametrize("order",[list(order) for order in list(itertools.permutations(np.arange(4)))]) def test_transpose_infer_1(self, order): graph = self._create_graph_with_transpose(order) transpose_node = Node(graph, 'transpose') @@ -79,7 +77,7 @@ def test_transpose_infer_1(self, order): Transpose.infer(transpose_node) ref = [transpose_node.in_node().shape[i] for i in order] - self.assertTrue(np.array_equal(transpose_node.out_node().shape, np.array(ref))) + assert np.array_equal(transpose_node.out_node().shape, np.array(ref)) def test_transpose_infer_2(self): order = None @@ -89,22 +87,24 @@ def test_transpose_infer_2(self): Transpose.infer(transpose_node) ref = np.array([x for x in reversed(transpose_node.in_node().shape)]) - self.assertTrue(np.array_equal(transpose_node.out_node().shape, ref), - "Shapes are not the same: {} and {}".format(transpose_node.out_node().shape, ref)) + assert np.array_equal(transpose_node.out_node().shape, ref),\ + "Shapes are not the same: {} and {}".format(transpose_node.out_node().shape, ref) def test_transpose_infer_neg_1(self): order = np.array([0, 1, 2, 3]) graph = self._create_graph_with_transpose(order) transpose_node = Node(graph, 'transpose') transpose_node['reverse_order'] = True - self.assertRaises(AssertionError, Transpose.infer, transpose_node) + with pytest.raises(AssertionError): + Transpose.infer(transpose_node) def test_transpose_infer_neg_2(self): order = None graph = self._create_graph_with_transpose(order) transpose_node = Node(graph, 'transpose') transpose_node['reverse_order'] = False - self.assertRaises(AssertionError, Transpose.infer, transpose_node) + with pytest.raises(AssertionError): + Transpose.infer(transpose_node) dyn = dynamic_dimension_value diff --git a/tools/mo/unit_tests/mo/ops/unsqueeze_test.py b/tools/mo/unit_tests/mo/ops/unsqueeze_test.py index 72d519dd64c389..668bb71d3d1a7d 100644 --- a/tools/mo/unit_tests/mo/ops/unsqueeze_test.py +++ b/tools/mo/unit_tests/mo/ops/unsqueeze_test.py @@ -1,10 +1,9 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -import unittest +import pytest import numpy as np -from generator import generator, generate from openvino.tools.mo.front.common.partial_infer.utils import int64_array, shape_array, dynamic_dimension_value, strict_compare_tensors 
from openvino.tools.mo.graph.graph import Node @@ -13,8 +12,7 @@ from unit_tests.utils.graph import build_graph -@generator -class TestUnsqueezeOp(unittest.TestCase): +class TestUnsqueezeOp(): nodes_attributes = { 'data_1': { 'kind': 'data', @@ -39,7 +37,8 @@ class TestUnsqueezeOp(unittest.TestCase): } } - @generate(*[(shape_array([1, 3, 64, 64]), int64_array([0, 4]), shape_array([1, 1, 3, 64, 1, 64]), + @pytest.mark.parametrize("input_shape, unsq_dims, output_shape, ref_uns_dims, input_value, output_value", + [(shape_array([1, 3, 64, 64]), int64_array([0, 4]), shape_array([1, 1, 3, 64, 1, 64]), int64_array([0, 4]), None, None), (shape_array([2, 3, 64, 64]), int64_array([-1]), shape_array([2, 3, 64, 64, 1]), int64_array([4]), None, None), @@ -75,7 +74,7 @@ def test_unsqueeze_infer(self, input_shape, unsq_dims, output_shape, ref_uns_dim Unsqueeze.infer(unsqueeze_node) (flag, resp) = compare_graphs(graph, graph_ref, 'data_2') - self.assertTrue(flag, resp) - self.assertTrue(strict_compare_tensors(Node(graph, 'data_2').shape, Node(graph_ref, 'data_2').shape)) + assert flag, resp + assert strict_compare_tensors(Node(graph, 'data_2').shape, Node(graph_ref, 'data_2').shape) if Node(graph_ref, 'data_2').value is not None: - self.assertTrue(strict_compare_tensors(Node(graph, 'data_2').value, Node(graph_ref, 'data_2').value)) + assert strict_compare_tensors(Node(graph, 'data_2').value, Node(graph_ref, 'data_2').value) diff --git a/tools/mo/unit_tests/mo/ops/upsample_test.py b/tools/mo/unit_tests/mo/ops/upsample_test.py index e0ae9020372b61..f70e37a7c2b404 100644 --- a/tools/mo/unit_tests/mo/ops/upsample_test.py +++ b/tools/mo/unit_tests/mo/ops/upsample_test.py @@ -1,10 +1,9 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -import unittest +import pytest import numpy as np -from generator import generator, generate from openvino.tools.mo.ops.upsample import UpsampleOp from openvino.tools.mo.front.common.partial_infer.utils import shape_array, dynamic_dimension_value, strict_compare_tensors @@ -18,9 +17,8 @@ } -@generator -class TestUpsampleOp(unittest.TestCase): - @generate(*[ +class TestUpsampleOp(): + @pytest.mark.parametrize("scales, input_shape, expected_shape",[ (np.array([1., 1., 2., 2.]), shape_array([1, 3, 227, 227]), shape_array([1, 3, 454, 454])), (np.array([1., 1., 2.5, 1.5]), shape_array([1, 5, 227, 227]), shape_array([1, 5, 567, 340])), (np.array([1., 1., 1.3, 0.7]), shape_array([1, 14, 1023, 713]), shape_array([1, 14, 1329, 499])), @@ -46,9 +44,9 @@ def test_upsample_with_scales_infer(self, scales, input_shape, expected_shape): upsample_node = Node(graph, 'upsample') UpsampleOp.upsample_infer(upsample_node) res_shape = graph.node['node_3']['shape'] - self.assertTrue(strict_compare_tensors(expected_shape, res_shape)) + assert strict_compare_tensors(expected_shape, res_shape) - @generate(*[ + @pytest.mark.parametrize("scales, input_shape, expected_shape",[ (np.array([1., 1., 2., 2.]), shape_array([1, 3, 227, 227]), shape_array([1, 3, 454, 454])), (np.array([1., 1., 2.5, 1.5]), shape_array([1, 5, 227, 227]), shape_array([1, 5, 567, 340])), (np.array([1., 1., 1.3, 0.7]), shape_array([1, 14, 1023, 713]), shape_array([1, 14, 1329, 499])), @@ -76,4 +74,4 @@ def test_upsample_with_second_input_infer(self, scales, input_shape, expected_sh upsample_node = Node(graph, 'upsample') UpsampleOp.upsample_infer(upsample_node) res_shape = graph.node['node_3']['shape'] - self.assertTrue(strict_compare_tensors(expected_shape, res_shape)) + assert 
strict_compare_tensors(expected_shape, res_shape) diff --git a/tools/mo/unit_tests/mo/pipeline/common_test.py b/tools/mo/unit_tests/mo/pipeline/common_test.py index c62a45a5b97771..bb71d40b22d0f7 100644 --- a/tools/mo/unit_tests/mo/pipeline/common_test.py +++ b/tools/mo/unit_tests/mo/pipeline/common_test.py @@ -2,17 +2,14 @@ # SPDX-License-Identifier: Apache-2.0 import unittest - -from generator import generator, generate - +import pytest from openvino.tools.mo.graph.graph import Node from openvino.tools.mo.pipeline.common import determined_sort, get_fw_tensor_debug_info, get_sorted_outputs from unit_tests.utils.graph import build_graph_with_edge_attrs -@generator -class TestTopologicalSort(unittest.TestCase): - @generate( +class TestTopologicalSort(): + @pytest.mark.parametrize( "edges",[ [('A', 'Ad', {'out': 0}), ('Ad', 'B', {'in': 0}), ('B', 'Bd', {'out': 0}), @@ -92,7 +89,7 @@ class TestTopologicalSort(unittest.TestCase): ('Hd', 'J', {'in': 1}), ('Dd', 'F', {'in': 1}), ('Fd', 'H', {'in': 1}), - ('Gd', 'H', {'in': 0})] + ('Gd', 'H', {'in': 0})]] ) def test_determined_topological_sort(self, edges): nodes = {'A': {'type': 'Identity', 'kind': 'op'}, @@ -123,8 +120,8 @@ def test_determined_topological_sort(self, edges): outputs = [Node(graph, 'Kd')] for i in range(100): op_order, data_order = determined_sort(outputs) - self.assertListEqual(op_order, ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K']) - self.assertListEqual(data_order, ['Ad', 'Bd', 'Cd', 'Dd', 'Ed', 'Fd', 'Gd', 'Hd', 'Id', 'Jd', 'Kd']) + assert op_order == ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K'] + assert data_order == ['Ad', 'Bd', 'Cd', 'Dd', 'Ed', 'Fd', 'Gd', 'Hd', 'Id', 'Jd', 'Kd'] class TestGetFWTensorName(unittest.TestCase): diff --git a/tools/mo/unit_tests/mo/utils/broadcasting_test.py b/tools/mo/unit_tests/mo/utils/broadcasting_test.py index f12fa029919b56..91b79be4de6361 100644 --- a/tools/mo/unit_tests/mo/utils/broadcasting_test.py +++ b/tools/mo/unit_tests/mo/utils/broadcasting_test.py @@ -1,19 +1,17 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -import unittest +import pytest import numpy as np -from generator import generator, generate from openvino.tools.mo.front.common.partial_infer.utils import int64_array, dynamic_dimension_value, shape_array, strict_compare_tensors from openvino.tools.mo.utils.broadcasting import uni_directional_broadcasting, uni_directional_shape_broadcasting, \ bi_directional_shape_broadcasting -@generator -class TestingBroadcasting(unittest.TestCase): - @generate(*[([], [20, 30, 10], [20, 30, 10]), +class TestingBroadcasting(): + @pytest.mark.parametrize("input_shape, target_shape, expected_shape",[([], [20, 30, 10], [20, 30, 10]), ([1], [20, 30, 10], [20, 30, 10]), ([1, 1, 10], [20, 30, 10], [20, 30, 10]), ([20, 1, 10], [20, 30, 10], [20, 30, 10]), @@ -24,18 +22,18 @@ class TestingBroadcasting(unittest.TestCase): ([5, 10], [1, 10], None), ]) def test_uni_directional_broadcasting(self, input_shape, target_shape, expected_shape): - self.assertTrue(np.array_equal(uni_directional_shape_broadcasting(input_shape, target_shape), expected_shape)) + assert np.array_equal(uni_directional_shape_broadcasting(input_shape, target_shape), expected_shape) input_value = np.array(np.random.rand(*input_shape)) if expected_shape is not None: expected_value = np.broadcast_to(input_value, int64_array(target_shape)) - self.assertTrue(np.array_equal(uni_directional_broadcasting(input_value, int64_array(target_shape)), - expected_value)) + assert 
np.array_equal(uni_directional_broadcasting(input_value, int64_array(target_shape)), + expected_value) else: - with self.assertRaisesRegex(Exception, '.*cannot be uni-directionally broadcasted.*'): + with pytest.raises(Exception,match = '.*cannot be uni-directionally broadcasted.*'): uni_directional_broadcasting(input_value, int64_array(target_shape)) - @generate(*[([], [20, 30, 10], [20, 30, 10]), + @pytest.mark.parametrize("input_shape, target_shape, expected_shape",[([], [20, 30, 10], [20, 30, 10]), ([1], [20, 30, 10], [20, 30, 10]), ([1, 1, 10], [20, 30, 10], [20, 30, 10]), ([20, 1, 10], [20, 30, 10], [20, 30, 10]), @@ -58,11 +56,11 @@ def test_uni_directional_broadcasting(self, input_shape, target_shape, expected_ def test_uni_directional_shape_broadcasting(self, input_shape, target_shape, expected_shape): result = uni_directional_shape_broadcasting(input_shape, target_shape) if expected_shape is None: - self.assertIsNone(result) + assert result is None else: - self.assertTrue(strict_compare_tensors(result, expected_shape)) + assert strict_compare_tensors(result, expected_shape) - @generate(*[([], [20, 30, 10], [20, 30, 10]), + @pytest.mark.parametrize("input_shape, target_shape, expected_shape",[([], [20, 30, 10], [20, 30, 10]), ([1], [20, 30, 10], [20, 30, 10]), ([1, 1, 10], [20, 30, 10], [20, 30, 10]), ([20, 1, 10], [20, 30, 10], [20, 30, 10]), @@ -85,6 +83,6 @@ def test_uni_directional_shape_broadcasting(self, input_shape, target_shape, exp def test_bi_directional_shape_broadcasting(self, input_shape, target_shape, expected_shape): result = bi_directional_shape_broadcasting(input_shape, target_shape) if expected_shape is None: - self.assertIsNone(result) + assert result is None else: - self.assertTrue(strict_compare_tensors(result, expected_shape)) + assert strict_compare_tensors(result, expected_shape) diff --git a/tools/mo/unit_tests/mo/utils/ir_reader/layer_to_class_test.py b/tools/mo/unit_tests/mo/utils/ir_reader/layer_to_class_test.py index 4c846bfe4e5cb1..5fec0f6d0b4e56 100644 --- a/tools/mo/unit_tests/mo/utils/ir_reader/layer_to_class_test.py +++ b/tools/mo/unit_tests/mo/utils/ir_reader/layer_to_class_test.py @@ -1,10 +1,9 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -import unittest +import pytest import numpy as np -from generator import generator, generate import openvino.tools.mo.graph.graph from openvino.tools.mo.graph.graph import Node @@ -17,9 +16,8 @@ from openvino.tools.mo.ops.op import Op -@generator -class TestFunction(unittest.TestCase): - @generate(*[([1, 32, 112, 112], [32, 1, 1, 3], [32, 1, 1, 1, 3], 32), +class TestFunction(): + @pytest.mark.parametrize("shape, weights_shape, reshape_shape, group",[([1, 32, 112, 112], [32, 1, 1, 3], [32, 1, 1, 1, 3], 32), ([1, 32, 112, 112], [32, 1, 1, 1, 3], None, 32), ]) def test_groupconv_to_conv(self, shape, weights_shape, reshape_shape, group): @@ -75,7 +73,7 @@ def test_groupconv_to_conv(self, shape, weights_shape, reshape_shape, group): assert len(reshape_node.in_nodes()) == 0 and len(reshape_node.out_nodes()) == 0 (flag, resp) = compare_graphs(graph, graph_ref, 'result', check_op_attrs=True) - self.assertTrue(flag, resp) + assert flag, resp def test_restore_tensor_names(self): @@ -144,7 +142,7 @@ def test_squeeze(self): # Check that graph wasn't changed after shape infer (flag, resp) = compare_graphs(graph, graph_ref, 'result', check_op_attrs=True) - self.assertTrue(flag, resp) + assert flag, resp def test_squeeze_no_axes(self): nodes_attributes = { @@ -172,7 +170,7 @@ def 
test_squeeze_no_axes(self): # Check that graph wasn't changed after shape infer (flag, resp) = compare_graphs(graph, graph_ref, 'result', check_op_attrs=True) - self.assertTrue(flag, resp) + assert flag, resp def test_unsqueeze(self): nodes_attributes = { @@ -205,4 +203,4 @@ def test_unsqueeze(self): # Check that graph wasn't changed after shape infer (flag, resp) = compare_graphs(graph, graph_ref, 'result', check_op_attrs=True) - self.assertTrue(flag, resp) + assert flag, resp From 4a3ce48f7af1f0d5eb355be6cd9f1ff0aa54be00 Mon Sep 17 00:00:00 2001 From: Mateusz Tabaka Date: Tue, 10 Oct 2023 11:10:05 +0200 Subject: [PATCH 121/257] CompressQuantizeWeights optimizations (#20025) * Optimize CompressQuantizeWeights transformation - remove CoordinateTransform usage from FakeQuantize reference implementation - move ZeroPointOptimizer functionality inside CompressQuantizeWeights - compute scale and zero point in the same loop Ticket: CVS-119273 * review comments * clang format * fix comments --- .../core/offline_transformations.cpp | 1 - .../include/compress_quantize_weights.hpp | 29 +- .../src/compress_quantize_weigths.cpp | 981 ++++++++++++++---- .../tests/utils/compress_quantize_weights.cpp | 36 +- .../openvino/reference/fake_quantize.hpp | 614 ++++++++--- .../functional/op_reference/fake_quantize.cpp | 16 + 6 files changed, 1297 insertions(+), 380 deletions(-) diff --git a/src/bindings/python/src/pyopenvino/core/offline_transformations.cpp b/src/bindings/python/src/pyopenvino/core/offline_transformations.cpp index 17a879c72b5244..215a65da3165b0 100644 --- a/src/bindings/python/src/pyopenvino/core/offline_transformations.cpp +++ b/src/bindings/python/src/pyopenvino/core/offline_transformations.cpp @@ -109,7 +109,6 @@ void regmodule_offline_transformations(py::module m) { [](std::shared_ptr model) { ov::pass::Manager manager; manager.register_pass(); - manager.register_pass(); manager.run_passes(model); }, py::arg("model")); diff --git a/src/common/offline_transformations/include/compress_quantize_weights.hpp b/src/common/offline_transformations/include/compress_quantize_weights.hpp index 62119cd907c914..356ff01195ae3f 100644 --- a/src/common/offline_transformations/include/compress_quantize_weights.hpp +++ b/src/common/offline_transformations/include/compress_quantize_weights.hpp @@ -10,7 +10,6 @@ namespace ov { namespace pass { class CompressQuantizeWeights; -class ZeroPointOptimizer; } // namespace pass } // namespace ov @@ -57,36 +56,10 @@ class ZeroPointOptimizer; Transformation prepares quantized constant data for Low Precision pipeline. Such constant data packing reduces IR size (.bin file size) in offline transformations. With that we can skip same calculations in the runtime and make loading of such sub-graphs to the plugin faster. + Additionally zero point can be fused to weights if it doesn't affect accuracy. 
*/ class ov::pass::CompressQuantizeWeights : public ov::pass::MatcherPass { public: OPENVINO_RTTI("CompressQuantizeWeights", "0"); CompressQuantizeWeights(); }; - -/* - if zero_point == 0 we can eliminate Subtract from following dequantization subgraph: - - +-----------------+ - | Constant | - | (low precision) | - +-----------------+ - | - v - +------------------+ - | Convert | - | (to high prec) | - +------------------+ - | - v - +----------+ +------------+ - |zero point|--->| Subtract | - +----------+ +-----+------+ - | - v -*/ -class ov::pass::ZeroPointOptimizer : public ov::pass::MatcherPass { -public: - OPENVINO_RTTI("ZeroPointOptimizer"); - ZeroPointOptimizer(); -}; diff --git a/src/common/offline_transformations/src/compress_quantize_weigths.cpp b/src/common/offline_transformations/src/compress_quantize_weigths.cpp index f8ff0d9d9f80b8..6c9e4554782a96 100644 --- a/src/common/offline_transformations/src/compress_quantize_weigths.cpp +++ b/src/common/offline_transformations/src/compress_quantize_weigths.cpp @@ -5,95 +5,125 @@ #include "compress_quantize_weights.hpp" #include "openvino/core/rt_info.hpp" #include "openvino/core/validation_util.hpp" -#include "openvino/opsets/opset8.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/convert.hpp" +#include "openvino/op/divide.hpp" +#include "openvino/op/fake_quantize.hpp" +#include "openvino/op/multiply.hpp" +#include "openvino/op/subtract.hpp" #include "openvino/pass/constant_folding.hpp" #include "openvino/pass/pattern/op/or.hpp" #include "openvino/pass/pattern/op/wrap_type.hpp" -#include "transformations/rt_info/decompression.hpp" - -static bool has_dequantization_subgraph(const std::shared_ptr& first_convert) { - auto first_convert_users = first_convert->get_users(); - const auto second_convert = std::find_if(first_convert_users.begin(), - first_convert_users.end(), - [](const std::shared_ptr& n) -> bool { - return ov::is_type(n); - }); - if (second_convert == first_convert_users.end()) - return false; - auto convert_or_subtract_users = (*second_convert)->get_users(); - const auto subtract = std::find_if(convert_or_subtract_users.begin(), - convert_or_subtract_users.end(), - [](const std::shared_ptr& n) -> bool { - return ov::is_type(n); - }); - if (subtract != convert_or_subtract_users.end()) { - convert_or_subtract_users = (*subtract)->get_users(); - } - const auto multiply = std::find_if(convert_or_subtract_users.begin(), - convert_or_subtract_users.end(), - [](const std::shared_ptr& n) -> bool { - return ov::is_type(n); - }); - return multiply != convert_or_subtract_users.end(); -} +#include "openvino/reference/autobroadcast_binop.hpp" +#include "openvino/reference/convert.hpp" +#include "openvino/reference/fake_quantize.hpp" +#include "validation_util.hpp" + +static bool has_dequantization_subgraph(const std::shared_ptr& fq, + std::shared_ptr& convert_to_low_precision, + std::shared_ptr& convert_to_high_precision, + std::shared_ptr& zero_point); + +static bool compute_scale_and_zero_point(const std::shared_ptr& output_low, + const std::shared_ptr& output_high, + size_t levels, + ov::Tensor& scale_tensor, + ov::Tensor& zero_point_tensor, + bool& zero_point_is_zero); + +static std::shared_ptr compress_quantized_weights( + const std::shared_ptr& weights, + const std::shared_ptr& fq, + const std::shared_ptr& input_low, + const std::shared_ptr& input_high, + const std::shared_ptr& output_low, + const std::shared_ptr& output_high, + const std::shared_ptr& convert, + const std::shared_ptr& zero_point, + bool& 
can_fuse_zero_point); + +static std::shared_ptr compress_quantized_weights( + const std::shared_ptr& weights, + const std::shared_ptr& input_low, + const std::shared_ptr& input_high, + const ov::element::Type& low_precision_type, + size_t levels, + bool zero_point_is_zero, + const ov::Tensor& zero_point_tensor, + bool& can_fuse_zero_point); + +static void replace_with_dequantize_subgraph(const std::shared_ptr& fq, + const std::shared_ptr& new_weights, + const ov::element::Type& high_precision_type, + const ov::Shape& scale_or_zero_point_shape, + const ov::Tensor& scale_tensor, + bool zero_point_is_zero, + const ov::Tensor& zero_point_tensor = {}); ov::pass::CompressQuantizeWeights::CompressQuantizeWeights() { - auto weights_const_pattern = pattern::wrap_type(); - auto weigths_convert_pattern = pattern::wrap_type({weights_const_pattern}); - OutputVector weights_options{weights_const_pattern, weigths_convert_pattern}; + auto weights_const_pattern = pattern::wrap_type(); + auto weights_convert_pattern = pattern::wrap_type({weights_const_pattern}); + OutputVector weights_options{weights_const_pattern, weights_convert_pattern}; auto weights_pattern = std::make_shared(weights_options); - auto input_low_pattern = pattern::wrap_type(); - auto input_high_pattern = pattern::wrap_type(); - auto output_low_pattern = pattern::wrap_type(); - auto output_high_pattern = pattern::wrap_type(); - auto fq_pattern = pattern::wrap_type( + auto input_low_pattern = pattern::wrap_type(); + auto input_high_pattern = pattern::wrap_type(); + auto output_low_pattern = pattern::wrap_type(); + auto output_high_pattern = pattern::wrap_type(); + auto fq_pattern = pattern::wrap_type( {weights_pattern, input_low_pattern, input_high_pattern, output_low_pattern, output_high_pattern}); ov::matcher_pass_callback callback = [=](pattern::Matcher& m) { - auto fq = std::dynamic_pointer_cast(m.get_match_root()); + auto fq = std::dynamic_pointer_cast(m.get_match_root()); if (!fq) return false; - auto levels = fq->get_levels(); - if (levels <= 2 || levels > 256) - return false; - auto quantized_type = element::undefined; - // Currently we support two weights quantize types: i4 and i8 - if (levels <= 16) { - quantized_type = element::i4; - } else if (levels <= 256) { - quantized_type = element::i8; - } + const auto& high_precision_type = fq->get_element_type(); - const auto& pattern_value_map = m.get_pattern_value_map(); - const auto& input_type = fq->get_element_type(); - const auto& fq_data_input = fq->get_input_node_shared_ptr(0); - bool are_weights_decompressed = is_decompression(fq_data_input); - if (are_weights_decompressed) { - unmark_as_decompression(fq_data_input); - } + auto weights = ov::util::constantfold_subgraph(fq->get_input_node_shared_ptr(0)); + if (!weights) + return false; + auto input_low = ov::as_type_ptr(fq->get_input_node_shared_ptr(1)); + if (!input_low) + return false; + auto input_high = ov::as_type_ptr(fq->get_input_node_shared_ptr(2)); + if (!input_high) + return false; + auto output_low = ov::as_type_ptr(fq->get_input_node_shared_ptr(3)); + if (!output_low) + return false; + auto output_high = ov::as_type_ptr(fq->get_input_node_shared_ptr(4)); + if (!output_high) + return false; // skip dequantize part if there is already dequantization subgraph after FakeQuantize - auto fq_users = fq->get_users(); - if (fq_users.size() == 1 && has_dequantization_subgraph(fq_users[0])) { - auto& first_convert = fq_users[0]; - OPENVINO_SUPPRESS_DEPRECATED_START - if (auto new_weights = 
ov::get_constant_from_source(first_convert)) { - OPENVINO_SUPPRESS_DEPRECATED_END - new_weights->set_friendly_name(first_convert->get_friendly_name()); - replace_node(first_convert, new_weights); - copy_runtime_info(first_convert, new_weights); - // preserve dequantization subgraph for LP transformations - auto weights_users = new_weights->get_users(); - if (weights_users.size() == 1 && ov::is_type(weights_users[0])) { - ov::pass::disable_constant_folding(weights_users[0]); - } - return true; - } else { - if (are_weights_decompressed) { - mark_as_decompression(fq_data_input); - } + std::shared_ptr convert_to_low_precision; + std::shared_ptr convert_to_high_precision; + std::shared_ptr zero_point; + if (has_dequantization_subgraph(fq, convert_to_low_precision, convert_to_high_precision, zero_point)) { + bool can_fuse_zero_point = false; + auto new_weights = compress_quantized_weights(weights, + fq, + input_low, + input_high, + output_low, + output_high, + convert_to_low_precision, + zero_point, + can_fuse_zero_point); + if (!new_weights) return false; + + new_weights->set_friendly_name(convert_to_low_precision->get_friendly_name()); + replace_node(convert_to_low_precision, new_weights); + copy_runtime_info({fq, convert_to_low_precision}, new_weights); + // preserve dequantization subgraph for LP transformations + ov::pass::disable_constant_folding(convert_to_high_precision); + if (can_fuse_zero_point) { + auto subtract = convert_to_high_precision->get_users()[0]; + auto subtract_consumers = subtract->output(0).get_target_inputs(); + auto multiply = *(subtract_consumers.begin()); + multiply.replace_source_output(convert_to_high_precision); } + return true; } else { /* Quantize part @@ -103,33 +133,7 @@ ov::pass::CompressQuantizeWeights::CompressQuantizeWeights() { output_low = -levels / 2 output_high = levels - 1 + output_low The FakeQuantize result is converted to low precision type and then constant folded - */ - std::shared_ptr new_output_low = - op::v0::Constant::create(input_type, Shape{}, {-static_cast(levels / 2)}); - std::shared_ptr new_output_high = - std::make_shared(new_output_low, - op::v0::Constant::create(input_type, Shape{}, {levels - 1})); - const auto& weights_const = pattern_value_map.at(weights_const_pattern); - Output input_low = pattern_value_map.at(input_low_pattern); - Output input_high = pattern_value_map.at(input_high_pattern); - auto quantize = - fq->clone_with_new_inputs({fq_data_input, input_low, input_high, new_output_low, new_output_high}); - // Convert quantized weights to low precision type - std::shared_ptr new_weights = std::make_shared(quantize, quantized_type); - // Constant fold quantized weights - OPENVINO_SUPPRESS_DEPRECATED_START - if (auto constant = ov::get_constant_from_source(new_weights)) { - OPENVINO_SUPPRESS_DEPRECATED_END - new_weights = constant; - } else { - if (are_weights_decompressed) { - mark_as_decompression(fq_data_input); - } - return false; - } - new_weights->set_friendly_name(weights_const.get_node()->get_friendly_name()); - /* Dequantize part is performed by Convert(from low to high precision)->Subtract->Multiply subgraph. 
+-------------------------+ @@ -153,56 +157,65 @@ ov::pass::CompressQuantizeWeights::CompressQuantizeWeights() { scale = (output_high - output_low) / (new_output_high - new_output_low) zero_point = new_output_low - output_low / scale */ - Output output_low = pattern_value_map.at(output_low_pattern); - Output output_high = pattern_value_map.at(output_high_pattern); - const auto& fq_type = fq->get_output_element_type(0); - const bool should_convert = fq_type.is_real() && fq_type.size() < element::f32.size(); - if (should_convert) { - input_low = std::make_shared(input_low, element::f32); - input_high = std::make_shared(input_high, element::f32); - output_low = std::make_shared(output_low, element::f32); - output_high = std::make_shared(output_high, element::f32); - new_output_low = std::make_shared(new_output_low, element::f32); - new_output_high = std::make_shared(new_output_high, element::f32); - } - auto output_range = std::make_shared(output_high, output_low); - auto input_range = std::make_shared(new_output_high, new_output_low); - std::shared_ptr scale = std::make_shared(output_range, input_range); - auto descaled_output_low = std::make_shared(output_low, scale); - std::shared_ptr shift = std::make_shared(new_output_low, descaled_output_low); - OPENVINO_SUPPRESS_DEPRECATED_START - if (auto constant = ov::get_constant_from_source(scale)) { - OPENVINO_SUPPRESS_DEPRECATED_END - scale = constant; + + auto levels = fq->get_levels(); + if (levels <= 2 || levels > 256) + return false; + auto low_precision_type = element::undefined; + // Currently we support two weights quantize types: i4 and i8 + if (levels <= 16) { + low_precision_type = element::i4; + } else if (levels <= 256) { + low_precision_type = element::i8; } - auto zero = op::v0::Constant::create(scale->get_output_element_type(0), Shape{}, {0}); - auto scale_eq_zero = std::make_shared(scale, zero); - // shift equals to input_low - output_low / scale - // for positions where scale == 0, we put zero as shift - std::shared_ptr zero_point = std::make_shared(scale_eq_zero, zero, shift); - - if (should_convert) { - scale = std::make_shared(scale, fq_type); - zero_point = std::make_shared(zero_point, fq_type); + + bool zero_point_is_zero = true; + PartialShape merged_shape{output_low->get_shape()}; + PartialShape::broadcast_merge_into(merged_shape, output_high->get_shape(), op::AutoBroadcastType::NUMPY); + Shape scale_or_zero_point_shape = merged_shape.to_shape(); + Tensor scale_tensor(high_precision_type, scale_or_zero_point_shape); + Tensor zero_point_tensor(high_precision_type, scale_or_zero_point_shape); + + if (!compute_scale_and_zero_point(output_low, + output_high, + levels, + scale_tensor, + zero_point_tensor, + zero_point_is_zero)) { + return false; } - OPENVINO_SUPPRESS_DEPRECATED_START - if (auto constant = ov::get_constant_from_source(zero_point)) { - OPENVINO_SUPPRESS_DEPRECATED_END - zero_point = constant; + bool can_fuse_zero_point = false; + auto new_weights = compress_quantized_weights(weights, + input_low, + input_high, + low_precision_type, + levels, + zero_point_is_zero, + zero_point_tensor, + can_fuse_zero_point); + if (!new_weights) { + return false; } - OPENVINO_SUPPRESS_DEPRECATED_START - if (auto constant = ov::get_constant_from_source(scale)) { - OPENVINO_SUPPRESS_DEPRECATED_END - scale = constant; + + if (zero_point_is_zero || can_fuse_zero_point) { + replace_with_dequantize_subgraph(fq, + new_weights, + high_precision_type, + scale_or_zero_point_shape, + scale_tensor, + true); + } else { + 
replace_with_dequantize_subgraph(fq, + new_weights, + high_precision_type, + scale_or_zero_point_shape, + scale_tensor, + zero_point_is_zero, + zero_point_tensor); } - auto convert_to_high_prec = std::make_shared(new_weights, input_type); - auto sub = register_new_node(convert_to_high_prec, zero_point); - auto mul = register_new_node(sub, scale); - mul->set_friendly_name(fq->get_friendly_name()); - copy_runtime_info(fq, {convert_to_high_prec, sub, mul}); - ov::pass::disable_constant_folding(convert_to_high_prec); - replace_node(fq, mul); + + return true; } return true; }; @@ -211,86 +224,622 @@ ov::pass::CompressQuantizeWeights::CompressQuantizeWeights() { this->register_matcher(m, callback); } -ov::pass::ZeroPointOptimizer::ZeroPointOptimizer() { - auto weights_pattern = pattern::wrap_type(); - auto zero_point_pattern = pattern::wrap_type(); - auto convert_pattern = pattern::wrap_type({weights_pattern}); - auto sub_pattern = pattern::wrap_type({convert_pattern, zero_point_pattern}); +static ov::Tensor tensor_from_constant(const std::shared_ptr& constant) { + return ov::Tensor(constant->get_element_type(), constant->get_shape(), const_cast(constant->get_data_ptr())); +} + +static bool evaluate_node(const std::shared_ptr& node, + const ov::TensorVector& input_tensors, + ov::Tensor& output_tensor) { + if (node->get_output_size() != 1) + return false; - ov::matcher_pass_callback callback = [=](pattern::Matcher& m) { - const auto& pattern_value_map = m.get_pattern_value_map(); - auto convert = pattern_value_map.at(convert_pattern).get_node_shared_ptr(); - auto sub = pattern_value_map.at(sub_pattern).get_node_shared_ptr(); - auto weights = - std::dynamic_pointer_cast(pattern_value_map.at(weights_pattern).get_node_shared_ptr()); - if (!weights || weights->get_element_type() != element::i8) - return false; - auto zero_point = - std::dynamic_pointer_cast(pattern_value_map.at(zero_point_pattern).get_node_shared_ptr()); - if (!zero_point) - return false; + ov::TensorVector output_tensors{ov::Tensor(node->get_output_element_type(0), node->get_output_shape(0))}; + if (!node->evaluate(output_tensors, input_tensors)) + return false; - auto zp_value = zero_point->cast_vector(); - if (std::all_of(zp_value.begin(), zp_value.end(), [](float f) -> bool { - return std::fabs(f) <= std::numeric_limits::epsilon(); - })) { - copy_runtime_info(sub, convert); - replace_node(sub, convert); - } + output_tensor = output_tensors[0]; - auto int8_zero_point = std::make_shared( - std::make_shared(zero_point, opset8::Round::RoundMode::HALF_TO_EVEN), - weights->get_element_type()); - auto adj_zero_point = std::make_shared( - zero_point, - std::make_shared(int8_zero_point, convert->get_element_type())); - - OPENVINO_SUPPRESS_DEPRECATED_START - auto adj_zero_point_const = ov::get_constant_from_source(adj_zero_point); - OPENVINO_SUPPRESS_DEPRECATED_END - if (!adj_zero_point_const) - return false; - auto adj_zero_point_val = adj_zero_point_const->cast_vector(); - bool is_adj_zero_point_close_to_zero = - std::all_of(adj_zero_point_val.begin(), adj_zero_point_val.end(), [](float f) -> bool { - return std::fabs(f) < 1e-4; - }); - if (!is_adj_zero_point_close_to_zero) - return false; + return true; +} - auto transformed = std::make_shared( - std::make_shared(std::make_shared(weights, int8_zero_point), - convert->get_element_type()), - adj_zero_point); - auto diff = std::make_shared(sub, transformed); - OPENVINO_SUPPRESS_DEPRECATED_START - auto diff_const = ov::get_constant_from_source(diff); - OPENVINO_SUPPRESS_DEPRECATED_END - if 
(!diff_const) - return false; - auto diff_val = diff_const->cast_vector(); - bool is_transformed_and_original_equal = std::all_of(diff_val.begin(), diff_val.end(), [](float f) -> bool { - return std::fabs(f) < std::numeric_limits::epsilon(); +static ov::TensorVector get_fake_quantize_input_tensors(const std::shared_ptr& fq) { + ov::Tensor weights_tensor; + + auto fq_input = fq->get_input_node_shared_ptr(0); + auto fq_input_constant = ov::as_type_ptr(fq_input); + + if (!fq_input_constant) { + auto weights = ov::as_type_ptr(fq_input->get_input_node_shared_ptr(0)); + if (!evaluate_node(fq_input, ov::TensorVector{tensor_from_constant(weights)}, weights_tensor)) + return {}; + } else { + weights_tensor = tensor_from_constant(fq_input_constant); + } + + auto in_low = ov::as_type_ptr(fq->get_input_node_shared_ptr(1)); + auto in_high = ov::as_type_ptr(fq->get_input_node_shared_ptr(2)); + auto out_low = ov::as_type_ptr(fq->get_input_node_shared_ptr(3)); + auto out_high = ov::as_type_ptr(fq->get_input_node_shared_ptr(4)); + + return ov::TensorVector{weights_tensor, + tensor_from_constant(in_low), + tensor_from_constant(in_high), + tensor_from_constant(out_low), + tensor_from_constant(out_high)}; +} + +template +static std::shared_ptr get_single_consumer_of_type(const std::shared_ptr& node) { + auto target_inputs = node->output(0).get_target_inputs(); + if (target_inputs.size() != 1) + return nullptr; + auto consumer = ov::as_type(target_inputs.begin()->get_node()); + if (!consumer) + return nullptr; + return consumer->shared_from_this(); +} + +bool has_dequantization_subgraph(const std::shared_ptr& fq, + std::shared_ptr& convert_to_low_precision, + std::shared_ptr& convert_to_high_precision, + std::shared_ptr& zero_point) { + convert_to_low_precision = get_single_consumer_of_type(fq); + if (!convert_to_low_precision) + return false; + convert_to_high_precision = get_single_consumer_of_type(convert_to_low_precision); + if (!convert_to_high_precision) + return false; + auto subtract = get_single_consumer_of_type(convert_to_high_precision); + if (subtract) { + zero_point = subtract->get_input_node_shared_ptr(1); + return get_single_consumer_of_type(subtract) != nullptr; + } else { + return get_single_consumer_of_type(convert_to_high_precision) != nullptr; + } +} + +static std::shared_ptr evaluate_fake_quantize(const std::shared_ptr& quantize, + const std::shared_ptr& convert) { + ov::Tensor quantize_output_tensor; + if (!evaluate_node(quantize, get_fake_quantize_input_tensors(quantize), quantize_output_tensor)) + return nullptr; + ov::Tensor new_weights_tensor; + if (!evaluate_node(convert, {quantize_output_tensor}, new_weights_tensor)) + return nullptr; + return std::make_shared(new_weights_tensor); +} + +void replace_with_dequantize_subgraph(const std::shared_ptr& fq, + const std::shared_ptr& new_weights, + const ov::element::Type& high_precision_type, + const ov::Shape& scale_or_zero_point_shape, + const ov::Tensor& scale_tensor, + bool zero_point_is_zero, + const ov::Tensor& zero_point_tensor) { + ov::pass::NodeRegistry node_registry; + auto convert = node_registry.make(new_weights, high_precision_type); + ov::pass::disable_constant_folding(convert); + std::shared_ptr mul; + auto scale = node_registry.make(scale_tensor); + if (!zero_point_is_zero) { + auto zero_point = node_registry.make(zero_point_tensor); + auto sub = node_registry.make(convert, zero_point); + mul = node_registry.make(sub, scale); + } else { + mul = node_registry.make(convert, scale); + } + 
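+    // The final Multiply inherits the FakeQuantize's friendly name; runtime info is copied to the
+    // new subgraph and the FakeQuantize consumers are re-wired to the dequantization output.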
mul->set_friendly_name(fq->get_friendly_name()); + copy_runtime_info(fq, node_registry.get()); + replace_node(fq, mul); +} + +template +static void compute_scale_and_zero_point_internal(const std::shared_ptr& output_low, + const std::shared_ptr& output_high, + size_t levels, + ov::Tensor& scale_tensor, + ov::Tensor& zero_point_tensor, + bool& zero_point_is_zero) { + zero_point_is_zero = true; + float input_range = static_cast(levels - 1); + float new_output_low = -static_cast(levels / 2); + T* zero_point = zero_point_tensor.data(); + T* scale = scale_tensor.data(); + ov::reference::autobroadcast_binop( + output_low->get_data_ptr(), + output_high->get_data_ptr(), + scale, + output_low->get_shape(), + output_high->get_shape(), + ov::op::AutoBroadcastType::NUMPY, + [input_range, new_output_low, zero_point, &zero_point_is_zero](float output_low_value, + float output_high_value) mutable { + float output_range = output_high_value - output_low_value; + float scale = output_range / input_range; + float zero_point_value = (new_output_low - output_low_value / scale) * (scale != 0); + zero_point_is_zero = + zero_point_is_zero && std::fabs(zero_point_value) < std::numeric_limits::epsilon(); + *zero_point++ = zero_point_value; + return scale; }); - if (!is_transformed_and_original_equal) - return false; +} - std::shared_ptr new_weights = std::make_shared(weights, int8_zero_point); - OPENVINO_SUPPRESS_DEPRECATED_START - if (auto constant = ov::get_constant_from_source(new_weights)) { - OPENVINO_SUPPRESS_DEPRECATED_END - new_weights = constant; - } else { - return false; +bool compute_scale_and_zero_point(const std::shared_ptr& output_low, + const std::shared_ptr& output_high, + size_t levels, + ov::Tensor& scale_tensor, + ov::Tensor& zero_point_tensor, + bool& zero_point_is_zero) { + const auto type = output_low->get_element_type(); + switch (type) { + case ov::element::Type_t::f32: { + compute_scale_and_zero_point_internal(output_low, + output_high, + levels, + scale_tensor, + zero_point_tensor, + zero_point_is_zero); + break; + } + case ov::element::f16: { + compute_scale_and_zero_point_internal(output_low, + output_high, + levels, + scale_tensor, + zero_point_tensor, + zero_point_is_zero); + break; + } + default: + return false; + } + + return true; +} + +template +static void +transform(const T* first1, const T* const last1, const T* first2, const T* first3, const T* first4, U* out, F& f) { + while (first1 < last1) { + *out++ = f(*first1++, *first2++, *first3++, *first4++); + } +} + +template +static void transform(const T* first1, + const T* const last1, + const T* first2, + const T* first3, + const T* first4, + const T* first5, + const T* first6, + U* out, + F& f) { + while (first1 < last1) { + *out++ = f(*first1++, *first2++, *first3++, *first4++, *first5++, *first6++); + } +} + +template +static void numpy_broadcast_4inputs(const T* weights, + const ov::Shape& weights_shape, + const T* in_low, + const ov::Shape& in_low_shape, + const T* in_high, + const ov::Shape& in_high_shape, + const T* zero_point, + const ov::Shape& zero_point_shape, + U* new_weights, + F& f) { + using namespace ov::reference::fake_quantize_details; + + std::vector output_strides = compute_strides(weights_shape, weights_shape); + std::vector in_low_strides = compute_strides(weights_shape, in_low_shape); + std::vector in_high_strides = compute_strides(weights_shape, in_high_shape); + std::vector zero_point_strides = compute_strides(weights_shape, zero_point_shape); + + size_t num_elements = shape_size(weights_shape); + + 
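+    // Each get_inner_stride call below can only shrink weights_inner_stride, so the final value is
+    // the largest contiguous inner block that the weights and every broadcast input can share.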
size_t weights_inner_stride = num_elements; + size_t in_low_inner_stride = 0; + size_t in_high_inner_stride = 0; + size_t zero_point_inner_stride = 0; + + std::tie(in_low_inner_stride, weights_inner_stride) = + get_inner_stride(num_elements, weights_shape, in_low_shape, weights_inner_stride); + std::tie(in_high_inner_stride, weights_inner_stride) = + get_inner_stride(num_elements, weights_shape, in_high_shape, weights_inner_stride); + std::tie(zero_point_inner_stride, weights_inner_stride) = + get_inner_stride(num_elements, weights_shape, zero_point_shape, weights_inner_stride); + + auto get_outer_strides = + [&output_strides, &in_low_strides, &in_high_strides, &zero_point_strides](size_t flat_index) { + size_t in_low_stride = 0; + size_t in_high_stride = 0; + size_t zero_point_stride = 0; + + for (size_t i = 0; i < output_strides.size(); i++) { + size_t div = flat_index / output_strides[i]; + flat_index = flat_index % output_strides[i]; + in_low_stride += div * in_low_strides[i]; + in_high_stride += div * in_high_strides[i]; + zero_point_stride += div * zero_point_strides[i]; + } + + return std::tuple{in_low_stride, in_high_stride, zero_point_stride}; + }; + + size_t in_low_stride = 0; + size_t in_high_stride = 0; + size_t zero_point_stride = 0; + + if (in_low_inner_stride * in_high_inner_stride * zero_point_inner_stride == 1) { + for (size_t i = 0; i < shape_size(weights_shape); i += weights_inner_stride) { + std::tie(in_low_stride, in_high_stride, zero_point_stride) = get_outer_strides(i); + T in_low_scalar = *(in_low + in_low_stride); + T in_high_scalar = *(in_high + in_high_stride); + T zero_point_scalar = *(zero_point + zero_point_stride); + std::transform(weights, + weights + weights_inner_stride, + new_weights, + [in_low_scalar, in_high_scalar, zero_point_scalar, &f](T w) { + return f(w, in_low_scalar, in_high_scalar, zero_point_scalar); + }); + weights += weights_inner_stride; + new_weights += weights_inner_stride; + } + } else if (in_low_inner_stride > 1 && in_high_inner_stride > 1 && zero_point_inner_stride > 1) { + for (size_t i = 0; i < shape_size(weights_shape); i += weights_inner_stride) { + std::tie(in_low_stride, in_high_stride, zero_point_stride) = get_outer_strides(i); + transform(weights, + weights + weights_inner_stride, + in_low + in_low_stride, + in_high + in_high_stride, + zero_point + zero_point_stride, + new_weights, + f); + weights += weights_inner_stride; + new_weights += weights_inner_stride; } - new_weights->set_friendly_name(weights->get_friendly_name()); - replace_node(weights, new_weights); + } else { + for (size_t i = 0; i < shape_size(weights_shape); i++) { + std::tie(in_low_stride, in_high_stride, zero_point_stride) = get_outer_strides(i); + *new_weights++ = f(*weights++, + *(in_low + in_low_stride), + *(in_high + in_high_stride), + *(zero_point + zero_point_stride)); + } + } +} - copy_runtime_info(sub, convert); - replace_node(sub, convert); - return true; +template +static void numpy_broadcast_6inputs(const T* weights, + const ov::Shape& weights_shape, + const T* in_low, + const ov::Shape& in_low_shape, + const T* in_high, + const ov::Shape& in_high_shape, + const T* out_low, + const ov::Shape& out_low_shape, + const T* out_high, + const ov::Shape& out_high_shape, + const T* zero_point, + const ov::Shape& zero_point_shape, + U* new_weights, + F& f) { + using namespace ov::reference::fake_quantize_details; + + std::vector output_strides = compute_strides(weights_shape, weights_shape); + std::vector in_low_strides = compute_strides(weights_shape, 
in_low_shape); + std::vector in_high_strides = compute_strides(weights_shape, in_high_shape); + std::vector out_low_strides = compute_strides(weights_shape, out_low_shape); + std::vector out_high_strides = compute_strides(weights_shape, out_high_shape); + std::vector zero_point_strides = compute_strides(weights_shape, zero_point_shape); + + auto get_outer_strides = + [&output_strides, &in_low_strides, &in_high_strides, &out_low_strides, &out_high_strides, &zero_point_strides]( + size_t flat_index) { + size_t in_low_stride = 0; + size_t in_high_stride = 0; + size_t out_low_stride = 0; + size_t out_high_stride = 0; + size_t zero_point_stride = 0; + + for (size_t i = 0; i < output_strides.size(); i++) { + size_t div = flat_index / output_strides[i]; + flat_index = flat_index % output_strides[i]; + in_low_stride += div * in_low_strides[i]; + in_high_stride += div * in_high_strides[i]; + out_low_stride += div * out_low_strides[i]; + out_high_stride += div * out_high_strides[i]; + zero_point_stride += div * zero_point_strides[i]; + } + + return std::tuple{in_low_stride, + in_high_stride, + out_low_stride, + out_high_stride, + zero_point_stride}; + }; + + size_t in_low_stride = 0; + size_t in_high_stride = 0; + size_t out_low_stride = 0; + size_t out_high_stride = 0; + size_t zero_point_stride = 0; + + for (size_t i = 0; i < shape_size(weights_shape); i++) { + std::tie(in_low_stride, in_high_stride, out_low_stride, out_high_stride, zero_point_stride) = + get_outer_strides(i); + *new_weights++ = f(*weights++, + *(in_low + in_low_stride), + *(in_high + in_high_stride), + *(out_low + out_low_stride), + *(out_high + out_high_stride), + *(zero_point + zero_point_stride)); + } +} + +static inline int8_t convert_to_int8(float val) { + return static_cast(std::nearbyint(val)); +} + +static inline int8_t convert_to_int4(float val) { + return static_cast(std::nearbyint(val)) & 0x0f; +} + +static std::shared_ptr create_weights_constant(const ov::Tensor& weights_tensor, + const ov::element::Type& type) { + auto weights = std::make_shared(weights_tensor); + if (weights->get_element_type() != type) { + return ov::util::constantfold_subgraph(std::make_shared(weights, type)); + } + return weights; +} + +template +static std::shared_ptr compress_quantized_weights_internal( + const ov::element::Type& low_precision_type, + const T* weights, + const ov::Shape& weights_shape, + const T* input_low, + const ov::Shape& input_low_shape, + const T* input_high, + const ov::Shape& input_high_shape, + const T* output_low, + const ov::Shape& output_low_shape, + const T* output_high, + const ov::Shape& output_high_shape, + const T* zero_point, + const ov::Shape& zero_point_shape, + size_t levels, + bool& can_fuse_zero_point) { + ov::Tensor compressed_weights_tensor(ov::element::i8, weights_shape); + int8_t* compressed_weights = compressed_weights_tensor.data(); + ov::Tensor compressed_weights_with_fused_zero_point_tensor(ov::element::i8, weights_shape); + int8_t* compressed_weights_with_fused_zero_point = compressed_weights_with_fused_zero_point_tensor.data(); + T levels_minus_one = static_cast(levels - 1); + can_fuse_zero_point = true; + const auto convert_to_low_precision = low_precision_type == ov::element::i4 ? 
convert_to_int4 : convert_to_int8; + + auto f = + [compressed_weights_with_fused_zero_point, levels_minus_one, convert_to_low_precision, &can_fuse_zero_point]( + T weights_value, + T input_low, + T input_high, + T output_low, + T output_high, + T zero_point) mutable { + int8_t compressed_weights_value = + convert_to_low_precision(ov::reference::fake_quantize_details::quantize(weights_value, + input_low, + input_high, + output_low, + output_high, + levels_minus_one)); + T weights_minus_zero_point = static_cast(compressed_weights_value) - zero_point; + int8_t compressed_weights_with_fused_zero_point_value = convert_to_low_precision(weights_minus_zero_point); + can_fuse_zero_point &= + std::fabs(compressed_weights_with_fused_zero_point_value - weights_minus_zero_point) < 1e-4; + *compressed_weights_with_fused_zero_point++ = compressed_weights_with_fused_zero_point_value; + return compressed_weights_value; + }; + + numpy_broadcast_6inputs(weights, + weights_shape, + input_low, + input_low_shape, + input_high, + input_high_shape, + output_low, + output_low_shape, + output_high, + output_high_shape, + zero_point, + zero_point_shape, + compressed_weights, + f); + + return create_weights_constant( + can_fuse_zero_point ? compressed_weights_with_fused_zero_point_tensor : compressed_weights_tensor, + low_precision_type); +} + +std::shared_ptr compress_quantized_weights( + const std::shared_ptr& weights, + const std::shared_ptr& fq, + const std::shared_ptr& input_low, + const std::shared_ptr& input_high, + const std::shared_ptr& output_low, + const std::shared_ptr& output_high, + const std::shared_ptr& convert, + const std::shared_ptr& zero_point, + bool& can_fuse_zero_point) { + std::shared_ptr new_weights; + const auto& weights_shape = weights->get_shape(); + const auto& type = weights->get_element_type(); + const auto& low_precision_type = convert->get_output_element_type(0); + + if (zero_point == nullptr) + return evaluate_fake_quantize(fq, convert); + + auto zero_point_constant = ov::util::constantfold_subgraph(zero_point); + if (!zero_point_constant) + return nullptr; + + switch (type) { + case ov::element::f32: { + new_weights = compress_quantized_weights_internal(low_precision_type, + weights->get_data_ptr(), + weights_shape, + input_low->get_data_ptr(), + input_low->get_shape(), + input_high->get_data_ptr(), + input_low->get_shape(), + output_low->get_data_ptr(), + output_low->get_shape(), + output_high->get_data_ptr(), + output_low->get_shape(), + zero_point_constant->get_data_ptr(), + zero_point_constant->get_shape(), + fq->get_levels(), + can_fuse_zero_point); + break; + } + case ov::element::f16: { + new_weights = compress_quantized_weights_internal(low_precision_type, + weights->get_data_ptr(), + weights_shape, + input_low->get_data_ptr(), + input_low->get_shape(), + input_high->get_data_ptr(), + input_low->get_shape(), + output_low->get_data_ptr(), + output_low->get_shape(), + output_high->get_data_ptr(), + output_low->get_shape(), + zero_point_constant->get_data_ptr(), + zero_point_constant->get_shape(), + fq->get_levels(), + can_fuse_zero_point); + break; + } + default: + return nullptr; + } + return new_weights; +} + +template +static std::shared_ptr compress_quantized_weights_internal( + const ov::element::Type& low_precision_type, + const T* weights, + const ov::Shape& weights_shape, + const T* input_low, + const ov::Shape& input_low_shape, + const T* input_high, + const ov::Shape& input_high_shape, + const T* zero_point, + const ov::Shape& zero_point_shape, + size_t levels, + 
bool zero_point_is_zero, + bool& can_fuse_zero_point) { + using namespace ov::reference::fake_quantize_details; + ov::Tensor compressed_weights_tensor(ov::element::i8, weights_shape); + int8_t* compressed_weights = compressed_weights_tensor.data(); + int8_t* compressed_weights_with_fused_zero_point = nullptr; + ov::Tensor compressed_weights_with_fused_zero_point_tensor; + if (!zero_point_is_zero) { + compressed_weights_with_fused_zero_point_tensor = ov::Tensor(ov::element::i8, weights_shape); + compressed_weights_with_fused_zero_point = compressed_weights_with_fused_zero_point_tensor.data(); + } + T levels_minus_one = static_cast(levels - 1); + T output_low = -static_cast(levels / 2); + T output_high = levels_minus_one + output_low; + can_fuse_zero_point = !zero_point_is_zero; + const auto convert_to_low_precision = low_precision_type == ov::element::i4 ? convert_to_int4 : convert_to_int8; + + auto f = [compressed_weights_with_fused_zero_point, + levels_minus_one, + output_low, + output_high, + zero_point_is_zero, + convert_to_low_precision, + &can_fuse_zero_point](T weights_value, T input_low, T input_high, T zero_point) mutable { + int8_t compressed_weights_value = convert_to_low_precision( + quantize(weights_value, input_low, input_high, output_low, output_high, levels_minus_one)); + if (!zero_point_is_zero && can_fuse_zero_point) { + T weights_minus_zero_point = static_cast(compressed_weights_value) - zero_point; + int8_t compressed_weights_with_fused_zero_point_value = convert_to_low_precision(weights_minus_zero_point); + can_fuse_zero_point &= + std::fabs(compressed_weights_with_fused_zero_point_value - weights_minus_zero_point) < 1e-4; + *compressed_weights_with_fused_zero_point++ = compressed_weights_with_fused_zero_point_value; + } + return compressed_weights_value; }; - auto m = std::make_shared(sub_pattern, "ZeroPointOptimizer"); - this->register_matcher(m, callback); + numpy_broadcast_4inputs(weights, + weights_shape, + input_low, + input_low_shape, + input_high, + input_high_shape, + zero_point, + zero_point_shape, + compressed_weights, + f); + + return create_weights_constant( + can_fuse_zero_point ? 
compressed_weights_with_fused_zero_point_tensor : compressed_weights_tensor, + low_precision_type); +} + +std::shared_ptr compress_quantized_weights( + const std::shared_ptr& weights, + const std::shared_ptr& input_low, + const std::shared_ptr& input_high, + const ov::element::Type& low_precision_type, + size_t levels, + bool zero_point_is_zero, + const ov::Tensor& zero_point_tensor, + bool& can_fuse_zero_point) { + std::shared_ptr new_weights; + const auto& weights_shape = weights->get_shape(); + const auto& type = weights->get_element_type(); + switch (type) { + case ov::element::f32: { + new_weights = compress_quantized_weights_internal(low_precision_type, + weights->get_data_ptr(), + weights_shape, + input_low->get_data_ptr(), + input_low->get_shape(), + input_high->get_data_ptr(), + input_low->get_shape(), + zero_point_tensor.data(), + zero_point_tensor.get_shape(), + levels, + zero_point_is_zero, + can_fuse_zero_point); + break; + } + case ov::element::f16: { + new_weights = compress_quantized_weights_internal(low_precision_type, + weights->get_data_ptr(), + weights_shape, + input_low->get_data_ptr(), + input_low->get_shape(), + input_high->get_data_ptr(), + input_low->get_shape(), + zero_point_tensor.data(), + zero_point_tensor.get_shape(), + levels, + zero_point_is_zero, + can_fuse_zero_point); + break; + } + default: + return nullptr; + } + return new_weights; } diff --git a/src/common/transformations/tests/utils/compress_quantize_weights.cpp b/src/common/transformations/tests/utils/compress_quantize_weights.cpp index 5a62b79bfaabc1..cc31017368863f 100644 --- a/src/common/transformations/tests/utils/compress_quantize_weights.cpp +++ b/src/common/transformations/tests/utils/compress_quantize_weights.cpp @@ -31,6 +31,7 @@ struct CompressQuantizeWeightsParams { std::vector expected_weights; float scale_val; float zero_point_val; + bool fuse_zero_point; }; class CompressQuantizeWeightsTests @@ -66,9 +67,14 @@ class CompressQuantizeWeightsTests auto data = opset8::Constant::create(param.expected_type, param.shape, param.expected_weights); auto convert = std::make_shared(data, element::f32); auto scale = opset8::Constant::create(element::f32, Shape{}, {param.scale_val}); - auto zero_point = opset8::Constant::create(element::f32, Shape{}, {param.zero_point_val}); - auto sub = std::make_shared(convert, zero_point); - auto mul = std::make_shared(sub, scale); + std::shared_ptr mul; + if (!param.fuse_zero_point) { + auto zero_point = opset8::Constant::create(element::f32, Shape{}, {param.zero_point_val}); + auto sub = std::make_shared(convert, zero_point); + mul = std::make_shared(sub, scale); + } else { + mul = std::make_shared(convert, scale); + } model_ref = std::make_shared(mul, ParameterVector{}); } comparator.enable(FunctionsComparator::CmpValues::CONST_VALUES); @@ -89,7 +95,8 @@ static std::vector params = { element::i4, {-1.0f, -1.0f, 0.0f, 0.0f, 0.0f, 1.0f}, 3.0f, - -0.666667f}, + -0.666667f, + false}, {Shape{2, 3, 1, 1}, {-1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 11.0f}, 0.0f, @@ -100,7 +107,8 @@ static std::vector params = { element::i4, {-8.0f, -5.0f, -4.0f, -2.0f, 0.0f, 7.0f}, 0.333333f, - -5.0f}, + -5.0f, + false}, {Shape{2, 4, 1, 1}, {-1.0f, 0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 11.0f}, 1.0f, @@ -109,9 +117,10 @@ static std::vector params = { 6.0f, 17, element::i8, - {-8.0f, -8.0f, -8.0f, -6.0f, -4.0f, -2.0f, 0.0f, 8.0f}, + {-4.0f, -4.0f, -4.0f, -2.0f, 0.0f, 2.0f, 4.0f, 12.0f}, 0.5f, - -4.0f}, + -4.0f, + true}, {Shape{2, 4, 1, 1}, {-1.0f, 0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 11.0f}, 1.0f, 
@@ -122,7 +131,8 @@ static std::vector params = { element::i8, {-128.0f, -128.0f, -128.0f, -96.0f, -64.0f, -32.0f, 0.0f, 127.0f}, 0.0313725f, - -64.25f}, + -64.25f, + false}, }; static element::TypeVector data_precisions = {element::f32, element::f16}; @@ -198,7 +208,7 @@ TEST_F(TransformationTestsF, CompressQuantizeWeightsWithDequantizationSubgraphFP comparator.enable(FunctionsComparator::CmpValues::ACCURACY); } -TEST_F(TransformationTestsF, CompressQuantizeWeightsWithZeroPointOptimizer) { +TEST_F(TransformationTestsF, CompressQuantizeWeightsWithZeroPointEliminated) { { auto data = opset8::Constant::create(element::f32, Shape{3, 1, 1, 1}, {-0.144816, 0.0858578, 0.110928}); auto input_low = opset8::Constant::create(element::f32, Shape{3, 1, 1, 1}, {-0.402659, -0.383148, -0.34054}); @@ -209,7 +219,6 @@ TEST_F(TransformationTestsF, CompressQuantizeWeightsWithZeroPointOptimizer) { model = std::make_shared(NodeVector{fq}, ParameterVector{}); manager.register_pass(); - manager.register_pass(); } { @@ -223,7 +232,7 @@ TEST_F(TransformationTestsF, CompressQuantizeWeightsWithZeroPointOptimizer) { comparator.enable(FunctionsComparator::CmpValues::ACCURACY); } -TEST_F(TransformationTestsF, CompressQuantizeWeightsWithZeroPointOptimizerFP16) { +TEST_F(TransformationTestsF, CompressQuantizeWeightsWithZeroPointEliminatedFP16) { { auto data = opset8::Constant::create(element::f16, Shape{3, 1, 1, 1}, {0.2, 1.2, 1.2}); auto input_low = @@ -239,7 +248,6 @@ TEST_F(TransformationTestsF, CompressQuantizeWeightsWithZeroPointOptimizerFP16) model = std::make_shared(NodeVector{fq}, ParameterVector{}); manager.register_pass(); - manager.register_pass(); } { @@ -253,7 +261,7 @@ TEST_F(TransformationTestsF, CompressQuantizeWeightsWithZeroPointOptimizerFP16) comparator.enable(FunctionsComparator::CmpValues::ACCURACY); } -TEST_F(TransformationTestsF, NegativeCompressQuantizeWeightsWithZeroPointOptimizer) { +TEST_F(TransformationTestsF, NegativeCompressQuantizeWeights) { { auto data = opset8::Constant::create(element::f32, Shape{2, 4, 1, 1}, {-1, 0, 1, 2, 3, 4, 5, 11}); auto input_low = opset8::Constant::create(element::f32, Shape{}, {1}); @@ -264,7 +272,6 @@ TEST_F(TransformationTestsF, NegativeCompressQuantizeWeightsWithZeroPointOptimiz model = std::make_shared(NodeVector{fq}, ParameterVector{}); manager.register_pass(); - manager.register_pass(); } { auto data = opset8::Constant::create(element::i8, Shape{2, 4, 1, 1}, {-128, -128, -128, -96, -64, -32, 0, 127}); @@ -289,7 +296,6 @@ TEST_F(TransformationTestsF, NegativeCompressQuantizeWeightsNonConstantInput) { model = std::make_shared(NodeVector{fq}, ParameterVector{data}); manager.register_pass(); - manager.register_pass(); comparator.enable(FunctionsComparator::CmpValues::CONST_VALUES); comparator.enable(FunctionsComparator::CmpValues::ACCURACY); diff --git a/src/core/reference/include/openvino/reference/fake_quantize.hpp b/src/core/reference/include/openvino/reference/fake_quantize.hpp index d0828cd23087d3..2fb30a4a5c492b 100644 --- a/src/core/reference/include/openvino/reference/fake_quantize.hpp +++ b/src/core/reference/include/openvino/reference/fake_quantize.hpp @@ -21,31 +21,86 @@ namespace ov { namespace reference { namespace fake_quantize_details { template -inline T quantize(const T& arg, - const T& in_low, - const T& in_high, - const T& out_low, - const T& out_high, - const size_t& levels) { +static inline T quantize(const T arg, + const T in_low, + const T in_high, + const T out_low, + const T out_high, + const T levels_minus_one) { if (arg <= 
std::min(in_low, in_high)) { return out_low; } else if (arg > std::max(in_low, in_high)) { return out_high; } - return static_cast(std::nearbyint((arg - in_low) / (in_high - in_low) * (levels - 1)) / (levels - 1) * + return static_cast(std::nearbyint((arg - in_low) / (in_high - in_low) * levels_minus_one) / levels_minus_one * (out_high - out_low) + out_low); } +static std::vector compute_strides(const ov::Shape& out_shape, const ov::Shape& shape); + +static std::tuple get_inner_stride(size_t num_output_elements, + const ov::Shape& output_shape, + const ov::Shape& shape, + size_t current_output_inner_stride); + +template +static void fake_quantize_non_unit_inner_stride(const T* arg, + const T* in_low, + const T* in_high, + const T* out_low, + const T* out_high, + T* out, + const Shape& arg_shape, + T levels_minus_one, + size_t input_inner_stride, + const F& get_outer_strides); + +template +static void fake_quantize_unit_inner_stride(const T* arg, + const T* in_low, + const T* in_high, + const T* out_low, + const T* out_high, + T* out, + const Shape& arg_shape, + T levels_minus_one, + size_t input_inner_stride, + const F& get_outer_strides); + +template +static void fake_quantize_unit_output_intervals_inner_stride(const T* arg, + const T* in_low, + const T* in_high, + const T* out_low, + const T* out_high, + T* out, + const Shape& arg_shape, + T levels_minus_one, + size_t input_inner_stride, + const F& get_outer_strides); + +template +static void fake_quantize_unit_input_intervals_inner_stride(const T* arg, + const T* in_low, + const T* in_high, + const T* out_low, + const T* out_high, + T* out, + const Shape& arg_shape, + T levels_minus_one, + size_t input_inner_stride, + const F& get_outer_strides); + } // namespace fake_quantize_details template -void fake_quantize(const T* const arg, - const T* const in_low, - const T* const in_high, - const T* const out_low, - const T* const out_high, - T* const out, +void fake_quantize(const T* arg, + const T* in_low, + const T* in_high, + const T* out_low, + const T* out_high, + T* out, const Shape& arg_shape, const Shape& in_low_shape, const Shape& in_high_shape, @@ -55,133 +110,452 @@ void fake_quantize(const T* const arg, const op::AutoBroadcastSpec& broadcast) { using namespace fake_quantize_details; + T levels_minus_one = static_cast(levels - 1); + const size_t arg_size = shape_size(arg_shape); + if (shape_size(in_low_shape) == 1 && shape_size(in_high_shape) == 1 && shape_size(out_low_shape) == 1 && shape_size(out_high_shape) == 1) { - const size_t arg_size = shape_size(arg_shape); - const auto q = [=](const T& a) { - return quantize(a, *in_low, *in_high, *out_low, *out_high, levels); - }; for (size_t i = 0; i < arg_size; ++i) { - out[i] = q(arg[i]); - } - } else { - OPENVINO_ASSERT(in_low_shape.size() <= arg_shape.size() && in_high_shape.size() <= arg_shape.size() && - out_low_shape.size() <= arg_shape.size() && out_high_shape.size() <= arg_shape.size(), - "Tensors with input\\output ranges should have rank less or " - "equal to data tensor rank equal to ", - arg_shape.size()); - - Shape arg0_padded_shape = arg_shape; - Shape arg1_padded_shape = in_low_shape; - Shape arg2_padded_shape = in_high_shape; - Shape arg3_padded_shape = out_low_shape; - Shape arg4_padded_shape = out_high_shape; - - size_t max_shape_size = arg_shape.size(); - - while (arg0_padded_shape.size() < max_shape_size) { - arg0_padded_shape.insert(arg0_padded_shape.begin(), 1); + out[i] = quantize(arg[i], *in_low, *in_high, *out_low, *out_high, levels_minus_one); } + return; + } 
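+    // Non-scalar intervals are handled below with precomputed, broadcast-aware strides.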
- while (arg1_padded_shape.size() < max_shape_size) { - arg1_padded_shape.insert(arg1_padded_shape.begin(), 1); - } + // clang-format off + /* + * --------------------------------------------------- + * Overview: + * Numpy broadcasted input tensors can be partitioned into two: outer and inner part (which also defines inner + * stride as a product of inner part), so N-dimensional tensors can be processed using two loops. + * + * For example with two inputs [2, 2, 3, 4] and [1, 1, 3, 4] we can have: + * input 1 with shape [2, 2, 3, 4] can be divided into outer part [2, 2] and inner part [3, 4] + * with inner stride = 12 (3 * 4). + * input 2 with shape [1, 1, 3, 4] can be divided into outer part [1, 1] + * and inner part [3, 4] with inner stride = 12 (3 * 4) + * + * Having that, those inputs can be processed by the following: + * + * output_shape = {2, 2, 3, 4}; + * output_inner_stride = 12; + * for (i = 0; i < shape_size(shape); i += output_inner_stride) { + * first_input_stride = i; + * second_input_stride = 0; + * for (j = 0; j < 12; j++) { + * *out++ = f(first_input[first_input_stride + j], second_input[second_input_stride + j]); + * } + * } + * + * --------------------------------------------------- + * How the partitioning is done: + * Partitioning process starts with the last dimension of input tensor shape and it stops when either one of below + * occurs: + * - if the last dimension is equal to 1, partitioning stops at the dimension that is greater than 1 (this + * dimension is not included in the inner part), + * - if the last dimension is greater than 1, partitioning stops at the dimension that is equal to 1 (this + * dimension is not included in the inner part). + * + * Examples: + * tensor_shape=[2, 3, 4, 5], inner_part = [2, 3, 4, 5], inner_stride = 120 + * tensor_shape=[1, 1, 4, 5], inner_part = [4, 5], inner_stride = 20 + * tensor_shape=[2, 3, 1, 1], inner_part = [1, 1], inner_stride = 1 + * + * + * --------------------------------------------------- + * How the output inner stride is calculated: + * Inner part (and inner stride) for every input tensor is determined. Then the size of output inner part is the + * size of inner part with the fewest number of dimensions. + * + * Example with 5 inputs: + * input 1 shape [2, 3, 4, 5], inner_part = [2, 3, 4, 5], inner_stride = 120 + * input 2 shape [1, 3, 4, 5], inner_part = [3, 4, 5], inner_stride = 60 + * input 3 shape [2, 3, 1, 1], inner_part = [1, 1], inner_stride = 1 + * input 4 shape [2, 1, 1, 1], inner_part = [1, 1, 1], inner_stride = 1 + * input 5 shape [1, 1, 1, 1], inner_part = [1, 1, 1, 1], inner_stride = 1 + * + * output shape [2, 3, 4, 5], inner_part = [4, 5], inner_stride = 20 + * + * Inner part with fewest number of elements is [1, 1] for input 3. So the inner part for output shape is [4, 5] + * and output inner stride is 20. 
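+ *
+ * ---------------------------------------------------
+ * How per-input offsets are recovered for a flat output index (what get_outer_strides below does):
+ * for each dimension d, div = flat_index / output_strides[d] and flat_index %= output_strides[d];
+ * every input offset accumulates div * input_strides[d], where input_strides[d] is 0 for
+ * broadcast (size-1) dimensions, so broadcast inputs are addressed without materializing coordinates.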
+ */ + // clang-format on - while (arg2_padded_shape.size() < max_shape_size) { - arg2_padded_shape.insert(arg2_padded_shape.begin(), 1); - } + std::vector output_strides = compute_strides(arg_shape, arg_shape); + std::vector in_low_strides = compute_strides(arg_shape, in_low_shape); + std::vector in_high_strides = compute_strides(arg_shape, in_high_shape); + std::vector out_low_strides = compute_strides(arg_shape, out_low_shape); + std::vector out_high_strides = compute_strides(arg_shape, out_high_shape); - while (arg3_padded_shape.size() < max_shape_size) { - arg3_padded_shape.insert(arg3_padded_shape.begin(), 1); - } + size_t input_inner_stride = arg_size; + size_t in_low_inner_stride = 0; + size_t in_high_inner_stride = 0; + size_t out_low_inner_stride = 0; + size_t out_high_inner_stride = 0; - while (arg4_padded_shape.size() < max_shape_size) { - arg4_padded_shape.insert(arg4_padded_shape.begin(), 1); - } + std::tie(in_low_inner_stride, input_inner_stride) = + get_inner_stride(arg_size, arg_shape, in_low_shape, input_inner_stride); + std::tie(in_high_inner_stride, input_inner_stride) = + get_inner_stride(arg_size, arg_shape, in_high_shape, input_inner_stride); + std::tie(out_low_inner_stride, input_inner_stride) = + get_inner_stride(arg_size, arg_shape, out_low_shape, input_inner_stride); + std::tie(out_high_inner_stride, input_inner_stride) = + get_inner_stride(arg_size, arg_shape, out_high_shape, input_inner_stride); - Shape arg0_squeezed_shape, arg1_squeezed_shape, arg2_squeezed_shape, arg3_squeezed_shape, arg4_squeezed_shape; - AxisSet arg0_squeezed_axes, arg1_squeezed_axes, arg2_squeezed_axes, arg3_squeezed_axes, arg4_squeezed_axes; - Shape output_shape; + auto get_outer_strides = + [&output_strides, &in_low_strides, &in_high_strides, &out_low_strides, &out_high_strides](size_t flat_index) { + size_t in_low_stride = 0; + size_t in_high_stride = 0; + size_t out_low_stride = 0; + size_t out_high_stride = 0; - for (size_t i = 0; i < max_shape_size; i++) { - if (arg1_padded_shape[i] == 1) { - arg1_squeezed_axes.insert(i); - } else { - arg1_squeezed_shape.push_back(arg1_padded_shape[i]); + for (size_t i = 0; i < output_strides.size(); i++) { + size_t div = flat_index / output_strides[i]; + flat_index = flat_index % output_strides[i]; + in_low_stride += div * in_low_strides[i]; + in_high_stride += div * in_high_strides[i]; + out_low_stride += div * out_low_strides[i]; + out_high_stride += div * out_high_strides[i]; } - if (arg2_padded_shape[i] == 1) { - arg2_squeezed_axes.insert(i); - } else { - arg2_squeezed_shape.push_back(arg2_padded_shape[i]); - } + return std::tuple{in_low_stride, + in_high_stride, + out_low_stride, + out_high_stride}; + }; - if (arg0_padded_shape[i] == 1) { - arg0_squeezed_axes.insert(i); - } else { - arg0_squeezed_shape.push_back(arg0_padded_shape[i]); - } + if (in_low_inner_stride > 1 && in_high_inner_stride > 1 && out_low_inner_stride > 1 && out_high_inner_stride > 1) { + fake_quantize_non_unit_inner_stride(arg, + in_low, + in_high, + out_low, + out_high, + out, + arg_shape, + levels_minus_one, + input_inner_stride, + get_outer_strides); + } else if (in_low_inner_stride == 1 && in_high_inner_stride == 1 && out_low_inner_stride == 1 && + out_high_inner_stride == 1) { + fake_quantize_unit_inner_stride(arg, + in_low, + in_high, + out_low, + out_high, + out, + arg_shape, + levels_minus_one, + input_inner_stride, + get_outer_strides); - if (arg3_padded_shape[i] == 1) { - arg3_squeezed_axes.insert(i); - } else { - arg3_squeezed_shape.push_back(arg3_padded_shape[i]); 
- } + } else if (in_low_inner_stride > 1 && in_high_inner_stride > 1 && out_low_inner_stride == 1 && + out_high_inner_stride == 1) { + fake_quantize_unit_output_intervals_inner_stride(arg, + in_low, + in_high, + out_low, + out_high, + out, + arg_shape, + levels_minus_one, + input_inner_stride, + get_outer_strides); - if (arg4_padded_shape[i] == 1) { - arg4_squeezed_axes.insert(i); - } else { - arg4_squeezed_shape.push_back(arg4_padded_shape[i]); - } + } else if (in_low_inner_stride == 1 && in_high_inner_stride == 1 && out_low_inner_stride > 1 && + out_high_inner_stride > 1) { + fake_quantize_unit_input_intervals_inner_stride(arg, + in_low, + in_high, + out_low, + out_high, + out, + arg_shape, + levels_minus_one, + input_inner_stride, + get_outer_strides); + } else { + size_t in_low_stride = 0; + size_t in_high_stride = 0; + size_t out_low_stride = 0; + size_t out_high_stride = 0; - output_shape.push_back(std::max({arg0_padded_shape[i], - arg2_padded_shape[i], - arg1_padded_shape[i], - arg3_padded_shape[i], - arg4_padded_shape[i]})); + for (size_t i = 0; i < arg_size; i++) { + std::tie(in_low_stride, in_high_stride, out_low_stride, out_high_stride) = get_outer_strides(i); + *out++ = quantize(*arg++, + *(in_low + in_low_stride), + *(in_high + in_high_stride), + *(out_low + out_low_stride), + *(out_high + out_low_stride), + levels_minus_one); } + } +} - CoordinateTransformBasic arg0_transform(arg0_squeezed_shape); - CoordinateTransformBasic arg1_transform(arg1_squeezed_shape); - CoordinateTransformBasic arg2_transform(arg2_squeezed_shape); - CoordinateTransformBasic arg3_transform(arg3_squeezed_shape); - CoordinateTransformBasic arg4_transform(arg4_squeezed_shape); - CoordinateTransformBasic output_transform(output_shape); - - const auto arg0_strides = row_major_strides(arg0_squeezed_shape); - const auto arg1_strides = row_major_strides(arg1_squeezed_shape); - const auto arg2_strides = row_major_strides(arg2_squeezed_shape); - const auto arg3_strides = row_major_strides(arg3_squeezed_shape); - const auto arg4_strides = row_major_strides(arg4_squeezed_shape); - const auto output_strides = row_major_strides(output_shape); - - for (const Coordinate& output_coord : output_transform) { - const auto arg0_coord = util::reduce(output_coord, arg0_squeezed_axes); - const auto arg1_coord = util::reduce(output_coord, arg1_squeezed_axes); - const auto arg2_coord = util::reduce(output_coord, arg2_squeezed_axes); - const auto arg3_coord = util::reduce(output_coord, arg3_squeezed_axes); - const auto arg4_coord = util::reduce(output_coord, arg4_squeezed_axes); - - const size_t arg0_idx = - std::inner_product(arg0_coord.begin(), arg0_coord.end(), arg0_strides.begin(), uint64_t(0)); - const size_t arg1_idx = - std::inner_product(arg1_coord.begin(), arg1_coord.end(), arg1_strides.begin(), uint64_t(0)); - const size_t arg2_idx = - std::inner_product(arg2_coord.begin(), arg2_coord.end(), arg2_strides.begin(), uint64_t(0)); - const size_t arg3_idx = - std::inner_product(arg3_coord.begin(), arg3_coord.end(), arg3_strides.begin(), uint64_t(0)); - const size_t arg4_idx = - std::inner_product(arg4_coord.begin(), arg4_coord.end(), arg4_strides.begin(), uint64_t(0)); - const size_t output_idx = - std::inner_product(output_coord.begin(), output_coord.end(), output_strides.begin(), uint64_t(0)); - out[output_idx] = quantize(arg[arg0_idx], - in_low[arg1_idx], - in_high[arg2_idx], - out_low[arg3_idx], - out_high[arg4_idx], - levels); +namespace fake_quantize_details { +std::vector compute_strides(const ov::Shape& out_shape, 
const ov::Shape& shape) { + size_t stride = 1; + size_t out_rank = out_shape.size(); + size_t shape_rank = shape.size(); + std::vector strides(out_rank); + for (size_t i = 0; i < out_rank; i++) { + if (i < shape_rank && shape[shape_rank - i - 1] == out_shape[out_rank - i - 1]) { + strides[out_rank - i - 1] = stride; + stride *= shape[shape_rank - i - 1]; + } else { + strides[out_rank - i - 1] = 0; } } + return strides; } + +std::tuple get_inner_stride(size_t num_output_elements, + const ov::Shape& output_shape, + const ov::Shape& shape, + size_t current_output_inner_stride) { + if (shape.size() == 0) + return std::tuple{1, std::min(current_output_inner_stride, num_output_elements)}; + const size_t last = shape.back(); + auto it = std::find_if(shape.rbegin(), shape.rend(), [last](size_t dim) { + return (last == 1 && dim > 1) || (last > 1 && dim == 1); + }); + if (it == shape.rend()) { + const size_t num_elements = shape_size(shape); + return std::tuple{ + num_elements, + last == 1 ? current_output_inner_stride : std::min(current_output_inner_stride, num_elements)}; + } + const size_t idx = std::distance(it, shape.rbegin()) + static_cast(shape.size()); + const size_t inner_stride = + std::accumulate(shape.begin() + idx, shape.end(), static_cast(1), std::multiplies()); + const size_t output_inner_stride = std::accumulate(output_shape.begin() + output_shape.size() - shape.size() + idx, + output_shape.end(), + static_cast(1), + std::multiplies()); + return std::tuple{inner_stride, std::min(current_output_inner_stride, output_inner_stride)}; +} + +template +static void transform(const T* first1, const T* const last1, const T* first2, const T* first3, T* out, const F& f) { + while (first1 < last1) { + *out++ = f(*first1++, *first2++, *first3++); + } +} + +template +static void transform(const T* first1, + const T* const last1, + const T* first2, + const T* first3, + const T* first4, + const T* first5, + T* out, + const F& f) { + while (first1 < last1) { + *out++ = f(*first1++, *first2++, *first3++, *first4++, *first5++); + } +} + +template +static void fake_quantize_loop(const Shape& arg_shape, + const T* arg, + const T* in_low, + const T* in_high, + const T* out_low, + const T* out_high, + T* out, + size_t input_inner_stride, + const F1& get_outer_strides, + const F2& quantize_loop) { + size_t in_low_stride = 0; + size_t in_high_stride = 0; + size_t out_low_stride = 0; + size_t out_high_stride = 0; + + for (size_t i = 0; i < shape_size(arg_shape); i += input_inner_stride) { + std::tie(in_low_stride, in_high_stride, out_low_stride, out_high_stride) = get_outer_strides(i); + quantize_loop(arg, + arg + input_inner_stride, + in_low + in_low_stride, + in_high + in_high_stride, + out_low + out_low_stride, + out_high + out_high_stride, + out); + arg += input_inner_stride; + out += input_inner_stride; + } +} + +template +void fake_quantize_non_unit_inner_stride(const T* arg, + const T* in_low, + const T* in_high, + const T* out_low, + const T* out_high, + T* out, + const Shape& arg_shape, + T levels_minus_one, + size_t input_inner_stride, + const F& get_outer_strides) { + fake_quantize_loop(arg_shape, + arg, + in_low, + in_high, + out_low, + out_high, + out, + input_inner_stride, + get_outer_strides, + [levels_minus_one](const T* input, + const T* const input_end, + const T* in_low, + const T* in_high, + const T* out_low, + const T* out_high, + T* out) { + transform(input, + input_end, + in_low, + in_high, + out_low, + out_high, + out, + [levels_minus_one](T input, T in_low, T in_high, T out_low, T 
out_high) { + return quantize(input, in_low, in_high, out_low, out_high, levels_minus_one); + }); + }); +} + +template +void fake_quantize_unit_inner_stride(const T* arg, + const T* in_low, + const T* in_high, + const T* out_low, + const T* out_high, + T* out, + const Shape& arg_shape, + T levels_minus_one, + size_t input_inner_stride, + const F& get_outer_strides) { + auto quantize_with_scalar_intervals = [levels_minus_one](const T* input, + const T* const input_end, + const T* in_low, + const T* in_high, + const T* out_low, + const T* out_high, + T* out) { + const auto in_low_scalar = *in_low; + const auto in_high_scalar = *in_high; + const auto out_low_scalar = *out_low; + const auto out_high_scalar = *out_high; + std::transform(input, + input_end, + out, + [levels_minus_one, in_low_scalar, in_high_scalar, out_low_scalar, out_high_scalar](T input) { + return quantize(input, + in_low_scalar, + in_high_scalar, + out_low_scalar, + out_high_scalar, + levels_minus_one); + }); + }; + + fake_quantize_loop(arg_shape, + arg, + in_low, + in_high, + out_low, + out_high, + out, + input_inner_stride, + get_outer_strides, + quantize_with_scalar_intervals); +} + +template +void fake_quantize_unit_output_intervals_inner_stride(const T* arg, + const T* in_low, + const T* in_high, + const T* out_low, + const T* out_high, + T* out, + const Shape& arg_shape, + T levels_minus_one, + size_t input_inner_stride, + const F& get_outer_strides) { + auto quantize_with_scalar_output_intervals = [levels_minus_one](const T* input, + const T* const input_end, + const T* in_low, + const T* in_high, + const T* out_low, + const T* out_high, + T* out) { + const auto out_low_scalar = *out_low; + const auto out_high_scalar = *out_high; + transform(input, + input_end, + in_low, + in_high, + out, + [levels_minus_one, out_low_scalar, out_high_scalar](T input, T in_low, T in_high) { + return quantize(input, in_low, in_high, out_low_scalar, out_high_scalar, levels_minus_one); + }); + }; + + fake_quantize_loop(arg_shape, + arg, + in_low, + in_high, + out_low, + out_high, + out, + input_inner_stride, + get_outer_strides, + quantize_with_scalar_output_intervals); +} + +template +void fake_quantize_unit_input_intervals_inner_stride(const T* arg, + const T* in_low, + const T* in_high, + const T* out_low, + const T* out_high, + T* out, + const Shape& arg_shape, + T levels_minus_one, + size_t input_inner_stride, + const F& get_outer_strides) { + auto quantize_with_scalar_input_intervals = [levels_minus_one](const T* input, + const T* const input_end, + const T* in_low, + const T* in_high, + const T* out_low, + const T* out_high, + T* out) { + const auto in_low_scalar = *in_low; + const auto in_high_scalar = *in_high; + transform(input, + input_end, + out_low, + out_high, + out, + [levels_minus_one, in_low_scalar, in_high_scalar](T input, T out_low, T out_high) { + return quantize(input, in_low_scalar, in_high_scalar, out_low, out_high, levels_minus_one); + }); + }; + + fake_quantize_loop(arg_shape, + arg, + in_low, + in_high, + out_low, + out_high, + out, + input_inner_stride, + get_outer_strides, + quantize_with_scalar_input_intervals); +} + +} // namespace fake_quantize_details + } // namespace reference } // namespace ov diff --git a/src/plugins/template/tests/functional/op_reference/fake_quantize.cpp b/src/plugins/template/tests/functional/op_reference/fake_quantize.cpp index 418d02c23c5d08..d2ea203ff8e942 100644 --- a/src/plugins/template/tests/functional/op_reference/fake_quantize.cpp +++ 
b/src/plugins/template/tests/functional/op_reference/fake_quantize.cpp @@ -253,6 +253,22 @@ std::vector generateParamsForFakeQuantize() { }), 16, op::AutoBroadcastSpec(op::AutoBroadcastType::NUMPY)), + FakeQuantizeParams( + ov::Shape{1, 2, 4, 4}, + ov::Shape{1, 2, 4, 4}, + IN_ET, + IN_ET, + iota_vector(shape_size(Shape{1, 2, 4, 4})), + std::vector{ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 8.75, 8.75, 8.75, 8.75, 8.75, 8.75, 17.5, + 23.75, 23.75, 27.5, 27.5, 27.5, 27.5, 27.5, 31.25, 31.25, 31.25, 31.25, 31.25, 35, 35, 35, 35, + }, + op::v0::Constant::create(IN_ET, Shape{1, 2, 1, 1}, {5.f, 10.f}), + op::v0::Constant::create(IN_ET, Shape{1, 1}, {30.f}), + op::v0::Constant::create(IN_ET, Shape{2, 1, 1}, {0.f, 20.f}), + op::v0::Constant::create(IN_ET, Shape{1}, {35.f}), + 5), + }; return params; } From b630bffa14bab108f1cb128bff5a47f45dd5bb32 Mon Sep 17 00:00:00 2001 From: Anastasiia Pnevskaia Date: Tue, 10 Oct 2023 11:14:45 +0200 Subject: [PATCH 122/257] Switch telemetry to opt-out approach. (#20290) * Switch telemetry to opt-out approach. * Minor correction. * Small correction. --- README.md | 8 ++++ docs/resources/telemetry_information.md | 40 +++---------------- src/bindings/python/requirements.txt | 2 +- tools/constraints.txt | 2 +- .../tools/mo/utils/telemetry_utils.py | 6 ++- .../ovc/openvino/tools/ovc/telemetry_utils.py | 6 ++- 6 files changed, 25 insertions(+), 39 deletions(-) diff --git a/README.md b/README.md index adc6f9f2b965ea..e7004ffc7f57aa 100644 --- a/README.md +++ b/README.md @@ -129,6 +129,14 @@ OpenVINO™ Toolkit also contains several plugins which simplify loading models OpenVINO™ Toolkit is licensed under [Apache License Version 2.0](LICENSE). By contributing to the project, you agree to the license and copyright terms therein and release your contribution under these terms. +## Telemetry +OpenVINO™ collects software performance and usage data for the purpose of improving OpenVINO™ tools. This data is collected directly by OpenVINO™ or through the use of Google Analytics 4. +You can opt-out at any time by running the command: + +opt_in_out --opt_out + +More Information is available at https://docs.openvino.ai/latest/openvino_docs_telemetry_information.html. + ## Documentation ### User documentation diff --git a/docs/resources/telemetry_information.md b/docs/resources/telemetry_information.md index 4340a40923770b..b23a763ff97e70 100644 --- a/docs/resources/telemetry_information.md +++ b/docs/resources/telemetry_information.md @@ -3,13 +3,11 @@ @sphinxdirective .. meta:: - :description: Learn about OpenVINO™ telemetry, that with your explicit consent - collects only usage data to simplify debugging and further development. + :description: Learn about OpenVINO™ telemetry, that collects anonymous usage data for the purpose of improving OpenVINO™ tools. -To facilitate debugging and further development, OpenVINO™ asks its users for -a permission to collect telemetry data. It will not be collected -without an explicit consent on your part and will cover only OpenVINO™ usage information. +To facilitate debugging and further development, OpenVINO™ collects anonymous telemetry data. Anonymous telemetry data is collected by default, +but you can stop data collection anytime by running the command ``opt_in_out --opt_out``. It does not extend to any other Intel software, hardware, website usage, or other products. Google Analytics is used for telemetry purposes. Refer to @@ -18,34 +16,6 @@ Google Analytics is used for telemetry purposes. 
Refer to Enable or disable Telemetry reporting ########################################################### -First-run consent -+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - -On the first run of an application that collects telemetry data, you will be prompted -to opt in or out of telemetry collection with the following telemetry message: - -.. code-block:: console - - Intel would like your permission to collect software performance and usage data - for the purpose of improving Intel products and services. This data will be collected - directly by Intel or through the use of Google Analytics. This data will be stored - in countries where Intel or Google operate. - - You can opt-out at any time in the future by running ``opt_in_out --opt_in``. - - More Information is available at docs.openvino.ai. - - Please type ``Y`` to give your consent or ``N`` to decline. - -Choose your preference by typing ``Y`` to enable or ``N`` to disable telemetry. Your choice will -be confirmed by a corresponding disclaimer. If you do not reply to the telemetry message, -your telemetry data will not be collected. - -For the Neural Network Compression Framework (NNCF), which is not a command line application, -the telemetry message will not display. Telemetry data will only be collected from NNCF -if you have explicitly provided consent in another OpenVINO tool. - - Changing consent decision +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ @@ -82,8 +52,8 @@ Telemetry Data Collection Details .. tab-item:: Telemetry Data Retention :sync: telemetry-data-retention - Telemetry data is retained in Google Analytics for a maximum of 26 months. - Any raw data that has reached the 26-month threshold is deleted from Google Analytics on a monthly basis. + Telemetry data is retained in Google Analytics for a maximum of 14 months. + Any raw data that has reached the 14-month threshold is deleted from Google Analytics on a monthly basis. 
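For reference, the functional change in this telemetry commit is that the MO and OVC helpers now construct the telemetry client with the opt-in dialog disabled, making collection opt-out. A minimal standalone sketch of that pattern follows; the measurement ID and version string are placeholders (the real helpers obtain them from ``get_tid()`` and ``get_rt_version()`` as in the ``telemetry_utils.py`` hunks below), and the ``enable_opt_in_dialog`` argument assumes ``openvino-telemetry>=2023.2.0``, the version this patch pins.

.. code-block:: py

   import openvino_telemetry as tm

   # Opt-out-by-default initialization: no consent dialog is shown on first run;
   # users can disable collection at any time with `opt_in_out --opt_out`.
   telemetry = tm.Telemetry(tid="G-XXXXXXXXXX",       # placeholder measurement ID
                            app_name="Model Optimizer",
                            app_version="2023.2",      # placeholder; real code uses get_rt_version()
                            backend="ga4",
                            enable_opt_in_dialog=False)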
@endsphinxdirective \ No newline at end of file diff --git a/src/bindings/python/requirements.txt b/src/bindings/python/requirements.txt index 72438eeb2ecd91..92e55596bd6227 100644 --- a/src/bindings/python/requirements.txt +++ b/src/bindings/python/requirements.txt @@ -1,3 +1,3 @@ numpy>=1.16.6 singledispatchmethod; python_version<'3.8' -openvino-telemetry>=2023.1.0 +openvino-telemetry>=2023.2.0 diff --git a/tools/constraints.txt b/tools/constraints.txt index 18a3080d3a1e78..484466b5bda48e 100644 --- a/tools/constraints.txt +++ b/tools/constraints.txt @@ -18,4 +18,4 @@ pyenchant>=3.0.0 test-generator==0.1.1 py>=1.9.0 urllib3>=1.26.4 -openvino-telemetry>=2023.1.0 +openvino-telemetry>=2023.2.0 diff --git a/tools/mo/openvino/tools/mo/utils/telemetry_utils.py b/tools/mo/openvino/tools/mo/utils/telemetry_utils.py index 802986edf4c4c0..09ecd528ae28e1 100644 --- a/tools/mo/openvino/tools/mo/utils/telemetry_utils.py +++ b/tools/mo/openvino/tools/mo/utils/telemetry_utils.py @@ -22,7 +22,11 @@ def init_mo_telemetry(app_name='Model Optimizer'): - return tm.Telemetry(tid=get_tid(), app_name=app_name, app_version=get_rt_version(), backend='ga4') + return tm.Telemetry(tid=get_tid(), + app_name=app_name, + app_version=get_rt_version(), + backend='ga4', + enable_opt_in_dialog=False) def send_framework_info(framework: str): diff --git a/tools/ovc/openvino/tools/ovc/telemetry_utils.py b/tools/ovc/openvino/tools/ovc/telemetry_utils.py index 87e0132ccd17a6..812575c1fba8f6 100644 --- a/tools/ovc/openvino/tools/ovc/telemetry_utils.py +++ b/tools/ovc/openvino/tools/ovc/telemetry_utils.py @@ -17,7 +17,11 @@ def init_mo_telemetry(app_name='Model Conversion API'): - return tm.Telemetry(tid=get_tid(), app_name=app_name, app_version=get_rt_version(), backend='ga4') + return tm.Telemetry(tid=get_tid(), + app_name=app_name, + app_version=get_rt_version(), + backend='ga4', + enable_opt_in_dialog=False) def send_framework_info(framework: str): """ From 0fec05ecf2e2b828ec4e870157780de141b54ee3 Mon Sep 17 00:00:00 2001 From: Maxim Vafin Date: Tue, 10 Oct 2023 11:23:23 +0200 Subject: [PATCH 123/257] Mark models according to last validation and fix some problems in models (#20324) * Mark models according to last validation and fix some problems in models * Update tests/model_hub_tests/torch_tests/hf_transformers_models --- .../torch_tests/detectron2_models | 2 +- .../torch_tests/hf_transformers_models | 10 +++---- .../torch_tests/test_detectron2.py | 27 +++++++++++++------ ...ransformers.py => test_hf_transformers.py} | 0 .../torch_tests/test_torchvision_models.py | 2 +- 5 files changed, 26 insertions(+), 15 deletions(-) rename tests/model_hub_tests/torch_tests/{test_transformers.py => test_hf_transformers.py} (100%) diff --git a/tests/model_hub_tests/torch_tests/detectron2_models b/tests/model_hub_tests/torch_tests/detectron2_models index 091464ab8d3080..bf893697180835 100644 --- a/tests/model_hub_tests/torch_tests/detectron2_models +++ b/tests/model_hub_tests/torch_tests/detectron2_models @@ -1,4 +1,4 @@ -COCO-Detection/fast_rcnn_R_50_FPN_1x,none +COCO-Detection/fast_rcnn_R_50_FPN_1x,none,xfail,Tracing problem COCO-Detection/faster_rcnn_R_101_C4_3x,none COCO-Detection/faster_rcnn_R_101_DC5_3x,none COCO-Detection/faster_rcnn_R_101_FPN_3x,none diff --git a/tests/model_hub_tests/torch_tests/hf_transformers_models b/tests/model_hub_tests/torch_tests/hf_transformers_models index 028de156c32459..112aedeb60de0c 100644 --- a/tests/model_hub_tests/torch_tests/hf_transformers_models +++ 
b/tests/model_hub_tests/torch_tests/hf_transformers_models @@ -3,7 +3,7 @@ abcp4/mymodel-test,mymodel,skip,Load problem abeja/gpt-neox-japanese-2.7b,gpt_neox_japanese acl-submission-anonym/EAM-spectral,examuse,skip,Load problem adalbertojunior/modular-test,modular,skip,Load problem -aerner/lm-v2,open-llama +aerner/lm-v2,open-llama,xfail,Example input problem afonsosamarques/ardt-vanilla-combo_train_hopper_v2-2508_1336-33,decision_transformer,xfail,Tracing problem aihijo/gec-zh-gector-bert-large,gector,skip,Load problem albert-base-v2,albert @@ -21,7 +21,7 @@ ArthurZ/jukebox-vqvae,jukebox_vqvae,skip,Load problem ArthurZ/persimmon-8b-base,persimmon,skip,Load problem ashishpatel26/span-marker-bert-base-fewnerd-coarse-super,span-marker,skip,Load problem asi/albert-act-tiny,albert_act,skip,Load problem -BAAI/AltCLIP,altclip,xfail,Unsupported op aten::numpy_T +BAAI/AltCLIP,altclip BAAI/AquilaCode-py,aquila,skip,Load problem bana513/opennmt-translator-en-hu,opennmt-translator,skip,Load problem benjamin/wtp-bert-mini,bert-char,skip,Load problem @@ -79,7 +79,7 @@ facebook/musicgen-small,musicgen,skip,Load problem facebook/opt-125m,opt facebook/rag-token-nq,rag,skip,Load problem facebook/sam-vit-large,sam,xfail,No node with name original_sizes -facebook/timesformer-base-finetuned-k400,timesformer,xfail,Shape inference of Add node failed: Eltwise shape infer input shapes dim index: 1 mismatch +facebook/timesformer-base-finetuned-k400,timesformer facebook/vit-mae-base,vit_mae,xfail,Accuracy validation failed facebook/wmt19-ru-en,fsmt,xfail,Tracing problem facebook/xlm-roberta-xl,xlm-roberta-xl @@ -199,7 +199,7 @@ kiddothe2b/hierarchical-transformer-base-4096-v2,hat,skip,Load problem k-l-lambda/clip-text-generator,clip_text_generator,skip,Load problem k-l-lambda/stable-diffusion-v1-4-inv-embed,inv_word_embed,skip,Load problem KoboldAI/fairseq-dense-13B-Janeway,xglm,skip,Large Model -konverner/qdq-camembert-apolliner,qdqbert +konverner/qdq-camembert-apolliner,qdqbert,xfail,Repository not found krasserm/perceiver-ar-clm-base,perceiver-ar-causal-language-model,skip,Load problem krasserm/perceiver-ar-sam-giant-midi,perceiver-ar-symbolic-audio-model,skip,Load problem krasserm/perceiver-io-img-clf,perceiver-io-image-classifier,skip,Load problem @@ -309,7 +309,7 @@ regisss/bridgetower-newyorker-a100-8x,bridgetower rinna/japanese-cloob-vit-b-16,cloob,skip,Load problem Rocketknight1/tiny-random-falcon-7b,falcon RUCAIBox/mass-base-uncased,mass,skip,Load problem -RWKV/rwkv-4-169m-pile,rwkv,xfail,Unsupported op aten::maximum +RWKV/rwkv-4-169m-pile,rwkv sahasrarjn/interbert,BERT,skip,Load problem saibo/genkalm-medium-gpt2,genkalm,skip,Load problem SajjadAyoubi/clip-fa-vision,clip_vision_model diff --git a/tests/model_hub_tests/torch_tests/test_detectron2.py b/tests/model_hub_tests/torch_tests/test_detectron2.py index e8859905622260..0ceebcff9f258d 100644 --- a/tests/model_hub_tests/torch_tests/test_detectron2.py +++ b/tests/model_hub_tests/torch_tests/test_detectron2.py @@ -20,7 +20,7 @@ def setup_class(self): def load_model(self, model_name, model_link): from detectron2 import model_zoo, export - from detectron2.modeling import build_model + from detectron2.modeling import build_model, PanopticFPN from detectron2.checkpoint import DetectionCheckpointer from detectron2.config import CfgNode import torchvision.transforms as transforms @@ -32,13 +32,23 @@ def load_model(self, model_name, model_link): assert isinstance(cfg, CfgNode), "Unexpected config" cfg.MODEL.DEVICE = "cpu" model = build_model(cfg) - 
DetectionCheckpointer(model, save_to_disk=False).load(cfg.MODEL.WEIGHTS) - + DetectionCheckpointer( + model, save_to_disk=False).load(cfg.MODEL.WEIGHTS) + model.eval() inputs = [{"image": image, "height": torch.tensor(image.shape[1]), "width": torch.tensor(image.shape[2])}] - adapter = export.TracingAdapter(model, inputs) + # https://github.com/facebookresearch/detectron2/blob/4e80df1e58901557e2824ce3b488d30209a9be33/tools/deploy/export_model.py#L123 + # This is done only for Panoptic models, but it may be incorrect to do that, because one of outputs of panoptic model is getting lost + if isinstance(model, PanopticFPN): + def inference(model, inputs): + # use do_postprocess=False so it returns ROI mask + inst = model.inference(inputs, do_postprocess=False)[0] + return [{"instances": inst}] + else: + inference = None # assume that we just call the model directly + adapter = export.TracingAdapter(model, inputs, inference) self.example = adapter.flattened_inputs return adapter @@ -75,7 +85,8 @@ def compare_results(self, fw_outputs, ov_outputs): cur_fw_res = fw_outputs[i] cur_ov_res = ov_outputs[i] l = min(len(cur_fw_res), len(cur_ov_res)) - assert l > 0 or len(cur_fw_res) == len(cur_ov_res), "No boxes were selected." + assert l > 0 or len(cur_fw_res) == len( + cur_ov_res), "No boxes were selected." print(f"fw_re: {cur_fw_res};\n ov_res: {cur_ov_res}") is_ok = compare_two_tensors(cur_ov_res[:l], cur_fw_res[:l], fw_eps) assert is_ok, "Accuracy validation failed" @@ -86,8 +97,8 @@ def compare_results(self, fw_outputs, ov_outputs): def test_detectron2_precommit(self, name, type, mark, reason, ie_device): self.run(name, None, ie_device) - @pytest.mark.parametrize("name,type,mark,reason", - get_models_list(os.path.join(os.path.dirname(__file__), "detectron2_models"))) + @pytest.mark.parametrize("name", + [pytest.param(n, marks=pytest.mark.xfail(reason=r)) if m == "xfail" else n for n, _, m, r in get_models_list(os.path.join(os.path.dirname(__file__), "detectron2_models"))]) @pytest.mark.nightly - def test_detectron2_all_models(self, name, type, mark, reason, ie_device): + def test_detectron2_all_models(self, name, ie_device): self.run(name, None, ie_device) diff --git a/tests/model_hub_tests/torch_tests/test_transformers.py b/tests/model_hub_tests/torch_tests/test_hf_transformers.py similarity index 100% rename from tests/model_hub_tests/torch_tests/test_transformers.py rename to tests/model_hub_tests/torch_tests/test_hf_transformers.py diff --git a/tests/model_hub_tests/torch_tests/test_torchvision_models.py b/tests/model_hub_tests/torch_tests/test_torchvision_models.py index a7722389e4132d..7b93ea68505a88 100644 --- a/tests/model_hub_tests/torch_tests/test_torchvision_models.py +++ b/tests/model_hub_tests/torch_tests/test_torchvision_models.py @@ -114,7 +114,7 @@ def test_convert_model_precommit(self, model_name, ie_device): self.run(model_name, None, ie_device) @pytest.mark.parametrize("name", - [pytest.param(n, marks=pytest.mark.xfail) if m == "xfail" else n for n, _, m, r in get_models_list(os.path.join(os.path.dirname(__file__), "torchvision_models"))]) + [pytest.param(n, marks=pytest.mark.xfail(reason=r)) if m == "xfail" else n for n, _, m, r in get_models_list(os.path.join(os.path.dirname(__file__), "torchvision_models"))]) @pytest.mark.nightly def test_convert_model_all_models(self, name, ie_device): self.run(name, None, ie_device) From 34a86ecb3a6f9d06a4e1711f8c40bfcae915f4ad Mon Sep 17 00:00:00 2001 From: Oleg Pipikin Date: Tue, 10 Oct 2023 11:25:57 +0200 Subject: [PATCH 124/257] 
Refactor BinaryConvolutionLayerTest and BucketizeLayerTest (#19654) * Refactor BinaryConvolutionLayerTest * Refactor BucketizeLayerTest --- .../single_layer_tests/binary_convolution.cpp | 31 ++++---- .../single_layer_tests/bucketize.cpp | 59 +++++++------- .../single_layer_tests/binary_convolution.hpp | 1 - .../single_op_tests/binary_convolution.hpp | 15 ++++ .../include/single_op_tests/bucketize.hpp | 15 ++++ .../single_op/binary_convolution.hpp | 42 ++++++++++ .../single_op/bucketize.hpp | 33 ++++++++ .../src/single_layer/bucketize.cpp | 1 - .../src/single_op/binary_convolution.cpp | 79 +++++++++++++++++++ .../src/single_op/bucketize.cpp | 61 ++++++++++++++ 10 files changed, 288 insertions(+), 49 deletions(-) create mode 100644 src/tests/functional/plugin/shared/include/single_op_tests/binary_convolution.hpp create mode 100644 src/tests/functional/plugin/shared/include/single_op_tests/bucketize.hpp create mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/binary_convolution.hpp create mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/bucketize.hpp create mode 100644 src/tests/functional/shared_test_classes/src/single_op/binary_convolution.cpp create mode 100644 src/tests/functional/shared_test_classes/src/single_op/bucketize.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/binary_convolution.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/binary_convolution.cpp index 53a249873e2861..da2df80eb7dda0 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/binary_convolution.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/binary_convolution.cpp @@ -4,16 +4,17 @@ #include -#include "single_layer_tests/binary_convolution.hpp" +#include "single_op_tests/binary_convolution.hpp" #include "common_test_utils/test_constants.hpp" -using namespace LayerTestsDefinitions; +using ov::test::BinaryConvolutionLayerTest; namespace { -const std::vector netPrecisions = { - InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP16, - InferenceEngine::Precision::I32}; +const std::vector model_types = { + ov::element::f32, + ov::element::f16, + ov::element::i32}; /* ============= 2D Binary Convolution ============= */ const std::vector> kernels = {{3, 3}, {3, 5}}; @@ -24,6 +25,10 @@ const std::vector> dilations = {{1, 1}, {3, 1}}; const std::vector numOutChannels = {1, 5}; const std::vector padValues = {0, 1}; +const std::vector> input_shapes_static = { + {{ 1, 3, 30, 30}}, +}; + const auto binConv2DParams_ExplicitPadding = ::testing::Combine( ::testing::ValuesIn(kernels), ::testing::ValuesIn(strides), @@ -48,12 +53,8 @@ INSTANTIATE_TEST_SUITE_P( smoke_BinaryConvolution2D_ExplicitPadding, BinaryConvolutionLayerTest, ::testing::Combine( binConv2DParams_ExplicitPadding, - ::testing::ValuesIn(netPrecisions), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(std::vector({1, 3, 30, 30})), + ::testing::ValuesIn(model_types), + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_static)), ::testing::Values(ov::test::utils::DEVICE_CPU)), BinaryConvolutionLayerTest::getTestCaseName); @@ -61,12 +62,8 @@ INSTANTIATE_TEST_SUITE_P( 
smoke_BinaryConvolution2D_AutoPadValid, BinaryConvolutionLayerTest, ::testing::Combine( binConv2DParams_ValidPadding, - ::testing::ValuesIn(netPrecisions), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(std::vector({1, 3, 30, 30})), + ::testing::ValuesIn(model_types), + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_static)), ::testing::Values(ov::test::utils::DEVICE_CPU)), BinaryConvolutionLayerTest::getTestCaseName); diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/bucketize.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/bucketize.cpp index 4ac333bb09c9f4..f1c08370ebfd8f 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/bucketize.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/bucketize.cpp @@ -4,53 +4,52 @@ #include -#include "single_layer_tests/bucketize.hpp" +#include "single_op_tests/bucketize.hpp" #include "common_test_utils/test_constants.hpp" -using namespace LayerTestsDefinitions; - -const std::vector> dataShapes = { - {1, 20, 20}, - {2, 3, 50, 50} -}; - -const std::vector> bucketsShapes = { - {5}, - {20}, - {100} +using ov::test::BucketizeLayerTest; + +namespace { +const std::vector> input_shapes_static = { + //data_shape, bucket_shape + {{ 1, 20, 20 }, {5}}, + {{ 1, 20, 20 }, {20}}, + {{ 1, 20, 20 }, {100}}, + {{ 2, 3, 50, 50 }, {5}}, + {{ 2, 3, 50, 50 }, {20}}, + {{ 2, 3, 50, 50 }, {100}} }; -const std::vector inPrc = { - InferenceEngine::Precision::FP32, - InferenceEngine::Precision::FP16, - InferenceEngine::Precision::I64, - InferenceEngine::Precision::I32 +const std::vector in_types = { + ov::element::f32, + ov::element::f16, + ov::element::i64, + ov::element::i32 }; -const std::vector netPrc = { - InferenceEngine::Precision::I64, - InferenceEngine::Precision::I32 +const std::vector model_types = { + ov::element::i64, + ov::element::i32 }; const auto test_Bucketize_right_edge = ::testing::Combine( - ::testing::ValuesIn(dataShapes), - ::testing::ValuesIn(bucketsShapes), + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_static)), ::testing::Values(true), - ::testing::ValuesIn(inPrc), - ::testing::ValuesIn(inPrc), - ::testing::ValuesIn(netPrc), + ::testing::ValuesIn(in_types), + ::testing::ValuesIn(in_types), + ::testing::ValuesIn(model_types), ::testing::Values(ov::test::utils::DEVICE_CPU) ); const auto test_Bucketize_left_edge = ::testing::Combine( - ::testing::ValuesIn(dataShapes), - ::testing::ValuesIn(bucketsShapes), + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_static)), ::testing::Values(false), - ::testing::ValuesIn(inPrc), - ::testing::ValuesIn(inPrc), - ::testing::ValuesIn(netPrc), + ::testing::ValuesIn(in_types), + ::testing::ValuesIn(in_types), + ::testing::ValuesIn(model_types), ::testing::Values(ov::test::utils::DEVICE_CPU) ); INSTANTIATE_TEST_SUITE_P(smoke_TestsBucketize_right, BucketizeLayerTest, test_Bucketize_right_edge, BucketizeLayerTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_TestsBucketize_left, BucketizeLayerTest, test_Bucketize_left_edge, BucketizeLayerTest::getTestCaseName); +} // namespace diff --git 
a/src/tests/functional/plugin/shared/include/single_layer_tests/binary_convolution.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/binary_convolution.hpp index f6f2fbdbbc9e6f..6d76bf71a3ee66 100644 --- a/src/tests/functional/plugin/shared/include/single_layer_tests/binary_convolution.hpp +++ b/src/tests/functional/plugin/shared/include/single_layer_tests/binary_convolution.hpp @@ -7,7 +7,6 @@ #include "shared_test_classes/single_layer/binary_convolution.hpp" namespace LayerTestsDefinitions { - TEST_P(BinaryConvolutionLayerTest, CompareWithRefs) { Run(); } diff --git a/src/tests/functional/plugin/shared/include/single_op_tests/binary_convolution.hpp b/src/tests/functional/plugin/shared/include/single_op_tests/binary_convolution.hpp new file mode 100644 index 00000000000000..4f1e93f4155ab8 --- /dev/null +++ b/src/tests/functional/plugin/shared/include/single_op_tests/binary_convolution.hpp @@ -0,0 +1,15 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "shared_test_classes/single_op/binary_convolution.hpp" + +namespace ov { +namespace test { +TEST_P(BinaryConvolutionLayerTest, Inference) { + run(); +} +} // namespace test +} // namespace ov diff --git a/src/tests/functional/plugin/shared/include/single_op_tests/bucketize.hpp b/src/tests/functional/plugin/shared/include/single_op_tests/bucketize.hpp new file mode 100644 index 00000000000000..3056be6c48d922 --- /dev/null +++ b/src/tests/functional/plugin/shared/include/single_op_tests/bucketize.hpp @@ -0,0 +1,15 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "shared_test_classes/single_op/bucketize.hpp" + +namespace ov { +namespace test { +TEST_P(BucketizeLayerTest, Inference) { + run(); +} +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/binary_convolution.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/binary_convolution.hpp new file mode 100644 index 00000000000000..14d388a8883266 --- /dev/null +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/binary_convolution.hpp @@ -0,0 +1,42 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include +#include + +#include "shared_test_classes/base/ov_subgraph.hpp" + +namespace ov { +namespace test { + +using binConvSpecificParams = std::tuple< + std::vector, // Kernel size + std::vector, // Strides + std::vector, // Pads begin + std::vector, // Pads end + std::vector, // Dilations + size_t, // Num Output channels + ov::op::PadType, // Padding type + float>; // Padding value + +using binaryConvolutionTestParamsSet = std::tuple< + binConvSpecificParams, // + ov::element::Type, // Model Type + std::vector, // Input shape + std::string>; // Device name + +class BinaryConvolutionLayerTest : public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseTest { +public: + static std::string getTestCaseName(const testing::TestParamInfo& obj); +protected: + void SetUp() override; +}; + +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/bucketize.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/bucketize.hpp new file mode 100644 index 00000000000000..509a1405379fbd --- /dev/null +++ 
b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/bucketize.hpp @@ -0,0 +1,33 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include + +#include "shared_test_classes/base/ov_subgraph.hpp" + +namespace ov { +namespace test { + +using bucketizeParamsTuple = std::tuple< + std::vector, // data shape, bucket shape + bool, // Right edge of interval + ov::element::Type, // Data input precision + ov::element::Type, // Buckets input precision + ov::element::Type, // Output precision + std::string>; // Device name + +class BucketizeLayerTest : public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseTest { +public: + static std::string getTestCaseName(const testing::TestParamInfo& obj); +protected: + void SetUp() override; +}; + + +} // namespace test +} // namespace ov \ No newline at end of file diff --git a/src/tests/functional/shared_test_classes/src/single_layer/bucketize.cpp b/src/tests/functional/shared_test_classes/src/single_layer/bucketize.cpp index 30f162feeb242f..381a2c9f55fcf7 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/bucketize.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/bucketize.cpp @@ -5,7 +5,6 @@ #include "shared_test_classes/single_layer/bucketize.hpp" namespace LayerTestsDefinitions { - std::string BucketizeLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { InferenceEngine::SizeVector dataShape; InferenceEngine::SizeVector bucketsShape; diff --git a/src/tests/functional/shared_test_classes/src/single_op/binary_convolution.cpp b/src/tests/functional/shared_test_classes/src/single_op/binary_convolution.cpp new file mode 100644 index 00000000000000..b2f056457a852f --- /dev/null +++ b/src/tests/functional/shared_test_classes/src/single_op/binary_convolution.cpp @@ -0,0 +1,79 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "shared_test_classes/single_op/binary_convolution.hpp" + +#include "ov_models/builders.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" + +namespace ov { +namespace test { +std::string BinaryConvolutionLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { + binConvSpecificParams bin_conv_params; + ov::element::Type model_type; + std::vector shapes; + std::string target_device; + + std::tie(bin_conv_params, model_type, shapes, target_device) = obj.param; + + ov::op::PadType pad_type; + std::vector kernel, stride, dilation; + std::vector pad_begin, padEnd; + size_t conv_out_channels; + float pad_value; + std::tie(kernel, stride, pad_begin, padEnd, dilation, conv_out_channels, pad_type, pad_value) = bin_conv_params; + + std::ostringstream result; + result << "IS=("; + for (size_t i = 0lu; i < shapes.size(); i++) { + result << ov::test::utils::partialShape2str({shapes[i].first}) << (i < shapes.size() - 1lu ? "_" : ""); + } + result << ")_TS="; + for (size_t i = 0lu; i < shapes.front().second.size(); i++) { + result << "{"; + for (size_t j = 0lu; j < shapes.size(); j++) { + result << ov::test::utils::vec2str(shapes[j].second[i]) << (j < shapes.size() - 1lu ? 
"_" : ""); + } + result << "}_"; + } + result << "KS=" << ov::test::utils::vec2str(kernel) << "_"; + result << "S=" << ov::test::utils::vec2str(stride) << "_"; + result << "PB=" << ov::test::utils::vec2str(pad_begin) << "_"; + result << "PE=" << ov::test::utils::vec2str(padEnd) << "_"; + result << "D=" << ov::test::utils::vec2str(dilation) << "_"; + result << "O=" << conv_out_channels << "_"; + result << "AP=" << pad_type << "_"; + result << "PV=" << pad_value << "_"; + result << "netPRC=" << model_type.get_type_name() << "_"; + result << "trgDev=" << target_device; + return result.str(); +} + +void BinaryConvolutionLayerTest::SetUp() { + binConvSpecificParams bin_conv_params; + ov::element::Type model_type; + std::vector shapes; + + std::tie(bin_conv_params, model_type, shapes, targetDevice) = this->GetParam(); + init_input_shapes(shapes); + + ov::op::PadType pad_type; + std::vector kernel_size, strides, dilations; + std::vector pads_begin, pads_end; + size_t num_out_channels; + float pad_value; + std::tie(kernel_size, strides, pads_begin, pads_end, dilations, num_out_channels, pad_type, pad_value) = bin_conv_params; + + ov::ParameterVector params{std::make_shared(model_type, inputDynamicShapes.front())}; + params[0]->set_friendly_name("a_data_batch"); + + // TODO: refactor build BinaryConvolution op to accept filters input as Parameter + auto bin_conv = + ngraph::builder::makeBinaryConvolution(params[0], kernel_size, strides, pads_begin, pads_end, dilations, pad_type, num_out_channels, pad_value); + auto result = std::make_shared(bin_conv); + function = std::make_shared(ov::OutputVector{result}, params, "BinaryConvolution"); +} + +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/src/single_op/bucketize.cpp b/src/tests/functional/shared_test_classes/src/single_op/bucketize.cpp new file mode 100644 index 00000000000000..bea6eaf18bfb22 --- /dev/null +++ b/src/tests/functional/shared_test_classes/src/single_op/bucketize.cpp @@ -0,0 +1,61 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "shared_test_classes/single_op/bucketize.hpp" + +#include "common_test_utils/ov_tensor_utils.hpp" + +namespace ov { +namespace test { + +std::string BucketizeLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { + std::vector shapes; + bool with_right_bound; + ov::element::Type in_data_type, in_buckets_type, model_type; + std::string target_device; + + std::tie(shapes, with_right_bound, in_data_type, in_buckets_type, model_type, target_device) = obj.param; + + std::ostringstream result; + result << "IS=("; + for (size_t i = 0lu; i < shapes.size(); i++) { + result << ov::test::utils::partialShape2str({shapes[i].first}) << (i < shapes.size() - 1lu ? "_" : ""); + } + result << ")_TS="; + for (size_t i = 0lu; i < shapes.front().second.size(); i++) { + result << "{"; + for (size_t j = 0lu; j < shapes.size(); j++) { + result << ov::test::utils::vec2str(shapes[j].second[i]) << (j < shapes.size() - 1lu ? 
"_" : ""); + } + result << "}_"; + } + if (with_right_bound) + result << "rightIntervalEdge_"; + else + result << "leftIntervalEdge_"; + result << "in_data_type=" << in_data_type.get_type_name() << "_"; + result << "in_buckets_type=" << in_buckets_type.get_type_name() << "_"; + result << "model_type=" << model_type.get_type_name() << "_"; + result << "target_device=" << target_device; + return result.str(); +} + +void BucketizeLayerTest::SetUp() { + std::vector shapes; + bool with_right_bound; + ov::element::Type in_data_type, in_buckets_type, model_type; + + std::tie(shapes, with_right_bound, in_data_type, in_buckets_type, model_type, targetDevice) = this->GetParam(); + init_input_shapes(shapes); + + auto data = std::make_shared(in_data_type, inputDynamicShapes[0]); + data->set_friendly_name("a_data"); + auto buckets = std::make_shared(in_buckets_type, inputDynamicShapes[1]); + buckets->set_friendly_name("b_buckets"); + auto bucketize = std::make_shared(data, buckets, model_type, with_right_bound); + auto result = std::make_shared(bucketize); + function = std::make_shared(result, ov::ParameterVector{data, buckets}, "Bucketize"); +} +} // namespace test +} // namespace ov From fca34c6f9fcc1640d1d5a96e7f07e398960251b8 Mon Sep 17 00:00:00 2001 From: Andrey Kashchikhin Date: Tue, 10 Oct 2023 10:35:35 +0100 Subject: [PATCH 125/257] [CI] [GHA] Refactor Win pipeline: split the `Build` job, remove unnecessary requirements installation (#20320) * split build job; rm unnecessary requirements installation * add missing OV Contrib build * align requirements install * rm incorrectly place vsvars, apply comments: add missing tests * rm build and source options in the cmake OV Contrib command * add missing build directory * add logs * Update windows.yml Fixed rebuild from scratch with OpenVINO Contrib * verbose archive extraction * Fixed path for extraction of openvino_tests package * Use windows-latest-8-cores for samples * Update windows.yml * Update .github/workflows/windows.yml * use filename for wheel; build ov contrib with ov --------- Co-authored-by: Ilya Lavrenov --- .github/workflows/windows.yml | 415 ++++++++++++++++------------------ 1 file changed, 196 insertions(+), 219 deletions(-) diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 2dd4b218a1e861..1e39f82e7b887a 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -1,4 +1,4 @@ -name: Tests on Windows (VS 2022, Python 3.11) +name: Windows (VS 2022, Python 3.11) on: workflow_dispatch: pull_request: @@ -26,20 +26,7 @@ concurrency: cancel-in-progress: true env: - CMAKE_BUILD_TYPE: 'Release' - CMAKE_GENERATOR: 'Ninja' - CMAKE_CXX_COMPILER_LAUNCHER: sccache - CMAKE_C_COMPILER_LAUNCHER: sccache - OPENVINO_REPO: "${{ github.workspace }}\\openvino" - OPENVINO_CONTRIB_REPO: "${{ github.workspace }}\\openvino_contrib" - INSTALL_DIR: "${{ github.workspace }}\\install" - INSTALL_TEST_DIR: "${{ github.workspace }}\\install\\tests" - SAMPLES_INSTALL_DIR: "${{ github.workspace }}\\install\\samples" - LAYER_TESTS_INSTALL_DIR: "${{ github.workspace }}\\install\\tests\\layer_tests" - BUILD_DIR: "${{ github.workspace }}\\build" - OV_TEMP: "${{ github.workspace }}\\openvino_temp" - PYTHON_STATIC_ARGS: -m "not dynamic_library" - VCVARSPATH: "C:\\Program Files\\Microsoft Visual Studio\\2022\\Enterprise\\VC\\Auxiliary\\Build\\vcvarsall.bat" + PYTHON_VERSION: '3.11' jobs: Build: @@ -47,6 +34,16 @@ jobs: run: shell: pwsh runs-on: windows-latest-8-cores + env: + CMAKE_BUILD_TYPE: 'Release' + CMAKE_GENERATOR: 
'Ninja Multi-Config' + CMAKE_CXX_COMPILER_LAUNCHER: sccache + CMAKE_C_COMPILER_LAUNCHER: sccache + OPENVINO_REPO: "${{ github.workspace }}\\openvino" + OPENVINO_CONTRIB_REPO: "${{ github.workspace }}\\openvino_contrib" + INSTALL_DIR: "${{ github.workspace }}\\openvino_install" + INSTALL_TEST_DIR: "${{ github.workspace }}\\tests_install" + BUILD_DIR: "${{ github.workspace }}\\openvino_build" steps: - name: Clone OpenVINO uses: actions/checkout@v4 @@ -66,56 +63,35 @@ jobs: - uses: actions/setup-python@v4 with: - python-version: '3.11' + python-version: ${{ env.PYTHON_VERSION }} - name: Install python dependencies run: | - # For Python API - python3 -m pip install Scons + # For Python API: build and wheel packaging python3 -m pip install -r ${{ env.OPENVINO_REPO }}/src/bindings/python/wheel/requirements-dev.txt - python3 -m pip install -r ${{ env.OPENVINO_REPO }}/src/bindings/python/requirements.txt - python3 -m pip install -r ${{ env.OPENVINO_REPO }}/src/bindings/python/requirements_test.txt - - # For running Python API tests python3 -m pip install -r ${{ env.OPENVINO_REPO }}/src/bindings/python/src/compatibility/openvino/requirements-dev.txt # For running ONNX frontend unit tests - python3 -m pip install -r ${{ env.OPENVINO_REPO }}/src/frontends/onnx/tests/requirements.txt + python3 -m pip install --force-reinstall -r ${{ env.OPENVINO_REPO }}/src/frontends/onnx/tests/requirements.txt # For running TensorFlow frontend unit tests python3 -m pip install -r ${{ env.OPENVINO_REPO }}/src/frontends/tensorflow/tests/requirements.txt - # For running Paddle frontend unit tests - python3 -m pip install -r ${{ env.OPENVINO_REPO }}/src/frontends/paddle/tests/requirements.txt + # For running TensorFlow Lite frontend unit tests + python3 -m pip install -r ${{ env.OPENVINO_REPO }}/src/frontends/tensorflow_lite/tests/requirements.txt - - name: Install MO dependencies - run: | - python3 -m pip install -r ${{ env.OPENVINO_REPO }}/tools/mo/requirements_mxnet.txt ` - -r ${{ env.OPENVINO_REPO }}/tools/mo/requirements_caffe.txt ` - -r ${{ env.OPENVINO_REPO }}/tools/mo/requirements_kaldi.txt ` - -r ${{ env.OPENVINO_REPO }}/tools/mo/requirements_onnx.txt ` - -r ${{ env.OPENVINO_REPO }}/tools/mo/requirements_tf2.txt ` - -r ${{ env.OPENVINO_REPO }}/tools/mo/requirements_dev.txt + # For running Paddle frontend unit tests + # python3 -m pip install -r ${{ env.OPENVINO_REPO }}/src/frontends/paddle/tests/requirements.txt - name: Install build dependencies - run: | - choco install --no-progress ninja - choco install --no-progress shellcheck - - - name: Get tools versions - run: | - python3 --version - cmake --version + run: choco install --no-progress ninja # # Build # - - name: Get number of CPU cores - uses: SimenB/github-actions-cpu-cores@v2 - id: cpu-cores - - - uses: ilammy/msvc-dev-cmd@v1 + - name: Configure Developer Command Prompt for Microsoft Visual C++ + uses: ilammy/msvc-dev-cmd@v1 - name: Setup sccache uses: hendrikmuhs/ccache-action@v1.2 @@ -132,55 +108,33 @@ jobs: - name: CMake configure run: | - & {{ env.VCVARSPATH }} x64 && cmake -G "Ninja Multi-Config" ` + cmake -G "${{ env.CMAKE_GENERATOR }}" ` -DENABLE_CPPLINT=OFF ` - -DENABLE_ONEDNN_FOR_GPU=OFF ` + -DCMAKE_CXX_STANDARD=14 ` + -DBUILD_nvidia_plugin=OFF ` -DBUILD_SHARED_LIBS=OFF ` -DENABLE_TESTS=ON ` -DCMAKE_COMPILE_WARNING_AS_ERROR=OFF ` -DENABLE_STRICT_DEPENDENCIES=OFF ` -DENABLE_PYTHON=ON ` - -DBUILD_nvidia_plugin=OFF ` -DCMAKE_DISABLE_FIND_PACKAGE_PkgConfig=ON ` 
-DCUSTOM_OPERATIONS="calculate_grid;complex_mul;fft;grid_sample;sparse_conv;sparse_conv_transpose" ` - -DOPENVINO_EXTRA_MODULES=${{ env.OPENVINO_CONTRIB_REPO }}\modules ` - -DCMAKE_BUILD_TYPE=Release ` + -DOPENVINO_EXTRA_MODULES=${{ env.OPENVINO_CONTRIB_REPO }}/modules ` -S ${{ env.OPENVINO_REPO }} ` -B ${{ env.BUILD_DIR }} - - name: Build - run: | - & {{ env.VCVARSPATH }} x64 && cmake --build ${{ env.BUILD_DIR }} --parallel ${{ steps.cpu-cores.outputs.count }} --config Release - - - name: Install - run: cmake -DCMAKE_INSTALL_PREFIX=${{ env.INSTALL_DIR }} -P ${{ env.BUILD_DIR }}/cmake_install.cmake - - - name: Install Wheels - run: python3 -m pip install openvino-dev --find-links=${{ env.INSTALL_DIR }}\tools + - name: Cmake build - OpenVINO + run: cmake --build ${{ env.BUILD_DIR }} --parallel --config ${{ env.CMAKE_BUILD_TYPE }} --verbose - - name: CMake Samples Tests + - name: Cmake install - OpenVINO run: | - & {{ env.VCVARSPATH }} x64 && cmake -S ${{ env.OPENVINO_REPO }}/tests/samples_tests -B ${{ env.BUILD_DIR }}/samples_tests - - - name: Install Samples Tests - run: cmake -DCOMPONENT=tests -DCMAKE_INSTALL_PREFIX=${{ env.INSTALL_DIR }} -P ${{ env.BUILD_DIR }}/samples_tests/cmake_install.cmake - - - name: Install Tests - run: cmake -DCMAKE_INSTALL_PREFIX=${{ env.INSTALL_DIR }} -DCOMPONENT=tests -P ${{ env.BUILD_DIR }}\cmake_install.cmake - - - name: Cmake Layer Tests - run: | - & {{ env.VCVARSPATH }} x64 && cmake -S ${{ env.OPENVINO_REPO }}/tests/layer_tests -B ${{ env.BUILD_DIR }}/layer_tests - - - name: Build Layer Tests - run: cmake --build ${{ env.BUILD_DIR }}/layer_tests --parallel --config Release - - - name: Install Layer Tests - run: cmake -DCOMPONENT=tests -DCMAKE_INSTALL_PREFIX=${{ env.INSTALL_DIR }} -P ${{ env.BUILD_DIR }}/layer_tests/cmake_install.cmake + cmake -DCMAKE_INSTALL_PREFIX=${{ env.INSTALL_DIR }} -P ${{ env.BUILD_DIR }}/cmake_install.cmake + cmake -DCMAKE_INSTALL_PREFIX=${{ env.INSTALL_TEST_DIR }} -DCOMPONENT=tests -P ${{ env.BUILD_DIR }}/cmake_install.cmake + cmake -DCMAKE_INSTALL_PREFIX=${{ env.INSTALL_DIR }} -DCOMPONENT=python_wheels -P ${{ env.BUILD_DIR }}/cmake_install.cmake - name: Pack Artifacts run: | - $file=Get-ChildItem -Path "${{ env.INSTALL_DIR }}" -Exclude "tests" + $file=Get-ChildItem -Path "${{ env.INSTALL_DIR }}" $compress = @{ Path = $file CompressionLevel = "Optimal" @@ -188,7 +142,7 @@ jobs: } Compress-Archive @compress - $file=Get-ChildItem -Path "${{ env.INSTALL_DIR }}\tests" + $file=Get-ChildItem -Path "${{ env.INSTALL_TEST_DIR }}" $compress = @{ Path = $file CompressionLevel = "Optimal" @@ -196,15 +150,78 @@ jobs: } Compress-Archive @compress + - name: Cmake & Build - OpenVINO Contrib + if: ${{ 'false' }} # Ticket: 122441 + run: | + cmake ` + -DBUILD_nvidia_plugin=OFF ` + -DCUSTOM_OPERATIONS="calculate_grid;complex_mul;fft;grid_sample;sparse_conv;sparse_conv_transpose" ` + -DOPENVINO_EXTRA_MODULES=${{ env.OPENVINO_CONTRIB_REPO }}/modules ` + -S ${{ env.OPENVINO_REPO }} ` + -B ${{ env.BUILD_DIR }} + cmake --build ${{ env.BUILD_DIR }} --parallel --config ${{ env.CMAKE_BUILD_TYPE }} --verbose + + - name: Upload openvino package + uses: actions/upload-artifact@v3 + with: + name: openvino_package + path: ${{ env.BUILD_DIR }}/openvino_package.zip + if-no-files-found: 'error' + + - name: Upload openvino tests package + uses: actions/upload-artifact@v3 + with: + name: openvino_tests + path: ${{ env.BUILD_DIR }}/openvino_tests.zip + if-no-files-found: 'error' + + Samples: + needs: Build + defaults: + run: + shell: pwsh + runs-on: 
windows-latest-8-cores + env: + INSTALL_DIR: "${{ github.workspace }}\\install" + INSTALL_TEST_DIR: "${{ github.workspace }}\\install\\tests" + SAMPLES_INSTALL_DIR: "${{ github.workspace }}\\install\\samples" + BUILD_DIR: "${{ github.workspace }}\\build" + + steps: + - name: Download OpenVINO package + uses: actions/download-artifact@v3 + with: + name: openvino_package + path: ${{ env.INSTALL_DIR }} + + - name: Download OpenVINO tests package + uses: actions/download-artifact@v3 + with: + name: openvino_tests + path: ${{ env.INSTALL_TEST_DIR }} + + - name: Extract OpenVINO packages + run: | + pushd ${{ env.INSTALL_DIR }} + Expand-Archive openvino_package.zip -DestinationPath "${{ env.INSTALL_DIR }}" + popd + pushd ${{ env.INSTALL_TEST_DIR }} + Expand-Archive openvino_tests.zip -DestinationPath "${{ env.INSTALL_DIR }}" + popd + + - uses: actions/setup-python@v4 + with: + python-version: ${{ env.PYTHON_VERSION }} + - name: Build cpp samples run: | - & {{ env.VCVARSPATH }} x64 - & ${{ env.SAMPLES_INSTALL_DIR }}/cpp/build_samples_msvc.bat -i ${{ env.INSTALL_DIR }} + & ${{ env.SAMPLES_INSTALL_DIR }}/cpp/build_samples_msvc.bat -i ${{ env.INSTALL_DIR }} -b ${{ env.BUILD_DIR }}/cpp_samples + env: + CMAKE_COMPILE_WARNING_AS_ERROR: 'ON' - name: Build c samples run: | - & {{ env.VCVARSPATH }} x64 - & ${{ env.SAMPLES_INSTALL_DIR }}/c/build_samples_msvc.bat -i ${{ env.INSTALL_DIR }} + & ${{ env.SAMPLES_INSTALL_DIR }}/c/build_samples_msvc.bat -i ${{ env.INSTALL_DIR }} -b ${{ env.BUILD_DIR }}/c_samples - name: Samples tests shell: cmd @@ -217,31 +234,16 @@ jobs: SHARE: ${{ env.INSTALL_TEST_DIR }}/smoke_tests/samples_smoke_tests_data WORKSPACE: ${{ env.INSTALL_DIR }} - # Present in the "Build" job due to the fact that these tests require build directory - - name: ONNX frontend tests - shell: cmd - run: | - call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_onnx_frontend_tests --gtest_print_time=1 --gtest_filter=-*IE_GPU* --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ONNXFrontend.xml - - - name: List installed files - run: | - Get-ChildItem -Recurse -Directory ${{ env.INSTALL_DIR }} - - - name: Upload openvino package - uses: actions/upload-artifact@v3 - with: - name: openvino_package - path: ${{ env.BUILD_DIR }}/openvino_package.zip - if-no-files-found: 'error' - - - name: Upload openvino tests package + - name: Upload Test Results uses: actions/upload-artifact@v3 + if: ${{ !cancelled() }} with: - name: openvino_tests - path: ${{ env.BUILD_DIR }}/openvino_tests.zip + name: test-results-samples + path: ${{ env.INSTALL_TEST_DIR }}/TEST*.xml if-no-files-found: 'error' Python_Unit_Tests: + name: Python unit tests needs: Build defaults: run: @@ -252,17 +254,10 @@ jobs: OPENVINO_CONTRIB_REPO: "${{ github.workspace }}\\openvino_contrib" INSTALL_DIR: "${{ github.workspace }}\\install" INSTALL_TEST_DIR: "${{ github.workspace }}\\install\\tests" - SAMPLES_INSTALL_DIR: "${{ github.workspace }}\\install\\samples" LAYER_TESTS_INSTALL_DIR: "${{ github.workspace }}\\install\\tests\\layer_tests" - BUILD_DIR: "${{ github.workspace }}\\build" PYTHON_STATIC_ARGS: -m "not dynamic_library and not template_plugin" steps: - - name: Create Directories - run: | - mkdir ${{ env.INSTALL_DIR }} - mkdir ${{ env.INSTALL_TEST_DIR }} - - name: Download OpenVINO package uses: actions/download-artifact@v3 with: @@ -281,15 +276,9 @@ jobs: Expand-Archive openvino_package.zip -DestinationPath "${{ env.INSTALL_DIR }}" popd pushd ${{ env.INSTALL_TEST_DIR }} - Expand-Archive openvino_tests.zip -DestinationPath 
"${{ env.INSTALL_TEST_DIR }}" + Expand-Archive openvino_tests.zip -DestinationPath "${{ env.INSTALL_DIR }}" popd - - name: Check extraction - run: | - ls "${{ github.workspace }}" - ls "${{ env.INSTALL_DIR }}" - ls "${{ env.INSTALL_TEST_DIR }}" - - name: Clone OpenVINO uses: actions/checkout@v4 with: @@ -297,40 +286,25 @@ jobs: - uses: actions/setup-python@v4 with: - python-version: '3.11' + python-version: ${{ env.PYTHON_VERSION }} - - name: Install python dependencies + - name: Install Python API tests dependencies run: | - # For Python API - python3 -m pip install -r ${{ env.OPENVINO_REPO }}/src/bindings/python/wheel/requirements-dev.txt - python3 -m pip install -r ${{ env.OPENVINO_REPO }}/src/bindings/python/requirements.txt + # For torchvision to OpenVINO preprocessing converter + python3 -m pip install -r ${{ env.OPENVINO_REPO }}/src/bindings/python/src/openvino/preprocess/torchvision/requirements.txt - # For running Python API tests - python3 -m pip install -r ${{ env.OPENVINO_REPO }}/src/bindings/python/src/compatibility/openvino/requirements-dev.txt + # TODO: replace with Python API tests requirements + python3 -m pip install -r ${{ env.OPENVINO_REPO }}/tools/mo/requirements_dev.txt - # For running ONNX frontend unit tests - python3 -m pip install --force-reinstall -r ${{ env.OPENVINO_REPO }}/src/frontends/onnx/tests/requirements.txt - - # For running TensorFlow frontend unit tests - python3 -m pip install -r ${{ env.OPENVINO_REPO }}/src/frontends/tensorflow/tests/requirements.txt - - # For running Paddle frontend unit tests - python3 -m pip install -r ${{ env.OPENVINO_REPO }}/src/frontends/paddle/tests/requirements.txt - - python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt - - - name: Install MO dependencies - run: | - python3 -m pip install -r ${{ env.OPENVINO_REPO }}/tools/mo/requirements_mxnet.txt ` - -r ${{ env.OPENVINO_REPO }}/tools/mo/requirements_caffe.txt ` - -r ${{ env.OPENVINO_REPO }}/tools/mo/requirements_kaldi.txt ` - -r ${{ env.OPENVINO_REPO }}/tools/mo/requirements_onnx.txt ` - -r ${{ env.OPENVINO_REPO }}/tools/mo/requirements_tf2.txt ` - -r ${{ env.OPENVINO_REPO }}/tools/mo/requirements_dev.txt - - - name: Install Python wheels + - name: Install OpenVINO Python wheels run: | - python3 -m pip install openvino-dev --force-reinstall --find-links=${{ env.INSTALL_DIR }}\tools + # Find and install the core OV wheel + $ovCoreWheelPath=Get-ChildItem -Path "${{ env.INSTALL_DIR }}\tools" -Filter openvino-*.whl | % { $_.FullName } + python3 -m pip install "$ovCoreWheelPath" + + # Find and install the dev OV wheel + $ovDevWheelPath=Get-ChildItem -Path "${{ env.INSTALL_DIR }}\tools" -Filter openvino_dev*.whl | % { $_.FullName } + python3 -m pip install "$ovDevWheelPath[mxnet,caffe,kaldi,onnx,tensorflow2]" - name: Python API 1.0 Tests shell: cmd @@ -347,37 +321,22 @@ jobs: - name: Model Optimizer UT shell: cmd run: | + set PYTHONPATH=${{ env.OPENVINO_REPO }}\tools\mo;${{ env.LAYER_TESTS_INSTALL_DIR }};${{ env.INSTALL_TEST_DIR }};${{ env.INSTALL_DIR }}\python\python${{ env.PYTHON_VERSION }};%PYTHONPATH% - python3 -m pip install -r ${{ env.OPENVINO_REPO }}/tools/mo/requirements_mxnet.txt ^ - -r ${{ env.OPENVINO_REPO }}/tools/mo/requirements_caffe.txt ^ - -r ${{ env.OPENVINO_REPO }}/tools/mo/requirements_kaldi.txt ^ - -r ${{ env.OPENVINO_REPO }}/tools/mo/requirements_onnx.txt ^ - -r ${{ env.OPENVINO_REPO }}/tools/mo/requirements_tf2.txt ^ - -r ${{ env.OPENVINO_REPO }}/tools/mo/requirements_dev.txt + call "${{ env.INSTALL_DIR }}\\setupvars.bat" && python3 
-m pytest -s ${{ env.INSTALL_TEST_DIR }}/mo/unit_tests --ignore=${{ env.INSTALL_TEST_DIR }}/mo/unit_tests/mo/front/mxnet --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-ModelOptimizer.xml - set PYTHONPATH=${{ env.OPENVINO_REPO }}\tools\mo;${{ env.LAYER_TESTS_INSTALL_DIR }};${{ env.INSTALL_TEST_DIR }};${{ env.INSTALL_DIR }}\python\python3.11;%PYTHONPATH% + # Ticket - 115085 + - name: PyTorch Layer Tests + if: ${{ 'false' }} + shell: cmd + run: | + python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt - call "${{ env.INSTALL_DIR }}\\setupvars.bat" && python3 -m pytest -s ${{ env.INSTALL_TEST_DIR }}/mo/unit_tests --ignore=${{ env.INSTALL_TEST_DIR }}/mo/unit_tests/mo/front/mxnet --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-ModelOptimizer.xml + set PYTHONPATH=${{ env.OPENVINO_REPO }}\tools\mo;${{ env.LAYER_TESTS_INSTALL_DIR }};%PYTHONPATH% -# Ticket - 115085 -# - name: PyTorch Layer Tests -# shell: cmd -# run: | -# -# python3 -m pip install -r ${{ env.OPENVINO_REPO }}/tools/mo/requirements_mxnet.txt ^ -# -r ${{ env.OPENVINO_REPO }}/tools/mo/requirements_caffe.txt ^ -# -r ${{ env.OPENVINO_REPO }}/tools/mo/requirements_kaldi.txt ^ -# -r ${{ env.OPENVINO_REPO }}/tools/mo/requirements_onnx.txt ^ -# -r ${{ env.OPENVINO_REPO }}/tools/mo/requirements_tf2.txt ^ -# -r ${{ env.OPENVINO_REPO }}/tools/mo/requirements_dev.txt -# -# python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt -# -# set PYTHONPATH=${{ env.OPENVINO_REPO }}\tools\mo;${{ env.LAYER_TESTS_INSTALL_DIR }};%PYTHONPATH% -# -# call "${{ env.INSTALL_DIR }}\\setupvars.bat" && python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/pytorch_tests -m precommit --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-pytorch.xml -# env: -# TEST_DEVICE: CPU + call "${{ env.INSTALL_DIR }}\\setupvars.bat" && python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/pytorch_tests -m precommit --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-pytorch.xml + env: + TEST_DEVICE: CPU - name: TensorFlow 1 Layer Tests - TF FE shell: cmd @@ -426,18 +385,25 @@ jobs: run: | python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt - python3 -m pip install -r ${{ env.OPENVINO_REPO }}/tools/mo/requirements_mxnet.txt ^ - -r ${{ env.OPENVINO_REPO }}/tools/mo/requirements_caffe.txt ^ - -r ${{ env.OPENVINO_REPO }}/tools/mo/requirements_kaldi.txt ^ - -r ${{ env.OPENVINO_REPO }}/tools/mo/requirements_onnx.txt ^ - -r ${{ env.OPENVINO_REPO }}/tools/mo/requirements_tf2.txt ^ - -r ${{ env.OPENVINO_REPO }}/tools/mo/requirements_dev.txt - set PYTHONPATH=${{ env.OPENVINO_REPO }}\tools\mo;${{ env.LAYER_TESTS_INSTALL_DIR }};%PYTHONPATH% call "${{ env.INSTALL_DIR }}\\setupvars.bat" && python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/tensorflow_lite_tests/ --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-tfl_fe.xml env: TEST_DEVICE: CPU + TEST_PRECISION: FP16 + + - name: TensorFlow Lite Layer Tests - TFL FE + shell: cmd + run: | + :: Skip test_onnx/test_zoo_models and test_onnx/test_backend due to long execution time - ONNX Model Zoo tests are run separately + python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt + + set PYTHONPATH=${{ env.OPENVINO_REPO }}\tools\mo;${{ env.LAYER_TESTS_INSTALL_DIR }};%PYTHONPATH% + + python3 -m pytest ${{ env.OPENVINO_REPO }}/src/frontends/onnx/tests -k "not cuda" ^ + --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-onnx_frontend.xml ^ + --ignore=${{ env.OPENVINO_REPO }}/src/frontends/onnx/tests/test_python/test_zoo_models.py ^ + --ignore=${{ env.OPENVINO_REPO 
}}/src/frontends/onnx/tests/test_python/test_backend.py - name: MO Python API Tests shell: cmd @@ -449,6 +415,19 @@ jobs: call "${{ env.INSTALL_DIR }}\\setupvars.bat" && python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/mo_python_api_tests --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-test_mo_convert.xml env: TEST_DEVICE: CPU + TEST_PRECISION: FP16 + + - name: OVC Python API Tests + shell: cmd + run: | + python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt + + set PYTHONPATH=${{ env.OPENVINO_REPO }}\tools\mo;${{ env.LAYER_TESTS_INSTALL_DIR }};%PYTHONPATH% + + call "${{ env.INSTALL_DIR }}\\setupvars.bat" && python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/ovc_python_api_tests --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-test_ovc_convert.xml + env: + TEST_DEVICE: CPU + TEST_PRECISION: FP16 - name: Python Frontend tests shell: cmd @@ -468,6 +447,7 @@ jobs: if-no-files-found: 'error' CXX_Unit_Tests: + name: C++ unit tests needs: Build defaults: run: @@ -478,11 +458,6 @@ jobs: INSTALL_TEST_DIR: "${{ github.workspace }}\\install\\tests" steps: - - name: Create Directories - run: | - mkdir ${{ env.INSTALL_DIR }} - mkdir ${{ env.INSTALL_TEST_DIR }} - - name: Download OpenVINO package uses: actions/download-artifact@v3 with: @@ -501,15 +476,9 @@ jobs: Expand-Archive openvino_package.zip -DestinationPath "${{ env.INSTALL_DIR }}" popd pushd ${{ env.INSTALL_TEST_DIR }} - Expand-Archive openvino_tests.zip -DestinationPath "${{ env.INSTALL_TEST_DIR }}" + Expand-Archive openvino_tests.zip -DestinationPath "${{ env.INSTALL_DIR }}" popd - - name: Check extraction - run: | - ls "${{ github.workspace }}" - ls "${{ env.INSTALL_DIR }}" - ls "${{ env.INSTALL_TEST_DIR }}" - - name: OpenVINO Core unit tests shell: cmd run: | @@ -540,15 +509,16 @@ jobs: run: | call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_ir_frontend_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-IRFrontend.xml - # - name: PaddlePaddle frontend tests # Disabled in Azure: https://github.com/openvinotoolkit/openvino/blob/master/.ci/azure/linux.yml#L403 - # shell: cmd - # run: | - # call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/paddle_tests --gtest_print_time=1 --gtest_filter=*smoke* --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-PaddleTests.xml + - name: PaddlePaddle frontend tests # Disabled in Azure: https://github.com/openvinotoolkit/openvino/blob/master/.ci/azure/linux.yml#L403 + if: ${{ 'false' }} + shell: cmd + run: | + call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/paddle_tests --gtest_print_time=1 --gtest_filter=*smoke* --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-PaddleTests.xml - # - name: ONNX frontend tests # Present in the "Build" job due to the fact that these tests require build directory - # shell: cmd - # run: | - # call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_onnx_frontend_tests --gtest_print_time=1 --gtest_filter=-*IE_GPU* --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ONNXFrontend.xml + - name: ONNX frontend tests + shell: cmd + run: | + call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_onnx_frontend_tests --gtest_print_time=1 --gtest_filter=-*IE_GPU* --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ONNXFrontend.xml - name: TensorFlow Common tests shell: cmd @@ -570,6 +540,16 @@ jobs: run: | call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_transformations_tests 
--gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-Transformations.xml + - name: Legacy Transformations func tests + shell: cmd + run: | + call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_legacy_transformations_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-LegacyTransformations.xml + + - name: Inference Engine 1.0 unit tests + shell: cmd + run: | + call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/InferenceEngineUnitTests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-InferenceEngineUnitTests.xml + - name: Common test utils tests shell: cmd run: | @@ -585,10 +565,10 @@ jobs: run: | call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_cpu_unit_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-CPUUnitTests.xml - # - name: GNA plugin unit tests # Disabled in Azure: https://github.com/openvinotoolkit/openvino/blob/master/.ci/azure/linux.yml#L434 - # shell: cmd - # run: | - # call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_gna_unit_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-GNAUnitTests.xml + - name: GNA plugin unit tests + shell: cmd + run: | + call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_gna_unit_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-GNAUnitTests.xml - name: AUTO UT shell: cmd @@ -615,6 +595,11 @@ jobs: run: | call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_capi_test --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-OpenVINOCAPITests.xml + - name: AutoBatch unit tests + shell: cmd + run: | + call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_auto_batch_unit_tests --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ov_auto_batch_unit_tests.xml + - name: AutoBatch FuncTests shell: cmd run: | @@ -644,6 +629,7 @@ jobs: if-no-files-found: 'error' CPU_Functional_Tests: + name: CPU functional tests needs: Build defaults: run: @@ -656,11 +642,6 @@ jobs: PARALLEL_TEST_CACHE: "${{ github.workspace }}\\install\\tests\\test_cache.lst" steps: - - name: Create Directories - run: | - mkdir ${{ env.INSTALL_DIR }} - mkdir ${{ env.INSTALL_TEST_DIR }} - - name: Download OpenVINO package uses: actions/download-artifact@v3 with: @@ -679,20 +660,16 @@ jobs: Expand-Archive openvino_package.zip -DestinationPath "${{ env.INSTALL_DIR }}" popd pushd ${{ env.INSTALL_TEST_DIR }} - Expand-Archive openvino_tests.zip -DestinationPath "${{ env.INSTALL_TEST_DIR }}" + Expand-Archive openvino_tests.zip -DestinationPath "${{ env.INSTALL_DIR }}" popd - - name: Check extraction - run: | - ls "${{ github.workspace }}" - ls "${{ env.INSTALL_DIR }}" - ls "${{ env.INSTALL_TEST_DIR }}" + - uses: actions/setup-python@v4 + with: + python-version: ${{ env.PYTHON_VERSION }} - name: Install python dependencies shell: cmd - run: | - python3 -m pip install --upgrade pip - python3 -m pip install -r ${{ github.workspace }}\install\tests\functional_test_utils\layer_tests_summary\requirements.txt + run: python3 -m pip install -r ${{ github.workspace }}\install\tests\functional_test_utils\layer_tests_summary\requirements.txt - name: Restore tests execution time uses: actions/cache/restore@v3 From 1454e77bbf29fe9bf2eb5d41f441cae12b3a1b9b Mon Sep 17 00:00:00 2001 From: Andrey Kashchikhin Date: Tue, 10 Oct 2023 11:43:09 +0100 Subject: [PATCH 
126/257] [CI] [GHA] Introduce GHA macOS Pipeline (#20212) * start transferring * start with samples * start with initial two stages * change name * skip pytorch tests; rm unused comments * rm setupvars sourcing; make test steps similar to those in linux pipeline * add missing options and setupvars sourcing * add skips for mac * install wheels directly * add deployment target * add skips for pytorch layer tests; experiment with samples * do not exclude files for archives; set rpath * apply comments; rm unnecessary stages * Update mac.yml fixed MO Python API tests * Update .github/workflows/mac.yml * Update openvino.cmake add LC_RPATH to libopenvino.dylib * Update src/cmake/openvino.cmake * Update CMakeLists.txt reverted changes in samples build * Update openvino.cmake removed rpath changes * add setupvars * disable pr trigger --------- Co-authored-by: Ilya Lavrenov --- .github/workflows/linux.yml | 2 +- .github/workflows/mac.yml | 660 ++++++++++++++++++ .../test_onnx/test_backend.py | 5 + .../test_rfftn_complex_transforms.py | 7 +- .../pytorch_tests/test_upsample.py | 4 +- .../tensorflow_tests/test_tf_CTCLoss.py | 3 + .../test_tf_FusedBatchNorm.py | 3 + .../test_tf_TensorListLength.py | 4 + .../test_tf_TensorListResize.py | 3 + .../tensorflow_tests/test_tf_UnaryOps.py | 1 + .../tensorflow_tests/test_tf_While.py | 4 + .../moc_tf_fe/conversion_basic_models_test.py | 4 +- 12 files changed, 695 insertions(+), 5 deletions(-) create mode 100644 .github/workflows/mac.yml diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index f7d225d075833a..cc2b3eb9b9c227 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -369,7 +369,7 @@ jobs: uses: actions/upload-artifact@v3 if: ${{ !cancelled() }} with: - name: test-results-cpp + name: test-results-samples path: ${{ env.INSTALL_TEST_DIR }}/TEST*.xml if-no-files-found: 'error' diff --git a/.github/workflows/mac.yml b/.github/workflows/mac.yml new file mode 100644 index 00000000000000..6a8fc2f63c1c7f --- /dev/null +++ b/.github/workflows/mac.yml @@ -0,0 +1,660 @@ +name: macOS (macOS 12, Python 3.11) +on: + workflow_dispatch: +# pull_request: +# paths-ignore: +# - '**/docs/**' +# - 'docs/**' +# - '**/**.md' +# - '**.md' +# - '**/layer_tests_summary/**' +# - '**/conformance/**' + push: + paths-ignore: + - '**/docs/**' + - 'docs/**' + - '**/**.md' + - '**.md' + - '**/layer_tests_summary/**' + - '**/conformance/**' + branches: + - master + - 'releases/**' + +concurrency: + # github.ref is not unique in post-commit + group: ${{ github.event_name == 'push' && github.run_id || github.ref }}-mac-main + cancel-in-progress: true + +env: + PYTHON_VERSION: '3.11' + +jobs: + Build: + defaults: + run: + shell: bash + runs-on: macos-12-large + env: + CMAKE_BUILD_TYPE: 'Release' + CMAKE_GENERATOR: 'Ninja Multi-Config' + MACOSX_DEPLOYMENT_TARGET: '10.12' + CMAKE_CXX_COMPILER_LAUNCHER: ccache + CMAKE_C_COMPILER_LAUNCHER: ccache + OPENVINO_REPO: ${{ github.workspace }}/openvino + OPENVINO_CONTRIB_REPO: ${{ github.workspace }}/openvino_contrib + INSTALL_DIR: ${{ github.workspace }}/openvino_install + INSTALL_TEST_DIR: ${{ github.workspace }}/tests_install + BUILD_DIR: ${{ github.workspace }}/build + steps: + - name: Clone OpenVINO + uses: actions/checkout@v4 + with: + path: 'openvino' + submodules: 'true' + + - name: Clone OpenVINO Contrib + uses: actions/checkout@v4 + with: + repository: 'openvinotoolkit/openvino_contrib' + path: 'openvino_contrib' + + # + # Dependencies + # + + - name: Install build dependencies + run: brew install 
coreutils ninja scons + + - uses: actions/setup-python@v4 + with: + python-version: ${{ env.PYTHON_VERSION }} + + - name: Install python dependencies + run: | + # For Python API + python3 -m pip install -r ${{ env.OPENVINO_REPO }}/src/bindings/python/wheel/requirements-dev.txt + python3 -m pip install -r ${{ env.OPENVINO_REPO }}/src/bindings/python/requirements.txt + + # For running Python API tests + python3 -m pip install -r ${{ env.OPENVINO_REPO }}/src/bindings/python/src/compatibility/openvino/requirements-dev.txt + + # For running ONNX frontend unit tests + python3 -m pip install --force-reinstall -r ${{ env.OPENVINO_REPO }}/src/frontends/onnx/tests/requirements.txt + + # For running TensorFlow frontend unit tests + python3 -m pip install -r ${{ env.OPENVINO_REPO }}/src/frontends/tensorflow/tests/requirements.txt + + # For running Paddle frontend unit tests + python3 -m pip install -r ${{ env.OPENVINO_REPO }}/src/frontends/paddle/tests/requirements.txt + + # + # Build + # + + - name: Setup ccache + uses: hendrikmuhs/ccache-action@v1.2 + with: + max-size: "2000M" + # Should save cache only if run in the master branch of the base repo + # github.ref_name is 'ref/PR_#' in case of the PR, and 'branch_name' when executed on push + save: ${{ github.ref_name == 'master' && 'true' || 'false' }} + verbose: 2 + key: ${{ runner.os }}-main + restore-keys: | + ${{ runner.os }}-main + + - name: CMake configure + run: | + cmake \ + -G "${{ env.CMAKE_GENERATOR }}" \ + -DENABLE_CPPLINT=OFF \ + -DENABLE_NCC_STYLE=OFF \ + -DENABLE_TESTS=ON \ + -DCMAKE_COMPILE_WARNING_AS_ERROR=OFF \ + -DENABLE_STRICT_DEPENDENCIES=OFF \ + -DCMAKE_CXX_COMPILER_LAUNCHER=${{ env.CMAKE_CXX_COMPILER_LAUNCHER }} \ + -DCMAKE_C_COMPILER_LAUNCHER=${{ env.CMAKE_C_COMPILER_LAUNCHER }} \ + -S ${{ env.OPENVINO_REPO }} \ + -B ${{ env.BUILD_DIR }} + + - name: Cmake build - OpenVINO + run: cmake --build ${{ env.BUILD_DIR }} --parallel --config ${{ env.CMAKE_BUILD_TYPE }} + + - name: Show ccache stats + run: ccache --show-stats + + - name: Cmake install - OpenVINO + run: | + cmake -DCMAKE_INSTALL_PREFIX=${{ env.INSTALL_DIR }} -P ${{ env.BUILD_DIR }}/cmake_install.cmake + cmake -DCMAKE_INSTALL_PREFIX=${{ env.INSTALL_TEST_DIR }} -DCOMPONENT=tests -P ${{ env.BUILD_DIR }}/cmake_install.cmake + cmake -DCMAKE_INSTALL_PREFIX=${{ env.INSTALL_DIR }} -DCOMPONENT=python_wheels -P ${{ env.BUILD_DIR }}/cmake_install.cmake + + - name: Pack Artifacts + run: | + pushd ${{ env.INSTALL_DIR }} + tar -czvf ${{ env.BUILD_DIR }}/openvino_package.tar.gz * + popd + + pushd ${{ env.INSTALL_TEST_DIR }} + tar -czvf ${{ env.BUILD_DIR }}/openvino_tests.tar.gz * + popd + + - name: Cmake & Build - OpenVINO Contrib + run: | + cmake \ + -DBUILD_nvidia_plugin=OFF \ + -DCUSTOM_OPERATIONS="calculate_grid;complex_mul;fft;grid_sample;sparse_conv;sparse_conv_transpose" \ + -DOPENVINO_EXTRA_MODULES=${{ env.OPENVINO_CONTRIB_REPO }}/modules \ + -S ${{ env.OPENVINO_REPO }} \ + -B ${{ env.BUILD_DIR }} + cmake --build ${{ env.BUILD_DIR }} --parallel --config ${{ env.CMAKE_BUILD_TYPE }} + + # + # Upload build artifacts + # + + - name: Upload openvino package + if: ${{ always() }} + uses: actions/upload-artifact@v3 + with: + name: openvino_package + path: ${{ env.BUILD_DIR }}/openvino_package.tar.gz + if-no-files-found: 'error' + + - name: Upload openvino tests package + if: ${{ always() }} + uses: actions/upload-artifact@v3 + with: + name: openvino_tests + path: ${{ env.BUILD_DIR }}/openvino_tests.tar.gz + if-no-files-found: 'error' + + Samples: + needs: Build + defaults: + run: + 
shell: bash + runs-on: macos-12 + env: + INSTALL_DIR: ${{ github.workspace }}/install + INSTALL_TEST_DIR: ${{ github.workspace }}/install/tests + BUILD_DIR: ${{ github.workspace }}/build + + steps: + # + # Initialize OpenVINO + # + + - name: Download OpenVINO package + uses: actions/download-artifact@v3 + with: + name: openvino_package + path: ${{ env.INSTALL_DIR }} + + - name: Download OpenVINO tests package + uses: actions/download-artifact@v3 + with: + name: openvino_tests + path: ${{ env.INSTALL_TEST_DIR }} + + - name: Extract OpenVINO packages + run: | + pushd ${INSTALL_DIR} + tar -xzf openvino_package.tar.gz -C ${INSTALL_DIR} + popd + + pushd ${INSTALL_TEST_DIR} + tar -xzf openvino_tests.tar.gz -C ${INSTALL_DIR} + popd + + - name: Install dependencies + run: brew install coreutils + + - uses: actions/setup-python@v4 + with: + python-version: ${{ env.PYTHON_VERSION }} + + - name: Build cpp samples + run: ${INSTALL_DIR}/samples/cpp/build_samples.sh -i ${INSTALL_DIR} -b ${BUILD_DIR}/cpp_samples + env: + CMAKE_COMPILE_WARNING_AS_ERROR: 'ON' + + - name: Build c samples + run: ${INSTALL_DIR}/samples/c/build_samples.sh -i ${INSTALL_DIR} -b ${BUILD_DIR}/c_samples + env: + CMAKE_COMPILE_WARNING_AS_ERROR: 'ON' + + # + # Tests + # + + - name: Samples tests + run: | + export WORKSPACE=${INSTALL_DIR} + export IE_APP_PATH=${INSTALL_DIR}/samples_bin + export IE_APP_PYTHON_PATH=${INSTALL_DIR}/samples/python + export SHARE=${INSTALL_TEST_DIR}/smoke_tests/samples_smoke_tests_data + + python3 -m pip install --ignore-installed PyYAML -r ${INSTALL_TEST_DIR}/smoke_tests/requirements.txt + + source ${INSTALL_DIR}/setupvars.sh + + python3 -m pytest -sv ${INSTALL_TEST_DIR}/smoke_tests \ + --ignore=${INSTALL_TEST_DIR}/smoke_tests/test_speech_sample.py \ + --env_conf ${INSTALL_TEST_DIR}/smoke_tests/env_config.yml \ + --junitxml=${INSTALL_TEST_DIR}/TEST-SamplesSmokeTests.xml + + - name: Upload Test Results + uses: actions/upload-artifact@v3 + if: ${{ !cancelled() }} + with: + name: test-results-samples + path: ${{ env.INSTALL_TEST_DIR }}/TEST*.xml + if-no-files-found: 'error' + + CXX_Unit_Tests: + name: C++ Unit tests + needs: Build + defaults: + run: + shell: bash + runs-on: macos-12 + env: + INSTALL_DIR: ${{ github.workspace }}/install + INSTALL_TEST_DIR: ${{ github.workspace }}/install/tests + + steps: + # + # Dependencies + # + + - name: Download OpenVINO package + uses: actions/download-artifact@v3 + with: + name: openvino_package + path: ${{ env.INSTALL_DIR }} + + - name: Download OpenVINO tests package + uses: actions/download-artifact@v3 + with: + name: openvino_tests + path: ${{ env.INSTALL_TEST_DIR }} + + - name: Extract OpenVINO packages + run: | + pushd ${{ env.INSTALL_DIR }} + tar -xzf openvino_package.tar.gz -C ${{ env.INSTALL_DIR }} && rm openvino_package.tar.gz || exit 1 + popd + pushd ${{ env.INSTALL_TEST_DIR }} + tar -xzf openvino_tests.tar.gz -C ${{ env.INSTALL_DIR }} && rm openvino_tests.tar.gz || exit 1 + popd + + # + # Tests + # + + - name: OpenVINO Core Unit Tests + run: | + source ${{ env.INSTALL_DIR }}/setupvars.sh + ${{ env.INSTALL_TEST_DIR }}/ov_core_unit_tests --gtest_print_time=1 --gtest_filter=-*IE_GPU* \ + --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-NGraphUT.xml + + - name: OpenVINO Inference Functional Tests + run: | + source ${{ env.INSTALL_DIR }}/setupvars.sh + ${{ env.INSTALL_TEST_DIR }}/ov_inference_functional_tests --gtest_print_time=1 \ + --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-InferenceFunc.xml + + - name: OpenVINO Inference Unit Tests + run: | + source 
${{ env.INSTALL_DIR }}/setupvars.sh + ${{ env.INSTALL_TEST_DIR }}/ov_inference_unit_tests --gtest_print_time=1 \ + --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-InferenceUnit.xml + + - name: Low Precision Transformations Tests + run: | + source ${{ env.INSTALL_DIR }}/setupvars.sh + ${{ env.INSTALL_TEST_DIR }}/ov_lp_transformations_tests --gtest_print_time=1 \ + --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-LpTransformations.xml + + - name: OpenVINO Conditional compilation tests + run: | + source ${{ env.INSTALL_DIR }}/setupvars.sh + ${{ env.INSTALL_TEST_DIR }}/ov_conditional_compilation_tests --gtest_print_time=1 \ + --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ConditionalCompilation.xml + + - name: IR frontend tests + run: | + source ${{ env.INSTALL_DIR }}/setupvars.sh + ${{ env.INSTALL_TEST_DIR }}/ov_ir_frontend_tests --gtest_print_time=1 \ + --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-IRFrontend.xml + + - name: PaddlePaddle frontend tests + if: ${{ 'false' }} + run: | + source ${{ env.INSTALL_DIR }}/setupvars.sh + ${{ env.INSTALL_TEST_DIR }}/paddle_tests --gtest_print_time=1 --gtest_filter=*smoke* \ + --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-PaddleTests.xml + + - name: ONNX frontend tests + run: | + source ${{ env.INSTALL_DIR }}/setupvars.sh + ${{ env.INSTALL_TEST_DIR }}/ov_onnx_frontend_tests --gtest_print_time=1 --gtest_filter=-*IE_GPU* \ + --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ONNXFrontend.xml + + - name: TensorFlow Common tests + run: | + source ${{ env.INSTALL_DIR }}/setupvars.sh + ${{ env.INSTALL_TEST_DIR }}/ov_tensorflow_common_tests --gtest_print_time=1 \ + --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-TensorFlowCommonFrontend.xml + + - name: TensorFlow frontend tests + run: | + source ${{ env.INSTALL_DIR }}/setupvars.sh + ${{ env.INSTALL_TEST_DIR }}/ov_tensorflow_frontend_tests --gtest_print_time=1 \ + --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-TensorFlowFrontend.xml + + - name: TensorFlow Lite frontend tests + run: | + source ${{ env.INSTALL_DIR }}/setupvars.sh + ${{ env.INSTALL_TEST_DIR }}/ov_tensorflow_lite_frontend_tests --gtest_print_time=1 \ + --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-TensorFlowLiteFrontend.xml + + - name: Transformations Tests + run: | + source ${{ env.INSTALL_DIR }}/setupvars.sh + ${{ env.INSTALL_TEST_DIR }}/ov_transformations_tests --gtest_print_time=1 \ + --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-Transformations.xml + + - name: Common test utils tests + run: | + source ${{ env.INSTALL_DIR }}/setupvars.sh + ${{ env.INSTALL_TEST_DIR }}/ov_util_tests --gtest_print_time=1 \ + --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-commonUtilsTests.xml + + - name: CPU plugin unit tests + run: | + source ${{ env.INSTALL_DIR }}/setupvars.sh + ${{ env.INSTALL_TEST_DIR }}/ov_cpu_unit_tests --gtest_print_time=1 \ + --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-CPUUnitTests.xml + + - name: AUTO unit tests + run: | + source ${{ env.INSTALL_DIR }}/setupvars.sh + ${{ env.INSTALL_TEST_DIR }}/ov_auto_unit_tests --gtest_print_time=1 \ + --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ov_auto_unit_tests.xml + + - name: Template plugin tests + run: | + source ${{ env.INSTALL_DIR }}/setupvars.sh + ${{ env.INSTALL_TEST_DIR }}/ov_template_func_tests --gtest_print_time=1 \ + --gtest_filter=*smoke* \ + --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-TemplateFuncTests.xml + + - name: Inference Engine C API tests + if: ${{ always() }} + run: | + source ${{ env.INSTALL_DIR 
}}/setupvars.sh + ${{ env.INSTALL_TEST_DIR }}/InferenceEngineCAPITests --gtest_print_time=1 \ + --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-InferenceEngineCAPITests.xml + + - name: OpenVINO C API tests + run: | + source ${{ env.INSTALL_DIR }}/setupvars.sh + ${{ env.INSTALL_TEST_DIR }}/ov_capi_test --gtest_print_time=1 \ + --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-OpenVINOCAPITests.xml + + - name: AutoBatch func tests + run: | + source ${{ env.INSTALL_DIR }}/setupvars.sh + ${{ env.INSTALL_TEST_DIR }}/ov_auto_batch_func_tests --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ov_auto_batch_func_tests.xml + + - name: Upload Test Results + uses: actions/upload-artifact@v3 + if: ${{ always() }} + with: + name: test-results-cpp + path: ${{ env.INSTALL_TEST_DIR }}/TEST*.xml + if-no-files-found: 'error' + + Python_Unit_Tests: + name: Python unit tests + needs: Build + if: ${{ always() }} + defaults: + run: + shell: bash + runs-on: macos-12 + env: + OPENVINO_REPO: ${{ github.workspace }}/openvino + OPENVINO_CONTRIB_REPO: ${{ github.workspace }}/openvino_contrib + INSTALL_DIR: ${{ github.workspace }}/install + INSTALL_TEST_DIR: ${{ github.workspace }}/install/tests + LAYER_TESTS_INSTALL_DIR: ${{ github.workspace }}/install/tests/layer_tests + steps: + - name: Clone OpenVINO + uses: actions/checkout@v4 + with: + path: 'openvino' + submodules: 'true' + + # + # Dependencies + # + + - uses: actions/setup-python@v4 + with: + python-version: ${{ env.PYTHON_VERSION }} + + - name: Download OpenVINO package + uses: actions/download-artifact@v3 + with: + name: openvino_package + path: ${{ env.INSTALL_DIR }} + + - name: Download OpenVINO tests package + uses: actions/download-artifact@v3 + with: + name: openvino_tests + path: ${{ env.INSTALL_TEST_DIR }} + + - name: Extract OpenVINO packages + run: | + pushd ${{ env.INSTALL_DIR }} + tar -xzf openvino_package.tar.gz -C ${{ env.INSTALL_DIR }} + popd + + pushd ${{ env.INSTALL_TEST_DIR }} + tar -xzf openvino_tests.tar.gz -C ${{ env.INSTALL_DIR }} + popd + + - name: Install Python API tests dependencies + run: | + # For torchvision to OpenVINO preprocessing converter + python3 -m pip install -r ${{ env.OPENVINO_REPO }}/src/bindings/python/src/openvino/preprocess/torchvision/requirements.txt + + # TODO: replace with Python API tests requirements + python3 -m pip install -r ${{ env.OPENVINO_REPO }}/tools/mo/requirements_dev.txt + + - name: Install OpenVINO Python wheels + run: | + # Install the core OV wheel + python3 -m pip install ${{ env.INSTALL_DIR }}/tools/openvino-*.whl + + # Find and install OV dev wheel + pushd ${{ env.INSTALL_DIR }}/tools + ov_dev_wheel_name=$(find . 
-name 'openvino_dev*.whl') + python3 -m pip install $ov_dev_wheel_name[mxnet,caffe,kaldi,onnx,tensorflow2] + popd + + - name: nGraph and IE Python Bindings Tests + if: ${{ always() }} + run: | + python3 -m pytest -s ${{ env.INSTALL_TEST_DIR }}/pyngraph \ + --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-Pyngraph.xml \ + --ignore=${{ env.INSTALL_TEST_DIR }}/pyngraph/tests/test_onnx/test_zoo_models.py \ + --ignore=${{ env.INSTALL_TEST_DIR }}/pyngraph/tests/test_onnx/test_backend.py + + - name: Python API 2.0 Tests + if: ${{ always() }} + run: | + # For python imports to import pybind_mock_frontend + export PYTHONPATH=${{ env.INSTALL_TEST_DIR }}:$PYTHONPATH + # for 'template' extension + export DYLD_LIBRARY_PATH=${{ env.INSTALL_TEST_DIR }}:$DYLD_LIBRARY_PATH + + python3 -m pytest -sv ${{ env.INSTALL_TEST_DIR }}/pyopenvino \ + --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-Pyngraph.xml \ + --ignore=${{ env.INSTALL_TEST_DIR }}/pyopenvino/tests/test_utils/test_utils.py \ + --ignore=${{ env.INSTALL_TEST_DIR }}/pyopenvino/tests/test_onnx/test_zoo_models.py \ + --ignore=${{ env.INSTALL_TEST_DIR }}/pyopenvino/tests/test_onnx/test_backend.py + + - name: MO Python API Tests + if: ${{ always() }} + run: | + python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt + export PYTHONPATH=${{ env.LAYER_TESTS_INSTALL_DIR }}:$PYTHONPATH + # TODO: remove setupvars.sh from here; currently, it's used for 'test_utils' installed in '/python/openvino' + source ${INSTALL_DIR}/setupvars.sh + + python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/mo_python_api_tests/ --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-test_mo_convert.xml + env: + TEST_DEVICE: CPU + TEST_PRECISION: FP16 + + - name: Model Optimizer unit tests + if: ${{ always() }} + run: | + export PYTHONPATH=${{ env.INSTALL_TEST_DIR }}:$PYTHONPATH + python3 -m pytest -s ${{ env.INSTALL_TEST_DIR }}/mo/unit_tests \ + --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-ModelOptimizer.xml + + - name: PyTorch Layer Tests + if: ${{ always() }} + run: | + python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt + export PYTHONPATH=${{ env.LAYER_TESTS_INSTALL_DIR }}:$PYTHONPATH + + python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/pytorch_tests -m precommit --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-pytorch.xml + env: + TEST_DEVICE: CPU + TEST_PRECISION: FP16 + + - name: TensorFlow 1 Layer Tests - TF FE + if: ${{ always() }} + run: | + python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt + + export PYTHONPATH=${{ env.OPENVINO_REPO }}/tools/mo/:${{ env.LAYER_TESTS_INSTALL_DIR }}:$PYTHONPATH + + python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/tensorflow_tests/ --use_new_frontend -m precommit_tf_fe --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-tf_fe.xml + env: + TEST_DEVICE: CPU + + - name: TensorFlow 2 Layer Tests - TF FE + if: ${{ always() }} + run: | + python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt + export PYTHONPATH=${{ env.OPENVINO_REPO }}/tools/mo/:${{ env.LAYER_TESTS_INSTALL_DIR }}:$PYTHONPATH + + python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/tensorflow2_keras_tests/ --use_new_frontend -m precommit_tf_fe --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-tf2_fe.xml + env: + TEST_DEVICE: CPU + + - name: TensorFlow 1 Layer Tests - Legacy FE + if: ${{ always() }} + run: | + python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt + export PYTHONPATH=${{ env.OPENVINO_REPO }}/tools/mo/:${{ env.LAYER_TESTS_INSTALL_DIR }}:$PYTHONPATH + + python3 -m pytest ${{ 
env.LAYER_TESTS_INSTALL_DIR }}/tensorflow_tests/test_tf_Roll.py --ir_version=10 --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-tf_Roll.xml + + - name: TensorFlow 2 Layer Tests - Legacy FE + if: ${{ always() }} + run: | + python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt + export PYTHONPATH=${{ env.OPENVINO_REPO }}/tools/mo/:${{ env.LAYER_TESTS_INSTALL_DIR }}:$PYTHONPATH + + python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/tensorflow2_keras_tests/test_tf2_keras_activation.py \ + --ir_version=11 --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-tf2_Activation.xml -k "sigmoid" + env: + TEST_DEVICE: CPU + + - name: TensorFlow Lite Layer Tests - TFL FE + if: ${{ always() }} + run: | + python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt + export PYTHONPATH=${{ env.OPENVINO_REPO }}/tools/mo/:${{ env.LAYER_TESTS_INSTALL_DIR }}:$PYTHONPATH + + python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/tensorflow_lite_tests/ --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-tfl_fe.xml + env: + TEST_DEVICE: CPU + + - name: Python Frontend tests + if: ${{ always() }} + run: | + python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt + export PYTHONPATH=${{ env.OPENVINO_REPO }}/tools/mo/:${{ env.LAYER_TESTS_INSTALL_DIR }}:$PYTHONPATH + # to allow 'libtest_builtin_extensions.so' to find 'libopenvino_onnx_frontend.so' + source ${{ env.INSTALL_DIR }}/setupvars.sh + + python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/py_frontend_tests --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-test_py_fontend.xml + + - name: Upload Test Results + uses: actions/upload-artifact@v3 + if: ${{ always() }} + with: + name: test-results-python + path: ${{ env.INSTALL_TEST_DIR }}/TEST*.xml + if-no-files-found: 'error' + +# Ticket: 122001 +# CPU_Functional_Tests: +# needs: Build +# if: ${{ always() }} +# defaults: +# run: +# shell: bash +# runs-on: macos-12 +# env: +# INSTALL_DIR: ${{ github.workspace }}/install +# INSTALL_TEST_DIR: ${{ github.workspace }}/install/tests +# +# steps: +# - name: Create Directories +# run: mkdir -p ${{ env.INSTALL_DIR }} ${{ env.INSTALL_TEST_DIR }} +# +# - name: Download OpenVINO package +# uses: actions/download-artifact@v3 +# with: +# name: openvino_package +# path: ${{ env.INSTALL_DIR }} +# +# - name: Download OpenVINO tests package +# uses: actions/download-artifact@v3 +# with: +# name: openvino_tests +# path: ${{ env.INSTALL_TEST_DIR }} +# +# - name: Extract OpenVINO packages +# run: | +# pushd ${{ env.INSTALL_DIR }} +# tar -xzf openvino_package.tar.gz -C ${{ env.INSTALL_DIR }} && rm openvino_package.tar.gz || exit 1 +# popd +# pushd ${{ env.INSTALL_TEST_DIR }} +# tar -xzf openvino_tests.tar.gz -C ${{ env.INSTALL_DIR }} && rm openvino_tests.tar.gz || exit 1 +# popd +# +# - name: Intel CPU plugin func tests +# run: | +# source ${{ env.INSTALL_DIR }}/setupvars.sh +# ${{ env.INSTALL_TEST_DIR }}/ov_cpu_func_tests --gtest_print_time=1 --gtest_filter=*smoke* --gtest_output=xml:"${{ env.INSTALL_TEST_DIR }}/TEST-CPUFuncTests.xml" +# +# - name: Upload Test Results +# uses: actions/upload-artifact@v3 +# if: ${{ always() }} +# with: +# name: test-results-functional-cpu +# path: ${{ env.INSTALL_TEST_DIR }}/TEST*.xml +# if-no-files-found: 'error' diff --git a/src/bindings/python/tests_compatibility/test_onnx/test_backend.py b/src/bindings/python/tests_compatibility/test_onnx/test_backend.py index 87f53223c2d672..f9d7c4fe261a13 100644 --- a/src/bindings/python/tests_compatibility/test_onnx/test_backend.py +++ 
b/src/bindings/python/tests_compatibility/test_onnx/test_backend.py @@ -32,6 +32,7 @@ xfail_issue_48052, xfail_issue_52463, xfail_issue_58033, + xfail_issue_58676, xfail_issue_63033, xfail_issue_63036, xfail_issue_63043, @@ -301,6 +302,10 @@ def expect_fail(test_case_path, xfail): # type: (str) -> None "OnnxBackendNodeModelTest.test_tril_zero_cpu", "OnnxBackendNodeModelTest.test_triu_zero_cpu", ), + ( + xfail_issue_58676, + "OnnxBackendNodeModelTest.test_div_uint8_cpu" + ), ( skip_dynamic_model, "OnnxBackendNodeModelTest.test_triu_one_row_cpu", diff --git a/tests/layer_tests/pytorch_tests/test_rfftn_complex_transforms.py b/tests/layer_tests/pytorch_tests/test_rfftn_complex_transforms.py index 7e3dd169e5bc02..a871bbc46fd75f 100644 --- a/tests/layer_tests/pytorch_tests/test_rfftn_complex_transforms.py +++ b/tests/layer_tests/pytorch_tests/test_rfftn_complex_transforms.py @@ -1,9 +1,12 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +from sys import platform + import numpy as np import pytest import torch + from pytorch_layer_test_class import PytorchLayerTest @@ -40,7 +43,9 @@ def forward(self, x): @pytest.mark.parametrize("norm", ["forward", "backward", "ortho", None]) @pytest.mark.nightly @pytest.mark.precommit + @pytest.mark.skipif(platform == 'darwin', reason="Ticket - 122182") def test_rfftn(self, ie_device, precision, ir_version, input_shape, dim, s, norm): self.input_shape = input_shape # Unfrozen test would fail due to issues with prim::GetAttr containing lists, strings or none. - self._test(*self.create_model(dim, s, norm), ie_device, precision, ir_version, custom_eps=1e-3, freeze_model=True) + self._test(*self.create_model(dim, s, norm), ie_device, precision, ir_version, custom_eps=1e-3, + freeze_model=True) diff --git a/tests/layer_tests/pytorch_tests/test_upsample.py b/tests/layer_tests/pytorch_tests/test_upsample.py index cc862ee6cb5053..7406302ef5d7fe 100644 --- a/tests/layer_tests/pytorch_tests/test_upsample.py +++ b/tests/layer_tests/pytorch_tests/test_upsample.py @@ -1,6 +1,8 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +from sys import platform + import pytest from pytorch_layer_test_class import PytorchLayerTest @@ -41,6 +43,7 @@ def forward(self, x): ]) @pytest.mark.nightly @pytest.mark.precommit + @pytest.mark.skipif(platform == 'darwin', reason="Ticket - 122182") def test_upsample1d(self, mode, size, scale, ie_device, precision, ir_version): self._test(*self.create_model(size, scale, mode), ie_device, precision, ir_version, trace_model=True) @@ -96,7 +99,6 @@ def test_upsample2d(self, mode, size, scale, ie_device, precision, ir_version): precision, ir_version, trace_model=True, **{"custom_eps": 1e-3}) - class TestUpsample2DAntialias(PytorchLayerTest): def _prepare_input(self): import numpy as np diff --git a/tests/layer_tests/tensorflow_tests/test_tf_CTCLoss.py b/tests/layer_tests/tensorflow_tests/test_tf_CTCLoss.py index 78289a7144ba7a..0a2eae6303386e 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_CTCLoss.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_CTCLoss.py @@ -1,6 +1,8 @@ # Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +from sys import platform + import numpy as np import pytest import tensorflow as tf @@ -54,6 +56,7 @@ def create_ctcloss_placeholder_const_net(self, inputs, targets): @pytest.mark.parametrize("params", test_data) @pytest.mark.precommit_tf_fe @pytest.mark.nightly + @pytest.mark.skipif(platform == 'darwin', reason="Ticket - 
122182") def test_ctcloss_placeholder_const(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend, use_old_api): self._test(*self.create_ctcloss_placeholder_const_net(**params), diff --git a/tests/layer_tests/tensorflow_tests/test_tf_FusedBatchNorm.py b/tests/layer_tests/tensorflow_tests/test_tf_FusedBatchNorm.py index 94ae7dc628270d..b2d44b5a07beb9 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_FusedBatchNorm.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_FusedBatchNorm.py @@ -1,6 +1,8 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +from sys import platform + import numpy as np import pytest import tensorflow as tf @@ -102,6 +104,7 @@ def create_fused_batch_norm_net(self, x_shape, epsilon, exponential_avg_factor, @pytest.mark.parametrize("params", test_data_basic) @pytest.mark.precommit_tf_fe @pytest.mark.nightly + @pytest.mark.skipif(platform == 'darwin', reason="Ticket - 122182") def test_fused_batch_norm_basic(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend, use_old_api): self._test(*self.create_fused_batch_norm_net(**params), diff --git a/tests/layer_tests/tensorflow_tests/test_tf_TensorListLength.py b/tests/layer_tests/tensorflow_tests/test_tf_TensorListLength.py index 57b17b3341d750..8d6005350fb3fd 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_TensorListLength.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_TensorListLength.py @@ -1,6 +1,8 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +from sys import platform + import numpy as np import pytest import tensorflow as tf @@ -38,6 +40,7 @@ def create_tensor_list_length(self, input_shape, input_type): @pytest.mark.parametrize("params", test_data_basic) @pytest.mark.precommit_tf_fe @pytest.mark.nightly + @pytest.mark.skipif(platform == 'darwin', reason="Ticket - 122182") def test_tensor_list_length_basic(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend, use_old_api): self._test(*self.create_tensor_list_length(**params), @@ -76,6 +79,7 @@ def create_tensor_list_length_empty_list(self, tensor_list_size, element_shape): @pytest.mark.parametrize("params", test_data_tensor_list_length_empty_list) @pytest.mark.precommit_tf_fe @pytest.mark.nightly + @pytest.mark.skipif(platform == 'darwin', reason="Ticket - 122182") def test_tensor_list_length_empty_list(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend, use_old_api): self._test(*self.create_tensor_list_length_empty_list(**params), diff --git a/tests/layer_tests/tensorflow_tests/test_tf_TensorListResize.py b/tests/layer_tests/tensorflow_tests/test_tf_TensorListResize.py index 39bdca06dee004..709f009a7afa93 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_TensorListResize.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_TensorListResize.py @@ -1,6 +1,8 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +from sys import platform + import numpy as np import pytest import tensorflow as tf @@ -42,6 +44,7 @@ def create_tensor_list_resize(self, input_shape, input_type, new_size): @pytest.mark.parametrize("params", test_data_basic) @pytest.mark.precommit_tf_fe @pytest.mark.nightly + @pytest.mark.skipif(platform == 'darwin', reason="Ticket - 122182") def test_tensor_list_resize_basic(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend, use_old_api): self._test(*self.create_tensor_list_resize(**params), diff --git 
a/tests/layer_tests/tensorflow_tests/test_tf_UnaryOps.py b/tests/layer_tests/tensorflow_tests/test_tf_UnaryOps.py index 7b176344271794..91ed0a72db0bcf 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_UnaryOps.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_UnaryOps.py @@ -214,6 +214,7 @@ def test_unary_op_mish_precommit(self, params, ie_device, precision, ir_version, 'Selu' ]) @pytest.mark.nightly + @pytest.mark.skipif(sys.platform == 'darwin', reason="Ticket - 122182") def test_unary_op(self, params, ie_device, precision, ir_version, temp_dir, op_type, use_new_frontend, use_old_api): if ie_device == 'GPU': diff --git a/tests/layer_tests/tensorflow_tests/test_tf_While.py b/tests/layer_tests/tensorflow_tests/test_tf_While.py index 3a6f9b7f291029..2a112700f30ad5 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_While.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_While.py @@ -1,6 +1,8 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +from sys import platform + import numpy as np import pytest import tensorflow as tf @@ -55,6 +57,7 @@ def body(x, y): @pytest.mark.parametrize("params", test_data_basic) @pytest.mark.precommit_tf_fe @pytest.mark.nightly + @pytest.mark.skipif(platform == 'darwin', reason="Ticket - 122182") def test_while_basic(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend, use_old_api): self._test(*self.create_while_net(**params), @@ -113,6 +116,7 @@ def body(x, y): @pytest.mark.parametrize("params", test_data_basic) @pytest.mark.precommit_tf_fe @pytest.mark.nightly + @pytest.mark.skipif(platform == 'darwin', reason="Ticket - 122182") def test_while_basic(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend, use_old_api): self._test(*self.create_while_net(**params), diff --git a/tools/mo/unit_tests/moc_tf_fe/conversion_basic_models_test.py b/tools/mo/unit_tests/moc_tf_fe/conversion_basic_models_test.py index 3e5c46ecb3ac90..8d905d8f13129d 100644 --- a/tools/mo/unit_tests/moc_tf_fe/conversion_basic_models_test.py +++ b/tools/mo/unit_tests/moc_tf_fe/conversion_basic_models_test.py @@ -3,10 +3,10 @@ import os import unittest +from sys import platform import numpy as np from generator import generator, generate - from openvino.runtime import Core from openvino.tools.mo.convert import convert_model @@ -40,7 +40,6 @@ def basic(self, input_model, argv_input, inputs, dtype, expected, freeze_placeho assert values.dtype == dtype assert np.allclose(values, expected) - @generate( *[ ( @@ -240,6 +239,7 @@ def test_conversion_failure_fallback_default(self): self.basic("ctc_model_based.pbtxt", None, None, None, None, None, None, True, True, False, False) + @unittest.skipIf(platform == 'darwin', reason="Ticket - 122182") def test_conversion_failure_fallback_use_new_frontend(self): with self.assertRaisesRegex(Exception, "\[TensorFlow Frontend\] Internal error, no translator found for operation\(s\)\: " From a5b6606132a19aa7f3be2c7dbf18b66eea8fb84e Mon Sep 17 00:00:00 2001 From: Ekaterina Aidova Date: Tue, 10 Oct 2023 15:05:10 +0400 Subject: [PATCH 127/257] [PT FE]: support aten::amax, aten::amin, aten::clip, aten::clamp_ (#20338) --- src/frontends/pytorch/src/op/min_max.cpp | 30 +++++++++++ src/frontends/pytorch/src/op_table.cpp | 7 +++ tests/layer_tests/pytorch_tests/test_clamp.py | 25 ++++++--- .../layer_tests/pytorch_tests/test_min_max.py | 53 +++++++++++++++++++ 4 files changed, 108 insertions(+), 7 deletions(-) diff --git a/src/frontends/pytorch/src/op/min_max.cpp 
b/src/frontends/pytorch/src/op/min_max.cpp index 670b4eca4d4e93..45b4f5f0155354 100644 --- a/src/frontends/pytorch/src/op/min_max.cpp +++ b/src/frontends/pytorch/src/op/min_max.cpp @@ -112,6 +112,36 @@ OutputVector translate_minimum(const NodeContext& context) { return {res}; } +OutputVector translate_amin(const NodeContext& context) { + // aten::amin(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor + + // aten::amin.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + num_inputs_check(context, 3, 4); + auto x = context.get_input(0); + auto dims = context.get_input(1); + auto keep_dims = context.const_input(2); + auto res = context.mark_node(std::make_shared(x, dims, keep_dims)); + if (!context.input_is_none(3)) { + context.mutate_input(3, res); + } + return {res}; +} + +OutputVector translate_amax(const NodeContext& context) { + // aten::amax(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor + + // aten::amax.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + num_inputs_check(context, 3, 4); + auto x = context.get_input(0); + auto dims = context.get_input(1); + auto keep_dims = context.const_input(2); + auto res = context.mark_node(std::make_shared(x, dims, keep_dims)); + if (!context.input_is_none(3)) { + context.mutate_input(3, res); + } + return {res}; +} + } // namespace op } // namespace pytorch } // namespace frontend diff --git a/src/frontends/pytorch/src/op_table.cpp b/src/frontends/pytorch/src/op_table.cpp index c420a1b16e10f4..b168775acc09a0 100644 --- a/src/frontends/pytorch/src/op_table.cpp +++ b/src/frontends/pytorch/src/op_table.cpp @@ -22,6 +22,8 @@ OP_CONVERTER(translate_add); OP_CONVERTER(translate_addcmul); OP_CONVERTER(translate_addmm); OP_CONVERTER(translate_all); +OP_CONVERTER(translate_amax); +OP_CONVERTER(translate_amin); OP_CONVERTER(translate_and); OP_CONVERTER(translate_arange); OP_CONVERTER(translate_argmax); @@ -237,6 +239,8 @@ const std::map get_supported_ops_ts() { {"aten::addcmul", op::translate_addcmul}, {"aten::addmm", op::translate_addmm}, {"aten::all", op::translate_all}, + {"aten::amax", op::translate_amax}, + {"aten::amin", op::translate_amin}, {"aten::arange", op::translate_arange}, {"aten::argmax", op::translate_argmax}, {"aten::argmin", op::translate_argmin}, @@ -266,8 +270,11 @@ const std::map get_supported_ops_ts() { {"aten::ceil_", op::inplace_op>}, {"aten::channel_shuffle", op::translate_channel_shuffle}, {"aten::clamp", op::translate_clamp}, + {"aten::clamp_", op::inplace_op}, {"aten::clamp_max", op::translate_1to1_match_2_inputs}, {"aten::clamp_min", op::translate_1to1_match_2_inputs}, + {"aten::clip", op::translate_clamp}, + {"aten::clip_", op::inplace_op}, {"aten::clone", op::skip_node}, // ignore clone operators that are inserted by PyTorch autograd {"aten::contiguous", op::skip_node}, // In openvino how tensors are stored in memory is internal plugin detail, // we assume all tensors are contiguous diff --git a/tests/layer_tests/pytorch_tests/test_clamp.py b/tests/layer_tests/pytorch_tests/test_clamp.py index 346b47c3d1f631..ad869d6211e270 100644 --- a/tests/layer_tests/pytorch_tests/test_clamp.py +++ b/tests/layer_tests/pytorch_tests/test_clamp.py @@ -11,11 +11,11 @@ def _prepare_input(self): import numpy as np return (np.random.randn(1, 3, 224, 224).astype(np.float32),) - def create_model(self, minimum, maximum, as_tensors=False): + def create_model(self, minimum, maximum, as_tensors=False, op_type='clamp'): import torch class 
aten_clamp(torch.nn.Module): - def __init__(self, minimum, maximum, as_tensors): + def __init__(self, minimum, maximum, as_tensors, op_type="clamp"): super(aten_clamp, self).__init__() if minimum is not None and as_tensors: minimum = torch.tensor(minimum) @@ -23,20 +23,31 @@ def __init__(self, minimum, maximum, as_tensors): if maximum is not None and as_tensors: maximum = torch.tensor(maximum) self.max = maximum + self.forward = getattr(self, f"forward_{op_type}") - def forward(self, x): + def forward_clamp(self, x): return torch.clamp(x, self.min, self.max) + def forward_clip(self, x): + return torch.clip(x, self.min, self.max) + + def forward_clamp_(self, x): + return x.clamp_(self.min, self.max), x + + def forward_clip_(self, x): + return x.clip_(self.min, self.max), x + ref_net = None - op_name = "aten::clamp" - return aten_clamp(minimum, maximum, as_tensors), ref_net, op_name + op_name = f"aten::{op_type}" + return aten_clamp(minimum, maximum, as_tensors, op_type), ref_net, op_name @pytest.mark.parametrize("minimum,maximum", [(0., 1.), (-0.5, 1.5), (None, 10.), (None, -10.), (10., None), (-10., None), (100, 200)]) @pytest.mark.parametrize("as_tensors", [True, False]) + @pytest.mark.parametrize("op_type", ["clamp", "clamp_"]) @pytest.mark.nightly - def test_clamp(self, minimum, maximum, as_tensors, ie_device, precision, ir_version): - self._test(*self.create_model(minimum, maximum, as_tensors), ie_device, precision, ir_version) + def test_clamp(self, minimum, maximum, as_tensors, op_type, ie_device, precision, ir_version): + self._test(*self.create_model(minimum, maximum, as_tensors, op_type), ie_device, precision, ir_version) @pytest.mark.xfail(reason='OpenVINO clamp does not support min > max') def test_clamp_min_greater(self, ie_device, precision, ir_version): diff --git a/tests/layer_tests/pytorch_tests/test_min_max.py b/tests/layer_tests/pytorch_tests/test_min_max.py index c32fe41512f800..3a624d534fa6e0 100644 --- a/tests/layer_tests/pytorch_tests/test_min_max.py +++ b/tests/layer_tests/pytorch_tests/test_min_max.py @@ -283,4 +283,57 @@ def test_minimum_maximum_out( ie_device, precision, ir_version, kwargs_to_prepare_input= {"input_dtype": input_dtype, "second_input_dtype": input_dtype, "out": True} + ) + + +class TestAminAmax(PytorchLayerTest): + def _prepare_input(self, input_dtype="float32", out=False, axes=None, keep_dims=False): + import numpy as np + x = np.random.randn(1, 3, 10, 10).astype(input_dtype) + if not out: + return (x,) + if isinstance(axes, list): + axes = tuple(axes) + out = np.zeros_like(np.max(x, axis=axes, keepdims=keep_dims), dtype=input_dtype) + return (x, out) + + def create_model(self, op_type, axis, keep_dims, out=False): + import torch + op_types = { + "amax": torch.amax, + "amin": torch.amin + } + + + op = op_types[op_type] + + class aten_amin_amax(torch.nn.Module): + def __init__(self, op, axis, keep_dims, out): + super().__init__() + self.op = op + self.axis = axis + self.keep_dims = keep_dims + if out: + self.forward = self.forward_out + + def forward_out(self, x, y): + return self.op(x, self.axis, self.keep_dims, out=y), y + + def forward(self, x): + return self.op(x, self.axis, self.keep_dims) + + + model_cls = aten_amin_amax(op, axis, keep_dims, out) + + return model_cls, None, f"aten::{op_type}" + + @pytest.mark.parametrize("op_type", ["amin", "amax"]) + @pytest.mark.parametrize("axis", [0, -1, 1, [1, 2], [-1, -2], [2, 0, -1], [0, 1, 2, 3]]) + @pytest.mark.parametrize("keep_dims", [True, False]) + @pytest.mark.parametrize("out", [True, False]) + 
@pytest.mark.parametrize("input_dtype", ['float32', 'int32', 'int64', 'float64']) + def test_amin_amax(self, op_type, input_dtype, axis, keep_dims, out, ie_device, precision, ir_version): + self._test(*self.create_model(op_type, axis, keep_dims, out), + ie_device, precision, ir_version, kwargs_to_prepare_input= + {"input_dtype": input_dtype, "out": out, "axes": axis, "keep_dims": keep_dims} ) \ No newline at end of file From 4d9f2f3cd7e012b5ef879a2f9af03703247c60cd Mon Sep 17 00:00:00 2001 From: Evgeny Kotov Date: Tue, 10 Oct 2023 13:06:10 +0200 Subject: [PATCH 128/257] Support new operations in TS: Selu, Swish, HSwish, Tile, CumSum, HardSigmoid (#19990) * add new operations as unary * get unary as input(0) instead of iterating pattern map * add CumSum + unit tests * add Tile + unit tests * add tile * fix ts_tile * code review fix: use ADD_MATCHER * fix bug CI tests --- .../transpose_sinking/ts_cumsum.hpp | 41 ++++ .../transpose_sinking/ts_tile.hpp | 41 ++++ .../transpose_sinking/ts_cumsum.cpp | 92 +++++++ .../transpose_sinking/ts_general.cpp | 57 +++-- .../transpose_sinking/ts_tile.cpp | 93 ++++++++ .../transpose_sinking/ts_unary.cpp | 50 +++- .../transpose_sinking/ts_utils.cpp | 1 + .../transpose_sinking/ts_common_test.cpp | 224 ++++++++++++++++++ .../tests/transpose_sinking/ts_unary_test.cpp | 55 ++++- 9 files changed, 605 insertions(+), 49 deletions(-) create mode 100644 src/common/transformations/include/transformations/transpose_sinking/ts_cumsum.hpp create mode 100644 src/common/transformations/include/transformations/transpose_sinking/ts_tile.hpp create mode 100644 src/common/transformations/src/transformations/transpose_sinking/ts_cumsum.cpp create mode 100644 src/common/transformations/src/transformations/transpose_sinking/ts_tile.cpp diff --git a/src/common/transformations/include/transformations/transpose_sinking/ts_cumsum.hpp b/src/common/transformations/include/transformations/transpose_sinking/ts_cumsum.hpp new file mode 100644 index 00000000000000..d8c70e65ad2899 --- /dev/null +++ b/src/common/transformations/include/transformations/transpose_sinking/ts_cumsum.hpp @@ -0,0 +1,41 @@ +// Copyright (C) 2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/pass/graph_rewrite.hpp" +#include "openvino/pass/pass.hpp" +#include "transformations/transpose_sinking/ts_base.hpp" +#include "transformations_visibility.hpp" + +namespace ov { +namespace pass { +namespace transpose_sinking { + +class TRANSFORMATIONS_API TSCumSumForward; +class TRANSFORMATIONS_API TSCumSumBackward; + +} // namespace transpose_sinking +} // namespace pass +} // namespace ov + +/** + * @ingroup ie_transformation_common_api + * @brief TSCumSumForward transformation sinks Transpose through CumSum in the forward direction. + */ +class ov::pass::transpose_sinking::TSCumSumForward : public ov::pass::transpose_sinking::TSForwardBase { +public: + OPENVINO_RTTI("ov::pass::TSBinaryForward", "0"); + TSCumSumForward(); +}; + +/** + * @ingroup ie_transformation_common_api + * @brief TSCumSumBackward transformation sinks Transpose through CumSum in the backward direction. 
+ */ +class ov::pass::transpose_sinking::TSCumSumBackward : public ov::pass::MatcherPass { +public: + OPENVINO_RTTI("ov::pass::TSBinaryBackward", "0"); + TSCumSumBackward(); +}; diff --git a/src/common/transformations/include/transformations/transpose_sinking/ts_tile.hpp b/src/common/transformations/include/transformations/transpose_sinking/ts_tile.hpp new file mode 100644 index 00000000000000..cd125ca05639dd --- /dev/null +++ b/src/common/transformations/include/transformations/transpose_sinking/ts_tile.hpp @@ -0,0 +1,41 @@ +// Copyright (C) 2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/pass/graph_rewrite.hpp" +#include "openvino/pass/pass.hpp" +#include "transformations/transpose_sinking/ts_base.hpp" +#include "transformations_visibility.hpp" + +namespace ov { +namespace pass { +namespace transpose_sinking { + +class TRANSFORMATIONS_API TSTileForward; +class TRANSFORMATIONS_API TSTileBackward; + +} // namespace transpose_sinking +} // namespace pass +} // namespace ov + +/** + * @ingroup ie_transformation_common_api + * @brief TSTileForward transformation sinks Transpose through Tile in the forward direction. + */ +class ov::pass::transpose_sinking::TSTileForward : public ov::pass::transpose_sinking::TSForwardBase { +public: + OPENVINO_RTTI("ov::pass::TSBinaryForward", "0"); + TSTileForward(); +}; + +/** + * @ingroup ie_transformation_common_api + * @brief TSTileBackward transformation sinks Transpose through Tile in the backward direction. + */ +class ov::pass::transpose_sinking::TSTileBackward : public ov::pass::MatcherPass { +public: + OPENVINO_RTTI("ov::pass::TSBinaryBackward", "0"); + TSTileBackward(); +}; diff --git a/src/common/transformations/src/transformations/transpose_sinking/ts_cumsum.cpp b/src/common/transformations/src/transformations/transpose_sinking/ts_cumsum.cpp new file mode 100644 index 00000000000000..623724f8ccf99f --- /dev/null +++ b/src/common/transformations/src/transformations/transpose_sinking/ts_cumsum.cpp @@ -0,0 +1,92 @@ +// Copyright (C) 2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "transformations/transpose_sinking/ts_cumsum.hpp" + +#include "itt.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/cum_sum.hpp" +#include "openvino/op/fake_quantize.hpp" +#include "openvino/op/transpose.hpp" +#include "openvino/op/util/op_types.hpp" +#include "openvino/pass/pattern/op/wrap_type.hpp" +#include "transformations/rt_info/transpose_sinking_attr.hpp" +#include "transformations/transpose_sinking/ts_utils.hpp" + +using namespace ov; +using namespace ov::pass::pattern; +using namespace ov::pass::transpose_sinking; +using namespace ov::pass::transpose_sinking::utils; + +#undef CUMSUM_AXIS_INPUT_IDX +#define CUMSUM_AXIS_INPUT_IDX 1 + +TSCumSumForward::TSCumSumForward() { + MATCHER_SCOPE(TSCumSumForward); + + create_pattern(true, {0}); + + auto sinking_transformation = [=](const std::shared_ptr& main_node, + const TransposeInputsInfo& transpose_info) -> bool { + if (transformation_callback(main_node)) { + return false; + } + + bool res = utils::sink_forward::UpdateInputTransposes(main_node, transpose_info, /* input_indexes= */ {0}); + if (!res) + return res; + + const auto transpose_axis_order = transpose_info.transpose_const->get_axis_vector_val(); + auto axis = std::make_shared(element::i32, Shape{}, 0); + const auto& new_axes = ChangeAxes(main_node->input_value(CUMSUM_AXIS_INPUT_IDX), transpose_axis_order, axis); + 
main_node->input(CUMSUM_AXIS_INPUT_IDX).replace_source_output(new_axes); + + default_outputs_update(main_node, transpose_info); + return true; + }; + transpose_sinking(matcher_name, sinking_transformation); +} + +TSCumSumBackward::TSCumSumBackward() { + MATCHER_SCOPE(TSCumSumBackward); + auto main_node_label = wrap_type([](const Output& output) -> bool { + return has_static_rank()(output) && CheckTransposeConsumers(output); + }); + + auto transpose_const_label = wrap_type(); + + auto transpose_label = wrap_type({main_node_label, transpose_const_label}, + [](const Output& output) -> bool { + return has_static_rank()(output); + }); + matcher_pass_callback matcher_pass_callback = [=](Matcher& m) { + const auto& pattern_to_output = m.get_pattern_value_map(); + auto transpose_const = + as_type_ptr(pattern_to_output.at(transpose_const_label).get_node_shared_ptr()); + auto transpose = pattern_to_output.at(transpose_label).get_node_shared_ptr(); + auto main_node = pattern_to_output.at(main_node_label).get_node_shared_ptr(); + + if (transformation_callback(main_node)) { + return false; + } + + for (auto& new_node : sink_backward::InsertTransposeBeforeNode(main_node, + transpose_const, + /* input_indexes= */ {0})) { + register_new_node(new_node); + } + + RemoveTransposeConsumers(main_node); + const auto transpose_axis_order = transpose_const->get_axis_vector_val(); + const auto reversed_transpose_order = ReverseTransposeOrder(transpose_axis_order); + auto axis = std::make_shared(element::i32, Shape{}, 0); + auto new_axes = ChangeAxes(main_node->input_value(CUMSUM_AXIS_INPUT_IDX), reversed_transpose_order, axis); + main_node->input(CUMSUM_AXIS_INPUT_IDX).replace_source_output(new_axes); + + main_node->validate_and_infer_types(); + return true; + }; + auto m = std::make_shared(transpose_label, matcher_name); + register_matcher(m, matcher_pass_callback); +} diff --git a/src/common/transformations/src/transformations/transpose_sinking/ts_general.cpp b/src/common/transformations/src/transformations/transpose_sinking/ts_general.cpp index 4b4a0835a9da70..ceae4cd45e6397 100644 --- a/src/common/transformations/src/transformations/transpose_sinking/ts_general.cpp +++ b/src/common/transformations/src/transformations/transpose_sinking/ts_general.cpp @@ -13,6 +13,7 @@ #include "transformations/common_optimizations/enable_shapeof_constant_folding.hpp" #include "transformations/transpose_sinking/ts_binary.hpp" #include "transformations/transpose_sinking/ts_concat.hpp" +#include "transformations/transpose_sinking/ts_cumsum.hpp" #include "transformations/transpose_sinking/ts_data_movement.hpp" #include "transformations/transpose_sinking/ts_fuse.hpp" #include "transformations/transpose_sinking/ts_gather.hpp" @@ -23,6 +24,7 @@ #include "transformations/transpose_sinking/ts_slice.hpp" #include "transformations/transpose_sinking/ts_split.hpp" #include "transformations/transpose_sinking/ts_squeeze.hpp" +#include "transformations/transpose_sinking/ts_tile.hpp" #include "transformations/transpose_sinking/ts_unary.hpp" #include "transformations/transpose_sinking/ts_unsqueeze.hpp" #include "transformations/utils/utils.hpp" @@ -31,35 +33,40 @@ using namespace ov::pass::transpose_sinking; TSGeneralForward::TSGeneralForward() { MATCHER_SCOPE(TSGeneralForward); - add_matcher(); - add_matcher(); - add_matcher(); - add_matcher(); - add_matcher(); - add_matcher(); - add_matcher(); - add_matcher(); - add_matcher(); - add_matcher(); - add_matcher(); - add_matcher(); - add_matcher(); + ADD_MATCHER(this, TSUnaryForward); + 
ADD_MATCHER(this, TSBinaryForward); + ADD_MATCHER(this, TSConcatForward); + ADD_MATCHER(this, TSSplitForward); + ADD_MATCHER(this, TSDataMovementForward); + ADD_MATCHER(this, TSReductionForward); + ADD_MATCHER(this, TSSqueezeForward); + ADD_MATCHER(this, TSUnsqueezeForward); + ADD_MATCHER(this, TSInterpolateForward); + ADD_MATCHER(this, TSSliceForward); + ADD_MATCHER(this, TSGatherForward); + ADD_MATCHER(this, TSShapeOfForward); + ADD_MATCHER(this, TSCumSumForward); + ADD_MATCHER(this, TSTileForward); + ADD_MATCHER(this, TSFuse); } TSGeneralBackward::TSGeneralBackward() { MATCHER_SCOPE(TSGeneralBackward); - add_matcher(); - add_matcher(); - add_matcher(); - add_matcher(); - add_matcher(); - add_matcher(); - add_matcher(); - add_matcher(); - add_matcher(); - add_matcher(); - add_matcher(); - add_matcher(); + ADD_MATCHER(this, TSUnaryBackward); + ADD_MATCHER(this, TSUnaryBackward); + ADD_MATCHER(this, TSBinaryBackward); + ADD_MATCHER(this, TSConcatBackward); + ADD_MATCHER(this, TSSplitBackward); + ADD_MATCHER(this, TSDataMovementBackward); + ADD_MATCHER(this, TSReductionBackward); + ADD_MATCHER(this, TSSqueezeBackward); + ADD_MATCHER(this, TSUnsqueezeBackward); + ADD_MATCHER(this, TSInterpolateBackward); + ADD_MATCHER(this, TSSliceBackward); + ADD_MATCHER(this, TSGatherBackward); + ADD_MATCHER(this, TSCumSumBackward); + ADD_MATCHER(this, TSTileBackward); + ADD_MATCHER(this, TSFuse); } bool TSGeneral::run_on_model(const std::shared_ptr& f) { diff --git a/src/common/transformations/src/transformations/transpose_sinking/ts_tile.cpp b/src/common/transformations/src/transformations/transpose_sinking/ts_tile.cpp new file mode 100644 index 00000000000000..dda10b0a6cb7b7 --- /dev/null +++ b/src/common/transformations/src/transformations/transpose_sinking/ts_tile.cpp @@ -0,0 +1,93 @@ +// Copyright (C) 2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "transformations/transpose_sinking/ts_tile.hpp" + +#include "itt.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/fake_quantize.hpp" +#include "openvino/op/tile.hpp" +#include "openvino/op/transpose.hpp" +#include "openvino/op/util/op_types.hpp" +#include "openvino/pass/pattern/op/wrap_type.hpp" +#include "transformations/rt_info/transpose_sinking_attr.hpp" +#include "transformations/transpose_sinking/ts_utils.hpp" + +using namespace ov; +using namespace ov::pass::pattern; +using namespace ov::pass::transpose_sinking; +using namespace ov::pass::transpose_sinking::utils; + +#undef TILE_REPEATS_INPUT_IDX +#define TILE_REPEATS_INPUT_IDX 1 + +TSTileForward::TSTileForward() { + MATCHER_SCOPE(TSTileForward); + + create_pattern(true, {0}); + + auto sinking_transformation = [=](const std::shared_ptr& main_node, + const TransposeInputsInfo& transpose_info) -> bool { + if (transformation_callback(main_node)) { + return false; + } + + bool res = utils::sink_forward::UpdateInputTransposes(main_node, transpose_info, /* input_indexes= */ {0}); + if (!res) + return res; + + const auto transpose_axis_order = transpose_info.transpose_const->get_axis_vector_val(); + auto repeats = std::make_shared(element::i32, Shape{}, 0); + const auto& new_repeats = + ChangeValuesOrder(main_node->input_value(TILE_REPEATS_INPUT_IDX), transpose_axis_order, repeats); + main_node->input(TILE_REPEATS_INPUT_IDX).replace_source_output(new_repeats); + + default_outputs_update(main_node, transpose_info); + return true; + }; + transpose_sinking(matcher_name, sinking_transformation); +} + +TSTileBackward::TSTileBackward() { + 
MATCHER_SCOPE(TSTileBackward); + auto main_node_label = wrap_type([](const Output& output) -> bool { + return has_static_rank()(output) && CheckTransposeConsumers(output); + }); + + auto transpose_const_label = wrap_type(); + + auto transpose_label = wrap_type({main_node_label, transpose_const_label}, + [](const Output& output) -> bool { + return has_static_rank()(output); + }); + matcher_pass_callback matcher_pass_callback = [=](Matcher& m) { + const auto& pattern_to_output = m.get_pattern_value_map(); + auto transpose_const = + as_type_ptr(pattern_to_output.at(transpose_const_label).get_node_shared_ptr()); + auto transpose = pattern_to_output.at(transpose_label).get_node_shared_ptr(); + auto main_node = pattern_to_output.at(main_node_label).get_node_shared_ptr(); + + if (transformation_callback(main_node)) { + return false; + } + + for (auto& new_node : sink_backward::InsertTransposeBeforeNode(main_node, + transpose_const, + /* input_indexes= */ {0})) { + register_new_node(new_node); + } + + RemoveTransposeConsumers(main_node); + const auto transpose_axis_order = transpose_const->get_axis_vector_val(); + auto repeats = std::make_shared(element::i32, Shape{}, 0); + auto new_repeats = + ChangeValuesOrder(main_node->input_value(TILE_REPEATS_INPUT_IDX), transpose_axis_order, repeats); + main_node->input(TILE_REPEATS_INPUT_IDX).replace_source_output(new_repeats); + + main_node->validate_and_infer_types(); + return true; + }; + auto m = std::make_shared(transpose_label, matcher_name); + register_matcher(m, matcher_pass_callback); +} diff --git a/src/common/transformations/src/transformations/transpose_sinking/ts_unary.cpp b/src/common/transformations/src/transformations/transpose_sinking/ts_unary.cpp index d4b4869c3eb959..5814634e7408f5 100644 --- a/src/common/transformations/src/transformations/transpose_sinking/ts_unary.cpp +++ b/src/common/transformations/src/transformations/transpose_sinking/ts_unary.cpp @@ -16,6 +16,7 @@ #include "openvino/op/logical_not.hpp" #include "openvino/op/softplus.hpp" #include "openvino/op/transpose.hpp" +#include "openvino/pass/pattern/op/or.hpp" #include "openvino/pass/pattern/op/wrap_type.hpp" #include "transformations/rt_info/transpose_sinking_attr.hpp" #include "transformations/transpose_sinking/ts_utils.hpp" @@ -45,8 +46,21 @@ TSUnaryForward::TSUnaryForward() { ov::op::v0::Convert, ov::op::v10::IsInf, ov::op::v10::IsNaN, - ov::op::v10::IsFinite>(true); - transpose_sinking(matcher_name); + ov::op::v10::IsFinite, + ov::op::v0::Selu, + ov::op::v4::Swish, + ov::op::v0::HardSigmoid, + ov::op::v5::LogSoftmax, + ov::op::v1::ConvertLike>(true); + auto ts_unary_sinking_function = [this](const std::shared_ptr& main_node, + const utils::TransposeInputsInfo& transpose_info) -> bool { + bool res = utils::sink_forward::UpdateInputTransposes(main_node, transpose_info, {0}); + if (!res) + return res; + default_outputs_update(main_node, transpose_info); + return true; + }; + transpose_sinking(matcher_name, ts_unary_sinking_function); } TSUnaryBackward::TSUnaryBackward() { @@ -56,15 +70,25 @@ TSUnaryBackward::TSUnaryBackward() { return CheckTransposeConsumers(output); }; - auto unary_label = wrap_type({any_input()}, unary_restrictions); + auto unary_with_1_input_label = wrap_type({any_input()}, unary_restrictions); + + auto unary_with_2_inputs_label = + wrap_type({any_input(), any_input()}, unary_restrictions); + auto unary_with_3_inputs_label = + wrap_type({any_input(), any_input(), any_input()}, + unary_restrictions); + + auto unary_label = std::make_shared( + 
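
Note on the TSTileForward/TSTileBackward passes above: when a Transpose crosses a Tile, the repeats vector has to be permuted, which the pass does with a Gather built by ChangeValuesOrder on input index 1. A NumPy sketch of the identities that make this valid (illustrative only, not part of the patch):

    import numpy as np

    x = np.random.rand(2, 3, 4)
    order = np.array([2, 0, 1])      # transpose order P
    inv = np.argsort(order)          # inverse permutation of P
    reps = np.array([2, 3, 4])

    # Forward: Tile(Transpose(x, P), reps) == Transpose(Tile(x, reps[inv]), P)
    lhs = np.tile(np.transpose(x, order), reps)
    rhs = np.transpose(np.tile(x, reps[inv]), order)
    assert np.array_equal(lhs, rhs)

    # Backward: Transpose(Tile(x, reps), P) == Tile(Transpose(x, P), reps[order])
    lhs = np.transpose(np.tile(x, reps), order)
    rhs = np.tile(np.transpose(x, order), reps[order])
    assert np.array_equal(lhs, rhs)
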
ov::OutputVector{unary_with_1_input_label, unary_with_2_inputs_label, unary_with_3_inputs_label}); auto transpose_const_label = wrap_type(); @@ -75,12 +99,12 @@ TSUnaryBackward::TSUnaryBackward() { auto transpose_const = as_type_ptr(pattern_to_output.at(transpose_const_label).get_node_shared_ptr()); auto transpose = pattern_to_output.at(transpose_label).get_node_shared_ptr(); - auto unary = pattern_to_output.at(unary_label).get_node_shared_ptr(); + auto unary = transpose->get_input_node_shared_ptr(0); if (transformation_callback(unary)) { return false; } - for (auto& new_node : sink_backward::InsertTransposeBeforeNode(unary, transpose_const)) { + for (auto& new_node : sink_backward::InsertTransposeBeforeNode(unary, transpose_const, {0})) { register_new_node(new_node); } unary->validate_and_infer_types(); diff --git a/src/common/transformations/src/transformations/transpose_sinking/ts_utils.cpp b/src/common/transformations/src/transformations/transpose_sinking/ts_utils.cpp index 177267581eb9be..38073bc8848e17 100644 --- a/src/common/transformations/src/transformations/transpose_sinking/ts_utils.cpp +++ b/src/common/transformations/src/transformations/transpose_sinking/ts_utils.cpp @@ -59,6 +59,7 @@ Output ChangeAxes(const Output& indices, copy_runtime_info(indices.get_node_shared_ptr(), gather); return gather; } + Output ChangeAxes(const Output& indices, const AxisVector& transpose_axis_order, const std::shared_ptr& axis) { diff --git a/src/common/transformations/tests/transpose_sinking/ts_common_test.cpp b/src/common/transformations/tests/transpose_sinking/ts_common_test.cpp index 9a00aa9773ed4d..1da471f166639b 100644 --- a/src/common/transformations/tests/transpose_sinking/ts_common_test.cpp +++ b/src/common/transformations/tests/transpose_sinking/ts_common_test.cpp @@ -8,12 +8,14 @@ #include "openvino/pass/manager.hpp" #include "transformations/transpose_sinking/ts_binary.hpp" #include "transformations/transpose_sinking/ts_concat.hpp" +#include "transformations/transpose_sinking/ts_cumsum.hpp" #include "transformations/transpose_sinking/ts_data_movement.hpp" #include "transformations/transpose_sinking/ts_interpolate.hpp" #include "transformations/transpose_sinking/ts_reduction.hpp" #include "transformations/transpose_sinking/ts_slice.hpp" #include "transformations/transpose_sinking/ts_split.hpp" #include "transformations/transpose_sinking/ts_squeeze.hpp" +#include "transformations/transpose_sinking/ts_tile.hpp" #include "transformations/transpose_sinking/ts_unary.hpp" #include "transformations/transpose_sinking/ts_unsqueeze.hpp" #include "ts_test_case.hpp" @@ -206,6 +208,30 @@ FactoryPtr CreateInterpolateFactory(const std::string& type_name, bool is_refere return std::make_shared(type_name, is_reference); } +class CumSumFactory : public IFactory { +public: + explicit CumSumFactory(const std::string& type_name) : IFactory(type_name) {} + NodePtr create(const OutputVector& parent_nodes) const override { + return std::make_shared(parent_nodes[0], parent_nodes[1]); + } +}; + +FactoryPtr CreateCumSumFactory(const std::string& type_name) { + return std::make_shared(type_name); +} + +class TileFactory : public IFactory { +public: + explicit TileFactory(const std::string& type_name) : IFactory(type_name) {} + NodePtr create(const OutputVector& parent_nodes) const override { + return std::make_shared(parent_nodes[0], parent_nodes[1]); + } +}; + +FactoryPtr CreateTileFactory(const std::string& type_name) { + return std::make_shared(type_name); +} + class SliceFactory : public IFactory { public: 
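
Note on the extended TSUnaryForward/TSUnaryBackward matching above: the newly covered activations take one to three inputs (Selu, Swish, HardSigmoid, LogSoftmax, ConvertLike), but only input 0 carries the tensor being transposed; the remaining inputs are scalar parameters or the "like" tensor, so the inserted Transpose is restricted to input index {0}. For the purely element-wise ones the enabling identity is simply transpose(f(x)) == f(transpose(x)), sketched below (illustrative only; LogSoftmax additionally carries an axis attribute, which this sketch does not model):

    import numpy as np

    def selu(x, alpha=1.67, lam=1.05):
        # element-wise in x; alpha and lam are scalars, unaffected by a transpose
        return lam * np.where(x > 0, x, alpha * (np.exp(x) - 1.0))

    x = np.random.randn(2, 3, 4)
    order = (2, 0, 1)
    assert np.allclose(selu(np.transpose(x, order)), np.transpose(selu(x), order))
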
explicit SliceFactory(const std::string& type_name) : IFactory(type_name) {} @@ -285,6 +311,12 @@ FactoryPtr CreateFakeQuantizeFactory(const std::string& type_name) { #undef CREATE_INTERPOLATE_FACTORY #define CREATE_INTERPOLATE_FACTORY(type_name, reference_flag) CreateInterpolateFactory(#type_name, reference_flag) +#undef CREATE_CUMSUM_FACTORY +#define CREATE_CUMSUM_FACTORY(type_name) CreateCumSumFactory(#type_name) + +#undef CREATE_TILE_FACTORY +#define CREATE_TILE_FACTORY(type_name) CreateTileFactory(#type_name) + #undef CREATE_SLICE_FACTORY #define CREATE_SLICE_FACTORY(type_name) CreateSliceFactory(#type_name) @@ -761,6 +793,84 @@ auto test_forward_interpolate = []() { INSTANTIATE_TEST_SUITE_P(TransposeSinkingCommonInterpolateForward, TSTestFixture, test_forward_interpolate()); +auto test_forward_cumsum = []() { + TestCase test_case; + + // Initialize common attributes + test_case.transformation = CREATE_PASS_FACTORY(TSCumSumForward); + test_case.num_main_ops = {1}; + test_case.inputs_to_main = {parameter(element::f32, {1, 2, 48, 80}), + constant(element::i64, {}, std::vector{0})}; + + // Test model description: + test_case.model.preprocess_inputs_to_main = {{set_transpose_for}, {{0}}}; + test_case.model.main_op = {CREATE_CUMSUM_FACTORY(CumSum)}; + test_case.model.model_template = create_model; + + // Reference model description: + auto set_specific_gather_for = [](const vector& idxs, const OutputVector& out_vec) -> OutputVector { + OutputVector result = out_vec; + for (const auto& idx : idxs) { + const auto& out = out_vec[idx]; + vector transpose_order(out_vec[0].get_shape().size()); + iota(transpose_order.begin(), transpose_order.end(), 0); + reverse(transpose_order.begin(), transpose_order.end()); + auto data = make_shared(element::i32, Shape{transpose_order.size()}, transpose_order); + auto axis = make_shared(element::i32, Shape{}, 0); + auto transpose = make_shared(data, out, axis); + result[idx] = transpose; + } + return result; + }; + test_case.model_ref.preprocess_inputs_to_main = {{set_specific_gather_for}, {{1}}}; + test_case.model_ref.main_op = {CREATE_CUMSUM_FACTORY(CumSum)}; + test_case.model_ref.preprocess_outputs_of_main = {{set_transpose_for}, {{0}}}; + test_case.model_ref.model_template = create_model; + + return wrapper(test_case); +}; + +INSTANTIATE_TEST_SUITE_P(TransposeSinkingCommonCumSumForward, TSTestFixture, test_forward_cumsum()); + +auto test_forward_tile = []() { + TestCase test_case; + + // Initialize common attributes + test_case.transformation = CREATE_PASS_FACTORY(TSTileForward); + test_case.num_main_ops = {1}; + test_case.inputs_to_main = {parameter(element::f32, {1, 2, 48, 80}), + constant(element::i64, {4}, std::vector{1, 2, 3, 4})}; + + // Test model description: + test_case.model.preprocess_inputs_to_main = {{set_transpose_for}, {{0}}}; + test_case.model.main_op = {CREATE_TILE_FACTORY(Tile)}; + test_case.model.model_template = create_model; + + // Reference model description: + auto set_specific_gather_for = [](const vector& idxs, const OutputVector& out_vec) -> OutputVector { + OutputVector result = out_vec; + for (const auto& idx : idxs) { + const auto& out = out_vec[idx]; + vector transpose_order(out_vec[0].get_shape().size()); + iota(transpose_order.begin(), transpose_order.end(), 0); + reverse(transpose_order.begin(), transpose_order.end()); + auto data = make_shared(element::i32, Shape{transpose_order.size()}, transpose_order); + auto axis = make_shared(element::i32, Shape{}, 0); + auto transpose = make_shared(out, data, axis); + result[idx] = 
transpose; + } + return result; + }; + test_case.model_ref.preprocess_inputs_to_main = {{set_specific_gather_for}, {{1}}}; + test_case.model_ref.main_op = {CREATE_TILE_FACTORY(Tile)}; + test_case.model_ref.preprocess_outputs_of_main = {{set_transpose_for}, {{0}}}; + test_case.model_ref.model_template = create_model; + + return wrapper(test_case); +}; + +INSTANTIATE_TEST_SUITE_P(TransposeSinkingCommonTileForward, TSTestFixture, test_forward_tile()); + auto test_forward_squeeze = []() { TestCase test_case; @@ -1262,6 +1372,120 @@ auto test_backward_interpolate = []() { INSTANTIATE_TEST_SUITE_P(TransposeSinkingCommonInterpolateBackward, TSTestFixture, test_backward_interpolate()); +auto test_backward_cumsum = []() { + TestCase test_case; + + // Initialize common attributes + test_case.transformation = CREATE_PASS_FACTORY(TSCumSumBackward); + test_case.num_main_ops = {1}; + test_case.inputs_to_main = {parameter(element::f32, {1, 2, 48, 80}), + constant(element::i64, {}, std::vector{0})}; + + // Test model description: + test_case.model.main_op = {CREATE_CUMSUM_FACTORY(CumSum)}; + test_case.model.preprocess_outputs_of_main = {{set_transpose_for}, {{0}}}; + test_case.model.model_template = create_model; + + // Reference model description: + auto set_specific_gather_for = [](const vector& idxs, const OutputVector& out_vec) -> OutputVector { + OutputVector result = out_vec; + for (const auto& idx : idxs) { + const auto& out = out_vec[idx]; + vector transpose_order(out_vec[0].get_shape().size()); + iota(transpose_order.begin(), transpose_order.end(), 0); + reverse(transpose_order.begin(), transpose_order.end()); + auto data = make_shared(element::i32, Shape{transpose_order.size()}, transpose_order); + auto axis = make_shared(element::i32, Shape{}, 0); + auto transpose = make_shared(data, out, axis); + result[idx] = transpose; + } + return result; + }; + test_case.model_ref.preprocess_inputs_to_main = {{set_transpose_for, set_specific_gather_for}, {{0}, {1}}}; + test_case.model_ref.main_op = {CREATE_CUMSUM_FACTORY(CumSum)}; + test_case.model_ref.model_template = create_model; + + return wrapper(test_case); +}; + +INSTANTIATE_TEST_SUITE_P(TransposeSinkingCommonCumSumBackward, TSTestFixture, test_backward_cumsum()); + +auto test_backward_tile = []() { + TestCase test_case; + + // Initialize common attributes + test_case.transformation = CREATE_PASS_FACTORY(TSTileBackward); + test_case.num_main_ops = {1}; + test_case.inputs_to_main = {parameter(element::f32, {1, 2, 48, 80}), + constant(element::i64, {4}, std::vector{1, 2, 3, 4})}; + + // Test model description: + test_case.model.main_op = {CREATE_TILE_FACTORY(Tile)}; + test_case.model.preprocess_outputs_of_main = {{set_transpose_for}, {{0}}}; + test_case.model.model_template = create_model; + + // Reference model description: + auto set_specific_gather_for = [](const vector& idxs, const OutputVector& out_vec) -> OutputVector { + OutputVector result = out_vec; + for (const auto& idx : idxs) { + const auto& out = out_vec[idx]; + vector transpose_order(out_vec[0].get_shape().size()); + iota(transpose_order.begin(), transpose_order.end(), 0); + reverse(transpose_order.begin(), transpose_order.end()); + auto data = make_shared(element::i32, Shape{transpose_order.size()}, transpose_order); + auto axis = make_shared(element::i32, Shape{}, 0); + auto transpose = make_shared(out, data, axis); + result[idx] = transpose; + } + return result; + }; + test_case.model_ref.preprocess_inputs_to_main = {{set_transpose_for, set_specific_gather_for}, {{0}, {1}}}; + 
test_case.model_ref.main_op = {CREATE_TILE_FACTORY(Tile)}; + test_case.model_ref.model_template = create_model; + + return wrapper(test_case); +}; + +INSTANTIATE_TEST_SUITE_P(TransposeSinkingCommonTileBackward, TSTestFixture, test_backward_tile()); + +auto test_backward_tile_tf_case = []() { + TestCase test_case; + + // Initialize common attributes + test_case.transformation = CREATE_PASS_FACTORY(TSTileBackward); + test_case.num_main_ops = {1}; + test_case.inputs_to_main = {parameter(element::f32, {2, 1, 1, 128}), + constant(element::i64, {4}, std::vector{1, 1, 88, 1})}; + + // Test model description: + test_case.model.main_op = {CREATE_TILE_FACTORY(Tile)}; + test_case.model.preprocess_outputs_of_main = {{set_transpose_for}, {{0}}}; + test_case.model.model_template = create_model; + + // Reference model description: + auto set_specific_gather_for = [](const vector& idxs, const OutputVector& out_vec) -> OutputVector { + OutputVector result = out_vec; + for (const auto& idx : idxs) { + const auto& out = out_vec[idx]; + vector transpose_order(out_vec[0].get_shape().size()); + iota(transpose_order.begin(), transpose_order.end(), 0); + reverse(transpose_order.begin(), transpose_order.end()); + auto data = make_shared(element::i32, Shape{transpose_order.size()}, transpose_order); + auto axis = make_shared(element::i32, Shape{}, 0); + auto transpose = make_shared(out, data, axis); + result[idx] = transpose; + } + return result; + }; + test_case.model_ref.preprocess_inputs_to_main = {{set_transpose_for, set_specific_gather_for}, {{0}, {1}}}; + test_case.model_ref.main_op = {CREATE_TILE_FACTORY(Tile)}; + test_case.model_ref.model_template = create_model; + + return wrapper(test_case); +}; + +INSTANTIATE_TEST_SUITE_P(TransposeSinkingCommonTileBackwardTfCase, TSTestFixture, test_backward_tile_tf_case()); + auto test_backward_unsqueeze = []() { TestCase test_case; diff --git a/src/common/transformations/tests/transpose_sinking/ts_unary_test.cpp b/src/common/transformations/tests/transpose_sinking/ts_unary_test.cpp index e47f378cb2bace..8076edf43b2eb8 100644 --- a/src/common/transformations/tests/transpose_sinking/ts_unary_test.cpp +++ b/src/common/transformations/tests/transpose_sinking/ts_unary_test.cpp @@ -7,7 +7,7 @@ #include "common_test_utils/ov_test_utils.hpp" #include "gtest/gtest.h" #include "openvino/frontend/manager.hpp" -#include "openvino/opsets/opset10.hpp" +#include "openvino/opsets/opset12.hpp" #include "openvino/pass/manager.hpp" #include "ts_test_utils.hpp" @@ -85,6 +85,37 @@ NodePtr UnaryFactory::create(const OutputVector& inputs) const { return std::make_shared(inputs[0], element::f64); } +template <> +NodePtr UnaryFactory::create(const OutputVector& inputs) const { + auto alpha = std::make_shared(element::f32, Shape{}, 2.0); + auto lambda = std::make_shared(element::f32, Shape{}, 3.0); + return std::make_shared(inputs[0], alpha, lambda); +} + +template <> +NodePtr UnaryFactory::create(const OutputVector& inputs) const { + auto beta = std::make_shared(element::f32, Shape{}, 0.9); + return std::make_shared(inputs[0], beta); +} + +template <> +NodePtr UnaryFactory::create(const OutputVector& inputs) const { + auto alpha = std::make_shared(element::f32, Shape{}, 2.0); + auto beta = std::make_shared(element::f32, Shape{}, 3.0); + return std::make_shared(inputs[0], alpha, beta); +} + +template <> +NodePtr UnaryFactory::create(const OutputVector& inputs) const { + return std::make_shared(inputs[0], 2); +} + +template <> +NodePtr UnaryFactory::create(const OutputVector& inputs) const { + 
auto like = std::make_shared(element::f64, Shape{}, 1); + return std::make_shared(inputs[0], like); +} + template FactoryPtr CreateUnaryFactory(const std::string& type_name) { return std::make_shared>(type_name); @@ -352,16 +383,18 @@ std::shared_ptr CreateReferenceFunction(const FactoryPtr& unary_facto } // namespace mult_consumers_first_node std::vector unary_factories = { - CREATE_UNARY_FACTORY(Clamp), CREATE_UNARY_FACTORY(Elu), CREATE_UNARY_FACTORY(SoftPlus), - CREATE_UNARY_FACTORY(LogicalNot), CREATE_UNARY_FACTORY(Convert), CREATE_UNARY_FACTORY(Abs), - CREATE_UNARY_FACTORY(Acos), CREATE_UNARY_FACTORY(Asin), CREATE_UNARY_FACTORY(Asinh), - CREATE_UNARY_FACTORY(Atan), CREATE_UNARY_FACTORY(Ceiling), CREATE_UNARY_FACTORY(Cos), - CREATE_UNARY_FACTORY(Cosh), CREATE_UNARY_FACTORY(Erf), CREATE_UNARY_FACTORY(Exp), - CREATE_UNARY_FACTORY(Gelu), CREATE_UNARY_FACTORY(HSigmoid), CREATE_UNARY_FACTORY(HSwish), - CREATE_UNARY_FACTORY(Log), CREATE_UNARY_FACTORY(Negative), CREATE_UNARY_FACTORY(Relu), - CREATE_UNARY_FACTORY(Sigmoid), CREATE_UNARY_FACTORY(Sign), CREATE_UNARY_FACTORY(Sin), - CREATE_UNARY_FACTORY(Sinh), CREATE_UNARY_FACTORY(SoftSign), CREATE_UNARY_FACTORY(Sqrt), - CREATE_UNARY_FACTORY(Tan), CREATE_UNARY_FACTORY(Tanh)}; + CREATE_UNARY_FACTORY(Clamp), CREATE_UNARY_FACTORY(Elu), CREATE_UNARY_FACTORY(SoftPlus), + CREATE_UNARY_FACTORY(LogicalNot), CREATE_UNARY_FACTORY(Convert), CREATE_UNARY_FACTORY(Abs), + CREATE_UNARY_FACTORY(Acos), CREATE_UNARY_FACTORY(Asin), CREATE_UNARY_FACTORY(Asinh), + CREATE_UNARY_FACTORY(Atan), CREATE_UNARY_FACTORY(Ceiling), CREATE_UNARY_FACTORY(Cos), + CREATE_UNARY_FACTORY(Cosh), CREATE_UNARY_FACTORY(Erf), CREATE_UNARY_FACTORY(Exp), + CREATE_UNARY_FACTORY(Gelu), CREATE_UNARY_FACTORY(HSigmoid), CREATE_UNARY_FACTORY(HSwish), + CREATE_UNARY_FACTORY(Log), CREATE_UNARY_FACTORY(Negative), CREATE_UNARY_FACTORY(Relu), + CREATE_UNARY_FACTORY(Sigmoid), CREATE_UNARY_FACTORY(Sign), CREATE_UNARY_FACTORY(Sin), + CREATE_UNARY_FACTORY(Sinh), CREATE_UNARY_FACTORY(SoftSign), CREATE_UNARY_FACTORY(Sqrt), + CREATE_UNARY_FACTORY(Tan), CREATE_UNARY_FACTORY(Tanh), CREATE_UNARY_FACTORY(Selu), + CREATE_UNARY_FACTORY(Swish), CREATE_UNARY_FACTORY(HardSigmoid), CREATE_UNARY_FACTORY(LogSoftmax), + CREATE_UNARY_FACTORY(ConvertLike)}; TEST_P(TransposeSinkingUnaryTestFixture, CompareFunctions) { FactoryPtr unary_factory; From 0dcde7f7bc98cfcf96f57d9f8109e350413367b2 Mon Sep 17 00:00:00 2001 From: Ekaterina Aidova Date: Tue, 10 Oct 2023 15:18:35 +0400 Subject: [PATCH 129/257] [PT FE]: support aten::pixel_unshuffle (#20325) --- .../pytorch/src/op/pixel_shuffle.cpp | 39 +++++++++++++++++++ src/frontends/pytorch/src/op_table.cpp | 2 + .../pytorch_tests/test_pixel_shuffle.py | 30 +++++++++++++- 3 files changed, 70 insertions(+), 1 deletion(-) diff --git a/src/frontends/pytorch/src/op/pixel_shuffle.cpp b/src/frontends/pytorch/src/op/pixel_shuffle.cpp index dec771fe6a4f57..fc35b44d9a304a 100644 --- a/src/frontends/pytorch/src/op/pixel_shuffle.cpp +++ b/src/frontends/pytorch/src/op/pixel_shuffle.cpp @@ -69,6 +69,45 @@ OutputVector translate_pixel_shuffle(const NodeContext& context) { return {context.mark_node(std::make_shared(transpose, shape_after, false))}; }; +OutputVector translate_pixel_unshuffle(const NodeContext& context) { + // aten::pixel_unshuffle(Tensor self, int upscale_factor) -> Tensor + num_inputs_check(context, 2, 2); + auto x = context.get_input(0); + auto upscale_factor = context.get_input(1); + auto neg_1 = context.mark_node(v0::Constant::create(element::i32, Shape{1}, {-1})); + auto neg_3 = 
context.mark_node(v0::Constant::create(element::i32, Shape{1}, {-3})); + auto zero = context.mark_node(v0::Constant::create(element::i32, Shape{1}, {0})); + auto zero_s = context.mark_node(v0::Constant::create(element::i32, Shape{}, {0})); + auto one = context.mark_node(v0::Constant::create(element::i32, Shape{1}, {1})); + auto one_s = context.mark_node(v0::Constant::create(element::i32, Shape{}, {1})); + Output shape; + Output rank; + std::tie(shape, rank) = get_shape_rank(context, x, true); + // 1. Reshape input to [-1, C, H / r, r, W / r, r], where r is upscale factor + auto indices = context.mark_node(v0::Constant::create(element::i32, Shape{3}, {-3, -2, -1})); + auto dims = context.mark_node(std::make_shared(shape, indices, zero_s)); + auto dims_splitted = context.mark_node(std::make_shared(dims, zero_s, 3)); + auto c = dims_splitted->output(0); + auto h = dims_splitted->output(1); + auto w = dims_splitted->output(2); + auto dims_before = context.mark_node(std::make_shared(shape, zero, neg_3, one)); + auto r = context.mark_node(std::make_shared(upscale_factor, zero)); + auto new_h = context.mark_node(std::make_shared(h, upscale_factor, true)); + auto new_w = context.mark_node(std::make_shared(w, upscale_factor, true)); + auto intermediate_shape = + context.mark_node(std::make_shared(OutputVector{neg_1, c, new_h, r, new_w, r}, 0)); + auto x_reshaped = context.mark_node(std::make_shared(x, intermediate_shape, false)); + // 2. Transpose to [-1, C, r, r, H / r, W / r] + auto transpose_order = context.mark_node(v0::Constant::create(element::i32, Shape{6}, {0, 1, 3, 5, 2, 4})); + auto x_transposed = context.mark_node(std::make_shared(x_reshaped, transpose_order)); + // 3. Reshape to [*, C*r*r, H / r, W / r] + auto r_sqr = context.mark_node(std::make_shared(r, r)); + auto new_c = context.mark_node(std::make_shared(c, r_sqr)); + auto final_shape = + context.mark_node(std::make_shared(OutputVector{dims_before, new_c, new_h, new_w}, 0)); + return {context.mark_node(std::make_shared(x_transposed, final_shape, false))}; +}; + OutputVector translate_channel_shuffle(const NodeContext& context) { // aten::channel_shuffle(Tensor self, int groups) -> Tensor num_inputs_check(context, 2, 2); diff --git a/src/frontends/pytorch/src/op_table.cpp b/src/frontends/pytorch/src/op_table.cpp index b168775acc09a0..20c53dbe52bc9f 100644 --- a/src/frontends/pytorch/src/op_table.cpp +++ b/src/frontends/pytorch/src/op_table.cpp @@ -125,6 +125,7 @@ OP_CONVERTER(translate_outer); OP_CONVERTER(translate_pad); OP_CONVERTER(translate_pairwise_distance); OP_CONVERTER(translate_pixel_shuffle); +OP_CONVERTER(translate_pixel_unshuffle); OP_CONVERTER(translate_pow); OP_CONVERTER(translate_pythonop); OP_CONVERTER(translate_quantize_per_channel); @@ -409,6 +410,7 @@ const std::map get_supported_ops_ts() { {"aten::pairwise_distance", op::translate_pairwise_distance}, {"aten::permute", op::translate_1to1_match_2_inputs}, {"aten::pixel_shuffle", op::translate_pixel_shuffle}, + {"aten::pixel_unshuffle", op::translate_pixel_unshuffle}, {"aten::prelu", op::translate_1to1_match_2_inputs}, {"aten::pow", op::translate_pow}, {"aten::quantize_per_channel", op::translate_quantize_per_channel}, diff --git a/tests/layer_tests/pytorch_tests/test_pixel_shuffle.py b/tests/layer_tests/pytorch_tests/test_pixel_shuffle.py index 3a47d1abd397e8..4422626ccba956 100644 --- a/tests/layer_tests/pytorch_tests/test_pixel_shuffle.py +++ b/tests/layer_tests/pytorch_tests/test_pixel_shuffle.py @@ -35,6 +35,34 @@ def test_pixel_shuffle(self, upscale_factor, 
shape, ie_device, precision, ir_ver ie_device, precision, ir_version) +class TestPixelUnshuffle(PytorchLayerTest): + def _prepare_input(self): + return (np.random.randn(*self.shape).astype(np.float32),) + + def create_model(self, upscale_factor): + import torch + import torch.nn.functional as F + + class aten_pixel_unshuffle(torch.nn.Module): + def __init__(self, upscale_factor): + super(aten_pixel_unshuffle, self).__init__() + self.upscale_factor = upscale_factor + + def forward(self, x): + return F.pixel_unshuffle(x, self.upscale_factor) + + return aten_pixel_unshuffle(upscale_factor), None, "aten::pixel_unshuffle" + + @pytest.mark.parametrize(("upscale_factor,shape"), [(3, [1, 1, 12, 12]), + (2, [1, 2, 3, 2, 8, 8]),]) + @pytest.mark.nightly + @pytest.mark.precommit + def test_pixel_unshuffle(self, upscale_factor, shape, ie_device, precision, ir_version): + self.shape = shape + self._test(*self.create_model(upscale_factor), + ie_device, precision, ir_version) + + class TestChannelShuffle(PytorchLayerTest): def _prepare_input(self): return (np.random.randn(*self.shape).astype(np.float32),) @@ -65,4 +93,4 @@ def forward(self, x): def test_channel_shuffle(self, groups, shape, ie_device, precision, ir_version): self.shape = shape self._test(*self.create_model(groups), - ie_device, precision, ir_version) \ No newline at end of file + ie_device, precision, ir_version) From b358d283d01fb5123278c3860a649ddf2ac1ef90 Mon Sep 17 00:00:00 2001 From: Yuan Hu Date: Tue, 10 Oct 2023 20:03:59 +0800 Subject: [PATCH 130/257] [CPU] Enable skipped custom shape infer test cases (#19037) * enable eltwise skip test Signed-off-by: HU Yuan2 * enable skipped test of onehot Signed-off-by: HU Yuan2 * remove shapeof OD test. cpu node shape_of don't support it Signed-off-by: HU Yuan2 * enable skipped test case of strideslice remove default stride test Signed-off-by: HU Yuan2 * enable skipped test case of matmul remove some test cases, custom matmul only support some rank Signed-off-by: HU Yuan2 * fix eltwise autob issue powerstatic don't get the autob attibute from origin op Signed-off-by: HU Yuan2 * Revert "fix eltwise autob issue" This reverts commit 1139296411cc7e25939a1734f156011470482afd. * Revert "enable eltwise skip test" This reverts commit c9f0a6f225eb91767ffb4c1e32723523b92291ba. 
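
Note on the aten::pixel_unshuffle conversion above: it decomposes the op into reshape -> transpose -> reshape, the inverse of pixel_shuffle. A self-contained NumPy sketch of that decomposition (illustrative only, assuming a plain 4D NCHW input; the converter itself also handles extra leading dims via the dims_before concat):

    import numpy as np

    def pixel_unshuffle(x, r):
        # reshape -> transpose -> reshape, mirroring the converter's three steps
        n, c, h, w = x.shape
        y = x.reshape(n, c, h // r, r, w // r, r)
        y = y.transpose(0, 1, 3, 5, 2, 4)            # [N, C, r, r, H/r, W/r]
        return y.reshape(n, c * r * r, h // r, w // r)

    def pixel_shuffle(y, r):
        # reference inverse, used only to check the round trip
        n, crr, hs, ws = y.shape
        c = crr // (r * r)
        z = y.reshape(n, c, r, r, hs, ws).transpose(0, 1, 4, 2, 5, 3)
        return z.reshape(n, c, hs * r, ws * r)

    x = np.arange(1 * 2 * 6 * 6, dtype=np.float32).reshape(1, 2, 6, 6)
    assert np.array_equal(pixel_shuffle(pixel_unshuffle(x, 3), 3), x)
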
* disable eltwise none autob test Signed-off-by: HU Yuan2 * add error message for OPENVINO_ASSERT go NgraphShapeInfer branch in matmul node when input0 rank is not equal to input1 rank Signed-off-by: HU Yuan2 * create ticket for EltwiseShapeInfer and skip the test case Signed-off-by: HU Yuan2 * revert test case order in matmul Signed-off-by: HU Yuan2 --------- Signed-off-by: HU Yuan2 --- .../src/shape_inference/custom/matmul.cpp | 11 +++++++-- .../src/shape_inference/custom/matmul.hpp | 1 + .../src/shape_inference/custom/one_hot.cpp | 4 +++- .../shape_inference/custom/strided_slice.cpp | 11 +++++++-- .../binary_elementwise_arithmetic.cpp | 3 +-- .../custom_shape_infer/matmul.cpp | 16 ++++++------- .../custom_shape_infer/one_hot.cpp | 7 +++--- .../custom_shape_infer/shape_node.cpp | 13 ---------- .../custom_shape_infer/strided_slice.cpp | 24 +++---------------- 9 files changed, 36 insertions(+), 54 deletions(-) diff --git a/src/plugins/intel_cpu/src/shape_inference/custom/matmul.cpp b/src/plugins/intel_cpu/src/shape_inference/custom/matmul.cpp index 1484e35b1793eb..6d327de50d65ed 100644 --- a/src/plugins/intel_cpu/src/shape_inference/custom/matmul.cpp +++ b/src/plugins/intel_cpu/src/shape_inference/custom/matmul.cpp @@ -29,7 +29,7 @@ Result MMShapeInfer::infer( if (rankA == 1 && rankB == 1 && shapeA[0] == shapeB[0]) { return {{m_shapeY}, ShapeInferStatus::success}; } - + OPENVINO_ASSERT(m_out_rank >= 2, "The output rank should be greater or euqal to 2."); m_shapeY[m_out_rank-2] = m_transpose_a ? shapeA[rankA-1] : shapeA[rankA-2]; m_shapeY[m_out_rank-1] = m_transpose_b ? shapeB[rankB-2] : shapeB[rankB-1]; @@ -54,7 +54,14 @@ ShapeInferPtr MMShapeInferFactory::makeShapeInfer() const { const auto output_rank = matmul->get_output_partial_shape(0).rank().get_length(); const bool transpose_a = matmul->get_transpose_a(); const bool transpose_b = matmul->get_transpose_b(); - return std::make_shared(output_rank, transpose_a, transpose_b); + const auto input_rank0 = matmul->get_input_partial_shape(0).rank().get_length(); + const auto input_rank1 = matmul->get_input_partial_shape(1).rank().get_length(); + if (input_rank0 == input_rank1) { + return std::make_shared(output_rank, transpose_a, transpose_b); + } else { + return std::make_shared(make_shape_inference(m_op), EMPTY_PORT_MASK); + } + } else { OPENVINO_THROW("Unexpected operation type in the MatMul shape inference factory"); } diff --git a/src/plugins/intel_cpu/src/shape_inference/custom/matmul.hpp b/src/plugins/intel_cpu/src/shape_inference/custom/matmul.hpp index dd38d984d0883c..25263cfa6fcb67 100644 --- a/src/plugins/intel_cpu/src/shape_inference/custom/matmul.hpp +++ b/src/plugins/intel_cpu/src/shape_inference/custom/matmul.hpp @@ -4,6 +4,7 @@ #include #include "shape_inference/shape_inference_cpu.hpp" +#include #pragma once namespace ov { diff --git a/src/plugins/intel_cpu/src/shape_inference/custom/one_hot.cpp b/src/plugins/intel_cpu/src/shape_inference/custom/one_hot.cpp index fa7352dd344aad..d8630b74f3bc4f 100644 --- a/src/plugins/intel_cpu/src/shape_inference/custom/one_hot.cpp +++ b/src/plugins/intel_cpu/src/shape_inference/custom/one_hot.cpp @@ -21,7 +21,9 @@ Result OneHotShapeInfer::infer( const std::vector>& input_shapes, const std::unordered_map& data_dependency) { auto depth = reinterpret_cast(data_dependency.at(1)->getData())[0]; - + if (depth < 0) { + OPENVINO_THROW("OneHot depth value can't be negative."); + } auto result = input_shapes.front().get(); result.insert(result.begin() + m_axis, depth); diff --git 
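
Note on the MMShapeInfer change above: it asserts that the output rank is at least 2 and, in the factory, falls back to the generic shape inference when the two input ranks differ, because the custom fast path assumes equal ranks. The rule for the last two output dimensions, sketched in Python (illustrative only; batch-dimension broadcasting is simplified here):

    def matmul_out_shape(shape_a, shape_b, transpose_a=False, transpose_b=False):
        # custom fast path: both inputs have the same rank, >= 2
        assert len(shape_a) == len(shape_b) and len(shape_a) >= 2
        batch = [max(a, b) for a, b in zip(shape_a[:-2], shape_b[:-2])]  # simplified broadcast
        m = shape_a[-1] if transpose_a else shape_a[-2]
        n = shape_b[-2] if transpose_b else shape_b[-1]
        return batch + [m, n]

    assert matmul_out_shape([3, 2, 4], [3, 4, 5]) == [3, 2, 5]
    assert matmul_out_shape([3, 4, 2], [3, 4, 5], transpose_a=True) == [3, 2, 5]
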
a/src/plugins/intel_cpu/src/shape_inference/custom/strided_slice.cpp b/src/plugins/intel_cpu/src/shape_inference/custom/strided_slice.cpp index 19918455ae63a1..0ed160bf6e877a 100644 --- a/src/plugins/intel_cpu/src/shape_inference/custom/strided_slice.cpp +++ b/src/plugins/intel_cpu/src/shape_inference/custom/strided_slice.cpp @@ -50,8 +50,15 @@ Result StridedSliceShapeInfer::infer( if ((i >= shapeBegin[0]) || (shapeIn[i] == 0)) { m_outputShape[new_idx] = shapeIn[i]; } else { - auto begin = m_begin_mask_set.count(i) ? 0 : beginPtr[i]; - auto end = m_end_mask_set.count(i) ? shapeIn[i] : endPtr[i]; + int32_t begin = 0; + int32_t end = 0; + if (stridePtr[i] < 0) { + begin = m_begin_mask_set.count(i) ? shapeIn[i] : beginPtr[i]; + end = m_end_mask_set.count(i) ? (-1 - shapeIn[i]) : endPtr[i]; + } else { + begin = m_begin_mask_set.count(i) ? 0 : beginPtr[i]; + end = m_end_mask_set.count(i) ? shapeIn[i] : endPtr[i]; + } m_outputShape[new_idx] = ov::op::slice::get_sliced_value(shapeIn[i], begin, end, stridePtr[i]); } new_idx += 1; diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/binary_elementwise_arithmetic.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/binary_elementwise_arithmetic.cpp index 1b515222b85002..4943144ff7e2dc 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/binary_elementwise_arithmetic.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/binary_elementwise_arithmetic.cpp @@ -83,7 +83,7 @@ TYPED_TEST_P(CpuShapeInferenceTest_BEA, shape_inference_aubtob_none) { } TYPED_TEST_P(CpuShapeInferenceTest_BEA, shape_inference_aubtob_none_incompatible_shapes) { - GTEST_SKIP() << "Skipping test, please check CVS-108946"; + GTEST_SKIP() << "CVS-122351 Skipping test, eltwiseShapeInfer only implemented numpy type boardcast"; auto A = std::make_shared(element::f32, PartialShape{-1, -1, -1, -1}); auto B = std::make_shared(element::f32, PartialShape{-1, -1, -1, -1}); @@ -92,7 +92,6 @@ TYPED_TEST_P(CpuShapeInferenceTest_BEA, shape_inference_aubtob_none_incompatible std::vector static_input_shapes = {StaticShape{3, 4, 6, 5}, StaticShape{3, 1, 6, 1}}, static_output_shapes = {StaticShape{}}; - //TODO cvs-108946, below test can't pass. 
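
Note on the StridedSliceShapeInfer fix above: when the stride is negative, a masked begin now defaults to the dimension size and a masked end to -(dim + 1), i.e. "start at the last element, stop before the first", which is what Python writes as x[::-1]. A small sketch of the resulting per-axis length computation (illustrative only; the CPU code expresses the same defaults with its own sentinel values and ov::op::slice::get_sliced_value):

    def sliced_len(dim, begin, end, stride, begin_masked, end_masked):
        s = slice(None if begin_masked else begin,
                  None if end_masked else end,
                  stride)
        return len(range(*s.indices(dim)))

    # the updated test case: shape (3, 2, 3), strides (1, 1, -1),
    # begin/end masks set on axes 1 and 2 -> expected output (1, 2, 3)
    shape, begin, end, strides, mask = (3, 2, 3), (0, 0, 0), (1, 0, 0), (1, 1, -1), (0, 1, 1)
    out = tuple(sliced_len(d, b, e, s, m, m)
                for d, b, e, s, m in zip(shape, begin, end, strides, mask))
    assert out == (1, 2, 3)
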
OV_EXPECT_THROW(unit_test::cpu_test_shape_infer(node.get(), static_input_shapes, static_output_shapes), ov::Exception, testing::HasSubstr("Eltwise shape infer input shapes dim index:")); diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/matmul.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/matmul.cpp index b503b5e965ae7f..26dec0165c6529 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/matmul.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/matmul.cpp @@ -4,6 +4,8 @@ #include #include "custom_shape_infer.hpp" +#include "openvino/core/dimension.hpp" +#include "openvino/core/partial_shape.hpp" #include "openvino/op/ops.hpp" namespace ov { namespace intel_cpu { @@ -33,7 +35,7 @@ class CPUMatMulTest : public TestWithParam { protected: void SetUp() override { std::tie(a_shape, b_shape) = GetParam(); - + (*exp_shape).clear(); set_exp_shape(); output_shapes.clear(); output_shapes.push_back(exp_shape); @@ -82,39 +84,35 @@ class CPUMatMulTest : public TestWithParam { }; TEST_P(CPUMatMulTest, no_input_transpose) { - GTEST_SKIP() << "Skipping test, please check CVS-108946"; const auto matmul = make_matmul(a_shape.size(), b_shape.size(), false, false); std::vector static_input_shapes = {a_shape, b_shape}; - // TODO 108946,below test case can't pass + matmul->set_output_type(0, element::i64, ov::PartialShape(std::vector(exp_shape.size(), -1))); unit_test::cpu_test_shape_infer(matmul.get(), static_input_shapes, output_shapes); } TEST_P(CPUMatMulTest, transpose_input_a) { - GTEST_SKIP() << "Skipping test, please check CVS-108946"; const auto matmul = make_matmul(a_shape.size(), b_shape.size(), true, false); const auto a_transpose = make_transpose_input(a_shape); std::vector static_input_shapes = {a_transpose, b_shape}; - // TODO 108946,below test case can't pass + matmul->set_output_type(0, element::i64, ov::PartialShape(std::vector(exp_shape.size(), -1))); unit_test::cpu_test_shape_infer(matmul.get(), static_input_shapes, output_shapes); } TEST_P(CPUMatMulTest, transpose_input_b) { - GTEST_SKIP() << "Skipping test, please check CVS-108946"; const auto matmul = make_matmul(a_shape.size(), b_shape.size(), false, true); const auto b_transpose = make_transpose_input(b_shape); std::vector static_input_shapes = {a_shape, b_transpose}; - // TODO 108946,below test case can't pass + matmul->set_output_type(0, element::i64, ov::PartialShape(std::vector(exp_shape.size(), -1))); unit_test::cpu_test_shape_infer(matmul.get(), static_input_shapes, output_shapes); } TEST_P(CPUMatMulTest, transpose_inputs_a_b) { - GTEST_SKIP() << "Skipping test, please check CVS-108946"; const auto matmul = make_matmul(a_shape.size(), b_shape.size(), true, true); const auto a_transpose = make_transpose_input(a_shape); @@ -122,7 +120,7 @@ TEST_P(CPUMatMulTest, transpose_inputs_a_b) { std::vector static_input_shapes = {a_transpose, b_transpose}; - // TODO 108946,below test case can't pass + matmul->set_output_type(0, element::i64, ov::PartialShape(std::vector(exp_shape.size(), -1))); unit_test::cpu_test_shape_infer(matmul.get(), static_input_shapes, output_shapes); } diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/one_hot.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/one_hot.cpp index 57e007c7d9a863..99ed87e83fcf7f 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/one_hot.cpp +++ 
b/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/one_hot.cpp @@ -89,7 +89,6 @@ INSTANTIATE_TEST_SUITE_P( using OneHotCpuShapeInferenceThrowExceptionTest = OneHotCpuShapeInferenceTest; TEST_P(OneHotCpuShapeInferenceThrowExceptionTest, wrong_pattern) { - GTEST_SKIP() << "Skipping test, please check CVS-108946"; const auto depth = std::make_shared(element::i64, ov::Shape{}); const auto on = std::make_shared(element::i32, ov::Shape{}); const auto off = std::make_shared(element::i32, ov::Shape{}); @@ -101,9 +100,9 @@ TEST_P(OneHotCpuShapeInferenceThrowExceptionTest, wrong_pattern) { const auto off_tensor = ov::Tensor(element::i32, ov::Shape{}, &m_off); const std::unordered_map constant_data = {{1, depth_tensor}, {2, on_tensor}, {3, off_tensor}}; - // TODO , implementation should throw exception - ASSERT_THROW(unit_test::cpu_test_shape_infer(op.get(), input_shapes, output_shapes, constant_data), - ov::Exception); + OV_EXPECT_THROW(unit_test::cpu_test_shape_infer(op.get(), input_shapes, output_shapes, constant_data), + ov::Exception, + testing::HasSubstr("OneHot depth value can't be negative")); } INSTANTIATE_TEST_SUITE_P( diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/shape_node.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/shape_node.cpp index 6611b760b406a5..9d8f63802bdeb8 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/shape_node.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/shape_node.cpp @@ -39,19 +39,6 @@ TEST(CpuShapeInfer, v3ShapeOf5DTest) { unit_test::cpu_test_shape_infer(shapeof.get(), static_input_shapes, static_output_shapes); } -TEST(CpuShapeInfer, ShapeOf0DTest) { - GTEST_SKIP() << "Skipping test, please check CVS-108946"; - auto data = std::make_shared(element::f32, PartialShape{}); - - auto shapeof = - std::make_shared(data); - - std::vector static_input_shapes = {StaticShape{}}, - static_output_shapes = {StaticShape{}}; - // TODO , can't pass implementation don't support 0D shape input - unit_test::cpu_test_shape_infer(shapeof.get(), static_input_shapes, static_output_shapes); -} - } // namespace cpu_shape_infer } // namespace unit_test } // namespace intel_cpu diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/strided_slice.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/strided_slice.cpp index 9a9fe23f00afbe..fa25928d60e4de 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/strided_slice.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/strided_slice.cpp @@ -101,29 +101,11 @@ INSTANTIATE_TEST_SUITE_P( make_tuple(unit_test::ShapeVector{{3, 2, 3}, {3}, {3}, {3}}, std::vector>{{1, 0, 0}, {0, 0, 0}, {1, 1, 1}}, std::vector{0, 1, 1}, std::vector(3, 1), StaticShape({2, 2, 3})), make_tuple(unit_test::ShapeVector{{3, 2, 3}, {3}, {3}, {3}}, std::vector>{{0, 1, 0}, {2, 0, 0}, {1, 1, 2}}, - std::vector{1, 0, 1}, std::vector{0, 1, 1}, StaticShape({2, 1, 2}))), - // TODO 108946, can't pass; - // make_tuple(unit_test::ShapeVector{{3, 2, 3}, {3}, {3}, {3}}, std::vector>{{0, 0, 0}, {1, 0, 0}, {1, 1, -1}}, - // std::vector{0, 1, 1}, std::vector{0, 1, 1}, StaticShape({1, 1, 3}))), + std::vector{1, 0, 1}, std::vector{0, 1, 1}, StaticShape({2, 1, 2})), + make_tuple(unit_test::ShapeVector{{3, 2, 3}, {3}, {3}, {3}}, std::vector>{{0, 0, 0}, {1, 0, 0}, {1, 1, -1}}, + std::vector{0, 1, 1}, 
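
Note on the OneHot change above: the depth is read from a runtime input and a negative value is now rejected instead of silently producing a bogus shape; the output shape is just the input shape with depth inserted at the one-hot axis. A minimal sketch (illustrative only):

    def one_hot_out_shape(input_shape, depth, axis):
        if depth < 0:
            raise ValueError("OneHot depth value can't be negative.")
        result = list(input_shape)
        result.insert(axis, depth)
        return result

    assert one_hot_out_shape([3, 2], 4, 1) == [3, 4, 2]
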
std::vector{0, 1, 1}, StaticShape({1, 2, 3}))), StridedSliceCpuShapeInferenceTest::getTestCaseName); -TEST(CpuShapeInfer, StridedSliceDefault_stride) { - GTEST_SKIP() << "Skipping test, please check CVS-108946"; - const auto mask = std::vector{0, 1, 0}; - - const auto data = std::make_shared(element::f32, ov::PartialShape::dynamic()); - // only supprot i32 - const auto begin = op::v0::Constant::create(element::i32, ov::Shape{3}, {0, 0, 0}); - const auto end = op::v0::Constant::create(element::i32, ov::Shape{3}, {1, 0, 2}); - const auto op = std::make_shared(data, begin, end, mask, mask); - - std::vector static_input_shapes = {{3, 2, 3}, {3}, {3}}; - std::vector static_output_shapes = {StaticShape{1, 2, 2}}; - // implementation depends on some output information of the op - op->set_output_type(0, element::i32, {-1, -1, -1}); - // TODO 108946,there is some issue in implementation, this test case can't pass - unit_test::cpu_test_shape_infer(op.get(), static_input_shapes, static_output_shapes); -} } // namespace cpu_shape_infer } // namespace unit_test } // namespace intel_cpu From 670fff062e886f8262c548f9c339ca184ce8b5f1 Mon Sep 17 00:00:00 2001 From: Mateusz Mikolajczyk Date: Tue, 10 Oct 2023 14:27:45 +0200 Subject: [PATCH 131/257] [Opset13][pyAPI] Python API BitwiseNot-13 (#20265) * Add pyAPI BitwiseNot-13 * Update src/bindings/python/src/openvino/runtime/opset13/ops.py Co-authored-by: Katarzyna Mitrus * Remove whitespace from ops.py --------- Co-authored-by: Katarzyna Mitrus --- .../src/openvino/runtime/opset13/__init__.py | 1 + .../src/openvino/runtime/opset13/ops.py | 21 ++++++++++++++++++- .../python/tests/test_graph/test_ops_unary.py | 20 +++++++++++++++++- 3 files changed, 40 insertions(+), 2 deletions(-) diff --git a/src/bindings/python/src/openvino/runtime/opset13/__init__.py b/src/bindings/python/src/openvino/runtime/opset13/__init__.py index bddc3e1aebbb40..4ea991bf77b9ec 100644 --- a/src/bindings/python/src/openvino/runtime/opset13/__init__.py +++ b/src/bindings/python/src/openvino/runtime/opset13/__init__.py @@ -19,6 +19,7 @@ from openvino.runtime.opset2.ops import batch_to_space from openvino.runtime.opset1.ops import binary_convolution from openvino.runtime.opset13.ops import bitwise_and +from openvino.runtime.opset13.ops import bitwise_not from openvino.runtime.opset13.ops import bitwise_or from openvino.runtime.opset13.ops import bitwise_xor from openvino.runtime.opset3.ops import broadcast diff --git a/src/bindings/python/src/openvino/runtime/opset13/ops.py b/src/bindings/python/src/openvino/runtime/opset13/ops.py index 4840beb69127bc..f864d7fccca0ea 100644 --- a/src/bindings/python/src/openvino/runtime/opset13/ops.py +++ b/src/bindings/python/src/openvino/runtime/opset13/ops.py @@ -8,7 +8,7 @@ from openvino.runtime import Node from openvino.runtime.opset_utils import _get_node_factory -from openvino.runtime.utils.decorators import binary_op, nameable_op +from openvino.runtime.utils.decorators import binary_op, nameable_op, unary_op from openvino.runtime.utils.types import ( NodeInput, as_nodes, @@ -43,6 +43,25 @@ def bitwise_and( ) +@unary_op +def bitwise_not( + node: NodeInput, + name: Optional[str] = None, +) -> Node: + """Return node which performs bitwise NOT operation on input node element-wise. + + For boolean input tensors, operator is equivalent to logical_not. + + :param node: Tensor of integer or boolean datatype providing data. + :param name: The optional new name for output node. + :return: The node performing bitwise NOT operation on the given tensor. 
+ """ + return _get_node_factory_opset13().create( + "BitwiseNot", + [node], + ) + + @binary_op def bitwise_or( left_node: NodeInput, diff --git a/src/bindings/python/tests/test_graph/test_ops_unary.py b/src/bindings/python/tests/test_graph/test_ops_unary.py index 745f9b6d9f7fd9..a01b800199e654 100644 --- a/src/bindings/python/tests/test_graph/test_ops_unary.py +++ b/src/bindings/python/tests/test_graph/test_ops_unary.py @@ -6,7 +6,7 @@ import pytest import openvino.runtime as ov_runtime -import openvino.runtime.opset10 as ov +import openvino.runtime.opset13 as ov from openvino.runtime import Shape, Type R_TOLERANCE = 1e-6 # global relative tolerance @@ -203,3 +203,21 @@ def test_gelu_tanh_operator_with_array(): assert model.get_type_name() == "Gelu" assert model.get_output_element_type(0) == ov_runtime.Type.f32 assert list(model.get_output_shape(0)) == [2, 2] + + +@pytest.mark.parametrize( + ("input_data", "dtype"), + [ + (np.array([True, False, True, False]), ov_runtime.Type.boolean), + (np.array([True]), ov_runtime.Type.boolean), + (np.array([False]), ov_runtime.Type.boolean), + (np.array([0, 3, 7, 256], dtype=np.uint16), ov_runtime.Type.u16), + (np.array([[-7, 0], [256, 1]], dtype=np.int32), ov_runtime.Type.i32), + ], +) +def test_bitwise_not(input_data, dtype): + node = ov.bitwise_not(input_data) + assert node.get_output_size() == 1 + assert node.get_type_name() == "BitwiseNot" + assert node.get_output_element_type(0) == dtype + assert list(node.get_output_shape(0)) == list(input_data.shape) From 3ebadc14d1a19a2b59034aad797491a90b723a35 Mon Sep 17 00:00:00 2001 From: Andrey Kashchikhin Date: Tue, 10 Oct 2023 14:10:46 +0100 Subject: [PATCH 132/257] [CI] [GHA] mac pipeline review leftovers; small cleanup of the Linux pipeline (#20353) * skip gna-related tests on mac; rm unused in pipelines * rm unused env var * use if instead of commenting out * add name * return pr trigger * rm pr trigger --- .github/workflows/linux.yml | 37 +++--- .github/workflows/mac.yml | 112 ++++++++---------- .../smoke_tests/test_speech_sample.py | 2 + 3 files changed, 73 insertions(+), 78 deletions(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index cc2b3eb9b9c227..c658f6a9f88b11 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -51,7 +51,6 @@ jobs: INSTALL_DIR: /__w/openvino/openvino/openvino_install INSTALL_TEST_DIR: /__w/openvino/openvino/tests_install BUILD_DIR: /__w/openvino/openvino/openvino_build - OPENVINO_CONTRIB_BUILD_DIR: /__w/openvino/openvino/openvino_contrib_build CCACHE_DIR: /mount/caches/ccache/ubuntu20_x86_64_Release CCACHE_TEMPDIR: /__w/openvino/openvino/ccache_temp CCACHE_MAXSIZE: 50G @@ -304,10 +303,10 @@ jobs: - name: Extract OpenVINO packages run: | pushd ${INSTALL_DIR} - tar -xzf openvino_package.tar.gz -C ${INSTALL_DIR} && rm openvino_package.tar.gz || exit 1 + tar -xzf openvino_package.tar.gz -C ${INSTALL_DIR} popd pushd ${INSTALL_TEST_DIR} - tar -xzf openvino_tests.tar.gz -C ${INSTALL_DIR} && rm openvino_tests.tar.gz || exit 1 + tar -xzf openvino_tests.tar.gz -C ${INSTALL_DIR} popd - name: Install 'actions/setup-python@v4' dependencies @@ -525,7 +524,7 @@ jobs: - name: Extract OpenVINO package run: | pushd ${INSTALL_DIR} - tar -xzf openvino_package.tar.gz -C ${INSTALL_DIR} && rm openvino_package.tar.gz + tar -xzf openvino_package.tar.gz -C ${INSTALL_DIR} popd - name: Install OpenVINO dependencies @@ -625,10 +624,10 @@ jobs: - name: Extract OpenVINO packages run: | pushd ${INSTALL_DIR} - tar -xzf openvino_package.tar.gz -C 
${INSTALL_DIR} && rm openvino_package.tar.gz || exit 1 + tar -xzf openvino_package.tar.gz -C ${INSTALL_DIR} popd pushd ${INSTALL_TEST_DIR} - tar -xzf openvino_tests.tar.gz -C ${INSTALL_DIR} && rm openvino_tests.tar.gz || exit 1 + tar -xzf openvino_tests.tar.gz -C ${INSTALL_DIR} popd - name: Install OpenVINO dependencies @@ -871,15 +870,23 @@ jobs: - name: Extract OpenVINO packages run: | pushd ${INSTALL_DIR} - tar -xzf openvino_package.tar.gz -C ${INSTALL_DIR} && rm openvino_package.tar.gz || exit 1 + tar -xzf openvino_package.tar.gz -C ${INSTALL_DIR} popd pushd ${INSTALL_TEST_DIR} - tar -xzf openvino_tests.tar.gz -C ${INSTALL_DIR} && rm openvino_tests.tar.gz || exit 1 + tar -xzf openvino_tests.tar.gz -C ${INSTALL_DIR} popd - name: Install OpenVINO Python wheels - run: python3 -m pip install openvino-dev[mxnet,caffe,kaldi,onnx,tensorflow2] --find-links=${INSTALL_DIR}/tools + run: | + # Install the core OV wheel + python3 -m pip install ${INSTALL_DIR}/tools/openvino-*.whl + + # Find and install OV dev wheel + pushd ${INSTALL_DIR}/tools + ov_dev_wheel_name=$(find . -name 'openvino_dev*.whl') + python3 -m pip install $ov_dev_wheel_name[mxnet,caffe,kaldi,onnx,tensorflow2] + popd # # Tests @@ -1059,10 +1066,10 @@ jobs: - name: Extract OpenVINO packages run: | pushd ${INSTALL_DIR} - tar -xzf openvino_package.tar.gz -C ${INSTALL_DIR} && rm openvino_package.tar.gz || exit 1 + tar -xzf openvino_package.tar.gz -C ${INSTALL_DIR} popd pushd ${INSTALL_TEST_DIR} - tar -xzf openvino_tests.tar.gz -C ${INSTALL_DIR} && rm openvino_tests.tar.gz || exit 1 + tar -xzf openvino_tests.tar.gz -C ${INSTALL_DIR} popd - name: Install OpenVINO dependencies @@ -1163,11 +1170,11 @@ jobs: - name: Extract OpenVINO packages run: | pushd ${INSTALL_DIR} - tar -xzf openvino_package.tar.gz -C ${INSTALL_DIR} && rm openvino_package.tar.gz || exit 1 + tar -xzf openvino_package.tar.gz -C ${INSTALL_DIR} popd pushd ${INSTALL_TEST_DIR} - tar -xzf openvino_tests.tar.gz -C ${INSTALL_DIR} && rm openvino_tests.tar.gz || exit 1 + tar -xzf openvino_tests.tar.gz -C ${INSTALL_DIR} popd - name: Install OpenVINO Python wheels @@ -1238,10 +1245,10 @@ jobs: - name: Extract OpenVINO packages run: | pushd ${INSTALL_DIR} - tar -xzf openvino_package.tar.gz -C ${INSTALL_DIR} && rm openvino_package.tar.gz || exit 1 + tar -xzf openvino_package.tar.gz -C ${INSTALL_DIR} popd pushd ${INSTALL_TEST_DIR} - tar -xzf openvino_tests.tar.gz -C ${INSTALL_DIR} && rm openvino_tests.tar.gz || exit 1 + tar -xzf openvino_tests.tar.gz -C ${INSTALL_DIR} popd - uses: actions/setup-python@v4 diff --git a/.github/workflows/mac.yml b/.github/workflows/mac.yml index 6a8fc2f63c1c7f..5097a6bb006b87 100644 --- a/.github/workflows/mac.yml +++ b/.github/workflows/mac.yml @@ -241,7 +241,6 @@ jobs: source ${INSTALL_DIR}/setupvars.sh python3 -m pytest -sv ${INSTALL_TEST_DIR}/smoke_tests \ - --ignore=${INSTALL_TEST_DIR}/smoke_tests/test_speech_sample.py \ --env_conf ${INSTALL_TEST_DIR}/smoke_tests/env_config.yml \ --junitxml=${INSTALL_TEST_DIR}/TEST-SamplesSmokeTests.xml @@ -393,7 +392,6 @@ jobs: --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-TemplateFuncTests.xml - name: Inference Engine C API tests - if: ${{ always() }} run: | source ${{ env.INSTALL_DIR }}/setupvars.sh ${{ env.INSTALL_TEST_DIR }}/InferenceEngineCAPITests --gtest_print_time=1 \ @@ -421,7 +419,6 @@ jobs: Python_Unit_Tests: name: Python unit tests needs: Build - if: ${{ always() }} defaults: run: shell: bash @@ -489,7 +486,6 @@ jobs: popd - name: nGraph and IE Python Bindings Tests - if: ${{ always() }} run: | 
python3 -m pytest -s ${{ env.INSTALL_TEST_DIR }}/pyngraph \ --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-Pyngraph.xml \ @@ -497,7 +493,6 @@ jobs: --ignore=${{ env.INSTALL_TEST_DIR }}/pyngraph/tests/test_onnx/test_backend.py - name: Python API 2.0 Tests - if: ${{ always() }} run: | # For python imports to import pybind_mock_frontend export PYTHONPATH=${{ env.INSTALL_TEST_DIR }}:$PYTHONPATH @@ -511,7 +506,6 @@ jobs: --ignore=${{ env.INSTALL_TEST_DIR }}/pyopenvino/tests/test_onnx/test_backend.py - name: MO Python API Tests - if: ${{ always() }} run: | python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt export PYTHONPATH=${{ env.LAYER_TESTS_INSTALL_DIR }}:$PYTHONPATH @@ -524,14 +518,12 @@ jobs: TEST_PRECISION: FP16 - name: Model Optimizer unit tests - if: ${{ always() }} run: | export PYTHONPATH=${{ env.INSTALL_TEST_DIR }}:$PYTHONPATH python3 -m pytest -s ${{ env.INSTALL_TEST_DIR }}/mo/unit_tests \ --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-ModelOptimizer.xml - name: PyTorch Layer Tests - if: ${{ always() }} run: | python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt export PYTHONPATH=${{ env.LAYER_TESTS_INSTALL_DIR }}:$PYTHONPATH @@ -542,7 +534,6 @@ jobs: TEST_PRECISION: FP16 - name: TensorFlow 1 Layer Tests - TF FE - if: ${{ always() }} run: | python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt @@ -553,7 +544,6 @@ jobs: TEST_DEVICE: CPU - name: TensorFlow 2 Layer Tests - TF FE - if: ${{ always() }} run: | python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt export PYTHONPATH=${{ env.OPENVINO_REPO }}/tools/mo/:${{ env.LAYER_TESTS_INSTALL_DIR }}:$PYTHONPATH @@ -563,7 +553,6 @@ jobs: TEST_DEVICE: CPU - name: TensorFlow 1 Layer Tests - Legacy FE - if: ${{ always() }} run: | python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt export PYTHONPATH=${{ env.OPENVINO_REPO }}/tools/mo/:${{ env.LAYER_TESTS_INSTALL_DIR }}:$PYTHONPATH @@ -571,7 +560,6 @@ jobs: python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/tensorflow_tests/test_tf_Roll.py --ir_version=10 --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-tf_Roll.xml - name: TensorFlow 2 Layer Tests - Legacy FE - if: ${{ always() }} run: | python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt export PYTHONPATH=${{ env.OPENVINO_REPO }}/tools/mo/:${{ env.LAYER_TESTS_INSTALL_DIR }}:$PYTHONPATH @@ -582,7 +570,6 @@ jobs: TEST_DEVICE: CPU - name: TensorFlow Lite Layer Tests - TFL FE - if: ${{ always() }} run: | python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt export PYTHONPATH=${{ env.OPENVINO_REPO }}/tools/mo/:${{ env.LAYER_TESTS_INSTALL_DIR }}:$PYTHONPATH @@ -592,7 +579,6 @@ jobs: TEST_DEVICE: CPU - name: Python Frontend tests - if: ${{ always() }} run: | python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt export PYTHONPATH=${{ env.OPENVINO_REPO }}/tools/mo/:${{ env.LAYER_TESTS_INSTALL_DIR }}:$PYTHONPATH @@ -609,52 +595,52 @@ jobs: path: ${{ env.INSTALL_TEST_DIR }}/TEST*.xml if-no-files-found: 'error' -# Ticket: 122001 -# CPU_Functional_Tests: -# needs: Build -# if: ${{ always() }} -# defaults: -# run: -# shell: bash -# runs-on: macos-12 -# env: -# INSTALL_DIR: ${{ github.workspace }}/install -# INSTALL_TEST_DIR: ${{ github.workspace }}/install/tests -# -# steps: -# - name: Create Directories -# run: mkdir -p ${{ env.INSTALL_DIR }} ${{ env.INSTALL_TEST_DIR }} -# -# - name: Download OpenVINO package -# uses: actions/download-artifact@v3 -# with: -# name: 
openvino_package -# path: ${{ env.INSTALL_DIR }} -# -# - name: Download OpenVINO tests package -# uses: actions/download-artifact@v3 -# with: -# name: openvino_tests -# path: ${{ env.INSTALL_TEST_DIR }} -# -# - name: Extract OpenVINO packages -# run: | -# pushd ${{ env.INSTALL_DIR }} -# tar -xzf openvino_package.tar.gz -C ${{ env.INSTALL_DIR }} && rm openvino_package.tar.gz || exit 1 -# popd -# pushd ${{ env.INSTALL_TEST_DIR }} -# tar -xzf openvino_tests.tar.gz -C ${{ env.INSTALL_DIR }} && rm openvino_tests.tar.gz || exit 1 -# popd -# -# - name: Intel CPU plugin func tests -# run: | -# source ${{ env.INSTALL_DIR }}/setupvars.sh -# ${{ env.INSTALL_TEST_DIR }}/ov_cpu_func_tests --gtest_print_time=1 --gtest_filter=*smoke* --gtest_output=xml:"${{ env.INSTALL_TEST_DIR }}/TEST-CPUFuncTests.xml" -# -# - name: Upload Test Results -# uses: actions/upload-artifact@v3 -# if: ${{ always() }} -# with: -# name: test-results-functional-cpu -# path: ${{ env.INSTALL_TEST_DIR }}/TEST*.xml -# if-no-files-found: 'error' + CPU_Functional_Tests: + name: CPU functional tests + if: ${{ 'false' }} # Ticket: 122001 + needs: Build + defaults: + run: + shell: bash + runs-on: macos-12 + env: + INSTALL_DIR: ${{ github.workspace }}/install + INSTALL_TEST_DIR: ${{ github.workspace }}/install/tests + + steps: + - name: Create Directories + run: mkdir -p ${{ env.INSTALL_DIR }} ${{ env.INSTALL_TEST_DIR }} + + - name: Download OpenVINO package + uses: actions/download-artifact@v3 + with: + name: openvino_package + path: ${{ env.INSTALL_DIR }} + + - name: Download OpenVINO tests package + uses: actions/download-artifact@v3 + with: + name: openvino_tests + path: ${{ env.INSTALL_TEST_DIR }} + + - name: Extract OpenVINO packages + run: | + pushd ${{ env.INSTALL_DIR }} + tar -xzf openvino_package.tar.gz -C ${{ env.INSTALL_DIR }} && rm openvino_package.tar.gz || exit 1 + popd + pushd ${{ env.INSTALL_TEST_DIR }} + tar -xzf openvino_tests.tar.gz -C ${{ env.INSTALL_DIR }} && rm openvino_tests.tar.gz || exit 1 + popd + + - name: Intel CPU plugin func tests + run: | + source ${{ env.INSTALL_DIR }}/setupvars.sh + ${{ env.INSTALL_TEST_DIR }}/ov_cpu_func_tests --gtest_print_time=1 --gtest_filter=*smoke* --gtest_output=xml:"${{ env.INSTALL_TEST_DIR }}/TEST-CPUFuncTests.xml" + + - name: Upload Test Results + uses: actions/upload-artifact@v3 + if: ${{ always() }} + with: + name: test-results-functional-cpu + path: ${{ env.INSTALL_TEST_DIR }}/TEST*.xml + if-no-files-found: 'error' diff --git a/tests/samples_tests/smoke_tests/test_speech_sample.py b/tests/samples_tests/smoke_tests/test_speech_sample.py index 46871e31ca0dac..ff3649662d2207 100644 --- a/tests/samples_tests/smoke_tests/test_speech_sample.py +++ b/tests/samples_tests/smoke_tests/test_speech_sample.py @@ -56,6 +56,7 @@ def setup_class(cls): super().setup_class() @pytest.mark.parametrize("param", test_data) + @pytest.mark.skipif(sys.platform == 'darwin', reason="GNA is not available on macOS") def test_speech_sample_nthreads(self, param): stdout = self._test(param).split('\n') @@ -64,6 +65,7 @@ def test_speech_sample_nthreads(self, param): assert avg_error <= self.threshold @pytest.mark.parametrize("param", new_format_test_data) + @pytest.mark.skipif(sys.platform == 'darwin', reason="GNA is not available on macOS") def test_speech_sample_new_format(self, param): stdout = self._test(param, complete_path=False).split('\n') From 4426486e6fe3918b0a5672f8d61d7b466728dd72 Mon Sep 17 00:00:00 2001 From: Nadezhda Ageeva Date: Tue, 10 Oct 2023 17:47:52 +0400 Subject: [PATCH 133/257] [nGraph 
Transformations] Add missing rtinfo copy in ConvertPrecision transformation (#20347) --- .../src/transformations/convert_precision.cpp | 1 + .../tests/utils/convert_precision.cpp | 107 +++++++++++++----- 2 files changed, 77 insertions(+), 31 deletions(-) diff --git a/src/common/transformations/src/transformations/convert_precision.cpp b/src/common/transformations/src/transformations/convert_precision.cpp index c5e2b13381c9f6..a1e9dd7a820e16 100644 --- a/src/common/transformations/src/transformations/convert_precision.cpp +++ b/src/common/transformations/src/transformations/convert_precision.cpp @@ -1172,6 +1172,7 @@ bool fuse_type_to_constant(const std::shared_ptr& node, new_const->validate_and_infer_types(); new_const->set_friendly_name(constant->get_friendly_name()); + ov::copy_runtime_info(constant, new_const); return true; } return false; diff --git a/src/common/transformations/tests/utils/convert_precision.cpp b/src/common/transformations/tests/utils/convert_precision.cpp index 07d6288112214a..6c0da965f9bfad 100644 --- a/src/common/transformations/tests/utils/convert_precision.cpp +++ b/src/common/transformations/tests/utils/convert_precision.cpp @@ -99,10 +99,11 @@ TEST(TransformationTests, ConvertPrecision_NMS4) { static const precisions_map precisions = {{element::i64, element::i32}, {element::f16, element::f32}}; + manager.register_pass(); manager.register_pass(precisions); manager.run_passes(f); } - + ASSERT_NO_THROW(check_rt_info(f)); ASSERT_FALSE(has_type(f)); ASSERT_FALSE(has_type(f)); } @@ -131,8 +132,10 @@ TEST(TransformationTests, ConvertPrecision_NMS5) { pass::Manager manager; static const precisions_map precisions = {{element::i64, element::i32}, {element::f32, element::f16}}; + manager.register_pass(); manager.register_pass(precisions); manager.run_passes(f); + ASSERT_NO_THROW(check_rt_info(f)); ASSERT_FALSE(has_type(f)); ASSERT_FALSE(has_type(f)); } @@ -154,8 +157,10 @@ TEST(TransformationTests, ConvertPrecision_MatrixNms) { pass::Manager manager; static const precisions_map precisions = {{element::i64, element::i32}, {element::f16, element::f32}}; + manager.register_pass(); manager.register_pass(precisions); manager.run_passes(f); + ASSERT_NO_THROW(check_rt_info(f)); ASSERT_FALSE(has_type(f)); ASSERT_FALSE(has_type(f)); } @@ -177,8 +182,10 @@ TEST(TransformationTests, ConvertPrecision_MulticlassNms) { pass::Manager manager; static const precisions_map precisions = {{element::i64, element::i32}, {element::f16, element::f32}}; + manager.register_pass(); manager.register_pass(precisions); manager.run_passes(f); + ASSERT_NO_THROW(check_rt_info(f)); ASSERT_FALSE(has_type(f)); ASSERT_FALSE(has_type(f)); } @@ -195,10 +202,11 @@ TEST(TransformationTests, ConvertPrecision_ShapeOf) { static const precisions_map precisions = {{element::i64, element::i32}, {element::f16, element::f32}}; + manager.register_pass(); manager.register_pass(precisions); manager.run_passes(f); } - + ASSERT_NO_THROW(check_rt_info(f)); ASSERT_FALSE(has_type(f)); ASSERT_FALSE(has_type(f)); } @@ -217,10 +225,11 @@ TEST(TransformationTests, ConvertPrecision_Range) { static const precisions_map precisions = {{element::i64, element::i32}, {element::f16, element::f32}}; + manager.register_pass(); manager.register_pass(precisions); manager.run_passes(f); } - + ASSERT_NO_THROW(check_rt_info(f)); ASSERT_FALSE(has_type(f)); ASSERT_FALSE(has_type(f)); } @@ -238,10 +247,11 @@ TEST(TransformationTests, ConvertPrecision_ConstantRelu) { static const precisions_map precisions = {{element::f16, element::f32}}; + 
manager.register_pass(); manager.register_pass(precisions); manager.run_passes(f); } - + ASSERT_NO_THROW(check_rt_info(f)); ASSERT_FALSE(has_type(f)); ASSERT_FALSE(has_type(f)); } @@ -258,10 +268,11 @@ TEST(TransformationTests, ConvertPrecision_Convert) { static const precisions_map precisions = {{element::i64, element::i32}, {element::f16, element::f32}}; + manager.register_pass(); manager.register_pass(precisions); manager.run_passes(f); } - + ASSERT_NO_THROW(check_rt_info(f)); ASSERT_FALSE(has_type(f)); ASSERT_FALSE(has_type(f)); } @@ -276,6 +287,7 @@ TEST(TransformationTests, ConvertPrecision_ConvertElimination) { f = std::make_shared(NodeVector{convert}, ParameterVector{input}); pass::Manager manager; + manager.register_pass(); manager.register_pass(precisions_map{{element::f16, element::f32}}); manager.run_passes(f); ASSERT_FALSE(has_type(f)); @@ -287,7 +299,7 @@ TEST(TransformationTests, ConvertPrecision_ConvertElimination) { f_ref = std::make_shared(NodeVector{relu}, ParameterVector{input}); } - + ASSERT_NO_THROW(check_rt_info(f)); auto res = compare_functions(f, f_ref); ASSERT_TRUE(res.first) << res.second; } @@ -305,10 +317,11 @@ TEST(TransformationTests, ConvertPrecision_TopK) { static const precisions_map precisions = {{element::i64, element::i32}, {element::f16, element::f32}}; + manager.register_pass(); manager.register_pass(precisions); manager.run_passes(f); } - + ASSERT_NO_THROW(check_rt_info(f)); ASSERT_FALSE(has_type(f)); ASSERT_FALSE(has_type(f)); } @@ -325,10 +338,11 @@ TEST(TransformationTests, ConvertPrecision_Unique10) { static const precisions_map precisions = {{element::i64, element::i32}, {element::f16, element::f32}}; + manager.register_pass(); manager.register_pass(precisions); manager.run_passes(model); } - + ASSERT_NO_THROW(check_rt_info(model)); ASSERT_EQ(model->outputs().size(), 4); EXPECT_EQ(model->outputs()[0].get_element_type(), element::f32); EXPECT_EQ(model->outputs()[1].get_element_type(), element::i32); @@ -353,10 +367,11 @@ TEST(TransformationTests, ConvertPrecision_NonZero) { static const precisions_map precisions = {{element::i64, element::i32}, {element::f16, element::f32}}; + manager.register_pass(); manager.register_pass(precisions); manager.run_passes(f); } - + ASSERT_NO_THROW(check_rt_info(f)); ASSERT_FALSE(has_type(f)); ASSERT_FALSE(has_type(f)); } @@ -374,10 +389,11 @@ TEST(TransformationTests, ConvertPrecision_Bucketize) { static const precisions_map precisions = {{element::i64, element::i32}, {element::f16, element::f32}}; + manager.register_pass(); manager.register_pass(precisions); manager.run_passes(f); } - + ASSERT_NO_THROW(check_rt_info(f)); ASSERT_FALSE(has_type(f)); ASSERT_FALSE(has_type(f)); } @@ -404,6 +420,7 @@ TEST(TransformationTests, ConvertPrecision_Roundings) { static const precisions_map precisions = {{element::i64, element::i32}, {element::f16, element::f32}}; + manager.register_pass(); manager.register_pass(precisions); manager.run_passes(f); @@ -413,7 +430,7 @@ TEST(TransformationTests, ConvertPrecision_Roundings) { ASSERT_EQ(casted_end->cast_vector(), std::vector({max_int32, max_int32, max_int32, max_int32})); } - + ASSERT_NO_THROW(check_rt_info(f)); ASSERT_FALSE(has_type(f)); ASSERT_FALSE(has_type(f)); } @@ -461,9 +478,11 @@ TEST(TransformationTests, ConvertPrecision_TIBody) { static const precisions_map precisions = {{element::i64, element::i32}, {element::f16, element::f32}}; + manager.register_pass(); manager.register_pass(precisions); manager.run_passes(f); + ASSERT_NO_THROW(check_rt_info(f)); 
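// Note (editorial sketch, not part of the original change): the InitNodeInfo registrations and
// check_rt_info() assertions added throughout these tests verify the one-line fix in
// fuse_type_to_constant(): runtime info seeded on the original Constant (e.g. fused names)
// must survive when the constant is re-created with a new element type. The pattern being
// protected looks roughly like this (names are illustrative, not quoted from the patch):
//
//     #include "openvino/core/graph_util.hpp"   // ov::copy_runtime_info
//     #include "openvino/op/constant.hpp"
//
//     std::shared_ptr<ov::op::v0::Constant> recreate_as_f32(
//             const std::shared_ptr<ov::op::v0::Constant>& constant) {
//         const auto data = constant->cast_vector<float>();
//         auto new_const = ov::op::v0::Constant::create(ov::element::f32,
//                                                        constant->get_shape(), data);
//         new_const->set_friendly_name(constant->get_friendly_name());
//         ov::copy_runtime_info(constant, new_const);  // the call this patch adds
//         return new_const;
//     }
//
// Without the copy_runtime_info() call the freshly created constant carries empty runtime
// info, and check_rt_info() fails.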
ASSERT_FALSE(has_type(f)); ASSERT_FALSE(has_type(f)); ASSERT_FALSE(has_type(tensor_iterator->get_body())); @@ -484,10 +503,11 @@ TEST(TransformationTests, ConvertPrecision_Equal) { static const precisions_map precisions = {{element::boolean, element::u8}, {element::f16, element::f32}}; + manager.register_pass(); manager.register_pass(precisions); manager.run_passes(f); } - + ASSERT_NO_THROW(check_rt_info(f)); ASSERT_FALSE(has_type(f)); ASSERT_FALSE(has_type(f)); ASSERT_TRUE(has_type(f)); @@ -506,10 +526,11 @@ TEST(TransformationTests, ConvertPrecision_NotEqual) { static const precisions_map precisions = {{element::boolean, element::u8}, {element::f16, element::f32}}; + manager.register_pass(); manager.register_pass(precisions); manager.run_passes(f); } - + ASSERT_NO_THROW(check_rt_info(f)); ASSERT_FALSE(has_type(f)); ASSERT_FALSE(has_type(f)); ASSERT_TRUE(has_type(f)); @@ -528,10 +549,11 @@ TEST(TransformationTests, ConvertPrecision_Greater) { static const precisions_map precisions = {{element::boolean, element::u8}, {element::f16, element::f32}}; + manager.register_pass(); manager.register_pass(precisions); manager.run_passes(f); } - + ASSERT_NO_THROW(check_rt_info(f)); ASSERT_FALSE(has_type(f)); ASSERT_FALSE(has_type(f)); ASSERT_TRUE(has_type(f)); @@ -550,10 +572,11 @@ TEST(TransformationTests, ConvertPrecision_GreaterEqual) { static const precisions_map precisions = {{element::boolean, element::u8}, {element::f16, element::f32}}; + manager.register_pass(); manager.register_pass(precisions); manager.run_passes(f); } - + ASSERT_NO_THROW(check_rt_info(f)); ASSERT_FALSE(has_type(f)); ASSERT_FALSE(has_type(f)); ASSERT_TRUE(has_type(f)); @@ -572,10 +595,11 @@ TEST(TransformationTests, ConvertPrecision_Less) { static const precisions_map precisions = {{element::boolean, element::u8}, {element::f16, element::f32}}; + manager.register_pass(); manager.register_pass(precisions); manager.run_passes(f); } - + ASSERT_NO_THROW(check_rt_info(f)); ASSERT_FALSE(has_type(f)); ASSERT_FALSE(has_type(f)); ASSERT_TRUE(has_type(f)); @@ -594,10 +618,11 @@ TEST(TransformationTests, ConvertPrecision_LessEqual) { static const precisions_map precisions = {{element::boolean, element::u8}, {element::f16, element::f32}}; + manager.register_pass(); manager.register_pass(precisions); manager.run_passes(f); } - + ASSERT_NO_THROW(check_rt_info(f)); ASSERT_FALSE(has_type(f)); ASSERT_FALSE(has_type(f)); ASSERT_TRUE(has_type(f)); @@ -613,10 +638,11 @@ TEST(TransformationTests, ConvertPrecision_LogicalAnd) { f = std::make_shared(OutputVector{node}, ParameterVector{input1, input2}); pass::Manager manager; + manager.register_pass(); manager.register_pass(precisions_map{{element::boolean, element::u8}}); manager.run_passes(f); } - + ASSERT_NO_THROW(check_rt_info(f)); ASSERT_FALSE(has_type(f)); ASSERT_TRUE(has_type(f)); } @@ -631,10 +657,11 @@ TEST(TransformationTests, ConvertPrecision_LogicalOr) { f = std::make_shared(OutputVector{node}, ParameterVector{input1, input2}); pass::Manager manager; + manager.register_pass(); manager.register_pass(precisions_map{{element::boolean, element::u8}}); manager.run_passes(f); } - + ASSERT_NO_THROW(check_rt_info(f)); ASSERT_FALSE(has_type(f)); ASSERT_TRUE(has_type(f)); } @@ -649,10 +676,11 @@ TEST(TransformationTests, ConvertPrecision_LogicalXor) { f = std::make_shared(OutputVector{node}, ParameterVector{input1, input2}); pass::Manager manager; + manager.register_pass(); manager.register_pass(precisions_map{{element::boolean, element::u8}}); manager.run_passes(f); } - + 
ASSERT_NO_THROW(check_rt_info(f)); ASSERT_FALSE(has_type(f)); ASSERT_TRUE(has_type(f)); } @@ -666,10 +694,11 @@ TEST(TransformationTests, ConvertPrecision_LogicalNot) { f = std::make_shared(OutputVector{node}, ParameterVector{input1}); pass::Manager manager; + manager.register_pass(); manager.register_pass(precisions_map{{element::boolean, element::u8}}); manager.run_passes(f); } - + ASSERT_NO_THROW(check_rt_info(f)); ASSERT_FALSE(has_type(f)); ASSERT_TRUE(has_type(f)); @@ -692,10 +721,11 @@ TEST(TransformationTests, ConvertPrecision_Select) { f = std::make_shared(OutputVector{select}, ParameterVector{input1}); pass::Manager manager; + manager.register_pass(); manager.register_pass(precisions_map{{element::boolean, element::u8}}); manager.run_passes(f); } - + ASSERT_NO_THROW(check_rt_info(f)); ASSERT_FALSE(has_type(f)); ASSERT_TRUE(has_type(f)); } @@ -710,11 +740,12 @@ TEST(TransformationTests, ConvertPrecision_TypeRelaxedWithSelect) { f = std::make_shared(OutputVector{select}, ParameterVector{input1}); pass::Manager manager; + manager.register_pass(); manager.register_pass(precisions_map{{element::boolean, element::i32}}); manager.register_pass(precisions_map{{element::i32, element::i64}}); manager.run_passes(f); } - + ASSERT_NO_THROW(check_rt_info(f)); ASSERT_FALSE(has_type(f)); ASSERT_FALSE(has_type(f)); ASSERT_TRUE(has_type(f)); @@ -732,10 +763,12 @@ TEST(TransformationTests, ConvertPrecision_TypeRelaxed) { f = std::make_shared(OutputVector{type_relaxed}, ParameterVector{input1}); pass::Manager manager; + manager.register_pass(); manager.register_pass(precisions_map{{element::boolean, element::i32}}); manager.register_pass(precisions_map{{element::i32, element::i64}}); manager.run_passes(f); + ASSERT_NO_THROW(check_rt_info(f)); ASSERT_FALSE(has_type(f)); ASSERT_FALSE(has_type(f)); ASSERT_TRUE(has_type(f)); @@ -758,10 +791,11 @@ TEST(TransformationTests, ConvertPrecision_Variables) { f = std::make_shared(NodeVector{mul}, ParameterVector{inp}); pass::Manager manager; + manager.register_pass(); manager.register_pass(precisions_map{{element::f16, element::f32}}); manager.run_passes(f); } - + ASSERT_NO_THROW(check_rt_info(f)); ASSERT_FALSE(has_type(f)); } @@ -789,12 +823,13 @@ TEST(TransformationTests, ConvertPrecision_skip_precision_sensitive) { pass::Manager manager; type_to_fuse_map empty_type_to_fuse_map = {}; bool keep_precision_sensitive_in_fp32 = true; + manager.register_pass(); manager.register_pass(precisions_map{{element::f32, element::f16}}, empty_type_to_fuse_map, keep_precision_sensitive_in_fp32); manager.run_passes(model); } - + ASSERT_NO_THROW(check_rt_info(model)); ASSERT_TRUE(has_type(model)); ASSERT_TRUE(interpolate->input_value(2).get_element_type() == element::Type_t::f32); } @@ -823,12 +858,13 @@ TEST(TransformationTests, ConvertPrecision_without_keep_precision_sensitive_in_f pass::Manager manager; type_to_fuse_map empty_type_to_fuse_map = {}; bool keep_precision_sensitive_in_fp32 = false; + manager.register_pass(); manager.register_pass(precisions_map{{element::f32, element::f16}}, empty_type_to_fuse_map, keep_precision_sensitive_in_fp32); manager.run_passes(model); } - + ASSERT_NO_THROW(check_rt_info(model)); ASSERT_FALSE(has_type(model)); ASSERT_TRUE(interpolate->input_value(2).get_element_type() == element::Type_t::f16); } @@ -847,6 +883,7 @@ TEST(TransformationTests, ConvertPrecision_check_marking_does_not_leak_in_trivia type_to_fuse_map empty_type_to_fuse_map = {}; bool keep_precision_sensitive_in_fp32 = true; + manager.register_pass(); 
manager.register_pass(precisions_map{{element::f32, element::f16}}, empty_type_to_fuse_map, keep_precision_sensitive_in_fp32); @@ -863,7 +900,8 @@ TEST(TransformationTests, ConvertPrecision_check_marking_does_not_leak_in_trivia const auto fc = FunctionsComparator::with_default() .enable(FunctionsComparator::PRECISIONS) - .enable(FunctionsComparator::CONST_VALUES); + .enable(FunctionsComparator::CONST_VALUES) + .enable(FunctionsComparator::CmpValues::RUNTIME_KEYS); const auto res = fc.compare(model, model_ref); ASSERT_TRUE(res.valid) << res.message; } @@ -888,6 +926,7 @@ TEST(TransformationTests, ConvertPrecision_whole_shape_subgraph_is_marked_1) { type_to_fuse_map empty_type_to_fuse_map = {}; bool keep_precision_sensitive_in_fp32 = true; + manager.register_pass(); manager.register_pass(precisions_map{{element::f32, element::f16}}, empty_type_to_fuse_map, keep_precision_sensitive_in_fp32); @@ -909,7 +948,8 @@ TEST(TransformationTests, ConvertPrecision_whole_shape_subgraph_is_marked_1) { const auto fc = FunctionsComparator::with_default() .enable(FunctionsComparator::PRECISIONS) - .enable(FunctionsComparator::CONST_VALUES); + .enable(FunctionsComparator::CONST_VALUES) + .enable(FunctionsComparator::CmpValues::RUNTIME_KEYS); const auto res = fc.compare(model, model_ref); ASSERT_TRUE(res.valid) << res.message; } @@ -943,6 +983,7 @@ TEST(TransformationTests, ConvertPrecision_whole_shape_subgraph_is_marked_2) { type_to_fuse_map empty_type_to_fuse_map = {}; bool keep_precision_sensitive_in_fp32 = true; + manager.register_pass(); manager.register_pass(precisions_map{{element::f32, element::f16}}, empty_type_to_fuse_map, keep_precision_sensitive_in_fp32); @@ -973,7 +1014,8 @@ TEST(TransformationTests, ConvertPrecision_whole_shape_subgraph_is_marked_2) { const auto fc = FunctionsComparator::with_default() .enable(FunctionsComparator::PRECISIONS) - .enable(FunctionsComparator::CONST_VALUES); + .enable(FunctionsComparator::CONST_VALUES) + .enable(FunctionsComparator::CmpValues::RUNTIME_KEYS); const auto res = fc.compare(model, model_ref); ASSERT_TRUE(res.valid) << res.message; } @@ -1024,6 +1066,7 @@ TEST(TransformationTests, ConvertPrecision_whole_shape_subgraph_is_marked_3) { type_to_fuse_map empty_type_to_fuse_map = {}; bool keep_precision_sensitive_in_fp32 = true; + manager.register_pass(); manager.register_pass(precisions_map{{element::f32, element::f16}}, empty_type_to_fuse_map, keep_precision_sensitive_in_fp32); @@ -1071,7 +1114,8 @@ TEST(TransformationTests, ConvertPrecision_whole_shape_subgraph_is_marked_3) { const auto fc = FunctionsComparator::with_default() .enable(FunctionsComparator::PRECISIONS) - .enable(FunctionsComparator::CONST_VALUES); + .enable(FunctionsComparator::CONST_VALUES) + .enable(FunctionsComparator::CmpValues::RUNTIME_KEYS); const auto res = fc.compare(model, model_ref); ASSERT_TRUE(res.valid) << res.message; } @@ -1102,12 +1146,13 @@ TEST(TransformationTests, ConvertCompressedToMixedPrecission_do_not_keep_in_fp32 pass::Manager manager; type_to_fuse_map empty_type_to_fuse_map = {}; bool keep_precision_sensitive_in_fp32 = false; // didn't keep in FP32 intentionally + manager.register_pass(); manager.register_pass(precisions_map{{element::f32, element::f16}}, empty_type_to_fuse_map, keep_precision_sensitive_in_fp32); manager.run_passes(model); } - + ASSERT_NO_THROW(check_rt_info(model)); ASSERT_FALSE(has_type(model)); ASSERT_TRUE(interpolate->input_value(2).get_element_type() == element::Type_t::f16); ASSERT_TRUE(interpolate->output(0).get_partial_shape() == 
PartialShape({1, 3, 287, 511})); From c385c131857c713a3762e3b6e1860f057ce64546 Mon Sep 17 00:00:00 2001 From: Ivan Novoselov Date: Tue, 10 Oct 2023 15:23:28 +0100 Subject: [PATCH 134/257] [Snippets] Delegate domain optimization to a LIR pass (#18991) --- .../snippets/include/snippets/generator.hpp | 14 +- .../include/snippets/lowered/linear_ir.hpp | 28 +++- .../snippets/lowered/pass/optimize_domain.hpp | 68 +++++++++ .../snippets/lowered/port_descriptor.hpp | 6 +- .../snippets/include/snippets/op/subgraph.hpp | 26 +--- .../shape_inference/shape_infer_instances.hpp | 5 + .../shape_inference/shape_inference.hpp | 12 ++ .../snippets/src/lowered/expression.cpp | 12 +- src/common/snippets/src/lowered/linear_ir.cpp | 65 +++++++- .../src/lowered/pass/insert_load_store.cpp | 2 +- .../src/lowered/pass/optimize_domain.cpp | 124 +++++++++++++++ src/common/snippets/src/op/subgraph.cpp | 79 +++++----- .../snippets/src/pass/mha_tokenization.cpp | 2 +- .../shape_inference/shape_infer_instances.cpp | 2 - .../src/shape_inference/shape_inference.cpp | 1 + src/common/snippets/src/utils.cpp | 4 +- .../include/lowered/pass/optimize_domain.hpp | 36 +++++ .../snippets/tests/include/lowering_utils.hpp | 3 +- .../{pass/lowered => lowered/pass}/loop.cpp | 0 .../src/lowered/pass/optimize_domain.cpp | 98 ++++++++++++ .../tests/src/pass/canonicalization.cpp | 1 - .../emitters/x64/jit_snippets_emitters.cpp | 13 +- .../emitters/x64/jit_snippets_emitters.hpp | 4 +- src/plugins/intel_cpu/src/nodes/subgraph.cpp | 144 ++---------------- src/plugins/intel_cpu/src/nodes/subgraph.h | 12 +- 25 files changed, 525 insertions(+), 236 deletions(-) create mode 100644 src/common/snippets/include/snippets/lowered/pass/optimize_domain.hpp create mode 100644 src/common/snippets/src/lowered/pass/optimize_domain.cpp create mode 100644 src/common/snippets/tests/include/lowered/pass/optimize_domain.hpp rename src/common/snippets/tests/src/{pass/lowered => lowered/pass}/loop.cpp (100%) create mode 100644 src/common/snippets/tests/src/lowered/pass/optimize_domain.cpp diff --git a/src/common/snippets/include/snippets/generator.hpp b/src/common/snippets/include/snippets/generator.hpp index 1651679df15a22..b0d30f602a5a88 100644 --- a/src/common/snippets/include/snippets/generator.hpp +++ b/src/common/snippets/include/snippets/generator.hpp @@ -12,6 +12,7 @@ #include "snippets/lowered/linear_ir.hpp" #include "snippets/lowered/pass/pass.hpp" +#include "snippets/shape_types.hpp" namespace ov { namespace snippets { @@ -23,17 +24,13 @@ namespace snippets { */ class Schedule { public: - /** - * @brief Default constructor - */ - Schedule() : work_size({}), is_flat(false), ptr(nullptr) {} + Schedule() = default; /** * @brief Default to create schedule out of specific parameters - * @param ws work size for kernel execution - * @param f can this kernel be linearided to 1D range + * @param wd work domain for kernel execution * @param p pointer to generated code */ - Schedule(const ov::PartialShape& ws, bool f, code p) : work_size(ws), is_flat(f), ptr(p) {} + Schedule(const VectorDims& wd, code p) : parallel_exec_domain(wd), ptr(p) {} /** * @brief Returns callable instanse of code pointer */ @@ -41,8 +38,7 @@ class Schedule { return reinterpret_cast(const_cast(ptr)); } - ov::PartialShape work_size {}; - bool is_flat {false}; + VectorDims parallel_exec_domain {}; code ptr {nullptr}; }; diff --git a/src/common/snippets/include/snippets/lowered/linear_ir.hpp b/src/common/snippets/include/snippets/lowered/linear_ir.hpp index fce7a80d9645b1..8b6a320e18cad7 
100644 --- a/src/common/snippets/include/snippets/lowered/linear_ir.hpp +++ b/src/common/snippets/include/snippets/lowered/linear_ir.hpp @@ -21,6 +21,14 @@ class Config { // True if we should check runtime info for nodes to call specific needed transformations bool m_need_fill_tail_register = false; size_t m_loop_depth = 1; + // Some Subgraphs doesn't support domain optimization due to operations' semantics + bool m_enable_domain_optimization = false; + // Minimal advised work amount for parallel execution. + // Set by a backend, typically equals to the number of threads available on the machine. + size_t m_min_parallel_work_amount = 8; + // Minimal advised work amount that should be processed during one call of the executable produced by Subgraph::generate + // Set by a backend, should be large enough to compensate for the kernel call overheads + size_t m_min_kernel_work_amount = 256; }; /* The control flow of Snippets is built on Linear Intermediate Representation (Linear IR). @@ -46,6 +54,7 @@ class LinearIR { const container& get_ops() const {return m_expressions; } const io_container& get_IO_ops() const {return m_io_expressions; } Config get_config() {return m_config; } + void set_loop_depth(size_t loop_depth) { m_config.m_loop_depth = loop_depth; } const ExpressionPtr& get_expr_by_node(const std::shared_ptr& n) const; @@ -103,9 +112,26 @@ class LinearIR { using LoopManagerPtr = std::shared_ptr; const LoopManagerPtr& get_loop_manager() const { return m_loop_manager; } - const std::shared_ptr& get_shape_infer_factory() { return m_shape_infer_factory; } + + IShapeInferSnippets::Result shape_infer(const std::vector& input_shapes); + const std::shared_ptr& get_shape_infer_instance() const {return m_shape_infer; } + VectorDims get_master_shape() const; private: + std::shared_ptr m_shape_infer = nullptr; + + class LIRShapeInfer : public ShapeInferSnippetsNode { + public: + using IOExpression = lowered::IOExpression; + explicit LIRShapeInfer(container& body_exprs, io_container& io_exprs); + Result infer(const std::vector& input_shapes) override; + + private: + const std::shared_ptr m_exprs = nullptr; + std::vector> m_input_exprs {}; + std::vector> m_output_exprs {}; + }; + static ov::NodeVector get_ordered_ops(const std::shared_ptr& model); // Default ctor - can be called only from Linear IR initialization as default way ExpressionPtr create_expression(const std::shared_ptr& n, const std::shared_ptr& model = nullptr); diff --git a/src/common/snippets/include/snippets/lowered/pass/optimize_domain.hpp b/src/common/snippets/include/snippets/lowered/pass/optimize_domain.hpp new file mode 100644 index 00000000000000..4ae68fc38cf37e --- /dev/null +++ b/src/common/snippets/include/snippets/lowered/pass/optimize_domain.hpp @@ -0,0 +1,68 @@ +// Copyright (C) 2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "snippets/lowered/pass/pass.hpp" +#include "snippets/shape_types.hpp" + +namespace ov { +namespace snippets { +namespace lowered { +namespace pass { + +/** + * @interface OptimizeDomain + * @brief Collapse input/output dimensions to balance parallel/per-thread load. The pass consists of two steps: + * The pass collapses two last dimensions while none of them is broadcasted and the resulting dim size + * 1. 
Dimension collapsing: If none of the last two dimensions are broadcasted, the last dimension's size + * is less than min_kernel_work_amount and the remaining dimensions provide work amount larger than + * min_parallel_work_amount (min_kernel_work_amount and min_parallel_work_amount specified in LireanIR config), + * then these two dimensions are collapsed into one and the collapsing attempt is repeated. + * 2. Tile rank increment: Tile rank is the rank of a tensor that processed during one call. If all except + * for the last two dimensions provide work_amount larger than min_parallel_work_amount, then tile_rank + * is incremented. This effectively increases kernel work_amount. + * Examples of graphs before and after this transformations are depicted below. + * @param tile_rank (taken by reference) rank of a tensor that processed during one call. Incremented if dimensions are collapsed. + * @ingroup snippets + */ +// Example: +// min_jit_work_amount = 256 +// min_parallel_work_amount = 4 +// +// Before OptimizeDomain | After OptimizeDomain +// ------------------------------------------------------------------- +// tile_rank = 1 | tile_rank = 2 +// | +// in1 in2 | in1 in2 +// [14, 15, 16, 17] [14, 15, 16, 17] | [1, 14, 15, 272] [1, 14, 15, 272] +// \ / | \ / +// Add | Add +// [14, 15, 16, 17] | [1, 14, 15, 272] +// | | | +// Result | Result +// [14, 15, 16, 17] | [1, 14, 15, 272] + +class OptimizeDomain : public snippets::lowered::pass::Pass { +public: + OPENVINO_RTTI("OptimizeDomain", "Pass") + explicit OptimizeDomain(size_t& tile_rank); + bool run(LinearIR& linear_ir) override; + +private: + size_t& m_tile_rank; + static size_t optimize(std::vector& input_shapes, + VectorDims& master_shape, + size_t total_work_amount, + size_t min_parallel_work_amount, + size_t min_jit_work_amount); + inline static bool can_increase_jit_work_amount(const VectorDims& master_shape, + size_t min_parallel_work_amount, + size_t total_work_amount); +}; + +} // namespace pass +} // namespace lowered +} // namespace snippets +} // namespace ov \ No newline at end of file diff --git a/src/common/snippets/include/snippets/lowered/port_descriptor.hpp b/src/common/snippets/include/snippets/lowered/port_descriptor.hpp index ce3e0c641f254a..551ef1907037ab 100644 --- a/src/common/snippets/include/snippets/lowered/port_descriptor.hpp +++ b/src/common/snippets/include/snippets/lowered/port_descriptor.hpp @@ -38,9 +38,9 @@ class PortDescriptor { PortDescriptor(VectorDims shape, VectorDims subtensor_shape, std::vector layout = {}); PortDescriptor() = default; - VectorDims get_shape() const {return m_tensor_shape;} - VectorDims get_subtensor() const {return m_subtensor_shape;} - std::vector get_layout() const {return m_layout;} + const VectorDims& get_shape() const {return m_tensor_shape;} + const VectorDims& get_subtensor() const {return m_subtensor_shape;} + const std::vector& get_layout() const {return m_layout;} size_t get_reg() const { return m_reg; } void set_shape(const VectorDims& tensor) { m_tensor_shape = tensor; } diff --git a/src/common/snippets/include/snippets/op/subgraph.hpp b/src/common/snippets/include/snippets/op/subgraph.hpp index a357c52266bda3..dab2de53e56d47 100644 --- a/src/common/snippets/include/snippets/op/subgraph.hpp +++ b/src/common/snippets/include/snippets/op/subgraph.hpp @@ -124,6 +124,8 @@ class Subgraph : public ov::op::util::SubGraphOp { void set_generator(std::shared_ptr generator); void set_tile_rank(size_t newRank) {tileRank = newRank;} void set_virtual_port_count(const size_t count); + 
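// Note (editorial sketch, not part of the original change): the two setters declared just
// below are how a backend feeds its threading characteristics into the domain optimization
// configured later in convert_body_to_linear_ir(). A hypothetical plugin-side setup, with
// num_threads standing in for whatever thread-count query the backend uses:
//
//     void configure_subgraph(const std::shared_ptr<ov::snippets::op::Subgraph>& subgraph,
//                             size_t num_threads) {
//         // Each kernel call should process at least this many elements to amortize call overhead.
//         subgraph->set_min_jit_work_amount(256);
//         // Keep enough outer iterations to occupy all available threads.
//         subgraph->set_min_parallel_work_amount(num_threads);
//     }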
void set_min_jit_work_amount(const size_t jit_work_amount); + void set_min_parallel_work_amount(const size_t parallel_work_amount); void print() const; @@ -178,34 +180,22 @@ class Subgraph : public ov::op::util::SubGraphOp { // True if body has operations that don't support plugin-side domain optimizations // (e.g. Transpose, Softmax, MatMul in general doesn't support dimensions collapsing) bool m_has_domain_sensitive_ops = false; + // Minimal advised work amount for parallel execution. + // Set by a backend, typically equals to the number of threads available on the machine. + size_t m_min_parallel_work_amount = 8; + // Minimal advised work amount every JIT kernel should process during one execution call + // Set by a backend, should be large enough to compensate for the kernel call overheads + size_t m_min_jit_work_amount = 256; } config; - class ShapeInferSnippetsNode : public IShapeInferSnippets { - public: - const Result& get_last_result() {return m_last_result; } - protected: - Result m_last_result{{}, ShapeInferStatus::success}; - }; - std::shared_ptr m_shape_infer = nullptr; class NgraphShapeInfer : public ShapeInferSnippetsNode { std::shared_ptr m_ngraph_body; - ParameterVector m_parameters; - ResultVector m_results; public: explicit NgraphShapeInfer(const std::shared_ptr& body); Result infer(const std::vector& input_shapes) override; }; - class LIRShapeInfer : public ShapeInferSnippetsNode { - using IOExpression = lowered::IOExpression; - std::shared_ptr m_lir_body; - std::vector> m_param_exprs; - std::vector> m_result_exprs; - public: - explicit LIRShapeInfer(const std::shared_ptr& body); - Result infer(const std::vector& input_shapes) override; - }; }; static inline auto create_body(const std::string& name, const ov::ResultVector& results, const ov::ParameterVector& parameters) -> diff --git a/src/common/snippets/include/snippets/shape_inference/shape_infer_instances.hpp b/src/common/snippets/include/snippets/shape_inference/shape_infer_instances.hpp index 0ca0668111b3cf..f673f8ff997558 100644 --- a/src/common/snippets/include/snippets/shape_inference/shape_infer_instances.hpp +++ b/src/common/snippets/include/snippets/shape_inference/shape_infer_instances.hpp @@ -8,6 +8,11 @@ namespace ov { namespace snippets { + +bool broadcast_merge_into(VectorDims& dst, const VectorDims& src, const ov::op::AutoBroadcastSpec& autob = ov::op::AutoBroadcastType::NUMPY); + +bool merge_into(VectorDims& dst, const VectorDims& src); + class NumpyBroadcastShapeInfer : public IShapeInferSnippets { public: Result infer(const std::vector& input_shapes) override; diff --git a/src/common/snippets/include/snippets/shape_inference/shape_inference.hpp b/src/common/snippets/include/snippets/shape_inference/shape_inference.hpp index ad2145d9f59bfc..af7d29f8e3f3c3 100644 --- a/src/common/snippets/include/snippets/shape_inference/shape_inference.hpp +++ b/src/common/snippets/include/snippets/shape_inference/shape_inference.hpp @@ -37,6 +37,18 @@ class IShapeInferSnippets { virtual Result infer(const std::vector& input_shapes) = 0; }; +/** + * Shape inference class for Subgraph node (both nGraph and Linear IRs). + * It stores the result of the last shape inference, so it can be reused in optimization pipeline. + * + */ +class ShapeInferSnippetsNode : public IShapeInferSnippets { +public: + const Result& get_last_result() {return m_last_result; } +protected: + Result m_last_result{{}, ShapeInferStatus::success}; +}; + class IShapeInferSnippetsFactory { public: // Helper type to define specific Makers map values. 
diff --git a/src/common/snippets/src/lowered/expression.cpp b/src/common/snippets/src/lowered/expression.cpp index 53ab34049bdceb..e5ebfee952ace5 100644 --- a/src/common/snippets/src/lowered/expression.cpp +++ b/src/common/snippets/src/lowered/expression.cpp @@ -112,6 +112,7 @@ ExpressionPort Expression::get_output_port(size_t i) { } void Expression::updateShapes() { + OPENVINO_ASSERT(m_shapeInference, "Attempt to UpdateShapes without initialized shapeInference"); IShapeInferSnippets::Result result; try { std::vector input_shapes; @@ -121,11 +122,10 @@ void Expression::updateShapes() { input_shapes.reserve(in_connectors.size()); for (size_t i = 0; i < in_connectors.size(); i++) { - const auto& src_port = in_connectors[i]->get_source(); - const auto i_shape = src_port.get_descriptor_ptr()->get_shape(); - // todo: do we really need to store the same shape twice in parent's out_port_desc and this in_port_descs - in_descriptors[i]->set_shape(i_shape); - input_shapes.emplace_back(i_shape); + const auto& src_port_desc = in_connectors[i]->get_source().get_descriptor_ptr(); + in_descriptors[i]->set_shape(src_port_desc->get_shape()); + // Note that input_shape is a reference, so we should always bind it to an object with a longer lifetime + input_shapes.emplace_back(in_descriptors[i]->get_shape()); } result = m_shapeInference->infer(input_shapes); @@ -133,6 +133,8 @@ void Expression::updateShapes() { catch (const std::exception& exp) { OPENVINO_THROW("Shape inference of " + (get_node()->get_friendly_name()) + " failed: " + exp.what()); } + OPENVINO_ASSERT(result.status == ShapeInferStatus::success, + "Shape inference of " + (get_node()->get_friendly_name()) + " didn't return success status"); const auto& out_descriptors = get_output_port_descriptors(); OPENVINO_ASSERT(result.dims.size() == out_descriptors.size(), "shapeInference call returned invalid number of output shapes"); for (size_t i = 0; i < out_descriptors.size(); i++) diff --git a/src/common/snippets/src/lowered/linear_ir.cpp b/src/common/snippets/src/lowered/linear_ir.cpp index cc5e5c2fce621e..cc0ace467dd6e3 100644 --- a/src/common/snippets/src/lowered/linear_ir.cpp +++ b/src/common/snippets/src/lowered/linear_ir.cpp @@ -9,10 +9,10 @@ #include "snippets/lowered/loop_manager.hpp" #include "snippets/lowered/expression_factory.hpp" #include "snippets/op/serialization_node.hpp" -#include "snippets/utils.hpp" #include "openvino/core/graph_util.hpp" #include "openvino/core/type.hpp" +#include "snippets/utils.hpp" namespace ov { namespace snippets { @@ -41,6 +41,7 @@ LinearIR::LinearIR(const std::shared_ptr& model, const std::shared_pt last_param = it; } } + m_shape_infer = std::make_shared(m_expressions, m_io_expressions); } ExpressionPtr LinearIR::create_expression(const std::shared_ptr& n, const std::shared_ptr& model) { @@ -296,6 +297,68 @@ LinearIR::constExprReverseIt LinearIR::find_after(LinearIR::constExprReverseIt i return find(it, crend(), target); } +IShapeInferSnippets::Result LinearIR::shape_infer(const std::vector& input_shapes) { + OPENVINO_ASSERT(m_shape_infer, "Attempt to call shape_infer when the shapeInfer instance was not created"); + return m_shape_infer->infer(input_shapes); +} + +VectorDims LinearIR::get_master_shape() const { + VectorDims master_shape{}; + // Note: inputs and outputs must be broadcastable, so it's enough to broadcast-merge only outputs + std::vector> out_exprs; + for (const auto& ioe : m_io_expressions) { + if (ioe->get_type() == IOExpression::io_type::OUTPUT) + out_exprs.push_back(ioe); + } + // Note: 
Snippets would benefit from a more generic master_shape calculation approach. + // It will be implemented in the scope of ROI propagation activity (ticket 120505) + const auto& result_parent = out_exprs[0]->get_input_port_connector(0)->get_source().get_expr(); + if (!m_config.m_enable_domain_optimization && out_exprs.size() == 1 && + ov::is_type(result_parent->get_node())) { + master_shape = utils::get_planar_vdims(out_exprs[0]->get_input_port_descriptor(0)); + } else { + for (const auto& oe : out_exprs) { + const auto& port_desc = oe->get_input_port_descriptor(0); + OPENVINO_ASSERT(ov::snippets::broadcast_merge_into(master_shape, port_desc->get_shape()), + "Failed to merge input shapes in OptimizeDomain pass"); + } + } + return master_shape; +} + +LinearIR::LIRShapeInfer::LIRShapeInfer(container& body_exprs, io_container& io_exprs) + : ShapeInferSnippetsNode(), + m_exprs{std::make_shared(body_exprs)} { + // Note that here we rely on the assumption that io_expressions can't be changed after the LIR was created + for (const auto& expr : io_exprs) { + if (expr->get_type() == IOExpression::io_type::INPUT) { + m_input_exprs.push_back(expr); + } else if (expr->get_type() == IOExpression::io_type::OUTPUT) { + m_output_exprs.emplace_back(expr); + } else { + OPENVINO_THROW("Invalid io expression type detected"); + } + } +} + +IShapeInferSnippets::Result LinearIR::LIRShapeInfer::infer(const std::vector& input_shapes) { + OPENVINO_ASSERT(m_input_exprs.size() == input_shapes.size(), "Got invalid number of input shapes in LIR ShapeInfer"); + for (size_t i = 0; i < m_input_exprs.size(); i++) + m_input_exprs[i]->get_output_port_descriptor(0)->set_shape(input_shapes[i]); + + for (const auto& expr : *m_exprs) { + if (expr->needShapeInfer()) + expr->updateShapes(); + } + + std::vector outputDims; + outputDims.reserve(m_output_exprs.size()); + for (const auto& expr : m_output_exprs) { + outputDims.push_back(expr->get_input_port_descriptor(0)->get_shape()); + } + m_last_result = {outputDims, ShapeInferStatus::success}; + return m_last_result; +} }// namespace lowered }// namespace snippets diff --git a/src/common/snippets/src/lowered/pass/insert_load_store.cpp b/src/common/snippets/src/lowered/pass/insert_load_store.cpp index 40f802a649a9e9..47fa93f699354b 100644 --- a/src/common/snippets/src/lowered/pass/insert_load_store.cpp +++ b/src/common/snippets/src/lowered/pass/insert_load_store.cpp @@ -24,7 +24,7 @@ size_t InsertLoadStore::get_count(const PortDescriptorPtr& port_desc) const { const auto shape = port_desc->get_shape(); // Find last dimension by layout const auto last_dim_idx = std::find(layout.begin(), layout.end(), layout.size() - 1); - OPENVINO_ASSERT(last_dim_idx != layout.end(), "Load/Store expression have incorrect layout"); + OPENVINO_ASSERT(last_dim_idx != layout.end() && *last_dim_idx < shape.size(), "Load/Store expression have incorrect layout"); const auto dim = shape[*last_dim_idx]; return dim == 1 ? 
1 : m_vector_size; } diff --git a/src/common/snippets/src/lowered/pass/optimize_domain.cpp b/src/common/snippets/src/lowered/pass/optimize_domain.cpp new file mode 100644 index 00000000000000..09061e63250464 --- /dev/null +++ b/src/common/snippets/src/lowered/pass/optimize_domain.cpp @@ -0,0 +1,124 @@ +// Copyright (C) 2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "snippets/lowered/pass/optimize_domain.hpp" + +#include "snippets/itt.hpp" +#include "snippets/lowered/linear_ir.hpp" +#include "snippets/snippets_isa.hpp" +#include "snippets/shape_inference/shape_inference.hpp" + + +namespace ov { +namespace snippets { +namespace lowered { +namespace pass { + +OptimizeDomain::OptimizeDomain(size_t& tile_rank) : Pass(), m_tile_rank(tile_rank) { +} +size_t OptimizeDomain::optimize(std::vector& input_shapes, + VectorDims& master_shape, + const size_t total_work_amount, + const size_t min_parallel_work_amount, + const size_t min_jit_work_amount) { + if (master_shape.size() <= 2) + return false; + + auto CollapseLastDim = [](VectorDims& dims) { + OPENVINO_ASSERT(dims.size() >= 2, "CollapseLastDim can't process shape with less than two dims"); + dims[dims.size() - 1] *= dims[dims.size() - 2]; + for (auto i = dims.size() - 2; i > 0; i--) + dims[i] = dims[i - 1]; + dims[0] = 1; + }; + // Check that neither of the two last dims is broadcasted, so they can be collapsed + auto LastDimsNotBroadcasted = [] (const std::vector& input_shapes, const VectorDims& master_shape) { + const auto master_last = *master_shape.rbegin(); + const auto master_prelast = *++master_shape.rbegin(); + return std::all_of(input_shapes.begin(), input_shapes.end(), + [=](const VectorDims& s) { + return *s.rbegin() == master_last && + *++s.rbegin() == master_prelast; + }); + }; + + size_t jit_work_amount = master_shape.back(); + size_t num_dims_collapsed = 0; + while (jit_work_amount < min_jit_work_amount && + can_increase_jit_work_amount(master_shape, min_parallel_work_amount, total_work_amount) && + LastDimsNotBroadcasted(input_shapes, master_shape) && + num_dims_collapsed < master_shape.size() - 1) { + for (auto &s : input_shapes) + CollapseLastDim(s); + + CollapseLastDim(master_shape); + num_dims_collapsed++; + + jit_work_amount = master_shape.back(); + } + return num_dims_collapsed; +} + +inline bool OptimizeDomain::can_increase_jit_work_amount(const VectorDims& master_shape, + const size_t min_parallel_work_amount, + const size_t total_work_amount) { + return master_shape.size() > 2 && + master_shape[master_shape.size() - 1] * master_shape[master_shape.size() - 2] * + min_parallel_work_amount <= total_work_amount; +} +bool OptimizeDomain::run(snippets::lowered::LinearIR& linear_ir) { + OV_ITT_SCOPED_TASK(ov::pass::itt::domains::SnippetsTransform, "Snippets::OptimizeDomain") + const auto& config = linear_ir.get_config(); + if (linear_ir.empty()) + return false; + m_tile_rank = 1; + if (!config.m_enable_domain_optimization) { + // Note: this is a special case: if optimization is not allowed, always assume 2D tile + m_tile_rank = 2; + return false; + } + OPENVINO_ASSERT(config.m_min_parallel_work_amount != 0, "OptimizeDomain: Min parallel work amount can't equal to zero"); + std::vector> input_exprs; + std::vector input_shapes; + VectorDims master_shape = linear_ir.get_master_shape(); + for (const auto& expr : linear_ir.get_IO_ops()) { + if (expr->get_type() == snippets::lowered::IOExpression::io_type::INPUT) { + input_exprs.push_back(expr); + const auto& shape = 
expr->get_output_port_descriptor(0)->get_shape(); + OPENVINO_ASSERT(std::none_of(shape.begin(), shape.end(), + [](size_t d) {return d == snippets::IShapeInferSnippets::DYNAMIC_DIMENSION; }), + "OptimizeDomain pass does not support dynamic shapes"); + OPENVINO_ASSERT(ov::snippets::broadcast_merge_into(master_shape, shape), + "Failed to merge input shapes in OptimizeDomain pass"); + input_shapes.emplace_back(shape); + } + } + const auto total_work_amount = std::accumulate(master_shape.begin(), + master_shape.end(), + (size_t)1, + std::multiplies()); + const auto num_dims_collapsed = optimize(input_shapes, + master_shape, + total_work_amount, + config.m_min_parallel_work_amount, + config.m_min_kernel_work_amount); + if (num_dims_collapsed > 0) { + std::vector infer_shapes; + infer_shapes.reserve(input_shapes.size()); + for (const auto& is : input_shapes) + infer_shapes.emplace_back(is); + // Need to propagate updated shapes through LIR + linear_ir.shape_infer(infer_shapes); + } + // We can still try to increment tile rank after dimension collapsing + if (can_increase_jit_work_amount(master_shape, config.m_min_parallel_work_amount, total_work_amount) && + num_dims_collapsed != master_shape.size() - 1) + m_tile_rank++; + return num_dims_collapsed > 0; +} + +} // namespace pass +} // namespace lowered +} // namespace snippets +} // namespace ov \ No newline at end of file diff --git a/src/common/snippets/src/op/subgraph.cpp b/src/common/snippets/src/op/subgraph.cpp index 5e67d6cad9e31c..5de4dae47a95a4 100644 --- a/src/common/snippets/src/op/subgraph.cpp +++ b/src/common/snippets/src/op/subgraph.cpp @@ -40,6 +40,7 @@ #include "snippets/lowered/pass/identify_buffers.hpp" #include "snippets/lowered/pass/validate_loops.hpp" #include "snippets/lowered/pass/insert_loops.hpp" +#include "snippets/lowered/pass/optimize_domain.hpp" #include "transformations/utils/utils.hpp" @@ -67,6 +68,14 @@ void Subgraph::set_virtual_port_count(const size_t count) { m_virtual_port_count = count; } +void Subgraph::set_min_jit_work_amount(const size_t jit_work_amount) { + config.m_min_jit_work_amount = jit_work_amount; +} + +void Subgraph::set_min_parallel_work_amount(const size_t parallel_work_amount) { + config.m_min_parallel_work_amount = parallel_work_amount; +} + auto Subgraph::is_domain_sensitive_op(const std::shared_ptr& op) -> bool { return ov::is_type(op) || ov::is_type(op) || @@ -151,6 +160,7 @@ Subgraph::Subgraph(const OutputVector& args, const std::shared_ptr& b for (size_t i = 0; i < body->get_output_size(); ++i) m_output_descriptions[0].push_back(std::make_shared(i, i)); m_transformations_allowed = false; + m_shape_infer = std::make_shared(body); } Subgraph::Subgraph(const NodeVector& args, const std::shared_ptr& body) @@ -470,70 +480,38 @@ bool Subgraph::check_broadcast(const std::shared_ptr& node) noex } IShapeInferSnippets::Result Subgraph::shape_infer(const std::vector& input_shapes) { - if (!m_shape_infer && !m_linear_ir) { - OPENVINO_ASSERT(body_ptr(), "Can't create shape infer for Subgraph with an empty body"); - m_shape_infer = std::make_shared(body_ptr()); - } else if (!std::dynamic_pointer_cast(m_shape_infer) && m_linear_ir) { - m_shape_infer = std::make_shared(m_linear_ir); - } + OPENVINO_ASSERT(m_shape_infer, "Attempt to call shape_infer when it's not initialized"); return m_shape_infer->infer(input_shapes); } Subgraph::NgraphShapeInfer::NgraphShapeInfer(const std::shared_ptr& body) : - m_ngraph_body(body), m_parameters(body->get_parameters()), m_results(body->get_results()) { + 
m_ngraph_body(body) { + OPENVINO_ASSERT(m_ngraph_body, "Can't initialize shape infer with empty body"); } IShapeInferSnippets::Result Subgraph::NgraphShapeInfer::infer(const std::vector& input_shapes) { - OPENVINO_ASSERT(m_parameters.size() == input_shapes.size(), "Got invalid number of input shapes to reshape subgraph body"); - for (size_t i = 0; i < m_parameters.size(); ++i) - m_parameters[i]->set_partial_shape(utils::vdims_to_pshape(input_shapes[i].get())); + const ParameterVector& parameters = m_ngraph_body->get_parameters(); + const ResultVector& results = m_ngraph_body->get_results(); + OPENVINO_ASSERT(parameters.size() == input_shapes.size(), "Got invalid number of input shapes to reshape subgraph body"); + for (size_t i = 0; i < parameters.size(); ++i) + parameters[i]->set_partial_shape(utils::vdims_to_pshape(input_shapes[i].get())); m_ngraph_body->validate_nodes_and_infer_types(); std::vector outputDims; - for (const auto& res : m_results) + for (const auto& res : results) outputDims.emplace_back(utils::pshape_to_vdims(res->get_input_partial_shape(0))); m_last_result = {outputDims, ShapeInferStatus::success}; return m_last_result; } -Subgraph::LIRShapeInfer::LIRShapeInfer(const std::shared_ptr& body) : - m_lir_body(body) { - for (const auto& io_expr : m_lir_body->get_IO_ops()) { - switch (io_expr->get_type()) { - case IOExpression::io_type::INPUT : m_param_exprs.push_back(io_expr); break; - case IOExpression::io_type::OUTPUT : m_result_exprs.push_back(io_expr); break; - default : OPENVINO_THROW("Undefined io expression type"); - } - } -} - -IShapeInferSnippets::Result -Subgraph::LIRShapeInfer::infer(const std::vector& input_shapes) { - OPENVINO_ASSERT(m_param_exprs.size() == input_shapes.size(), "Got invalid number of input shapes in LIR ShapeInfer"); - // todo: check that order of param_exprs is always the same as that of input_shapes - // if not use io_expr index to sort in constructor - - for (size_t i = 0; i < m_param_exprs.size(); ++i) { - m_param_exprs[i]->get_output_port_descriptor(0)->set_shape(input_shapes[i]); - } - for (const auto& expr : *m_lir_body) { - if (expr->needShapeInfer()) - expr->updateShapes(); - } - std::vector outputDims; - outputDims.reserve(m_result_exprs.size()); - for (const auto& r : m_result_exprs) { - outputDims.push_back(r->get_input_port_descriptor(0)->get_shape()); - } - m_last_result = {outputDims, ShapeInferStatus::success}; - return m_last_result; -} - std::shared_ptr Subgraph::convert_body_to_linear_ir(const std::shared_ptr& shape_infer_factory) const { lowered::Config lowering_config; lowering_config.m_save_expressions = config.m_has_domain_sensitive_ops; lowering_config.m_need_fill_tail_register = config.m_has_domain_sensitive_ops; lowering_config.m_loop_depth = tileRank; + lowering_config.m_enable_domain_optimization = !config.m_has_domain_sensitive_ops; + lowering_config.m_min_parallel_work_amount = config.m_min_parallel_work_amount; + lowering_config.m_min_kernel_work_amount = config.m_min_jit_work_amount; return std::make_shared(body_ptr(), shape_infer_factory, lowering_config); } @@ -650,6 +628,11 @@ void Subgraph::control_flow_transformations(lowered::LinearIR& linear_ir, INTERNAL_OP_SCOPE(Subgraph); OV_ITT_SCOPED_TASK(ov::pass::itt::domains::SnippetsTransform, "Snippets::op::control_flow_transformations") + // Domain optimization must be the first pass, because all other transformations may depend on PortDescriptor shapes + size_t loop_depth = 1; + lowered::pass::OptimizeDomain(loop_depth).run(linear_ir); + 
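// Note (editorial, not part of the original change): loop_depth here is the tile rank, i.e.
// the number of innermost master-shape dimensions processed inside a single kernel call.
// OptimizeDomain leaves it at 1 or raises it to 2; the set_loop_depth() call just below
// propagates it into the LIR config so that loop insertion and buffer_allocation_rank use
// the same value. Later, in generate(), the same depth determines the parallel execution
// domain handed to the backend: the last loop_depth dims of the master shape are set to 1,
// e.g. (illustrative values)
//
//     master shape {1, 14, 15, 272}, loop_depth = 2  ->  parallel domain {1, 14, 1, 1}
//
// so the backend parallelizes over the remaining 14 outer iterations while each kernel call
// handles one 15 x 272 tile.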
linear_ir.set_loop_depth(loop_depth); + const size_t vector_size = get_generator()->get_target_machine()->get_lanes(); const int32_t buffer_allocation_rank = static_cast(linear_ir.get_config().m_loop_depth); @@ -730,7 +713,13 @@ snippets::Schedule Subgraph::generate(const std::vectorgenerate(linear_ir, linear_ir.get_config(), compile_params); const auto ptr = lowering_result.binary_code; - return {master_shape, false /*canBeLinearized*/, ptr}; + + VectorDims parallel_exec_domain = linear_ir.get_master_shape(); + const size_t loop_depth = linear_ir.get_config().m_loop_depth; + for (size_t i = 0; i < loop_depth; i++) + parallel_exec_domain[parallel_exec_domain.size() - 1 - i] = 1; + + return {parallel_exec_domain, ptr}; } void Subgraph::print() const { diff --git a/src/common/snippets/src/pass/mha_tokenization.cpp b/src/common/snippets/src/pass/mha_tokenization.cpp index 417c7ff043718b..e9f939e8d72d75 100644 --- a/src/common/snippets/src/pass/mha_tokenization.cpp +++ b/src/common/snippets/src/pass/mha_tokenization.cpp @@ -175,7 +175,7 @@ auto update_intermediate_supported_ops(std::shared_ptr& interm_op, ov: interm_op = interm_op->get_output_target_inputs(0).begin()->get_node()->shared_from_this(); } return true; -}; +} } // namespace bool ov::snippets::pass::TokenizeMHASnippets::is_matmul0_supported(const std::shared_ptr& matmul) { diff --git a/src/common/snippets/src/shape_inference/shape_infer_instances.cpp b/src/common/snippets/src/shape_inference/shape_infer_instances.cpp index b254adbdc64888..e8df307a0b93ab 100644 --- a/src/common/snippets/src/shape_inference/shape_infer_instances.cpp +++ b/src/common/snippets/src/shape_inference/shape_infer_instances.cpp @@ -7,7 +7,6 @@ namespace ov { namespace snippets { using Result = IShapeInferSnippets::Result; -namespace { /* * Merge SRC to DST with broadcasting rules defined by the Autobroadcast specifier */ @@ -87,7 +86,6 @@ bool merge_into(VectorDims& dst, const VectorDims& src) { success &= merge_dim(dst[i], dst[i], src[i]); return success; } -} // namespace Result NumpyBroadcastShapeInfer::infer(const std::vector& input_shapes) { OPENVINO_ASSERT(!input_shapes.empty(), "No input shapes were provided for NumpyBroadcastShapeInfer"); diff --git a/src/common/snippets/src/shape_inference/shape_inference.cpp b/src/common/snippets/src/shape_inference/shape_inference.cpp index bc9534f7b08bce..cfc4dc460d4b16 100644 --- a/src/common/snippets/src/shape_inference/shape_inference.cpp +++ b/src/common/snippets/src/shape_inference/shape_inference.cpp @@ -55,6 +55,7 @@ const IShapeInferSnippetsFactory::TRegistry IShapeInferSnippetsFactory::registry SHAPE_INFER_PREDEFINED(op::Scalar, SingleElementShapeInfer), SHAPE_INFER_PREDEFINED(op::VectorBuffer, SingleElementShapeInfer), SHAPE_INFER_PREDEFINED(op::LoopEnd, EmptyShapeInfer), + SHAPE_INFER_PREDEFINED(op::Kernel, EmptyShapeInfer), SHAPE_INFER_PREDEFINED(op::Nop, EmptyShapeInfer), SHAPE_INFER_OP_SPECIFIC_EXTERNAL(opset1::Select, SelectShapeInfer), // Note that Result has no output PortConnectors, so the shape must be empty diff --git a/src/common/snippets/src/utils.cpp b/src/common/snippets/src/utils.cpp index 621c6c9bf67426..df894604d11693 100644 --- a/src/common/snippets/src/utils.cpp +++ b/src/common/snippets/src/utils.cpp @@ -117,8 +117,10 @@ ov::PartialShape get_planar_pshape(const Output& out) { VectorDims get_planar_vdims(const VectorDims& shape, const std::vector& layout) { VectorDims reordered_shape(shape.size()); - for (size_t i = 0; i < layout.size(); i++) + for (size_t i = 0; i < layout.size(); 
i++) { + OPENVINO_ASSERT(layout[i] < shape.size(), "get_planar_vdims: layout index is greater than the shape size"); reordered_shape[i] = shape[layout[i]]; + } return reordered_shape; } diff --git a/src/common/snippets/tests/include/lowered/pass/optimize_domain.hpp b/src/common/snippets/tests/include/lowered/pass/optimize_domain.hpp new file mode 100644 index 00000000000000..55edfd639fd9f3 --- /dev/null +++ b/src/common/snippets/tests/include/lowered/pass/optimize_domain.hpp @@ -0,0 +1,36 @@ +// Copyright (C) 2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include "snippets/shape_types.hpp" + +namespace ov { +namespace test { +namespace snippets { + +struct OptimizeDomainParams { + OptimizeDomainParams() = default; + OptimizeDomainParams(size_t, size_t, std::vector, ov::snippets::VectorDims, size_t); + size_t min_jit_work_amount = 0; + size_t min_parallel_work_amount = 0; + std::vector input_shapes; + ov::snippets::VectorDims exp_master_shape; // Expected master_shape + size_t exp_loop_depth = 0; // Expected loop depth (aka tile rank) +}; + +class OptimizeDomainTest : public testing::TestWithParam { +public: + using VectorDims = ov::snippets::VectorDims; + static std::string getTestCaseName(testing::TestParamInfo obj); +protected: + void SetUp() override; + std::shared_ptr m_model; + OptimizeDomainParams m_domain_opt_params; +}; + +} // namespace snippets +} // namespace test +} // namespace ov diff --git a/src/common/snippets/tests/include/lowering_utils.hpp b/src/common/snippets/tests/include/lowering_utils.hpp index 7ca5ecc47439f4..a419e6575a5de5 100644 --- a/src/common/snippets/tests/include/lowering_utils.hpp +++ b/src/common/snippets/tests/include/lowering_utils.hpp @@ -47,7 +47,6 @@ class LoweringTests : public TransformationTestsF { void SetUp() override; void TearDown() override; -protected: static std::shared_ptr getSubgraph(const std::shared_ptr& f); static std::shared_ptr getLoweredSubgraph(const std::shared_ptr& f, @@ -57,6 +56,8 @@ class LoweringTests : public TransformationTestsF { const ov::snippets::lowered::pass::PassPipeline& lowered_post_common = {}, const std::shared_ptr& generator = nullptr); static std::shared_ptr getTokenizedSubgraph(const std::shared_ptr& f); + +protected: ov::PartialShape master_shape{}; }; diff --git a/src/common/snippets/tests/src/pass/lowered/loop.cpp b/src/common/snippets/tests/src/lowered/pass/loop.cpp similarity index 100% rename from src/common/snippets/tests/src/pass/lowered/loop.cpp rename to src/common/snippets/tests/src/lowered/pass/loop.cpp diff --git a/src/common/snippets/tests/src/lowered/pass/optimize_domain.cpp b/src/common/snippets/tests/src/lowered/pass/optimize_domain.cpp new file mode 100644 index 00000000000000..025c2406ea33ab --- /dev/null +++ b/src/common/snippets/tests/src/lowered/pass/optimize_domain.cpp @@ -0,0 +1,98 @@ +// Copyright (C) 2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include "common_test_utils/common_utils.hpp" +#include "snippets/lowered/pass/optimize_domain.hpp" +#include "snippets/lowered/pass/pass.hpp" +#include "lowered/pass/optimize_domain.hpp" +#include "subgraph_simple.hpp" +#include "lowering_utils.hpp" + +namespace ov { +namespace test { +namespace snippets { +OptimizeDomainParams::OptimizeDomainParams(size_t min_jit_work_amount, + size_t min_parallel_work_amount, + std::vector input_shapes, + ov::snippets::VectorDims exp_master_shape, + size_t exp_loop_depth) : + min_jit_work_amount(min_jit_work_amount), + 
min_parallel_work_amount(min_parallel_work_amount), + input_shapes(std::move(input_shapes)), + exp_master_shape(std::move(exp_master_shape)), + exp_loop_depth(exp_loop_depth) { +} + +std::string OptimizeDomainTest::getTestCaseName(testing::TestParamInfo obj) { + OptimizeDomainParams domain_opt_params = obj.param; + std::ostringstream result; + result << "MinJitWork=" << domain_opt_params.min_jit_work_amount << "_"; + result << "MinParWork=" << domain_opt_params.min_parallel_work_amount << "_"; + for (size_t i = 0; i < domain_opt_params.input_shapes.size(); i++) + result << "IS[" << i << "]=" << ov::test::utils::partialShape2str({domain_opt_params.input_shapes[i]}) << "_"; + result << "ExpMS=" << ov::test::utils::vec2str(domain_opt_params.exp_master_shape) << "_"; + result << "ExpLD=" << domain_opt_params.exp_loop_depth << "_"; + return result.str(); +} + +void OptimizeDomainTest::SetUp() { + m_domain_opt_params = this->GetParam(); + m_model = std::make_shared(m_domain_opt_params.input_shapes)->getOriginal(); +} + +TEST_P(OptimizeDomainTest, DomainOptimization) { + auto subgraph = LoweringTests::getTokenizedSubgraph(m_model); + subgraph->set_min_jit_work_amount(m_domain_opt_params.min_jit_work_amount); + subgraph->set_min_parallel_work_amount(m_domain_opt_params.min_parallel_work_amount); + auto linear_ir = *subgraph->convert_body_to_linear_ir(); + size_t loop_depth = 1; + ov::snippets::lowered::pass::OptimizeDomain(loop_depth).run(linear_ir); + const auto& master_shape = linear_ir.get_master_shape(); + EXPECT_EQ(loop_depth, m_domain_opt_params.exp_loop_depth) << "Inconsistent loop depth detected"; + EXPECT_THAT(master_shape, testing::ContainerEq(m_domain_opt_params.exp_master_shape)) << "Inconsistent master_shape detected"; +} + +namespace OptimizeDomainTestsInstantiation { + +std::vector dopt_params = { + // No broadcasting => dimensions collapsed + {256, 4, {{14, 15, 1, 17}, {14, 15, 1, 17}}, {1, 1, 14, 255}, 1}, + {256, 4, {{14, 15, 16, 1}, {14, 15, 16, 1}}, {1, 1, 14, 240}, 1}, + // Same dimensions, but larger num threads => collapsing omitted + {256, 18, {{14, 15, 1, 17}, {14, 15, 1, 17}}, {1, 14, 15, 17}, 1}, + {256, 18, {{14, 15, 16, 1}, {14, 15, 16, 1}}, {1, 14, 15, 16}, 1}, + + // No broadcasting => collapsing and loop_depth increment + {256, 4, {{14, 15, 16, 17}, {14, 15, 16, 17}}, {1, 14, 15, 272}, 2}, + // Same dimensions, but smaller jit work amount => collapsing omitted + {16, 4, {{14, 15, 16, 17}, {14, 15, 16, 17}}, {14, 15, 16, 17}, 2}, + // Same dimensions, but higher parallel work amount => collapsing but no loop_depth increment + {256, 18, {{14, 15, 16, 17}, {14, 15, 16, 17}}, {1, 14, 15, 272}, 1}, + + // Broadcasting breaks dimension collapsing => loop depth incremented + {256, 4, {{14, 15, 16, 1}, {14, 15, 1, 17}}, {14, 15, 16, 17}, 2}, + {256, 4, {{14, 15, 1, 17}, {14, 15, 16, 17}}, {14, 15, 16, 17}, 2}, + + // Collapse even if not enough work to cover min_jit_work_amount + {256, 18, {{4, 5, 6, 7}, {4, 5, 6, 7}}, {1, 4, 5, 42}, 1}, + // Same dims, but higher parallel work amount => do not collapse to load all the threads + {256, 32, {{4, 5, 6, 7}, {4, 5, 6, 7}}, {4, 5, 6, 7}, 1}, + + // 2D and 1D shapes are too small, so no collapsing should be done in such cases + {256, 32, {{4, 5}, {4, 5}}, {4, 5}, 1}, + {256, 32, {{5}, {5}}, {5}, 1}, + + // min_parallel_work_amount = 1 is a special case that would cause all dimensions to collapse (up to min_jit_work_amount of course) + {256, 1, {{4, 1, 6, 7}, {4, 1, 6, 7}}, {1, 1, 1, 168}, 1}, +}; + 
+INSTANTIATE_TEST_SUITE_P(smoke_Snippets_DomainOptimization, OptimizeDomainTest, + ::testing::ValuesIn(dopt_params), + OptimizeDomainTest::getTestCaseName); + +} // namespace OptimizeDomainTestsInstantiation +} // namespace snippets +} // namespace test +} // namespace ov \ No newline at end of file diff --git a/src/common/snippets/tests/src/pass/canonicalization.cpp b/src/common/snippets/tests/src/pass/canonicalization.cpp index 79b5b97e0579a7..4981b4f8d8e139 100644 --- a/src/common/snippets/tests/src/pass/canonicalization.cpp +++ b/src/common/snippets/tests/src/pass/canonicalization.cpp @@ -67,7 +67,6 @@ namespace CanonicalizationTestsInstantiation { using ov::snippets::op::Subgraph; std::vector input_shapes; Shape expected_output_shape; -Subgraph::BlockedShapeVector input_blocked_shapes; using ov::Shape; ov::element::Type_t prec = ov::element::f32; diff --git a/src/plugins/intel_cpu/src/emitters/x64/jit_snippets_emitters.cpp b/src/plugins/intel_cpu/src/emitters/x64/jit_snippets_emitters.cpp index a146141c85c268..24493334f1d675 100644 --- a/src/plugins/intel_cpu/src/emitters/x64/jit_snippets_emitters.cpp +++ b/src/plugins/intel_cpu/src/emitters/x64/jit_snippets_emitters.cpp @@ -109,6 +109,10 @@ KernelEmitter::KernelEmitter(jit_generator* h, cpu_isa_t isa, const ExpressionPt IE_THROW() << "KernelEmitter invoked with op::Kernel that contains no compile_params"; body = kernel->region; jcp = *reinterpret_cast(kernel->compile_params); + master_shape = body.get_master_shape(); + // Note: plugin can prepend master shape with 1 to facilitate parallel execution (usually up to 6D tensor) + // so we have to reproduce this behavior here + master_shape.insert(master_shape.begin(), jcp.parallel_executor_ndims - master_shape.size(), 1); const auto& io_exprs = body.get_IO_ops(); num_inputs = 0; num_outputs = 0; @@ -217,7 +221,7 @@ void KernelEmitter::init_data_pointers(const Xbyak::Reg64& reg_indexes, const Xb const std::vector& data_ptr_regs) const { const auto num_params = num_inputs + num_outputs; // Note that we don't need offset for the last dim, since it's handled directly by Tile emitter - const size_t offset_rank = jcp.master_shape.size() - 1; + const size_t offset_rank = master_shape.size() - 1; std::vector> data_offsets(num_params, std::vector{}); auto offset_calculation = [=](const std::vector& shape, const std::vector& layout, const size_t data_size) { // Strides represent distance between consecutive elements of corresponding dimension. 
@@ -243,11 +247,8 @@ void KernelEmitter::init_data_pointers(const Xbyak::Reg64& reg_indexes, const Xb strides = std::move(reordered_strides); } // the last stride is ignored, since the entire last dim is processed by kernel - // and no parallel_for data_ptr offsets can be applied in this case (cover tile_rank == 1) + // and no parallel_for data_ptr offsets can be applied in this case strides.pop_back(); - // if tile_rank > 1, then zero corresponding strides since no external offset can be applied - // for (auto j = 0; j < tile_rank - 1; j++) - // strides[strides.size() - 1 - j] = 0; // actual offset size might be larger that the shape size due to 6D scheduling strides.insert(strides.begin(), offset_rank - strides.size(), 0); @@ -260,7 +261,7 @@ void KernelEmitter::init_data_pointers(const Xbyak::Reg64& reg_indexes, const Xb std::function&, Reg64)> init_ptr_with_offset; init_ptr_with_offset = [&](Reg64 pointer, const std::vector& offsets, Reg64 reg_tmp) { for (size_t j = 0; j < offset_rank; j++) { - if (jcp.master_shape[j] != 1 && offsets[j] != 0) { + if (master_shape[j] != 1 && offsets[j] != 0) { h->mov(reg_tmp, offsets[j]); h->imul(reg_tmp, h->ptr[reg_indexes + j * sizeof(size_t)]); h->add(pointer, reg_tmp); diff --git a/src/plugins/intel_cpu/src/emitters/x64/jit_snippets_emitters.hpp b/src/plugins/intel_cpu/src/emitters/x64/jit_snippets_emitters.hpp index d7d8922612c47b..7019fb14c6ec29 100644 --- a/src/plugins/intel_cpu/src/emitters/x64/jit_snippets_emitters.hpp +++ b/src/plugins/intel_cpu/src/emitters/x64/jit_snippets_emitters.hpp @@ -35,8 +35,7 @@ struct jit_snippets_call_args { }; struct jit_snippets_compile_args { - std::vector master_shape{}; - size_t tile_rank = 0; + size_t parallel_executor_ndims = 1; }; /// /// \brief jit_container_emitter designed to wrap Emitters that contain other Emitters (for example, KernelEmitter) @@ -94,6 +93,7 @@ class KernelEmitter : public jit_container_emitter { jit_snippets_compile_args jcp; std::vector gp_regs_pool; + std::vector master_shape; size_t num_inputs; size_t num_outputs; size_t num_unique_buffers; diff --git a/src/plugins/intel_cpu/src/nodes/subgraph.cpp b/src/plugins/intel_cpu/src/nodes/subgraph.cpp index 286faee9b85d15..dd2c756ba63849 100644 --- a/src/plugins/intel_cpu/src/nodes/subgraph.cpp +++ b/src/plugins/intel_cpu/src/nodes/subgraph.cpp @@ -18,6 +18,7 @@ #include #include +#include #include "snippets/pass/matmul_to_brgemm.hpp" #include "utils/cpu_utils.hpp" #include "emitters/x64/cpu_generator.hpp" @@ -461,7 +462,7 @@ void Snippet::SnippetJitExecutor::update_ptrs(jit_snippets_call_args& call_args, } void Snippet::SnippetJitExecutor::schedule_6d(const std::vector& inMemPtrs, const std::vector& outMemPtrs) { - const auto& dom = exec_domain; + const auto& dom = parallel_exec_domain; // < N, C, H, W > < 1, 1, N, C*H*W> parallel_for5d(dom[0], dom[1], dom[2], dom[3], dom[4], [&](int64_t d0, int64_t d1, int64_t d2, int64_t d3, int64_t d4) { @@ -474,7 +475,7 @@ void Snippet::SnippetJitExecutor::schedule_6d(const std::vector& inMe } void Snippet::SnippetJitExecutor::schedule_nt(const std::vector& inMemPtrs, const std::vector& outMemPtrs) { - const auto& work_size = exec_domain; + const auto& work_size = parallel_exec_domain; parallel_nt(0, [&](const int ithr, const int nthr) { jit_snippets_call_args call_args; update_ptrs(call_args, inMemPtrs, outMemPtrs); @@ -552,65 +553,20 @@ Snippet::SnippetJitExecutor::SnippetJitExecutor(const SnippetAttrs& attrs, bool if (canonicalShape.is_dynamic()) IE_THROW() << "Snippets: Canonicalization returned 
dynamic shape in static pipeline"; - masterShape = canonicalShape.get_shape(); - const auto &body = snippet_for_generation->body_ptr(); - normInputShapes.clear(); - for (const auto& p : body->get_parameters()) - normInputShapes.emplace_back(p->get_output_shape(0)); - normOutputShapes.clear(); - for (const auto& r : body->get_results()) - normOutputShapes.emplace_back(r->get_input_shape(0)); - - // prepare - masterShape = getNormalizedDimsBySize(masterShape, tensorRank); - std::vector original_input_shape_ranks; - for (auto& pshape : normInputShapes) { - original_input_shape_ranks.push_back(pshape.size()); - pshape = getNormalizedDimsBySize(pshape, tensorRank); - } - for (auto& pshape : normOutputShapes) - pshape = getNormalizedDimsBySize(pshape, tensorRank); - - tileRank = 1; - bool dims_collapsed = false; - fullWorkAmount = std::accumulate(masterShape.begin(), masterShape.end(), 1, std::multiplies()); - if (snippet_for_generation->has_domain_sensitive_ops()) { - tileRank = 2; - } else { - dims_collapsed = optimizeExecDomain(normInputShapes, normOutputShapes, masterShape, tileRank); - } - exec_domain = masterShape; - - std::vector scheduler_work_amounts; - // rename schedulerWorkAmount to harnessWorkAmount? - harnessWorkAmount = fullWorkAmount; - const auto rank = exec_domain.size(); - for (auto i = rank - tileRank; i < rank; i++) { - auto& dim = exec_domain[i]; - harnessWorkAmount /= dim; - scheduler_work_amounts.push_back(dim); - dim = 1; - } - - if (dims_collapsed) { - std::vector new_shapes; - for (size_t i = 0; i < normInputShapes.size(); i++) { - const auto norm_shape = normInputShapes[i]; - size_t ndims_to_skip = norm_shape.size() - original_input_shape_ranks[i]; - new_shapes.emplace_back(norm_shape.begin() + ndims_to_skip, norm_shape.end()); - } - snippet_for_generation->reshape_body(new_shapes); - } - snippet_for_generation->set_master_shape(ov::PartialShape(masterShape)); - snippet_for_generation->set_tile_rank(tileRank); + snippet_for_generation->set_min_parallel_work_amount(static_cast(parallel_get_max_threads())); + // Note: minimal JIT work amount is a predefined value that describes the number of kernel iterations (work amount) + // needed to cover kernel call overhead. It is used for balancing between parallel and JIT work amounts in domain optimization. 
+ snippet_for_generation->set_min_jit_work_amount(256); // generate jit_snippets_compile_args jcp; - jcp.master_shape = masterShape; - jcp.tile_rank = tileRank; + jcp.parallel_executor_ndims = tensorRank; generate(&jcp); buffer_scratchpad_size = snippet_for_generation->get_buffer_scratchpad_size(); buffer_scratchpad.resize(buffer_scratchpad_size * parallel_get_max_threads(), 0); + parallel_exec_domain = schedule.parallel_exec_domain; + harnessWorkAmount = std::accumulate(parallel_exec_domain.begin(), parallel_exec_domain.end(), 1, std::multiplies()); + parallel_exec_domain = getNormalizedDimsBySize(parallel_exec_domain, tensorRank); } ov::PartialShape Snippet::SnippetJitExecutor::canonicalizeBody(bool reshape) { @@ -628,74 +584,6 @@ ov::PartialShape Snippet::SnippetJitExecutor::canonicalizeBody(bool reshape) { } } -bool Snippet::SnippetJitExecutor::optimizeExecDomain(std::vector& inputShapes, std::vector& outputShapes, - VectorDims &domain, size_t& TileRank) const { - const size_t minimalConcurrency = parallel_get_max_threads(); - const size_t minimalJitWorkAmount = 256; - const size_t ds = domain.size(); - if ( ds <= 2 || // not enough dimensions to collapse - domain[ds-1] >= minimalJitWorkAmount || // There is enough work for 1D Tiles, no need to collapse - domain[ds-1] * domain[ds-2] >= fullWorkAmount / minimalConcurrency) // There won't be enough work for every thread (even one iter) if we collapse - return false; - auto findDimsToCollapse = [&]() { - auto collapseLastDims = [](VectorDims& dims, size_t dimsToCollapse) { - if (dimsToCollapse >= dims.size() - 1) - IE_THROW() << "Got invalid number of dims to collapse. Expected < " << dims.size() - 1 << " got " << dimsToCollapse; - for (int i = dims.size() - 2; i > static_cast(dims.size() - dimsToCollapse - 2); i--) { - dims[dims.size() - 1] *= dims[i]; - } - - for (int i = dims.size() - 2; i >= static_cast(dimsToCollapse); i--) { - dims[i] = dims[i - dimsToCollapse]; - } - - for (int i = dimsToCollapse - 1; i >= 0; i--) { - dims[i] = 1; - } - }; - int collapsedDims = 0; - size_t currentJitWorkAmount = domain[domain.size() - 1]; - while (currentJitWorkAmount < minimalJitWorkAmount && currentJitWorkAmount < fullWorkAmount) { - if (static_cast(domain.size()) - collapsedDims - 2 < 0) - break; - - bool canCollapse = true; - for (size_t i = 0; i < inputShapes.size(); i++) { - const size_t last = inputShapes[i].size() - 1; - if ((inputShapes[i][last - 1] != 1 && inputShapes[i][last] == 1) || - (inputShapes[i][last - 1] == 1 && inputShapes[i][last] != 1)) { - canCollapse = false; - break; - } - } - - size_t nextJitWorkAmount = currentJitWorkAmount * domain[domain.size() - 2]; - if (fullWorkAmount / nextJitWorkAmount >= minimalConcurrency) { - currentJitWorkAmount = nextJitWorkAmount; - // if we cannot use dim collapsing we should use tile2D - if (!canCollapse) { - if (TileRank < maxTileRank) { - TileRank++; - continue; - } - - break; - } - collapsedDims++; - for (auto &d : inputShapes) - collapseLastDims(d, 1); - for (auto &d : outputShapes) - collapseLastDims(d, 1); - collapseLastDims(domain, 1); - } else { - break; - } - } - return collapsedDims > 0; - }; - return findDimsToCollapse(); -} - void Snippet::SnippetJitExecutor::generate(const jit_snippets_compile_args* jcp) { using Manager = snippets::pass::Manager; std::vector backend_passes; @@ -726,16 +614,16 @@ void Snippet::SnippetJitExecutor::generate(const jit_snippets_compile_args* jcp) #undef SNIPPETS_REGISTER_PASS ov::snippets::lowered::pass::PassPipeline control_flow_markup_pipeline; - 
CPU_REGISTER_PASS_X64(control_flow_markup_pipeline, ov::intel_cpu::pass::BrgemmBlocking); + CPU_REGISTER_PASS_X64(control_flow_markup_pipeline, ov::intel_cpu::pass::BrgemmBlocking) ov::snippets::lowered::pass::PassPipeline control_flow_pipeline; - CPU_REGISTER_PASS_X64(control_flow_pipeline, ov::intel_cpu::pass::FuseLoadStoreConvert); - // Todo: We don't need shape infer factory now, since shape infer will be done through validate_and_infer_types - // pass std::make_shared() instead of nullptr, when shape infer is performed on LIR + CPU_REGISTER_PASS_X64(control_flow_pipeline, ov::intel_cpu::pass::FuseLoadStoreConvert) + // Note: we need to pass valid shapeInfer factory to generate, so it can be used in OptimizeDomain pass + // in all other cases nGraph shape inference will be used until ticket # 113209 (PR 18563) is merged schedule = snippet_for_generation->generate(backend_passes, control_flow_markup_pipeline, control_flow_pipeline, - nullptr, + std::make_shared(), reinterpret_cast(jcp)); } diff --git a/src/plugins/intel_cpu/src/nodes/subgraph.h b/src/plugins/intel_cpu/src/nodes/subgraph.h index ed706443e68ef6..086e84e15ba631 100644 --- a/src/plugins/intel_cpu/src/nodes/subgraph.h +++ b/src/plugins/intel_cpu/src/nodes/subgraph.h @@ -114,8 +114,6 @@ class Snippet : public Node { size_t numOutput = 0; ov::PartialShape canonicalizeBody(bool reshape); - // returns true if exec domain was modified - bool optimizeExecDomain(std::vector&, std::vector&, VectorDims&, size_t&) const; void generate(const jit_snippets_compile_args*); inline void update_ptrs(jit_snippets_call_args&, const std::vector& inMemPtrs, const std::vector& outMemPtrs); @@ -130,22 +128,14 @@ class Snippet : public Node { // Holds index of output used as in execution domain // it should be compatible with a schedule's work size - std::vector exec_domain = {}; + std::vector parallel_exec_domain = {}; /// scheduling info size_t tensorRank = 0; - size_t tileRank = 1; - size_t fullWorkAmount = 0; size_t harnessWorkAmount = 0; - const size_t maxTileRank = 2; std::vector dataSize = {}; - // master shape is mutable since we need to modify it inside const shapeInfer method - mutable VectorDims masterShape = {}; - mutable std::vector normInputShapes = {}; - mutable std::vector normOutputShapes = {}; - std::vector start_offset_in = {}; std::vector start_offset_out = {}; From 0060b26b7486a5ed2ce990e1efa41078cf414d0b Mon Sep 17 00:00:00 2001 From: Andrey Kashchikhin Date: Tue, 10 Oct 2023 18:06:18 +0100 Subject: [PATCH 135/257] [CI] [GHA] [Azure] Unskip `test_div_uint8_cpu`, unskip `src/frontends/onnx/tests/test_python/test_zoo_models.py` in Win and Linux pipelines (#20366) * only skip test if mac * unskip * skip for onnx fe as well * remove skips * rm unused imports * revert import --- .github/workflows/linux.yml | 3 +-- .github/workflows/windows.yml | 3 +-- .../python/tests_compatibility/test_onnx/test_backend.py | 5 ----- 3 files changed, 2 insertions(+), 9 deletions(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index c658f6a9f88b11..a3c7e9a4e1c250 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -986,8 +986,7 @@ jobs: # Skip test_onnx/test_zoo_models and test_onnx/test_backend due to long execution time - ONNX Model Zoo tests are run separately python3 -m pytest -sv ${OPENVINO_REPO}/src/frontends/onnx/tests -k 'not cuda' \ --junitxml=${INSTALL_TEST_DIR}/TEST-onnx_frontend.xml \ - --ignore=${OPENVINO_REPO}/src/frontends/onnx/tests/test_python/test_zoo_models.py \ - 
--ignore=${OPENVINO_REPO}/src/frontends/onnx/tests/test_python/test_backend.py + --ignore=${OPENVINO_REPO}/src/frontends/onnx/tests/test_python/test_zoo_models.py - name: MO Python API Tests run: | diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 1e39f82e7b887a..70c7ac216121dc 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -402,8 +402,7 @@ jobs: python3 -m pytest ${{ env.OPENVINO_REPO }}/src/frontends/onnx/tests -k "not cuda" ^ --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-onnx_frontend.xml ^ - --ignore=${{ env.OPENVINO_REPO }}/src/frontends/onnx/tests/test_python/test_zoo_models.py ^ - --ignore=${{ env.OPENVINO_REPO }}/src/frontends/onnx/tests/test_python/test_backend.py + --ignore=${{ env.OPENVINO_REPO }}/src/frontends/onnx/tests/test_python/test_zoo_models.py - name: MO Python API Tests shell: cmd diff --git a/src/bindings/python/tests_compatibility/test_onnx/test_backend.py b/src/bindings/python/tests_compatibility/test_onnx/test_backend.py index f9d7c4fe261a13..87f53223c2d672 100644 --- a/src/bindings/python/tests_compatibility/test_onnx/test_backend.py +++ b/src/bindings/python/tests_compatibility/test_onnx/test_backend.py @@ -32,7 +32,6 @@ xfail_issue_48052, xfail_issue_52463, xfail_issue_58033, - xfail_issue_58676, xfail_issue_63033, xfail_issue_63036, xfail_issue_63043, @@ -302,10 +301,6 @@ def expect_fail(test_case_path, xfail): # type: (str) -> None "OnnxBackendNodeModelTest.test_tril_zero_cpu", "OnnxBackendNodeModelTest.test_triu_zero_cpu", ), - ( - xfail_issue_58676, - "OnnxBackendNodeModelTest.test_div_uint8_cpu" - ), ( skip_dynamic_model, "OnnxBackendNodeModelTest.test_triu_one_row_cpu", From 246d8efadaa076689c4559d7cc610d14ef28be3e Mon Sep 17 00:00:00 2001 From: Oleg Pipikin Date: Tue, 10 Oct 2023 19:54:20 +0200 Subject: [PATCH 136/257] Refactor LoopLayerTest, LrnLayerTest, LSTMSequenceTest, LSTMCellTest (#20269) * Refactor LoopLayerTest * Refactor LrnLayerTest * LSTMCellTest * Refactor LSTMSequenceTest --- .../single_layer_tests/loop.cpp | 66 ++----- .../single_layer_tests/lrn.cpp | 74 ++++---- .../single_layer_tests/lstm_cell.cpp | 69 +++---- .../single_layer_tests/lstm_sequence.cpp | 143 +++++++------- .../skip_tests_config.cpp | 2 + .../shared/include/single_op_tests/loop.hpp | 15 ++ .../shared/include/single_op_tests/lrn.hpp | 15 ++ .../include/single_op_tests/lstm_cell.hpp | 15 ++ .../include/single_op_tests/lstm_sequence.hpp | 15 ++ .../shared_test_classes/single_op/loop.hpp | 39 ++++ .../shared_test_classes/single_op/lrn.hpp | 35 ++++ .../single_op/lstm_cell.hpp | 38 ++++ .../single_op/lstm_sequence.hpp | 38 ++++ .../src/base/utils/generate_inputs.cpp | 6 +- .../src/single_op/loop.cpp | 125 +++++++++++++ .../shared_test_classes/src/single_op/lrn.cpp | 66 +++++++ .../src/single_op/lstm_cell.cpp | 141 ++++++++++++++ .../src/single_op/lstm_sequence.cpp | 175 ++++++++++++++++++ 18 files changed, 886 insertions(+), 191 deletions(-) create mode 100644 src/tests/functional/plugin/shared/include/single_op_tests/loop.hpp create mode 100644 src/tests/functional/plugin/shared/include/single_op_tests/lrn.hpp create mode 100644 src/tests/functional/plugin/shared/include/single_op_tests/lstm_cell.hpp create mode 100644 src/tests/functional/plugin/shared/include/single_op_tests/lstm_sequence.hpp create mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/loop.hpp create mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/lrn.hpp create mode 
100644 src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/lstm_cell.hpp create mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/lstm_sequence.hpp create mode 100644 src/tests/functional/shared_test_classes/src/single_op/loop.cpp create mode 100644 src/tests/functional/shared_test_classes/src/single_op/lrn.cpp create mode 100644 src/tests/functional/shared_test_classes/src/single_op/lstm_cell.cpp create mode 100644 src/tests/functional/shared_test_classes/src/single_op/lstm_sequence.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/loop.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/loop.cpp index f4ee4161943ecd..cadb1cd9998026 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/loop.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/loop.cpp @@ -2,67 +2,37 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include "single_layer_tests/loop.hpp" +#include "single_op_tests/loop.hpp" #include "common_test_utils/test_constants.hpp" -using namespace LayerTestsDefinitions; - namespace { +using ov::test::LoopLayerTest; +using ov::test::LOOP_IN_TYPE; + // without clip values increase rapidly, so use only seq_lengths = 2 std::vector execute_first_iteration{true}; std::vector is_body_condition_const{true/*, false*/}; std::vector body_condition{true/*, false*/}; // works only if is_body_condition_const == true std::vector trip_count{1, 10/*, -1*/}; // -1 means infinity - std::vector, LOOP_IN_TYPE>>> inputs = { - {{{32, 1, 10}, LOOP_IN_TYPE::INVARIANT}, {{32, 1, 10}, LOOP_IN_TYPE::INVARIANT}, {{32, 1, 10}, LOOP_IN_TYPE::MERGED}}, - }; - std::vector netPrecisions = {InferenceEngine::Precision::FP32, - InferenceEngine::Precision::FP16}; + std::vector input_shapes_static = {{32, 1, 10}}; + + std::vector> inputs_types = { + {LOOP_IN_TYPE::INVARIANT}, + {LOOP_IN_TYPE::MERGED}}; + + std::vector model_types = { + ov::element::f32, + ov::element::f16}; - INSTANTIATE_TEST_SUITE_P(smoke_LoopCommonZeroClip, LoopTest, + INSTANTIATE_TEST_SUITE_P(smoke_LoopCommonZeroClip, LoopLayerTest, ::testing::Combine( ::testing::ValuesIn(execute_first_iteration), ::testing::ValuesIn(is_body_condition_const), ::testing::ValuesIn(body_condition), ::testing::ValuesIn(trip_count), - ::testing::ValuesIn(inputs), - ::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::test::static_shapes_to_test_representation(input_shapes_static)), + ::testing::ValuesIn(inputs_types), + ::testing::ValuesIn(model_types), ::testing::Values(ov::test::utils::DEVICE_CPU)), - LoopTest::getTestCaseName); - - static const std::vector> static_loop_types { - // GCC4.8 limitation: have to specify type of each element in list - // static_trip_count | max | dynamic_exit | axis - std::tuple{ true , 5, -1, -1 }, // n_iter 5, no dynamic exit - std::tuple{ true , 5, 3, -1 }, // n_iter 3, dynamic exit on 3 - std::tuple{ true , 5, 7, -1 }, // n_iter 5, dynamic exit not reached - std::tuple{ true , -1, 5, -1 }, // n_iter 5, inf loop with dynamic exit on 5 - std::tuple{ true , 5, -1, 1 }, // n_iter 5, const for loop with auto concatenated out - std::tuple{ false , 5, -1, -1 }, // | - std::tuple{ false , 5, 3, -1 }, // | same with dynamic trip count - std::tuple{ false , 5, 7, -1 }, // | - std::tuple{ false , -1, 5, -1 } // | - }; - - using namespace testing; - using namespace InferenceEngine; - - 
INSTANTIATE_TEST_SUITE_P(smoke_StaticShapeLoop, StaticShapeLoopTest, - Combine( - ValuesIn(std::vector{true, false}), - Values(true), - ValuesIn(static_loop_types), - Values(7), - Values({2, 1, 4}), - Values(Precision::FP32, Precision::I32), - Values(ov::test::utils::DEVICE_CPU), - Values>({}))); - using namespace testing; - INSTANTIATE_TEST_SUITE_P(smoke_TrivialLoop, TrivialLoopTest, - Combine( - Values(Precision::FP32, Precision::I32), - Values({2, 3, 4}), - Values(ov::test::utils::DEVICE_CPU))); - + LoopLayerTest::getTestCaseName); } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/lrn.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/lrn.cpp index 2554f63e7b7341..40e05cac3976ed 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/lrn.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/lrn.cpp @@ -2,94 +2,88 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "single_layer_tests/lrn.hpp" - -#include - +#include "single_op_tests/lrn.hpp" #include "common_test_utils/test_constants.hpp" -using namespace LayerTestsDefinitions; +namespace { +using ov::test::LrnLayerTest; + +// common values -const std::vector netPrecisions{ - InferenceEngine::Precision::FP32 -}; +const std::vector model_types{ov::element::f32}; const double alpha = 9.9e-05; const double beta = 2; const double bias = 1.0; const size_t size = 5; -namespace LRN2D { +// 2D -const std::vector> axes = {{1}}; +const std::vector> axes_2d = {{1}}; +std::vector input_shapes_2d_static = {{10, 16}}; INSTANTIATE_TEST_SUITE_P(smoke_LrnCheck_2D, LrnLayerTest, ::testing::Combine(::testing::Values(alpha), ::testing::Values(beta), ::testing::Values(bias), ::testing::Values(size), - ::testing::ValuesIn(axes), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(std::vector({10, 16})), + ::testing::ValuesIn(axes_2d), + ::testing::ValuesIn(model_types), + ::testing::Values(ov::test::static_shapes_to_test_representation(input_shapes_2d_static)), ::testing::Values(ov::test::utils::DEVICE_CPU)), LrnLayerTest::getTestCaseName); -} // namespace LRN2D -namespace LRN3D { +// 3D -const std::vector> axes = {{1}, {2}}; +const std::vector> axes_3d = {{1}, {2}}; +std::vector input_shapes_3d_static = {{6, 10, 16}}; INSTANTIATE_TEST_SUITE_P(smoke_LrnCheck_3D, LrnLayerTest, ::testing::Combine(::testing::Values(alpha), ::testing::Values(beta), ::testing::Values(bias), ::testing::Values(size), - ::testing::ValuesIn(axes), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(std::vector({6, 10, 16})), + ::testing::ValuesIn(axes_3d), + ::testing::ValuesIn(model_types), + ::testing::Values(ov::test::static_shapes_to_test_representation(input_shapes_3d_static)), ::testing::Values(ov::test::utils::DEVICE_CPU)), LrnLayerTest::getTestCaseName); -} // namespace LRN3D -namespace LRN4D { -const std::vector> axes = {{1}, {2, 3}, {3, 2}}; +// 4D + +const std::vector> axes_4d = {{1}, {2, 3}, {3, 2}}; +std::vector input_shapes_4d_static = {{10, 10, 3, 8}}; INSTANTIATE_TEST_SUITE_P(smoke_LrnCheck_4D, LrnLayerTest, ::testing::Combine(::testing::Values(alpha), ::testing::Values(beta), ::testing::Values(bias), ::testing::Values(size), - 
::testing::ValuesIn(axes), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(std::vector({10, 10, 3, 8})), + ::testing::ValuesIn(axes_4d), + ::testing::ValuesIn(model_types), + ::testing::Values(ov::test::static_shapes_to_test_representation(input_shapes_4d_static)), ::testing::Values(ov::test::utils::DEVICE_CPU)), LrnLayerTest::getTestCaseName); -} // namespace LRN4D -namespace LRN5D { -const std::vector> axes = {{1}, {2, 3, 4}, {4, 2, 3}}; +// 5D + +const std::vector> axes_5d = {{1}, {2, 3, 4}, {4, 2, 3}}; +std::vector input_shapes_5d_static = {{1, 10, 10, 7, 4}}; + INSTANTIATE_TEST_SUITE_P(smoke_LrnCheck_5D, LrnLayerTest, ::testing::Combine(::testing::Values(alpha), ::testing::Values(beta), ::testing::Values(bias), ::testing::Values(size), - ::testing::ValuesIn(axes), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(std::vector({1, 10, 10, 7, 4})), + ::testing::ValuesIn(axes_5d), + ::testing::ValuesIn(model_types), + ::testing::Values(ov::test::static_shapes_to_test_representation(input_shapes_5d_static)), ::testing::Values(ov::test::utils::DEVICE_CPU)), LrnLayerTest::getTestCaseName); -} // namespace LRN5D +} // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/lstm_cell.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/lstm_cell.cpp index d950c2c3e2351d..6d44dcb427193e 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/lstm_cell.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/lstm_cell.cpp @@ -4,40 +4,45 @@ #include -#include "single_layer_tests/lstm_cell.hpp" +#include "single_op_tests/lstm_cell.hpp" #include "common_test_utils/test_constants.hpp" -using namespace LayerTestsDefinitions; - namespace { - std::vector should_decompose{false, true}; - std::vector batch{5}; - std::vector hidden_size{1, 10}; - std::vector input_size{1, 30}; - std::vector> activations = {{"relu", "sigmoid", "tanh"}, {"sigmoid", "tanh", "tanh"}, - {"tanh", "relu", "sigmoid"}, {"sigmoid", "sigmoid", "sigmoid"}, - {"tanh", "tanh", "tanh"}, {"relu", "relu", "relu"}}; - std::vector clip{0.f, 0.7f}; - std::vector layer_types = { - ngraph::helpers::InputLayerType::CONSTANT, - ngraph::helpers::InputLayerType::PARAMETER - }; - std::vector netPrecisions = {InferenceEngine::Precision::FP32, - InferenceEngine::Precision::FP16}; - - INSTANTIATE_TEST_SUITE_P(smoke_LSTMCellCommon, LSTMCellTest, - ::testing::Combine( - ::testing::ValuesIn(should_decompose), - ::testing::ValuesIn(batch), - ::testing::ValuesIn(hidden_size), - ::testing::ValuesIn(input_size), - ::testing::ValuesIn(activations), - ::testing::ValuesIn(clip), - ::testing::ValuesIn(layer_types), - ::testing::ValuesIn(layer_types), - ::testing::ValuesIn(layer_types), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - LSTMCellTest::getTestCaseName); +using ov::test::LSTMCellTest; +using ov::test::utils::InputLayerType; + +std::vector should_decompose{false, true}; +std::vector batch{5}; +std::vector hidden_size{1, 10}; +std::vector input_size{1, 30}; +std::vector clip{0.f, 0.7f}; + +std::vector> activations = {{"relu", "sigmoid", "tanh"}, {"sigmoid", "tanh", "tanh"}, + {"tanh", "relu", 
"sigmoid"}, {"sigmoid", "sigmoid", "sigmoid"}, + {"tanh", "tanh", "tanh"}, {"relu", "relu", "relu"}}; + +std::vector layer_types = { + InputLayerType::CONSTANT, + InputLayerType::PARAMETER +}; + +std::vector model_types = { + ov::element::f32, + ov::element::f16}; + +INSTANTIATE_TEST_SUITE_P(smoke_LSTMCellCommon, LSTMCellTest, + ::testing::Combine( + ::testing::ValuesIn(should_decompose), + ::testing::ValuesIn(batch), + ::testing::ValuesIn(hidden_size), + ::testing::ValuesIn(input_size), + ::testing::ValuesIn(activations), + ::testing::ValuesIn(clip), + ::testing::ValuesIn(layer_types), + ::testing::ValuesIn(layer_types), + ::testing::ValuesIn(layer_types), + ::testing::ValuesIn(model_types), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + LSTMCellTest::getTestCaseName); } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/lstm_sequence.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/lstm_sequence.cpp index 12f1eadc5b5e2a..f9b555f23eb59b 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/lstm_sequence.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/lstm_sequence.cpp @@ -2,81 +2,84 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include -#include "single_layer_tests/lstm_sequence.hpp" +#include "single_op_tests/lstm_sequence.hpp" #include "common_test_utils/test_constants.hpp" -using namespace LayerTestsDefinitions; - namespace { - std::vector mode{ngraph::helpers::SequenceTestsMode::CONVERT_TO_TI_MAX_SEQ_LEN_CONST, - ngraph::helpers::SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_CONST, - ngraph::helpers::SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_PARAM, - ngraph::helpers::SequenceTestsMode::PURE_SEQ, - ngraph::helpers::SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_CONST, - ngraph::helpers::SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_PARAM}; - // output values increase rapidly without clip, so use only seq_lengths = 2 - std::vector seq_lengths_zero_clip{2}; - std::vector seq_lengths_clip_non_zero{20}; - std::vector batch{10}; - std::vector hidden_size{1, 10}; - std::vector input_size{10}; - std::vector> activations = {{"relu", "sigmoid", "tanh"}, {"sigmoid", "tanh", "tanh"}, - {"tanh", "relu", "sigmoid"}, {"sigmoid", "sigmoid", "sigmoid"}, - {"tanh", "tanh", "tanh"}, {"relu", "relu", "relu"}}; - std::vector clip{0.f}; - std::vector clip_non_zeros{0.7f}; - std::vector direction = {ngraph::op::RecurrentSequenceDirection::FORWARD, - ngraph::op::RecurrentSequenceDirection::REVERSE, - ngraph::op::RecurrentSequenceDirection::BIDIRECTIONAL - }; - std::vector netPrecisions = {InferenceEngine::Precision::FP32, - InferenceEngine::Precision::FP16}; +using ov::test::LSTMSequenceTest; +using ov::test::utils::SequenceTestsMode; +using ov::test::utils::InputLayerType; + +std::vector mode{ + SequenceTestsMode::CONVERT_TO_TI_MAX_SEQ_LEN_CONST, + SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_CONST, + SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_PARAM, + SequenceTestsMode::PURE_SEQ, + SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_CONST, + SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_PARAM}; + +// output values increase rapidly without clip, so use only seq_lengths = 2 +std::vector seq_lengths_zero_clip{2}; +std::vector seq_lengths_clip_non_zero{20}; +std::vector batch{10}; +std::vector hidden_size{1, 10}; +std::vector input_size{10}; +std::vector> activations = {{"relu", "sigmoid", "tanh"}, {"sigmoid", "tanh", "tanh"}, + {"tanh", 
"relu", "sigmoid"}, {"sigmoid", "sigmoid", "sigmoid"}, + {"tanh", "tanh", "tanh"}, {"relu", "relu", "relu"}}; +std::vector clip{0.f}; +std::vector clip_non_zeros{0.7f}; +std::vector direction = {ov::op::RecurrentSequenceDirection::FORWARD, + ov::op::RecurrentSequenceDirection::REVERSE, + ov::op::RecurrentSequenceDirection::BIDIRECTIONAL +}; +std::vector model_types = { + ov::element::f32, + ov::element::f16}; - INSTANTIATE_TEST_SUITE_P(smoke_LSTMSequenceCommonZeroClip, LSTMSequenceTest, - ::testing::Combine( - ::testing::ValuesIn(mode), - ::testing::ValuesIn(seq_lengths_zero_clip), - ::testing::ValuesIn(batch), - ::testing::ValuesIn(hidden_size), - ::testing::ValuesIn(input_size), - ::testing::ValuesIn(activations), - ::testing::ValuesIn(clip), - ::testing::ValuesIn(direction), - ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - LSTMSequenceTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_LSTMSequenceCommonZeroClip, LSTMSequenceTest, + ::testing::Combine( + ::testing::ValuesIn(mode), + ::testing::ValuesIn(seq_lengths_zero_clip), + ::testing::ValuesIn(batch), + ::testing::ValuesIn(hidden_size), + ::testing::ValuesIn(input_size), + ::testing::ValuesIn(activations), + ::testing::ValuesIn(clip), + ::testing::ValuesIn(direction), + ::testing::Values(InputLayerType::CONSTANT), + ::testing::ValuesIn(model_types), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + LSTMSequenceTest::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_LSTMSequenceCommonZeroClipNonconstantWRB, LSTMSequenceTest, - ::testing::Combine( - ::testing::Values(ngraph::helpers::SequenceTestsMode::PURE_SEQ), - ::testing::ValuesIn(seq_lengths_zero_clip), - ::testing::ValuesIn(batch), - ::testing::ValuesIn(hidden_size), - ::testing::ValuesIn(input_size), - ::testing::ValuesIn(activations), - ::testing::ValuesIn(clip), - ::testing::ValuesIn(direction), - ::testing::Values(ngraph::helpers::InputLayerType::PARAMETER), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - LSTMSequenceTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_LSTMSequenceCommonZeroClipNonconstantWRB, LSTMSequenceTest, + ::testing::Combine( + ::testing::Values(SequenceTestsMode::PURE_SEQ), + ::testing::ValuesIn(seq_lengths_zero_clip), + ::testing::ValuesIn(batch), + ::testing::ValuesIn(hidden_size), + ::testing::ValuesIn(input_size), + ::testing::ValuesIn(activations), + ::testing::ValuesIn(clip), + ::testing::ValuesIn(direction), + ::testing::Values(InputLayerType::PARAMETER), + ::testing::ValuesIn(model_types), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + LSTMSequenceTest::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_LSTMSequenceCommonClip, LSTMSequenceTest, - ::testing::Combine( - ::testing::ValuesIn(mode), - ::testing::ValuesIn(seq_lengths_clip_non_zero), - ::testing::ValuesIn(batch), - ::testing::ValuesIn(hidden_size), - ::testing::ValuesIn(input_size), - ::testing::ValuesIn(activations), - ::testing::ValuesIn(clip_non_zeros), - ::testing::ValuesIn(direction), - ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - LSTMSequenceTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_LSTMSequenceCommonClip, LSTMSequenceTest, + ::testing::Combine( + ::testing::ValuesIn(mode), + ::testing::ValuesIn(seq_lengths_clip_non_zero), + ::testing::ValuesIn(batch), + ::testing::ValuesIn(hidden_size), + 
::testing::ValuesIn(input_size), + ::testing::ValuesIn(activations), + ::testing::ValuesIn(clip_non_zeros), + ::testing::ValuesIn(direction), + ::testing::Values(InputLayerType::CONSTANT), + ::testing::ValuesIn(model_types), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + LSTMSequenceTest::getTestCaseName); } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp index bc4b9786d1511f..857caffecb713a 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp @@ -187,6 +187,8 @@ std::vector disabledTestPatterns() { // Issue: 121313 R"(smoke_GroupConvBackpropData.*paddingDefined/GroupConvBackpropLayerTest.Inference.*f16.*)", R"(smoke_GroupConvBackpropData.*paddingDefined/GroupConvBackpropLayerTest.Inference.*f32.*)", + // Issue: 122177 + R"(smoke_LSTMSequenceCommon.*LSTMSequenceTest.Inference.*CONVERT_TO_TI.*)", // Issue: 122094 R"(smoke_Interpolate_Basic_Down_Sample_Tail/InterpolateLayerTest.Inference.*(asymmetric|align_corners).*f16.*)", }; diff --git a/src/tests/functional/plugin/shared/include/single_op_tests/loop.hpp b/src/tests/functional/plugin/shared/include/single_op_tests/loop.hpp new file mode 100644 index 00000000000000..ada7e8b32b8e79 --- /dev/null +++ b/src/tests/functional/plugin/shared/include/single_op_tests/loop.hpp @@ -0,0 +1,15 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "shared_test_classes/single_op/loop.hpp" + +namespace ov { +namespace test { +TEST_P(LoopLayerTest, Inference) { + run(); +} +} // namespace test +} // namespace ov diff --git a/src/tests/functional/plugin/shared/include/single_op_tests/lrn.hpp b/src/tests/functional/plugin/shared/include/single_op_tests/lrn.hpp new file mode 100644 index 00000000000000..cbe1b3f5710f0e --- /dev/null +++ b/src/tests/functional/plugin/shared/include/single_op_tests/lrn.hpp @@ -0,0 +1,15 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "shared_test_classes/single_op/lrn.hpp" + +namespace ov { +namespace test { +TEST_P(LrnLayerTest, Inference) { + run(); +} +} // namespace test +} // namespace ov diff --git a/src/tests/functional/plugin/shared/include/single_op_tests/lstm_cell.hpp b/src/tests/functional/plugin/shared/include/single_op_tests/lstm_cell.hpp new file mode 100644 index 00000000000000..8ce396ec36f279 --- /dev/null +++ b/src/tests/functional/plugin/shared/include/single_op_tests/lstm_cell.hpp @@ -0,0 +1,15 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "shared_test_classes/single_op/lstm_cell.hpp" + +namespace ov { +namespace test { +TEST_P(LSTMCellTest, Inference) { + run(); +}; +} // namespace test +} // namespace ov diff --git a/src/tests/functional/plugin/shared/include/single_op_tests/lstm_sequence.hpp b/src/tests/functional/plugin/shared/include/single_op_tests/lstm_sequence.hpp new file mode 100644 index 00000000000000..c77ce06ece2c92 --- /dev/null +++ b/src/tests/functional/plugin/shared/include/single_op_tests/lstm_sequence.hpp @@ -0,0 +1,15 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "shared_test_classes/single_op/lstm_sequence.hpp" + 
+namespace ov { +namespace test { +TEST_P(LSTMSequenceTest, Inference) { + run(); +}; +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/loop.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/loop.hpp new file mode 100644 index 00000000000000..c9645d6a5b1c74 --- /dev/null +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/loop.hpp @@ -0,0 +1,39 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include + +#include "shared_test_classes/base/ov_subgraph.hpp" + +namespace ov { +namespace test { +enum LOOP_IN_TYPE { + INVARIANT, + MERGED +}; + +using LoopParams = typename std::tuple< + bool, // ExecuteFirstIteration + bool, // BodyCondition is a constant? + bool, // BodyCondition value, if it is a Const + int64_t, // TripCount, -1 means infinity + std::vector, // input shapes + std::vector, // input types. Vector size have to be equal to input shapes vector size + ov::element::Type, // Model type + std::string>; // Device name + +class LoopLayerTest : public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseTest { +public: + static std::string getTestCaseName(const testing::TestParamInfo &obj); + +protected: + void SetUp() override; +}; +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/lrn.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/lrn.hpp new file mode 100644 index 00000000000000..c7b887cb1fc631 --- /dev/null +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/lrn.hpp @@ -0,0 +1,35 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include + +#include "shared_test_classes/base/ov_subgraph.hpp" + +namespace ov { +namespace test { +typedef std::tuple< + double, // Alpha + double, // Beta + double, // Bias + size_t, // Size + std::vector, // Reduction axes + ov::element::Type, // Network precision + std::vector, // Input shapes + std::string // Device name +> lrnLayerTestParamsSet; + +class LrnLayerTest + : public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseTest { +public: + static std::string getTestCaseName(const testing::TestParamInfo& obj); +protected: + void SetUp() override; +}; +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/lstm_cell.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/lstm_cell.hpp new file mode 100644 index 00000000000000..d63b7553da1c55 --- /dev/null +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/lstm_cell.hpp @@ -0,0 +1,38 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include + +#include "shared_test_classes/base/ov_subgraph.hpp" +#include "common_test_utils/test_enums.hpp" + +namespace ov { +namespace test { +using LSTMCellParams = typename std::tuple< + bool, // using decompose to sub-ops transformation + size_t, // batch + size_t, // hidden size + size_t, // input size + std::vector, // activations + float, // clip + ov::test::utils::InputLayerType, // W input type (Constant or Parameter) + ov::test::utils::InputLayerType, 
// R input type (Constant or Parameter) + ov::test::utils::InputLayerType, // B input type (Constant or Parameter) + ov::element::Type, // Network precision + std::string>; // Device name + +class LSTMCellTest : public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseTest { +public: + static std::string getTestCaseName(const testing::TestParamInfo &obj); +protected: + void SetUp() override; +}; + +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/lstm_sequence.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/lstm_sequence.hpp new file mode 100644 index 00000000000000..c4996cb32c9d7d --- /dev/null +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/lstm_sequence.hpp @@ -0,0 +1,38 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include + +#include "shared_test_classes/base/ov_subgraph.hpp" +#include "common_test_utils/test_enums.hpp" + +namespace ov { +namespace test { +using LSTMSequenceParams = typename std::tuple< + ov::test::utils::SequenceTestsMode, // pure Sequence or TensorIterator + size_t, // seq_lengths + size_t, // batch + size_t, // hidden size + size_t, // input size + std::vector, // activations + float, // clip + ov::op::RecurrentSequenceDirection, // direction + ov::test::utils::InputLayerType, // WRB input type (Constant or Parameter) + ov::element::Type, // Network precision + std::string>; // Device name + + +class LSTMSequenceTest : public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseTest { +public: + static std::string getTestCaseName(const testing::TestParamInfo &obj); +protected: + void SetUp() override; +}; +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/src/base/utils/generate_inputs.cpp b/src/tests/functional/shared_test_classes/src/base/utils/generate_inputs.cpp index 6080c4de5ab39b..b654b41feeb66b 100644 --- a/src/tests/functional/shared_test_classes/src/base/utils/generate_inputs.cpp +++ b/src/tests/functional/shared_test_classes/src/base/utils/generate_inputs.cpp @@ -632,7 +632,7 @@ ov::runtime::Tensor generate(const std::shared_ptr& no return Activation::generate(elemType, targetShape); } -ov::runtime::Tensor generate(const std::shared_ptr& node, +ov::runtime::Tensor generate(const std::shared_ptr& node, size_t port, const ov::element::Type& elemType, const ov::Shape& targetShape) { @@ -640,6 +640,10 @@ ov::runtime::Tensor generate(const std::shared_ptr unsigned int m_max_seq_len = 10; return ov::test::utils::create_and_fill_tensor(elemType, targetShape, m_max_seq_len, 0); } + if (port == 3 && node->input(0).get_partial_shape().is_static()) { + auto seq_len = node->input(0).get_shape()[1]; + return ov::test::utils::create_and_fill_tensor(elemType, targetShape, seq_len); + } return generate(std::dynamic_pointer_cast(node), port, elemType, targetShape); } diff --git a/src/tests/functional/shared_test_classes/src/single_op/loop.cpp b/src/tests/functional/shared_test_classes/src/single_op/loop.cpp new file mode 100644 index 00000000000000..d699fa51d8a550 --- /dev/null +++ b/src/tests/functional/shared_test_classes/src/single_op/loop.cpp @@ -0,0 +1,125 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "shared_test_classes/single_op/loop.hpp" + +#include 
"transformations/control_flow/unroll_tensor_iterator.hpp" +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/loop.hpp" + +namespace ov { +namespace test { +std::string LoopLayerTest::getTestCaseName(const testing::TestParamInfo &obj) { + bool execute_first_iteration; + bool is_body_condition_const; + bool body_condition; // works only if is_body_condition_const == + int64_t trip_count; + std::vector shapes; + std::vector input_types; + ov::element::Type model_type; + std::string targetDevice; + std::tie(execute_first_iteration, is_body_condition_const, body_condition, trip_count, shapes, input_types, model_type, + targetDevice) = obj.param; + + std::ostringstream result; + result << "IS=("; + for (size_t i = 0lu; i < shapes.size(); i++) { + result << ov::test::utils::partialShape2str({shapes[i].first}) << (i < shapes.size() - 1lu ? "_" : ""); + } + result << ")_TS="; + for (size_t i = 0lu; i < shapes.front().second.size(); i++) { + result << "{"; + for (size_t j = 0lu; j < shapes.size(); j++) { + result << ov::test::utils::vec2str(shapes[j].second[i]) << (j < shapes.size() - 1lu ? "_" : ""); + } + result << "}_"; + } + result << "execute_first_iteration" << execute_first_iteration << "_"; + result << "is_body_condition_const=" << is_body_condition_const << "_"; + result << "body_condition=" << body_condition << "_"; + result << "trip_count=" << trip_count << "_"; + result << "types=" << ov::test::utils::vec2str(input_types) << "_"; + result << "modelType=" << model_type.get_type_name() << "_"; + result << "targetDevice=" << targetDevice << "_"; + auto res_str = result.str(); + std::replace(res_str.begin(), res_str.end(), '-', '_'); + return res_str; +} + +void LoopLayerTest::SetUp() { + bool execute_first_iteration; + bool is_body_condition_const; + bool body_condition; // works only if is_body_condition_const == + int64_t trip_count; + std::vector shapes; + std::vector input_types; + ov::element::Type model_type; + std::tie(execute_first_iteration, is_body_condition_const, body_condition, trip_count, shapes, input_types, model_type, + targetDevice) = this->GetParam(); + init_input_shapes(shapes); + + // Example: +/* auto X = std::make_shared(ov::element::f32, ov::Shape{32, 1, 10}); + auto Y = std::make_shared(ov::element::f32, ov::Shape{32, 1, 10}); + auto M = std::make_shared(ov::element::f32, ov::Shape{32, 1, 10});*/ + ov::ParameterVector params; + for (auto&& shape : inputDynamicShapes) { + params.push_back(std::make_shared(model_type, shape)); + } + + //Example: +/* auto Xi = std::make_shared(ov::element::f32, ov::PartialShape::dynamic()); + auto Yi = std::make_shared(ov::element::f32, ov::PartialShape::dynamic()); + auto M_body = std::make_shared(ov::element::f32, ov::PartialShape::dynamic());*/ + + ov::ParameterVector body_params; + for (int i = 0; i < inputDynamicShapes.size(); i++) { + body_params.push_back(std::make_shared(model_type, ov::PartialShape::dynamic())); + } + + std::shared_ptr body_condition_const; + if (is_body_condition_const) { + body_condition_const = std::make_shared(ov::element::boolean, ov::Shape{1}, body_condition); + } + auto trip_count_const = std::make_shared(ov::element::i64, ov::Shape{1}, trip_count); + auto exec_condition = std::make_shared(ov::element::boolean, ov::Shape{1}, execute_first_iteration); + + // Body + std::shared_ptr Zo = body_params[0]; + for (int i = 1; i < body_params.size(); ++i) { + Zo = std::make_shared(body_params[i], Zo); + } + + auto body = 
std::make_shared(ov::OutputVector{body_condition_const, Zo}, body_params); + + auto loop = std::make_shared(trip_count_const, exec_condition); + loop->set_function(body); + loop->set_special_body_ports(ov::op::v5::Loop::SpecialBodyPorts{-1, 0}); + + for (int i = 0; i < body_params.size(); ++i) { + if (input_types[i] == LOOP_IN_TYPE::INVARIANT) { + loop->set_invariant_input(body_params[i], params[i]); + } else if (input_types[i] == LOOP_IN_TYPE::MERGED) { + // todo: support several merged inputs + // now supported only one in this sample + loop->set_merged_input(body_params[i], params[i], Zo); + } + } + + // Output 0 is last Zo + auto out0 = loop->get_iter_value(body_condition_const, -1); + auto out1 = loop->get_iter_value(Zo, -1); + // Output 1 is concat of Zos + // start=0, stride=1, part_size=1, end=-1, axis=1 + auto out2 = loop->get_concatenated_slices(Zo, 0, 1, 1, -1, 1); + + auto result0 = std::make_shared(out0); + auto result1 = std::make_shared(out1); + auto result2 = std::make_shared(out2); + function = std::make_shared(ov::ResultVector{result0, result1, result2}, params, "loop"); +} +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/src/single_op/lrn.cpp b/src/tests/functional/shared_test_classes/src/single_op/lrn.cpp new file mode 100644 index 00000000000000..94604dcb5f6887 --- /dev/null +++ b/src/tests/functional/shared_test_classes/src/single_op/lrn.cpp @@ -0,0 +1,66 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "shared_test_classes/single_op/lrn.hpp" + +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/lrn.hpp" + +namespace ov { +namespace test { +std::string LrnLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { + double alpha, beta, bias; + size_t size; + std::vector axes; + ov::element::Type model_type; + std::vector shapes; + std::string targetDevice; + std::tie(alpha, beta, bias, size, axes, model_type, shapes, targetDevice) = obj.param; + + std::ostringstream result; + const char separator = '_'; + result << "IS=("; + for (size_t i = 0lu; i < shapes.size(); i++) { + result << ov::test::utils::partialShape2str({shapes[i].first}) << (i < shapes.size() - 1lu ? "_" : ""); + } + result << ")_TS="; + for (size_t i = 0lu; i < shapes.front().second.size(); i++) { + result << "{"; + for (size_t j = 0lu; j < shapes.size(); j++) { + result << ov::test::utils::vec2str(shapes[j].second[i]) << (j < shapes.size() - 1lu ? 
"_" : ""); + } + result << "}_"; + } + result << "Alpha=" << alpha << separator; + result << "Beta=" << beta << separator; + result << "Bias=" << bias << separator; + result << "Size=" << size << separator; + result << "Axes=" << ov::test::utils::vec2str(axes) << separator; + result << "netPRC=" << model_type.get_type_name() << separator; + result << "trgDev=" << targetDevice; + + return result.str(); +} + +void LrnLayerTest::SetUp() { + ov::element::Type model_type; + std::vector shapes; + double alpha, beta, bias; + size_t size; + std::vector axes; + std::tie(alpha, beta, bias, size, axes, model_type, shapes, targetDevice) = GetParam(); + init_input_shapes(shapes); + + auto param = std::make_shared(model_type, inputDynamicShapes.front()); + auto axes_node = std::make_shared(ov::element::i64, ov::Shape{axes.size()}, axes.data()); + + auto lrn = std::make_shared(param, axes_node, alpha, beta, bias, size); + + auto result = std::make_shared(lrn); + function = std::make_shared(result, ov::ParameterVector{param}, "lrn"); +} +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/src/single_op/lstm_cell.cpp b/src/tests/functional/shared_test_classes/src/single_op/lstm_cell.cpp new file mode 100644 index 00000000000000..4c717bdab3e247 --- /dev/null +++ b/src/tests/functional/shared_test_classes/src/single_op/lstm_cell.cpp @@ -0,0 +1,141 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "shared_test_classes/single_op/lstm_cell.hpp" + +#include "common_test_utils/ov_tensor_utils.hpp" +#include "transformations/op_conversions/lstm_cell_decomposition.hpp" +#include "openvino/pass/manager.hpp" +#include "openvino/op/parameter.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/lstm_cell.hpp" + +namespace ov { +namespace test { +using ov::test::utils::InputLayerType; + +std::string LSTMCellTest::getTestCaseName(const testing::TestParamInfo &obj) { + bool should_decompose; + size_t batch; + size_t hidden_size; + size_t input_size; + std::vector activations; + std::vector activations_alpha; + std::vector activations_beta; + float clip; + InputLayerType WType; + InputLayerType RType; + InputLayerType BType; + ov::element::Type model_type; + std::string targetDevice; + std::tie(should_decompose, batch, hidden_size, input_size, activations, clip, WType, RType, BType, + model_type, targetDevice) = obj.param; + std::vector> input_shapes = { + {{batch, input_size}, {batch, hidden_size}, {batch, hidden_size}, {4 * hidden_size, input_size}, + {4 * hidden_size, hidden_size}, {4 * hidden_size}}, + }; + std::ostringstream result; + result << "decomposition" << should_decompose << "_"; + result << "batch=" << batch << "_"; + result << "hidden_size=" << hidden_size << "_"; + result << "input_size=" << input_size << "_"; + result << "IS=" << ov::test::utils::vec2str(input_shapes) << "_"; + result << "activations=" << ov::test::utils::vec2str(activations) << "_"; + result << "clip=" << clip << "_"; + result << "WType=" << WType << "_"; + result << "RType=" << RType << "_"; + result << "BType=" << BType << "_"; + result << "modelType=" << model_type.get_type_name() << "_"; + result << "targetDevice=" << targetDevice << "_"; + return result.str(); +} + +void LSTMCellTest::SetUp() { + bool should_decompose; + size_t batch; + size_t hidden_size; + size_t input_size; + std::vector activations; + std::vector activations_alpha; + std::vector activations_beta; + float clip; + 
InputLayerType WType; + InputLayerType RType; + InputLayerType BType; + ov::element::Type model_type; + std::tie(should_decompose, batch, hidden_size, input_size, activations, clip, WType, RType, BType, + model_type, targetDevice) = this->GetParam(); + + std::vector input_shapes = { + {batch, input_size}, + {batch, hidden_size}, + {batch, hidden_size}, + {4 * hidden_size, input_size}, + {4 * hidden_size, hidden_size}, + {4 * hidden_size} + }; + + std::vector param_shapes{input_shapes[0], input_shapes[1], input_shapes[2]}; + if (WType == InputLayerType::PARAMETER) + param_shapes.push_back(input_shapes[3]); + + if (RType == InputLayerType::PARAMETER) + param_shapes.push_back(input_shapes[4]); + + if (BType == InputLayerType::PARAMETER) + param_shapes.push_back(input_shapes[5]); + init_input_shapes(ov::test::static_shapes_to_test_representation(param_shapes)); + + ov::ParameterVector params{std::make_shared(model_type, inputDynamicShapes[0]), + std::make_shared(model_type, inputDynamicShapes[1]), + std::make_shared(model_type, inputDynamicShapes[2])}; + + + ov::NodeVector inputs{params[0], params[1], params[2]}; + if (WType == InputLayerType::PARAMETER) { + auto param = std::make_shared(model_type, inputDynamicShapes[params.size()]); + params.push_back(param); + inputs.push_back(param); + } else { + auto tensor = ov::test::utils::create_and_fill_tensor(model_type, input_shapes[3]); + auto constant = std::make_shared(tensor); + inputs.push_back(constant); + } + + if (RType == InputLayerType::PARAMETER) { + auto param = std::make_shared(model_type, inputDynamicShapes[params.size()]); + params.push_back(param); + inputs.push_back(param); + } else { + auto tensor = ov::test::utils::create_and_fill_tensor(model_type, input_shapes[4]); + auto constant = std::make_shared(tensor); + inputs.push_back(constant); + } + + if (BType == InputLayerType::PARAMETER) { + auto param = std::make_shared(model_type, inputDynamicShapes[params.size()]); + params.push_back(param); + inputs.push_back(param); + } else { + auto tensor = ov::test::utils::create_and_fill_tensor(model_type, input_shapes[5]); + auto constant = std::make_shared(tensor); + inputs.push_back(constant); + } + + auto lstm_cell = std::make_shared(inputs[0], inputs[1], inputs[2], inputs[3], inputs[4], inputs[5], + hidden_size, activations, + activations_alpha, activations_beta, clip); + + ov::ResultVector results{std::make_shared(lstm_cell->output(0)), + std::make_shared(lstm_cell->output(1))}; + function = std::make_shared(results, params, "lstm_cell"); + if (should_decompose) { + ov::pass::Manager m; + m.register_pass(); + m.run_passes(function); + } +} +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/src/single_op/lstm_sequence.cpp b/src/tests/functional/shared_test_classes/src/single_op/lstm_sequence.cpp new file mode 100644 index 00000000000000..514557ead7a72d --- /dev/null +++ b/src/tests/functional/shared_test_classes/src/single_op/lstm_sequence.cpp @@ -0,0 +1,175 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "shared_test_classes/single_op/lstm_sequence.hpp" + +#include "transformations/op_conversions/bidirectional_sequences_decomposition.hpp" +#include "transformations/op_conversions/convert_sequences_to_tensor_iterator.hpp" +#include "openvino/pass/visualize_tree.hpp" +#include "openvino/pass/manager.hpp" +#include "openvino/op/parameter.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/constant.hpp" +#include 
"openvino/op/lstm_sequence.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" +#include "ov_models/utils/ov_helpers.hpp" + + +namespace ov { +namespace test { +using ov::test::utils::SequenceTestsMode; +using ov::test::utils::InputLayerType; + +std::string LSTMSequenceTest::getTestCaseName(const testing::TestParamInfo &obj) { + SequenceTestsMode mode; + size_t seq_lengths; + size_t batch; + size_t hidden_size; + size_t input_size; + std::vector activations; + std::vector activations_alpha; + std::vector activations_beta; + float clip; + ov::op::RecurrentSequenceDirection direction; + InputLayerType WRBType; + ov::element::Type model_type; + std::string targetDevice; + std::tie(mode, seq_lengths, batch, hidden_size, input_size, activations, clip, direction, + WRBType, model_type, targetDevice) = obj.param; + std::vector> input_shapes = { + {{batch, input_size}, {batch, hidden_size}, {batch, hidden_size}, {4 * hidden_size, input_size}, + {4 * hidden_size, hidden_size}, {4 * hidden_size}}, + }; + std::ostringstream result; + result << "mode=" << mode << "_"; + result << "seq_lengths=" << seq_lengths << "_"; + result << "batch=" << batch << "_"; + result << "hidden_size=" << hidden_size << "_"; + result << "input_size=" << input_size << "_"; + result << "IS=" << ov::test::utils::vec2str(input_shapes) << "_"; + result << "activations=" << ov::test::utils::vec2str(activations) << "_"; + result << "direction=" << direction << "_"; + result << "clip=" << clip << "_"; + result << "WRBType=" << WRBType << "_"; + result << "modelType=" << model_type.get_type_name() << "_"; + result << "targetDevice=" << targetDevice << "_"; + return result.str(); +} + +void LSTMSequenceTest::SetUp() { + SequenceTestsMode mode; + size_t seq_lengths; + size_t batch; + size_t hidden_size; + size_t input_size; + std::vector activations; + std::vector activations_alpha; + std::vector activations_beta; + float clip; + ov::op::RecurrentSequenceDirection direction; + InputLayerType WRBType; + ov::element::Type model_type; + std::tie(mode, seq_lengths, batch, hidden_size, input_size, activations, clip, direction, + WRBType, model_type, targetDevice) = this->GetParam(); + size_t num_directions = direction == ov::op::RecurrentSequenceDirection::BIDIRECTIONAL ? 
2 : 1; + std::vector inputShapes = { + {batch, seq_lengths, input_size}, + {batch, num_directions, hidden_size}, + {batch, num_directions, hidden_size}, + {batch}, + {num_directions, 4 * hidden_size, input_size}, + {num_directions, 4 * hidden_size, hidden_size}, + {num_directions, 4 * hidden_size}, + }; + + const auto& W_shape = inputShapes[4]; + const auto& R_shape = inputShapes[5]; + const auto& B_shape = inputShapes[6]; + + std::vector param_shapes{inputShapes[0], inputShapes[1], inputShapes[2]}; + std::vector const_input_shapes; + if (mode == SequenceTestsMode::CONVERT_TO_TI_MAX_SEQ_LEN_PARAM || + mode == SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_PARAM || + mode == SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_PARAM) { + param_shapes.push_back(inputShapes[3]); + } + + if (WRBType == InputLayerType::PARAMETER) { + param_shapes.push_back(inputShapes[4]); + param_shapes.push_back(inputShapes[5]); + param_shapes.push_back(inputShapes[6]); + } + init_input_shapes(ov::test::static_shapes_to_test_representation(param_shapes)); + + ov::ParameterVector params{std::make_shared(model_type, inputDynamicShapes[0]), + std::make_shared(model_type, inputDynamicShapes[1]), + std::make_shared(model_type, inputDynamicShapes[2])}; + + std::shared_ptr seq_lengths_node; + if (mode == SequenceTestsMode::CONVERT_TO_TI_MAX_SEQ_LEN_PARAM || + mode == SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_PARAM || + mode == SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_PARAM) { + auto param = std::make_shared(ov::element::i64, inputDynamicShapes[3]); + seq_lengths_node = param; + seq_lengths_node->set_friendly_name("seq_lengths"); + params.push_back(param); + } else if (mode == SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_CONST || + mode == SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_CONST) { + auto tensor = ov::test::utils::create_and_fill_tensor(ov::element::i64, inputShapes[3], seq_lengths); + seq_lengths_node = std::make_shared(tensor); + } else { + std::vector lengths(inputShapes[3][0], seq_lengths); + seq_lengths_node = ov::op::v0::Constant::create(ov::element::i64, inputShapes[3], lengths); + } + + std::shared_ptr W, R, B; + if (WRBType == InputLayerType::PARAMETER) { + auto param_num = inputDynamicShapes.size(); + const auto W_param = std::make_shared(model_type, inputDynamicShapes[param_num - 3]); + const auto R_param = std::make_shared(model_type, inputDynamicShapes[param_num - 2]); + const auto B_param = std::make_shared(model_type, inputDynamicShapes[param_num - 1]); + W = W_param; + R = R_param; + B = B_param; + params.push_back(W_param); + params.push_back(R_param); + params.push_back(B_param); + } else { + auto tensor_w = ov::test::utils::create_and_fill_tensor(model_type, W_shape); + W = std::make_shared(tensor_w); + + auto tensor_r = ov::test::utils::create_and_fill_tensor(model_type, R_shape); + R = std::make_shared(tensor_r); + + auto tensor_b = ov::test::utils::create_and_fill_tensor(model_type, B_shape); + B = std::make_shared(tensor_b); + } + + auto lstm_sequence = std::make_shared(params[0], params[1], params[2], seq_lengths_node, W, R, B, hidden_size, direction, + std::vector{}, std::vector{}, activations, clip); + + ov::ResultVector results{std::make_shared(lstm_sequence->output(0)), + std::make_shared(lstm_sequence->output(1)), + std::make_shared(lstm_sequence->output(2))}; + + function = std::make_shared(results, params, "lstm_sequence"); + bool is_pure_sequence = mode == SequenceTestsMode::PURE_SEQ || + mode == SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_PARAM || + mode == 
SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_CONST; + + if (!is_pure_sequence) { + ov::pass::Manager manager; + if (direction == ov::op::RecurrentSequenceDirection::BIDIRECTIONAL) + manager.register_pass(); + manager.register_pass(); + manager.run_passes(function); + bool ti_found = ngraph::helpers::is_tensor_iterator_exist(function); + EXPECT_EQ(ti_found, true); + } else { + bool ti_found = ngraph::helpers::is_tensor_iterator_exist(function); + EXPECT_EQ(ti_found, false); + } +} +} // namespace test +} // namespace ov From 0a0d0f0aa5f21d090b6ce6341a8672dfc2048fbe Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Tue, 10 Oct 2023 22:08:20 +0400 Subject: [PATCH 137/257] Revert "Switch telemetry to opt-out approach. (#20290)" (#20370) This reverts commit b630bffa14bab108f1cb128bff5a47f45dd5bb32. --- README.md | 8 ---- docs/resources/telemetry_information.md | 40 ++++++++++++++++--- src/bindings/python/requirements.txt | 2 +- tools/constraints.txt | 2 +- .../tools/mo/utils/telemetry_utils.py | 6 +-- .../ovc/openvino/tools/ovc/telemetry_utils.py | 6 +-- 6 files changed, 39 insertions(+), 25 deletions(-) diff --git a/README.md b/README.md index e7004ffc7f57aa..adc6f9f2b965ea 100644 --- a/README.md +++ b/README.md @@ -129,14 +129,6 @@ OpenVINO™ Toolkit also contains several plugins which simplify loading models OpenVINO™ Toolkit is licensed under [Apache License Version 2.0](LICENSE). By contributing to the project, you agree to the license and copyright terms therein and release your contribution under these terms. -## Telemetry -OpenVINO™ collects software performance and usage data for the purpose of improving OpenVINO™ tools. This data is collected directly by OpenVINO™ or through the use of Google Analytics 4. -You can opt-out at any time by running the command: - -opt_in_out --opt_out - -More Information is available at https://docs.openvino.ai/latest/openvino_docs_telemetry_information.html. - ## Documentation ### User documentation diff --git a/docs/resources/telemetry_information.md b/docs/resources/telemetry_information.md index b23a763ff97e70..4340a40923770b 100644 --- a/docs/resources/telemetry_information.md +++ b/docs/resources/telemetry_information.md @@ -3,11 +3,13 @@ @sphinxdirective .. meta:: - :description: Learn about OpenVINO™ telemetry, that collects anonymous usage data for the purpose of improving OpenVINO™ tools. + :description: Learn about OpenVINO™ telemetry, that with your explicit consent + collects only usage data to simplify debugging and further development. -To facilitate debugging and further development, OpenVINO™ collects anonymous telemetry data. Anonymous telemetry data is collected by default, -but you can stop data collection anytime by running the command ``opt_in_out --opt_out``. +To facilitate debugging and further development, OpenVINO™ asks its users for +a permission to collect telemetry data. It will not be collected +without an explicit consent on your part and will cover only OpenVINO™ usage information. It does not extend to any other Intel software, hardware, website usage, or other products. Google Analytics is used for telemetry purposes. Refer to @@ -16,6 +18,34 @@ Google Analytics is used for telemetry purposes. 
Refer to
 Enable or disable Telemetry reporting
 ###########################################################
 
+First-run consent
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+On the first run of an application that collects telemetry data, you will be prompted
+to opt in or out of telemetry collection with the following telemetry message:
+
+.. code-block:: console
+
+   Intel would like your permission to collect software performance and usage data
+   for the purpose of improving Intel products and services. This data will be collected
+   directly by Intel or through the use of Google Analytics. This data will be stored
+   in countries where Intel or Google operate.
+
+   You can opt-out at any time in the future by running ``opt_in_out --opt_out``.
+
+   More Information is available at docs.openvino.ai.
+
+   Please type ``Y`` to give your consent or ``N`` to decline.
+
+Choose your preference by typing ``Y`` to enable or ``N`` to disable telemetry. Your choice will
+be confirmed by a corresponding disclaimer. If you do not reply to the telemetry message,
+your telemetry data will not be collected.
+
+For the Neural Network Compression Framework (NNCF), which is not a command line application,
+the telemetry message will not display. Telemetry data will only be collected from NNCF
+if you have explicitly provided consent in another OpenVINO tool.
+
+
 Changing consent decision
 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
@@ -52,8 +82,8 @@ Telemetry Data Collection Details
    .. tab-item:: Telemetry Data Retention
       :sync: telemetry-data-retention
 
-      Telemetry data is retained in Google Analytics for a maximum of 14 months.
-      Any raw data that has reached the 14-month threshold is deleted from Google Analytics on a monthly basis.
+      Telemetry data is retained in Google Analytics for a maximum of 26 months.
+      Any raw data that has reached the 26-month threshold is deleted from Google Analytics on a monthly basis.
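The consent dialog above is driven by the openvino-telemetry package, which this patch reverts the tools to using with its default opt-in behaviour (the ``enable_opt_in_dialog=False`` argument is dropped in the ``telemetry_utils.py`` hunks later in this patch). The following is only a rough sketch of how a tool ends up using that client: the constructor arguments mirror the four shown in those hunks, while the ``start_session``/``send_event``/``end_session`` calls and the literal placeholder values are assumptions, not part of this patch.

.. code-block:: python

   import openvino_telemetry as tm

   # A GA4-backed client built from the four arguments shown in the hunks below.
   # The real code takes tid and app_version from repo helpers (get_tid,
   # get_rt_version); placeholder literals are used here instead.
   telemetry = tm.Telemetry(tid='G-XXXXXXXXXX',         # placeholder measurement ID
                            app_name='Model Optimizer',
                            app_version='2023.2.0',     # placeholder version string
                            backend='ga4')

   # Assumed usage pattern: events are dispatched only if the user consented
   # via the dialog above (or with `opt_in_out --opt_in`).
   telemetry.start_session('mo')
   telemetry.send_event('mo', 'framework', 'tf')
   telemetry.end_session('mo')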
@endsphinxdirective \ No newline at end of file diff --git a/src/bindings/python/requirements.txt b/src/bindings/python/requirements.txt index 92e55596bd6227..72438eeb2ecd91 100644 --- a/src/bindings/python/requirements.txt +++ b/src/bindings/python/requirements.txt @@ -1,3 +1,3 @@ numpy>=1.16.6 singledispatchmethod; python_version<'3.8' -openvino-telemetry>=2023.2.0 +openvino-telemetry>=2023.1.0 diff --git a/tools/constraints.txt b/tools/constraints.txt index 484466b5bda48e..18a3080d3a1e78 100644 --- a/tools/constraints.txt +++ b/tools/constraints.txt @@ -18,4 +18,4 @@ pyenchant>=3.0.0 test-generator==0.1.1 py>=1.9.0 urllib3>=1.26.4 -openvino-telemetry>=2023.2.0 +openvino-telemetry>=2023.1.0 diff --git a/tools/mo/openvino/tools/mo/utils/telemetry_utils.py b/tools/mo/openvino/tools/mo/utils/telemetry_utils.py index 09ecd528ae28e1..802986edf4c4c0 100644 --- a/tools/mo/openvino/tools/mo/utils/telemetry_utils.py +++ b/tools/mo/openvino/tools/mo/utils/telemetry_utils.py @@ -22,11 +22,7 @@ def init_mo_telemetry(app_name='Model Optimizer'): - return tm.Telemetry(tid=get_tid(), - app_name=app_name, - app_version=get_rt_version(), - backend='ga4', - enable_opt_in_dialog=False) + return tm.Telemetry(tid=get_tid(), app_name=app_name, app_version=get_rt_version(), backend='ga4') def send_framework_info(framework: str): diff --git a/tools/ovc/openvino/tools/ovc/telemetry_utils.py b/tools/ovc/openvino/tools/ovc/telemetry_utils.py index 812575c1fba8f6..87e0132ccd17a6 100644 --- a/tools/ovc/openvino/tools/ovc/telemetry_utils.py +++ b/tools/ovc/openvino/tools/ovc/telemetry_utils.py @@ -17,11 +17,7 @@ def init_mo_telemetry(app_name='Model Conversion API'): - return tm.Telemetry(tid=get_tid(), - app_name=app_name, - app_version=get_rt_version(), - backend='ga4', - enable_opt_in_dialog=False) + return tm.Telemetry(tid=get_tid(), app_name=app_name, app_version=get_rt_version(), backend='ga4') def send_framework_info(framework: str): """ From c61fce428ea6e416cc05db12f893bfcd4aa669e4 Mon Sep 17 00:00:00 2001 From: Oleg Pipikin Date: Tue, 10 Oct 2023 21:54:46 +0200 Subject: [PATCH 138/257] Refactor PowerLayerTest, PriorBoxClusteredLayerTest, PriorBoxLayerTest (#20349) * Refactor PowerLayerTest * Refactor PriorBoxClusteredLayerTest * Refactor PriorBoxLayerTest --- .../single_layer_tests/prior_box.cpp | 25 ++-- .../prior_box_clustered.cpp | 32 ++--- .../single_layer_tests/power.cpp | 67 +++++----- .../shared/include/single_op_tests/power.hpp | 15 +++ .../include/single_op_tests/prior_box.hpp | 15 +++ .../single_op_tests/prior_box_clustered.hpp | 15 +++ .../shared_test_classes/single_op/power.hpp | 30 +++++ .../single_op/prior_box.hpp | 46 +++++++ .../single_op/prior_box_clustered.hpp | 40 ++++++ .../src/single_op/power.cpp | 57 +++++++++ .../src/single_op/prior_box.cpp | 117 ++++++++++++++++++ .../src/single_op/prior_box_clustered.cpp | 95 ++++++++++++++ 12 files changed, 485 insertions(+), 69 deletions(-) create mode 100644 src/tests/functional/plugin/shared/include/single_op_tests/power.hpp create mode 100644 src/tests/functional/plugin/shared/include/single_op_tests/prior_box.hpp create mode 100644 src/tests/functional/plugin/shared/include/single_op_tests/prior_box_clustered.hpp create mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/power.hpp create mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/prior_box.hpp create mode 100644 
src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/prior_box_clustered.hpp create mode 100644 src/tests/functional/shared_test_classes/src/single_op/power.cpp create mode 100644 src/tests/functional/shared_test_classes/src/single_op/prior_box.cpp create mode 100644 src/tests/functional/shared_test_classes/src/single_op/prior_box_clustered.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/prior_box.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/prior_box.cpp index 6c6a08be138b74..3b75acd67bc26d 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/prior_box.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/prior_box.cpp @@ -4,14 +4,16 @@ #include -#include "single_layer_tests/prior_box.hpp" +#include "single_op_tests/prior_box.hpp" #include "common_test_utils/test_constants.hpp" -using namespace LayerTestsDefinitions; +namespace { +using ov::test::PriorBoxLayerTest; + +const std::vector model_types = { + ov::element::i32, + ov::element::u16}; -const std::vector netPrecisions = { - InferenceEngine::Precision::I32, - InferenceEngine::Precision::U16}; const std::vector> min_sizes = { {256.0f}}; @@ -53,8 +55,7 @@ const std::vector scale_all_sizes = { const std::vector min_max_aspect_ratios_order = { false, true}; -const std::vector inputShape = {300, 300}; -const std::vector imageShape = {32, 32}; +const std::vector input_shapes_static = {{300, 300}, {32, 32}}; const auto layerSpecificParams = ::testing::Combine( ::testing::ValuesIn(min_sizes), @@ -74,12 +75,8 @@ const auto layerSpecificParams = ::testing::Combine( INSTANTIATE_TEST_SUITE_P(smoke_PriorBox_Basic, PriorBoxLayerTest, ::testing::Combine( layerSpecificParams, - ::testing::ValuesIn(netPrecisions), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(inputShape), - ::testing::Values(imageShape), + ::testing::ValuesIn(model_types), + ::testing::Values(ov::test::static_shapes_to_test_representation(input_shapes_static)), ::testing::Values(ov::test::utils::DEVICE_CPU)), PriorBoxLayerTest::getTestCaseName); +} // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/prior_box_clustered.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/prior_box_clustered.cpp index 34dfbf6031ec6c..2319c7367fb151 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/prior_box_clustered.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/prior_box_clustered.cpp @@ -3,17 +3,15 @@ // #include -#include "single_layer_tests/prior_box_clustered.hpp" +#include "single_op_tests/prior_box_clustered.hpp" #include "common_test_utils/test_constants.hpp" -using namespace LayerTestsDefinitions; -using namespace ngraph::helpers; - namespace { +using ov::test::PriorBoxClusteredLayerTest; // Common params -const std::vector netPrecisions = { - InferenceEngine::Precision::FP32, - InferenceEngine::Precision::FP16 +const std::vector model_types = { + ov::element::f32, + ov::element::f16 }; const std::vector> widths = { @@ -63,18 +61,14 @@ const auto layerSpeficParams = ::testing::Combine( ::testing::ValuesIn(variances) ); 
+std::vector input_shapes_static = {{4, 4}, {50, 50}}; + INSTANTIATE_TEST_SUITE_P(smoke_PriorBoxClustered_Basic, PriorBoxClusteredLayerTest, - ::testing::Combine( - layerSpeficParams, - ::testing::ValuesIn(netPrecisions), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(std::vector({ 4, 4 })), - ::testing::Values(std::vector({ 50, 50 })), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - PriorBoxClusteredLayerTest::getTestCaseName + ::testing::Combine( + layerSpeficParams, + ::testing::ValuesIn(model_types), + ::testing::Values(ov::test::static_shapes_to_test_representation(input_shapes_static)), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + PriorBoxClusteredLayerTest::getTestCaseName ); - } // namespace diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/power.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/power.cpp index d3af6893b3e2f5..951df90937f729 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/power.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/power.cpp @@ -2,47 +2,42 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include "single_layer_tests/power.hpp" +#include "single_op_tests/power.hpp" #include "common_test_utils/test_constants.hpp" -using namespace LayerTestsDefinitions; - namespace { +using ov::test::PowerLayerTest; - std::vector>> inShapes = { - {{1, 8}}, - {{2, 16}}, - {{3, 32}}, - {{4, 64}}, - {{5, 128}}, - {{6, 256}}, - {{7, 512}}, - {{8, 1024}} - }; +std::vector> input_shape_static = { + {{1, 8}}, + {{2, 16}}, + {{3, 32}}, + {{4, 64}}, + {{5, 128}}, + {{6, 256}}, + {{7, 512}}, + {{8, 1024}} +}; - std::vector> Power = { - {0.0f}, - {0.5f}, - {1.0f}, - {1.1f}, - {1.5f}, - {2.0f}, - }; +std::vector> powers = { + {0.0f}, + {0.5f}, + {1.0f}, + {1.1f}, + {1.5f}, + {2.0f}, +}; - std::vector netPrecisions = {InferenceEngine::Precision::FP32, - InferenceEngine::Precision::FP16, - }; +std::vector model_types = { + ov::element::f32, + ov::element::f16, +}; - INSTANTIATE_TEST_SUITE_P(smoke_power, PowerLayerTest, - ::testing::Combine( - ::testing::ValuesIn(inShapes), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(ov::test::utils::DEVICE_GPU), - ::testing::ValuesIn(Power)), - PowerLayerTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_power, PowerLayerTest, + ::testing::Combine( + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shape_static)), + ::testing::ValuesIn(model_types), + ::testing::ValuesIn(powers), + ::testing::Values(ov::test::utils::DEVICE_GPU)), + PowerLayerTest::getTestCaseName); } // namespace diff --git a/src/tests/functional/plugin/shared/include/single_op_tests/power.hpp b/src/tests/functional/plugin/shared/include/single_op_tests/power.hpp new file mode 100644 index 00000000000000..922ccbcbb345cc --- /dev/null +++ b/src/tests/functional/plugin/shared/include/single_op_tests/power.hpp @@ -0,0 +1,15 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include 
"shared_test_classes/single_op/power.hpp" + +namespace ov { +namespace test { +TEST_P(PowerLayerTest, Inference){ + run(); +}; +} // namespace test +} // namespace ov diff --git a/src/tests/functional/plugin/shared/include/single_op_tests/prior_box.hpp b/src/tests/functional/plugin/shared/include/single_op_tests/prior_box.hpp new file mode 100644 index 00000000000000..707aadec0e8b29 --- /dev/null +++ b/src/tests/functional/plugin/shared/include/single_op_tests/prior_box.hpp @@ -0,0 +1,15 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "shared_test_classes/single_op/prior_box.hpp" + +namespace ov { +namespace test { +TEST_P(PriorBoxLayerTest, Inference) { + run(); +} +} // namespace test +} // namespace ov diff --git a/src/tests/functional/plugin/shared/include/single_op_tests/prior_box_clustered.hpp b/src/tests/functional/plugin/shared/include/single_op_tests/prior_box_clustered.hpp new file mode 100644 index 00000000000000..389bf2309b7831 --- /dev/null +++ b/src/tests/functional/plugin/shared/include/single_op_tests/prior_box_clustered.hpp @@ -0,0 +1,15 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "shared_test_classes/single_op/prior_box_clustered.hpp" + +namespace ov { +namespace test { +TEST_P(PriorBoxClusteredLayerTest, Inference) { + run(); +}; +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/power.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/power.hpp new file mode 100644 index 00000000000000..28634d34428819 --- /dev/null +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/power.hpp @@ -0,0 +1,30 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include + +#include "shared_test_classes/base/ov_subgraph.hpp" + +namespace ov { +namespace test { +using PowerParamsTuple = typename std::tuple< + std::vector, // Input shapes + ov::element::Type, // Model type + std::vector, // Power + std::string>; // Device name + +class PowerLayerTest: + public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseTest{ +public: + static std::string getTestCaseName(const testing::TestParamInfo &obj); +protected: + void SetUp() override; +}; +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/prior_box.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/prior_box.hpp new file mode 100644 index 00000000000000..e48759fab47589 --- /dev/null +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/prior_box.hpp @@ -0,0 +1,46 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include + +#include "shared_test_classes/base/ov_subgraph.hpp" + +namespace ov { +namespace test { +using priorBoxSpecificParams = std::tuple< + std::vector, // min_size + std::vector, // max_size + std::vector, // aspect_ratio + std::vector, // density + std::vector, // fixed_ratio + std::vector, // fixed_size + bool, // clip + bool, // flip + float, // step + float, // offset + std::vector, // variance + bool, // scale_all_sizes + bool>; // min_max_aspect_ratios_order + +typedef std::tuple< + priorBoxSpecificParams, + 
ov::element::Type, // model type + std::vector, // input shape + std::string> priorBoxLayerParams; + +class PriorBoxLayerTest + : public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseStaticTest { +public: + static std::string getTestCaseName(const testing::TestParamInfo& obj); +protected: + void SetUp() override; +}; + +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/prior_box_clustered.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/prior_box_clustered.hpp new file mode 100644 index 00000000000000..61af34ccccf9d2 --- /dev/null +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/prior_box_clustered.hpp @@ -0,0 +1,40 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include + +#include "shared_test_classes/base/ov_subgraph.hpp" + +namespace ov { +namespace test { +typedef std::tuple< + std::vector, // widths + std::vector, // heights + bool, // clip + float, // step_width + float, // step_height + float, // step + float, // offset + std::vector> priorBoxClusteredSpecificParams; + +typedef std::tuple< + priorBoxClusteredSpecificParams, + ov::element::Type, // Model type + std::vector, // Input shape + std::string> priorBoxClusteredLayerParams; + +class PriorBoxClusteredLayerTest : public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseTest { +public: + static std::string getTestCaseName(const testing::TestParamInfo& obj); + +protected: + void SetUp() override; +}; +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/src/single_op/power.cpp b/src/tests/functional/shared_test_classes/src/single_op/power.cpp new file mode 100644 index 00000000000000..54da5487a41b32 --- /dev/null +++ b/src/tests/functional/shared_test_classes/src/single_op/power.cpp @@ -0,0 +1,57 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "shared_test_classes/single_op/power.hpp" + +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/power.hpp" + +namespace ov { +namespace test { +std::string PowerLayerTest::getTestCaseName(const testing::TestParamInfo &obj) { + std::vector shapes; + ov::element::Type model_type; + std::string device_name; + std::vector power; + std::tie(shapes, model_type, power, device_name) = obj.param; + + std::ostringstream result; + result << "IS=("; + for (size_t i = 0lu; i < shapes.size(); i++) { + result << ov::test::utils::partialShape2str({shapes[i].first}) << (i < shapes.size() - 1lu ? "_" : ""); + } + result << ")_TS="; + for (size_t i = 0lu; i < shapes.front().second.size(); i++) { + result << "{"; + for (size_t j = 0lu; j < shapes.size(); j++) { + result << ov::test::utils::vec2str(shapes[j].second[i]) << (j < shapes.size() - 1lu ? 
"_" : ""); + } + result << "}_"; + } + result << "Power=" << ov::test::utils::vec2str(power) << "_"; + result << "netPRC=" << model_type.get_type_name() << "_"; + result << "trgDev=" << device_name << "_"; + return result.str(); +} + +void PowerLayerTest::SetUp() { + abs_threshold = 0.04f; + + std::vector shapes; + ov::element::Type model_type; + std::vector power; + std::tie(shapes, model_type, power, targetDevice) = this->GetParam(); + init_input_shapes(shapes); + + auto param = std::make_shared(model_type, inputDynamicShapes.front()); + + auto power_const = std::make_shared(model_type, ngraph::Shape{1}, power); + auto pow = std::make_shared(param, power_const); + + function = std::make_shared(pow, ov::ParameterVector{param}, "power"); +} +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/src/single_op/prior_box.cpp b/src/tests/functional/shared_test_classes/src/single_op/prior_box.cpp new file mode 100644 index 00000000000000..9f297a1f07c505 --- /dev/null +++ b/src/tests/functional/shared_test_classes/src/single_op/prior_box.cpp @@ -0,0 +1,117 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "shared_test_classes/single_op/prior_box.hpp" + +#include "openvino/pass/constant_folding.hpp" +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/shape_of.hpp" +#include "openvino/op/prior_box_clustered.hpp" + +namespace ov { +namespace test { +std::string PriorBoxLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { + ov::element::Type model_type; + std::vector shapes; + std::string target_device; + priorBoxSpecificParams spec_params; + std::tie(spec_params, model_type, shapes, target_device) = obj.param; + + std::vector min_size, max_size, aspect_ratio, density, fixed_ratio, fixed_size, variance; + float step, offset; + bool clip, flip, scale_all_sizes, min_max_aspect_ratios_order; + std::tie(min_size, max_size, aspect_ratio, density, fixed_ratio, fixed_size, clip, + flip, step, offset, variance, scale_all_sizes, min_max_aspect_ratios_order) = spec_params; + + std::ostringstream result; + const char separator = '_'; + result << "IS=("; + for (size_t i = 0lu; i < shapes.size(); i++) { + result << ov::test::utils::partialShape2str({shapes[i].first}) << (i < shapes.size() - 1lu ? "_" : ""); + } + result << ")_TS="; + for (size_t i = 0lu; i < shapes.front().second.size(); i++) { + result << "{"; + for (size_t j = 0lu; j < shapes.size(); j++) { + result << ov::test::utils::vec2str(shapes[j].second[i]) << (j < shapes.size() - 1lu ? 
"_" : ""); + } + result << "}_"; + } + result << "netPRC=" << model_type.get_type_name() << separator; + result << "min_s=" << ov::test::utils::vec2str(min_size) << separator; + result << "max_s=" << ov::test::utils::vec2str(max_size)<< separator; + result << "asp_r=" << ov::test::utils::vec2str(aspect_ratio)<< separator; + result << "dens=" << ov::test::utils::vec2str(density)<< separator; + result << "fix_r=" << ov::test::utils::vec2str(fixed_ratio)<< separator; + result << "fix_s=" << ov::test::utils::vec2str(fixed_size)<< separator; + result << "var=" << ov::test::utils::vec2str(variance)<< separator; + result << "step=" << step << separator; + result << "off=" << offset << separator; + result << "clip=" << clip << separator; + result << "flip=" << flip<< separator; + result << "scale_all=" << scale_all_sizes << separator; + result << "min_max_aspect_ratios_order=" << min_max_aspect_ratios_order << separator; + result << "trgDev=" << target_device; + + return result.str(); +} + +void PriorBoxLayerTest::SetUp() { + ov::element::Type model_type; + std::vector shapes; + std::vector min_size; + std::vector max_size; + std::vector aspect_ratio; + std::vector density; + std::vector fixed_ratio; + std::vector fixed_size; + std::vector variance; + float step; + float offset; + bool clip; + bool flip; + bool scale_all_sizes; + bool min_max_aspect_ratios_order; + + priorBoxSpecificParams spec_params; + std::tie(spec_params, model_type, shapes, targetDevice) = GetParam(); + + std::tie(min_size, max_size, aspect_ratio, density, fixed_ratio, fixed_size, clip, + flip, step, offset, variance, scale_all_sizes, min_max_aspect_ratios_order) = spec_params; + init_input_shapes(shapes); + + ov::ParameterVector params{std::make_shared(model_type, inputDynamicShapes[0]), + std::make_shared(model_type, inputDynamicShapes[1])}; + + ov::op::v8::PriorBox::Attributes attributes; + attributes.min_size = min_size; + attributes.max_size = max_size; + attributes.aspect_ratio = aspect_ratio; + attributes.density = density; + attributes.fixed_ratio = fixed_ratio; + attributes.fixed_size = fixed_size; + attributes.variance = variance; + attributes.step = step; + attributes.offset = offset; + attributes.clip = clip; + attributes.flip = flip; + attributes.scale_all_sizes = scale_all_sizes; + attributes.min_max_aspect_ratios_order = min_max_aspect_ratios_order; + + auto shape_of_1 = std::make_shared(params[0]); + auto shape_of_2 = std::make_shared(params[1]); + auto priorBox = std::make_shared( + shape_of_1, + shape_of_2, + attributes); + + ov::pass::disable_constant_folding(priorBox); + + auto result = std::make_shared(priorBox); + function = std::make_shared (result, params, "PriorBoxFunction"); +} +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/src/single_op/prior_box_clustered.cpp b/src/tests/functional/shared_test_classes/src/single_op/prior_box_clustered.cpp new file mode 100644 index 00000000000000..a630c498ee69a3 --- /dev/null +++ b/src/tests/functional/shared_test_classes/src/single_op/prior_box_clustered.cpp @@ -0,0 +1,95 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "shared_test_classes/single_op/prior_box_clustered.hpp" + +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/shape_of.hpp" +#include "openvino/op/prior_box_clustered.hpp" + +namespace ov { +namespace test { +std::string 
PriorBoxClusteredLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { + ov::element::Type model_type; + std::vector shapes; + std::string target_device; + priorBoxClusteredSpecificParams specParams; + std::tie(specParams, model_type, shapes, target_device) = obj.param; + + std::vector widths, heights, variances; + float step_width, step_height, step, offset; + bool clip; + std::tie(widths, heights, clip, step_width, step_height, step, offset, variances) = specParams; + + std::ostringstream result; + const char separator = '_'; + result << "IS=("; + for (size_t i = 0lu; i < shapes.size(); i++) { + result << ov::test::utils::partialShape2str({shapes[i].first}) << (i < shapes.size() - 1lu ? "_" : ""); + } + result << ")_TS="; + for (size_t i = 0lu; i < shapes.front().second.size(); i++) { + result << "{"; + for (size_t j = 0lu; j < shapes.size(); j++) { + result << ov::test::utils::vec2str(shapes[j].second[i]) << (j < shapes.size() - 1lu ? "_" : ""); + } + result << "}_"; + } + result << "netPRC=" << model_type.get_type_name() << separator; + result << "widths=" << ov::test::utils::vec2str(widths) << separator; + result << "heights=" << ov::test::utils::vec2str(heights) << separator; + result << "variances="; + if (variances.empty()) + result << "()" << separator; + else + result << ov::test::utils::vec2str(variances) << separator; + result << "stepWidth=" << step_width << separator; + result << "stepHeight=" << step_height << separator; + result << "step=" << step << separator; + result << "offset=" << offset << separator; + result << "clip=" << std::boolalpha << clip << separator; + result << "trgDev=" << target_device; + return result.str(); +} + +void PriorBoxClusteredLayerTest::SetUp() { + std::vector shapes; + ov::element::Type model_type; + std::vector widths; + std::vector heights; + std::vector variances; + float step_width; + float step_height; + float step; + float offset; + bool clip; + priorBoxClusteredSpecificParams specParams; + std::tie(specParams, model_type, shapes, targetDevice) = GetParam(); + std::tie(widths, heights, clip, step_width, step_height, step, offset, variances) = specParams; + init_input_shapes(shapes); + + ov::ParameterVector params{std::make_shared(model_type, inputDynamicShapes[0]), + std::make_shared(model_type, inputDynamicShapes[1])}; + + ov::op::v0::PriorBoxClustered::Attributes attributes; + attributes.widths = widths; + attributes.heights = heights; + attributes.clip = clip; + attributes.step_widths = step_width; + attributes.step_heights = step_height; + attributes.step = step; + attributes.offset = offset; + attributes.variances = variances; + + auto shape_of_1 = std::make_shared(params[0]); + auto shape_of_2 = std::make_shared(params[1]); + auto prior_box_clustered = std::make_shared(shape_of_1, shape_of_2, attributes); + + auto result = std::make_shared(prior_box_clustered); + function = std::make_shared(result, params, "PB_Clustered"); +} +} // namespace test +} // namespace ov From b3ead626310b1eff3fe2d5374b287bc908c162f4 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Wed, 11 Oct 2023 00:38:23 +0400 Subject: [PATCH 139/257] Fixed numpy deprecation error (#20375) --- tests/layer_tests/tensorflow_lite_tests/test_tfl_ScatterND.py | 4 ++-- .../tensorflow_lite_tests/test_tfl_StridedSlice.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/layer_tests/tensorflow_lite_tests/test_tfl_ScatterND.py b/tests/layer_tests/tensorflow_lite_tests/test_tfl_ScatterND.py index d2393dc4aa5c13..77720238b1feb3 100644 --- 
a/tests/layer_tests/tensorflow_lite_tests/test_tfl_ScatterND.py +++ b/tests/layer_tests/tensorflow_lite_tests/test_tfl_ScatterND.py @@ -13,7 +13,7 @@ 'shape_shape': [1], 'shape_value': [8]}, {'indices_shape': [4, 1], 'indices_value': [4, 3, 1, 7], 'updates_dtype': np.float32, 'updates_shape': [4], 'shape_shape': [1], 'shape_value': [8]}, - {'indices_shape': [4, 1], 'indices_value': [4, 3, 1, 7], 'updates_dtype': np.bool, 'updates_shape': [4], + {'indices_shape': [4, 1], 'indices_value': [4, 3, 1, 7], 'updates_dtype': bool, 'updates_shape': [4], 'shape_shape': [1], 'shape_value': [8]}, {'indices_shape': [4, 2], 'indices_value': [[0, 0], [1, 0], [0, 2], [1, 2]], 'updates_dtype': np.int32, @@ -22,7 +22,7 @@ 'updates_shape': [4, 5], 'shape_shape': [3], 'shape_value': [2, 3, 5]}, {'indices_shape': [4, 2], 'indices_value': [[0, 0], [1, 0], [0, 2], [1, 2]], 'updates_dtype': np.float32, 'updates_shape': [4, 5], 'shape_shape': [3], 'shape_value': [2, 3, 5]}, - {'indices_shape': [4, 2], 'indices_value': [[0, 0], [1, 0], [0, 2], [1, 2]], 'updates_dtype': np.bool, + {'indices_shape': [4, 2], 'indices_value': [[0, 0], [1, 0], [0, 2], [1, 2]], 'updates_dtype': bool, 'updates_shape': [4, 5], 'shape_shape': [3], 'shape_value': [2, 3, 5]}, ] diff --git a/tests/layer_tests/tensorflow_lite_tests/test_tfl_StridedSlice.py b/tests/layer_tests/tensorflow_lite_tests/test_tfl_StridedSlice.py index fea9364b045651..12c18d02077e2a 100644 --- a/tests/layer_tests/tensorflow_lite_tests/test_tfl_StridedSlice.py +++ b/tests/layer_tests/tensorflow_lite_tests/test_tfl_StridedSlice.py @@ -13,7 +13,7 @@ 'begin_mask': 8, 'end_mask': 3, 'shrink_axis_mask': 4}, {'shape': [12, 2, 2, 5], 'dtype': np.int64, 'strides': [1], 'begin': [0], 'end': [1], 'begin_mask': 8, 'end_mask': 3, 'shrink_axis_mask': None}, - {'shape': [12, 2, 2, 5], 'dtype': np.bool, 'strides': [1], 'begin': [0], 'end': [1], + {'shape': [12, 2, 2, 5], 'dtype': bool, 'strides': [1], 'begin': [0], 'end': [1], 'begin_mask': 8, 'end_mask': 3, 'shrink_axis_mask': None}, ] @@ -24,7 +24,7 @@ class TestTFLiteStridedSliceLayerTest(TFLiteLayerTest): allowed_ops = ['STRIDED_SLICE'] def _prepare_input(self, inputs_dict, generator=None): - if self.input_dtype == np.bool: + if self.input_dtype == bool: inputs_dict['Input'] = np.random.choice([True, False], size=inputs_dict['Input']) else: inputs_dict['Input'] = np.random.randint(-255, 255, inputs_dict['Input']).astype(self.input_dtype) From df55e282e38b43ad581638dceb7d05a9d6818a9e Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Wed, 11 Oct 2023 02:18:14 +0400 Subject: [PATCH 140/257] Update tensorflow requirement in /src/bindings/python (#20372) Updates the requirements on [tensorflow](https://github.com/tensorflow/tensorflow) to permit the latest version. - [Release notes](https://github.com/tensorflow/tensorflow/releases) - [Changelog](https://github.com/tensorflow/tensorflow/blob/master/RELEASE.md) - [Commits](https://github.com/tensorflow/tensorflow/compare/v1.15.5...v2.14.0) --- updated-dependencies: - dependency-name: tensorflow dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- tests/constraints.txt | 1 + tools/mo/requirements_tf.txt | 2 +- tools/mo/requirements_tf2.txt | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/constraints.txt b/tests/constraints.txt index 671c60bc937c83..7abffee14c8ce2 100644 --- a/tests/constraints.txt +++ b/tests/constraints.txt @@ -11,6 +11,7 @@ scipy>=1.11.1; python_version >= "3.9" wheel>=0.38.1 defusedxml>=0.7.1 fastjsonschema~=2.17.1 +tensorflow>=2.5,<2.14.0 test-generator==0.1.2 requests>=2.25.1 opencv-python>=4.5 diff --git a/tools/mo/requirements_tf.txt b/tools/mo/requirements_tf.txt index 548f30808083a1..240b60351a6cad 100644 --- a/tools/mo/requirements_tf.txt +++ b/tools/mo/requirements_tf.txt @@ -1,5 +1,5 @@ -c ../constraints.txt -tensorflow>=1.15.5,<2.13.0 +tensorflow>=1.15.5,<2.14.0 numpy>=1.16.6,<1.26 networkx defusedxml diff --git a/tools/mo/requirements_tf2.txt b/tools/mo/requirements_tf2.txt index a96ed84004f984..1b955f23d0feea 100644 --- a/tools/mo/requirements_tf2.txt +++ b/tools/mo/requirements_tf2.txt @@ -1,5 +1,5 @@ -c ../constraints.txt -tensorflow>=2.5,<2.13.0 +tensorflow>=2.5,<2.14.0 numpy>=1.16.6,<1.26 networkx defusedxml From e24b6211e3fa217b1678913c3bd4f7f972ea7402 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Piotr=20Krzemi=C5=84ski?= Date: Wed, 11 Oct 2023 00:51:59 +0200 Subject: [PATCH 141/257] [BUGFIX][Core][Template] Multinomial shape filling for 1D input (#20359) * [BUGFIX] Fix incorrect shape filling for 1D tensor smaller than requested sample size * [FIX] Remove redeclaration --- .../openvino/reference/multinomial.hpp | 7 ++- src/core/src/op/multinomial.cpp | 9 ++-- .../functional/op_reference/multinomial.cpp | 50 ++++++++++++------- 3 files changed, 40 insertions(+), 26 deletions(-) diff --git a/src/core/reference/include/openvino/reference/multinomial.hpp b/src/core/reference/include/openvino/reference/multinomial.hpp index fc141d1204cbea..ce9fe0a52d7c61 100644 --- a/src/core/reference/include/openvino/reference/multinomial.hpp +++ b/src/core/reference/include/openvino/reference/multinomial.hpp @@ -113,8 +113,7 @@ void multinomial(const T* probs, auto batch_size = probs_shape.size() == 2 ? static_cast(probs_shape[0]) : static_cast(1); auto class_size = probs_shape.size() == 2 ? static_cast(probs_shape[1]) : static_cast(probs_shape[0]); - auto samples_size = - probs_shape.size() == 2 ? static_cast(num_samples[0]) : static_cast(probs_shape[0]); + auto samples_size = static_cast(num_samples[0]); // Iterate over each channel in uniform samples std::vector output_samples(total_output_elements_count); @@ -132,8 +131,8 @@ void multinomial(const T* probs, break; } } - // Additional step with replacement - change probability of a given class to 0, and update the cdf - if (with_replacement) { + // Additional step without replacement - change probability of a given class to 0, and update the cdf + if (!with_replacement) { T class_probability = selected_class_idx ? 
cdf[i_translated + selected_class_idx] - cdf[i_translated + selected_class_idx - 1] : cdf[i_translated + selected_class_idx]; diff --git a/src/core/src/op/multinomial.cpp b/src/core/src/op/multinomial.cpp index 0dd4a93867d74a..90f41369364879 100644 --- a/src/core/src/op/multinomial.cpp +++ b/src/core/src/op/multinomial.cpp @@ -116,12 +116,13 @@ namespace multinomial { namespace validate { void input_types(const Node* op) { NODE_VALIDATION_CHECK(op, - op->get_input_element_type(0).is_real(), + op->get_input_element_type(0).is_real() || op->get_input_element_type(0).is_dynamic(), "Expected floating point type as element type for the 'probs' input."); - NODE_VALIDATION_CHECK(op, - op->get_input_element_type(1).is_integral_number(), - "Expected integer type as element type for the 'num_samples' input."); + NODE_VALIDATION_CHECK( + op, + op->get_input_element_type(1).is_integral_number() || op->get_input_element_type(1).is_dynamic(), + "Expected integer type as element type for the 'num_samples' input."); } } // namespace validate } // namespace multinomial diff --git a/src/plugins/template/tests/functional/op_reference/multinomial.cpp b/src/plugins/template/tests/functional/op_reference/multinomial.cpp index d2edf5bedd9d60..25159ae3ee2a22 100644 --- a/src/plugins/template/tests/functional/op_reference/multinomial.cpp +++ b/src/plugins/template/tests/functional/op_reference/multinomial.cpp @@ -86,8 +86,11 @@ std::vector generateMultinomialParams() { const ov::Shape prob_2d_shape{2, 4}; const ov::Shape prob_1d_shape{4}; const ov::Shape num_samples_shape{1}; + const ov::Shape prob_1d_shape_expand_small{2}; + const ov::Shape out_1d_shape_expand_big{16}; reference_tests::Tensor num_samples(num_samples_shape, ov::element::Type_t::i32, std::vector{4}); + reference_tests::Tensor num_samples_big(num_samples_shape, ov::element::Type_t::i32, std::vector{16}); reference_tests::Tensor probabilities_2d_no_log(prob_2d_shape, et, @@ -95,50 +98,61 @@ std::vector generateMultinomialParams() { reference_tests::Tensor probabilities_2d_log(prob_2d_shape, et, std::vector{1, 2, 3, 4, 2, 4, 6, 8}); reference_tests::Tensor probabilities_1d_no_log(prob_1d_shape, et, std::vector{0.001, 0.01, 0.1, 0.899}); reference_tests::Tensor probabilities_1d_log(prob_1d_shape, et, std::vector{1, 10, 7, 3}); + reference_tests::Tensor probabilities_1d_expand(prob_1d_shape_expand_small, et, std::vector{0.00001, 0.99999}); - reference_tests::Tensor output_2d_no_log_no_replacement(prob_2d_shape, - ov::element::Type_t::i32, - std::vector{3, 3, 3, 3, 0, 0, 0, 0}); - reference_tests::Tensor output_2d_log_no_replacement(prob_2d_shape, + reference_tests::Tensor output_2d_no_log_replacement(prob_2d_shape, ov::element::Type_t::i32, - std::vector{3, 3, 2, 3, 3, 3, 3, 3}); - reference_tests::Tensor output_1d_no_log_replacement(prob_1d_shape, + std::vector{3, 3, 3, 3, 0, 0, 0, 0}); + reference_tests::Tensor output_2d_log_replacement(prob_2d_shape, + ov::element::Type_t::i32, + std::vector{3, 3, 2, 3, 3, 3, 3, 3}); + reference_tests::Tensor output_1d_no_log_no_replacement(prob_1d_shape, + ov::element::Type_t::i64, + std::vector{3, 2, 1, 0}); + reference_tests::Tensor output_1d_log_no_replacement(prob_1d_shape, ov::element::Type_t::i64, - std::vector{3, 2, 1, 0}); - reference_tests::Tensor output_1d_log_replacement(prob_1d_shape, - ov::element::Type_t::i64, - std::vector{1, 2, 3, 0}); + std::vector{1, 2, 3, 0}); + reference_tests::Tensor output_1d_expand(out_1d_shape_expand_big, + ov::element::Type_t::i64, + std::vector{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1}); std::vector params; // probabilities, num_samples, output, convert_type, log_probs, with_replacement, name params.emplace_back(probabilities_2d_no_log, num_samples, - output_2d_no_log_no_replacement, + output_2d_no_log_replacement, ov::element::Type_t::i32, false, - false, + true, "input_2d"); params.emplace_back(probabilities_2d_log, num_samples, - output_2d_log_no_replacement, + output_2d_log_replacement, ov::element::Type_t::i32, true, - false, + true, "input_2d"); params.emplace_back(probabilities_1d_no_log, num_samples, - output_1d_no_log_replacement, + output_1d_no_log_no_replacement, ov::element::Type_t::i64, false, - true, + false, "input_1d"); params.emplace_back(probabilities_1d_log, num_samples, - output_1d_log_replacement, + output_1d_log_no_replacement, ov::element::Type_t::i64, true, - true, + false, "input_1d"); + params.emplace_back(probabilities_1d_expand, + num_samples_big, + output_1d_expand, + ov::element::Type_t::i64, + false, + true, + "input_1d_expand"); return params; } From 35308ce34d2e7603e395f134fecb9db5b50a09e0 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Wed, 11 Oct 2023 03:29:56 +0400 Subject: [PATCH 142/257] Use np.float32 instead of np.float (#20377) --- tests/layer_tests/onnx_tests/test_loop.py | 2 +- tools/mo/unit_tests/mo/front/tf/identityN_to_identity_test.py | 4 ++-- tools/mo/unit_tests/mo/middle/UpsampleToResample_test.py | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/layer_tests/onnx_tests/test_loop.py b/tests/layer_tests/onnx_tests/test_loop.py index 579c2dfca32c87..be90ccd106bcdf 100644 --- a/tests/layer_tests/onnx_tests/test_loop.py +++ b/tests/layer_tests/onnx_tests/test_loop.py @@ -16,7 +16,7 @@ def create_const(name, tensor_type, value): if tensor_type == TensorProto.INT64: np_type = np.int64 elif tensor_type == TensorProto.FLOAT: - np_type = np.float + np_type = np.float32 elif tensor_type == TensorProto.BOOL: np_type = bool else: diff --git a/tools/mo/unit_tests/mo/front/tf/identityN_to_identity_test.py b/tools/mo/unit_tests/mo/front/tf/identityN_to_identity_test.py index 1c736ddb6edb2e..7a9ba24cc515e9 100644 --- a/tools/mo/unit_tests/mo/front/tf/identityN_to_identity_test.py +++ b/tools/mo/unit_tests/mo/front/tf/identityN_to_identity_test.py @@ -14,12 +14,12 @@ **regular_op_with_shaped_data('placeholder_0', [1, 227, 227, 3], {'type': 'Parameter'}), **regular_op_with_shaped_data('placeholder_1', [1, 227, 227, 3], {'type': 'Parameter'}), - **regular_op_with_empty_data('identityN', {'op': 'IdentityN', 'type': None, 'data_types': [np.int32, np.float], + **regular_op_with_empty_data('identityN', {'op': 'IdentityN', 'type': None, 'data_types': [np.int32, np.float32], 'name': 'my_identity'}), **empty_data('identityN_1_d'), **regular_op_with_empty_data('identity0', {'op': 'Identity', 'type': None, 'data_type': np.int32, 'name': 'my_identity/0_port'}), - **regular_op_with_empty_data('identity1', {'op': 'Identity', 'type': None, 'data_type': np.float, + **regular_op_with_empty_data('identity1', {'op': 'Identity', 'type': None, 'data_type': np.float32, 'name': 'my_identity/1_port'}), **result('output0'), diff --git a/tools/mo/unit_tests/mo/middle/UpsampleToResample_test.py b/tools/mo/unit_tests/mo/middle/UpsampleToResample_test.py index e65eef945a1705..e1530ff61d37d7 100644 --- a/tools/mo/unit_tests/mo/middle/UpsampleToResample_test.py +++ b/tools/mo/unit_tests/mo/middle/UpsampleToResample_test.py @@ -40,7 +40,7 @@ 'ss_stride_data': {'kind': 'data', 'value': int64_array([1]), 'shape': 
int64_array([1])}, 'strided_slice': {'type': 'StridedSlice', 'kind': 'op', 'op': 'StridedSlice'}, 'strided_slice_data': {'kind': 'data', 'shape': None, 'value': None}, - 'cast_to_float': {'kind': 'op', 'op': 'Cast', 'type': 'Convert', 'dst_type': np.float}, + 'cast_to_float': {'kind': 'op', 'op': 'Cast', 'type': 'Convert', 'dst_type': np.float32}, 'cast_to_float_d': {'kind': 'data', 'value': None, 'shape': None}, 'factor': {'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([5, 5]), 'shape': int64_array([2])}, 'factor_data': {'kind': 'data', 'value': int64_array([5, 5]), 'shape': int64_array([2])}, @@ -104,7 +104,7 @@ 'ss_end_data': {'kind': 'data', 'value': None, 'shape': None}, 'ss_stride': {'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([1]), 'shape': int64_array([1])}, 'ss_stride_data': {'kind': 'data', 'value': None, 'shape': None}, - 'cast_to_float': {'kind': 'op', 'op': 'Cast', 'type': 'Convert', 'dst_type': np.float}, + 'cast_to_float': {'kind': 'op', 'op': 'Cast', 'type': 'Convert', 'dst_type': np.float32}, 'cast_to_float_d': {'kind': 'data', 'value': None, 'shape': None}, 'mul': {'type': 'Multiply', 'kind': 'op', 'op': 'Multiply'}, 'mul_data': {'kind': 'data', 'shape': None, 'value': None}, From 8020530e678f875e8ac35162522e4bf174921806 Mon Sep 17 00:00:00 2001 From: Ilya Churaev Date: Wed, 11 Oct 2023 07:09:04 +0400 Subject: [PATCH 143/257] Reduce ngraph namespace usage from core component (#20309) * Reduce ngraph namespace usage from core component * Fixed build * Fixed build 2 * Added missed opset to legacy API --- src/core/include/ngraph/opsets/opset.hpp | 2 + src/core/src/graph_util.cpp | 197 ++-- src/core/src/opsets/opset.cpp | 85 +- src/core/src/runtime/aligned_buffer.cpp | 18 +- src/core/src/runtime/tensor.cpp | 4 - src/core/src/shape_util.cpp | 12 +- src/core/src/util.cpp | 216 ++--- src/core/src/validation_util.cpp | 1092 +++++++++++----------- src/inference/src/ie_core.cpp | 4 - 9 files changed, 802 insertions(+), 828 deletions(-) diff --git a/src/core/include/ngraph/opsets/opset.hpp b/src/core/include/ngraph/opsets/opset.hpp index 443de5714e4243..26c21e237b16c3 100644 --- a/src/core/include/ngraph/opsets/opset.hpp +++ b/src/core/include/ngraph/opsets/opset.hpp @@ -67,6 +67,8 @@ const NGRAPH_API OpSet& get_opset8(); const NGRAPH_API OpSet& get_opset9(); const NGRAPH_API OpSet& get_opset10(); const NGRAPH_API OpSet& get_opset11(); +const NGRAPH_API OpSet& get_opset12(); +const NGRAPH_API OpSet& get_opset13(); const NGRAPH_API std::map>& get_available_opsets(); } // namespace ngraph NGRAPH_SUPPRESS_DEPRECATED_END diff --git a/src/core/src/graph_util.cpp b/src/core/src/graph_util.cpp index 93457bd17083e3..8001678dab2601 100644 --- a/src/core/src/graph_util.cpp +++ b/src/core/src/graph_util.cpp @@ -10,30 +10,17 @@ #include #include -#include "ngraph/descriptor/input.hpp" -#include "ngraph/descriptor/output.hpp" -#include "ngraph/function.hpp" -#include "ngraph/log.hpp" -#include "ngraph/node.hpp" -#include "ngraph/op/broadcast.hpp" -#include "ngraph/op/constant.hpp" -#include "ngraph/op/parameter.hpp" -#include "ngraph/op/result.hpp" -#include "ngraph/op/tensor_iterator.hpp" -#include "ngraph/op/util/op_types.hpp" -#include "ngraph/opsets/opset5.hpp" -#include "ngraph/opsets/opset8.hpp" -#include "ngraph/pass/manager.hpp" -#include "ngraph/pass/visualize_tree.hpp" -#include "ngraph/rt_info.hpp" -#include "ngraph/util.hpp" #include "openvino/core/descriptor/tensor.hpp" +#include "openvino/core/rt_info.hpp" +#include 
"openvino/op/broadcast.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/util/op_types.hpp" +#include "openvino/pass/manager.hpp" +#include "openvino/pass/visualize_tree.hpp" #include "transformations/common_optimizations/compress_float_constants.hpp" #include "transformations/common_optimizations/fused_names_cleanup.hpp" #include "transformations/common_optimizations/mark_precision_sensitive_shapeof_subgraphs.hpp" -using namespace std; - namespace { void clone_ov_nodes(const std::vector>& nodes, @@ -49,7 +36,7 @@ void clone_ov_nodes(const std::vector>& nodes, } std::vector> cloned_dependencies; for (const auto& dependency : node->get_control_dependencies()) { - shared_ptr& dependent = node_map.at(dependency.get()); + std::shared_ptr& dependent = node_map.at(dependency.get()); if (find(cloned_dependencies.begin(), cloned_dependencies.end(), dependent) == cloned_dependencies.end()) { cloned_dependencies.push_back(dependent); @@ -161,7 +148,7 @@ void replace_node(const std::shared_ptr& target, const OutputVector& repla OPENVINO_ASSERT(target->get_output_size() == replacement_values.size()); - unordered_set> replacement_nodes; + std::unordered_set> replacement_nodes; // For each of target's output O with replacement output O_rep: // For each O's connected downstream input I: // Change I's connected upstream output to O_rep @@ -179,15 +166,15 @@ void replace_node(const std::shared_ptr& target, const OutputVector& repla } void replace_node(const std::shared_ptr& target, const std::shared_ptr& replacement) { - auto default_output_order = vector(target->get_output_size()); + auto default_output_order = std::vector(target->get_output_size()); std::iota(default_output_order.begin(), default_output_order.end(), 0); replace_node(target, replacement, default_output_order); } void replace_nodes(const std::shared_ptr& f, - const unordered_map, shared_ptr>& - parameter_replacement_map, - const unordered_map, shared_ptr>& body_replacement_map) { + const std::unordered_map, + std::shared_ptr>& parameter_replacement_map, + const std::unordered_map, std::shared_ptr>& body_replacement_map) { auto& params = f->get_parameters(); for (size_t i = 0; i < params.size(); i++) { @@ -231,7 +218,7 @@ std::shared_ptr clone_ov_model(const Model& func, std::unordered_map node : func.get_results()) { + for (std::shared_ptr node : func.get_results()) { auto result = ov::as_type_ptr(node_map.at(node.get())); if (!result) { OPENVINO_THROW("Results should be of type op::Result"); @@ -240,7 +227,7 @@ std::shared_ptr clone_ov_model(const Model& func, std::unordered_map(node_map.at(node.get()))); + cloned_sinks.push_back(std::static_pointer_cast(node_map.at(node.get()))); } std::vector> cloned_params; @@ -273,8 +260,8 @@ bool compare_constants(const std::shared_ptr& n1, const std::shared_ptr(n1)->get_value_strings() != - static_pointer_cast(n2)->get_value_strings()) { + if (std::static_pointer_cast(n1)->get_value_strings() != + std::static_pointer_cast(n2)->get_value_strings()) { return false; } @@ -368,7 +355,8 @@ void save_model(const std::shared_ptr& m, const std::string& ou OPENVINO_SUPPRESS_DEPRECATED_START -ngraph::NodeVector ngraph::find_common_args(std::shared_ptr node1, std::shared_ptr node2) { +namespace ngraph { +ov::NodeVector find_common_args(std::shared_ptr node1, std::shared_ptr node2) { std::unordered_set> node1_args; auto compute_node1_args = [&node1_args](const std::shared_ptr& node) { @@ -396,15 +384,15 @@ ngraph::NodeVector ngraph::find_common_args(std::shared_ptr node1, std::sh } // Check if 
all paths from X to a result go through Y -bool ngraph::is_post_dominated(Node* X, Node* Y) { +bool is_post_dominated(Node* X, Node* Y) { std::unordered_set visited; std::stack> stack; stack.push(X); while (stack.size() > 0) { - ngraph::Node* curr = stack.top(); + ov::Node* curr = stack.top(); visited.insert(curr); - if (ngraph::op::is_output(curr)) { + if (ov::op::util::is_output(curr)) { return false; } stack.pop(); @@ -419,8 +407,8 @@ bool ngraph::is_post_dominated(Node* X, Node* Y) { return true; } -std::vector> ngraph::clone_nodes(const std::vector>& nodes, - NodeMap& node_map) { +std::vector> clone_nodes(const std::vector>& nodes, + NodeMap& node_map) { // for each node in topological order auto sorted_nodes = topological_sort(nodes); for (const auto& node : sorted_nodes) { @@ -433,7 +421,7 @@ std::vector> ngraph::clone_nodes(const std::vector } std::vector> cloned_dependencies; for (auto& dependency : node->get_control_dependencies()) { - shared_ptr& dependent = node_map.at(dependency.get()); + std::shared_ptr& dependent = node_map.at(dependency.get()); if (find(cloned_dependencies.begin(), cloned_dependencies.end(), dependent) == cloned_dependencies.end()) { cloned_dependencies.push_back(dependent); @@ -463,18 +451,18 @@ std::vector> ngraph::clone_nodes(const std::vector // create and return vector of cloned nodes // order matches input vector (not necessarily topological) - std::vector> cloned_nodes; + std::vector> cloned_nodes; for (const auto& node : nodes) { cloned_nodes.push_back(node_map.at(node.get())); } return cloned_nodes; } -std::list> ngraph::clone_nodes(const std::vector>& nodes, - RawNodeOutputMap& output_map) { +std::list> clone_nodes(const std::vector>& nodes, + RawNodeOutputMap& output_map) { // for each node in topological order auto sorted_nodes = topological_sort(nodes); - std::list> cloned_nodes; + std::list> cloned_nodes; for (const auto& node : sorted_nodes) { auto node_outputs = node->outputs(); for (const auto& value : node_outputs) { @@ -488,7 +476,7 @@ std::list> ngraph::clone_nodes(const std::vectorget_control_dependencies()) { for (const auto& dependency_value : dependency->outputs()) { - shared_ptr dependent = output_map.at(dependency_value).get_node_shared_ptr(); + std::shared_ptr dependent = output_map.at(dependency_value).get_node_shared_ptr(); if (find(cloned_dependencies.begin(), cloned_dependencies.end(), dependent) == cloned_dependencies.end()) { cloned_dependencies.push_back(dependent); @@ -514,8 +502,8 @@ std::list> ngraph::clone_nodes(const std::vector& reduce_constant) { - if (auto rc = ov::as_type_ptr(reduce_constant.get_node_shared_ptr())) { +bool is_equal_to_const_value(const std::string& const_value, const Output& reduce_constant) { + if (auto rc = ov::as_type_ptr(reduce_constant.get_node_shared_ptr())) { return (rc->get_all_data_elements_bitwise_identical() && rc->convert_value_to_string(0) == const_value); } else { return false; @@ -535,28 +523,28 @@ bool ngraph::is_equal_to_const_value(const std::string& const_value, const Outpu // | +------[2]------> | | | +------[6]------> | | +------[10]-----> | // | <------[3]------+ | | | <------[7]------+ | | <------[11]-----+ | // +-----+ +-----+ | +-----+ +-----+ +-----+ +-----+ -pair, shared_ptr> ngraph::insert_result_parameter_split( - const shared_ptr& src_node, - const shared_ptr& dst_node) { +std::pair, std::shared_ptr> insert_result_parameter_split( + const std::shared_ptr& src_node, + const std::shared_ptr& dst_node) { if (src_node->get_output_size() != 1) { OPENVINO_THROW("Multiple 
output per op not supported in graph partition yet."); } // Make parameter node - shared_ptr par_node = - make_shared(src_node->get_output_element_type(0), src_node->get_output_shape(0)); + std::shared_ptr par_node = + std::make_shared(src_node->get_output_element_type(0), src_node->get_output_shape(0)); // Fix input / output among src, dst and par std::vector> dst_inputs = get_inputs_from(*src_node, *dst_node); - NGRAPH_CHECK(dst_inputs.size() == 1, - "insert_result_parameter_split encountered more than " - "one input between the source and destination nodes"); + OPENVINO_ASSERT(dst_inputs.size() == 1, + "insert_result_parameter_split encountered more than " + "one input between the source and destination nodes"); auto& dst_input = dst_inputs[0]; std::vector> src_outputs = get_outputs_to(*src_node, *dst_node); - NGRAPH_CHECK(src_outputs.size() == 1, - "insert_result_parameter_split encountered more than " - "one output between the source and destination nodes"); + OPENVINO_ASSERT(src_outputs.size() == 1, + "insert_result_parameter_split encountered more than " + "one output between the source and destination nodes"); auto& src_output = src_outputs[0]; // Remove [0] @@ -567,7 +555,7 @@ pair, shared_ptr> ngraph:: // Add res node // Add [4], [5], [6], [7] - shared_ptr res_node = make_shared(src_node); + std::shared_ptr res_node = std::make_shared(src_node); return make_pair(res_node, par_node); } @@ -612,58 +600,59 @@ pair, shared_ptr> ngraph:: // Typically new_node is connected to src_node already. The reason we don't create `new_node` // inside the function and return it (similar to ngraph::insert_result_parameter_split) is that // we'll have to templatize its function to call new_node's constructor. -void ngraph::insert_new_node_between(const shared_ptr& src_node, - const shared_ptr& dst_node, - const shared_ptr& new_node) { +void insert_new_node_between(const std::shared_ptr& src_node, + const std::shared_ptr& dst_node, + const std::shared_ptr& new_node) { // Fix input / output std::vector> dst_inputs = get_inputs_from(*src_node, *dst_node); - NGRAPH_CHECK(dst_inputs.size() == 1, - "insert_new_node_between encountered more than one " - "input between the source and destination nodes"); + OPENVINO_ASSERT(dst_inputs.size() == 1, + "insert_new_node_between encountered more than one " + "input between the source and destination nodes"); auto& dst_input = dst_inputs[0]; std::vector> src_outputs = get_outputs_to(*src_node, *dst_node); - NGRAPH_CHECK(src_outputs.size() == 1, - "insert_new_node_between encountered more than one " - "output between the source and destination nodes"); + OPENVINO_ASSERT(src_outputs.size() == 1, + "insert_new_node_between encountered more than one " + "output between the source and destination nodes"); auto& src_output = src_outputs[0]; src_output.remove_target_input(dst_input); // Remove [0] dst_input.replace_source_output(new_node->output(0)); // Remove [0] (again), add [8], remove [1], add [9] } -std::shared_ptr ngraph::make_zero(const element::Type& element_type, const Shape& shape) { - auto zero = op::Constant::create(element_type, Shape{}, {0.0}); +std::shared_ptr make_zero(const element::Type& element_type, const Shape& shape) { + auto zero = ov::op::v0::Constant::create(element_type, Shape{}, {0.0}); if (shape.size() > 0) { - return std::make_shared(zero, - op::Constant::create(element::u64, Shape{shape.size()}, shape)); + return std::make_shared( + zero, + op::v0::Constant::create(element::u64, Shape{shape.size()}, shape)); } return zero; } -std::shared_ptr 
ngraph::make_constant_from_string(std::string val, - const element::Type& element_type, - const Shape& shape) { +std::shared_ptr make_constant_from_string(std::string val, + const element::Type& element_type, + const Shape& shape) { auto cvals = std::vector(shape_size(shape), val); - return std::make_shared(element_type, shape, cvals); + return std::make_shared(element_type, shape, cvals); } -bool ngraph::is_zero(const Output& reduce_constant) { +bool is_zero(const Output& reduce_constant) { auto result_bool = is_equal_to_const_value("0", reduce_constant); return result_bool; } -bool ngraph::is_one(const Output& reduce_constant) { +bool is_one(const Output& reduce_constant) { auto result_bool = is_equal_to_const_value("1", reduce_constant); return result_bool; } -ngraph::NodeVector ngraph::get_subgraph_outputs(const NodeVector& nodes, - const NodeVector& exclusions, - bool ignore_unused, - bool ignore_output_duplicates) { - std::set> exclusions_set(exclusions.begin(), exclusions.end()); - std::set> nodes_set(nodes.begin(), nodes.end()); +ov::NodeVector get_subgraph_outputs(const NodeVector& nodes, + const NodeVector& exclusions, + bool ignore_unused, + bool ignore_output_duplicates) { + std::set> exclusions_set(exclusions.begin(), exclusions.end()); + std::set> nodes_set(nodes.begin(), nodes.end()); NodeVector outputs; @@ -684,7 +673,7 @@ ngraph::NodeVector ngraph::get_subgraph_outputs(const NodeVector& nodes, return outputs; } -ngraph::NodeVector ngraph::extract_subgraph(const NodeVector& results, const NodeVector& args) { +ov::NodeVector extract_subgraph(const NodeVector& results, const NodeVector& args) { NodeVector subgraph; traverse_nodes( results, @@ -695,15 +684,15 @@ ngraph::NodeVector ngraph::extract_subgraph(const NodeVector& results, const Nod return subgraph; } -bool ngraph::is_used(Node* node) { +bool is_used(Node* node) { std::unordered_set instances_seen; std::stack> stack; stack.push(node); while (stack.size() > 0) { - ngraph::Node* n = stack.top(); + ov::Node* n = stack.top(); if (instances_seen.count(n) == 0) { - if (ngraph::op::is_output(n)) { + if (ov::op::util::is_output(n)) { return true; } instances_seen.insert(n); @@ -718,7 +707,7 @@ bool ngraph::is_used(Node* node) { return false; } -size_t ngraph::get_user_count(Node* node) { +size_t get_user_count(Node* node) { size_t count = 0; for (const auto& node_user : node->get_users()) { count += is_used(node_user.get()); @@ -726,13 +715,13 @@ size_t ngraph::get_user_count(Node* node) { return count; } -bool ngraph::is_strided(const Strides& strides) { +bool is_strided(const Strides& strides) { return std::any_of(strides.begin(), strides.end(), [](size_t stride) { return stride != 1; }); } -bool ngraph::is_valid_rank(const std::shared_ptr& node, std::vector valid_ranks) { +bool is_valid_rank(const std::shared_ptr& node, std::vector valid_ranks) { auto node_rank = node->get_shape().size(); for (auto rank : valid_ranks) { if (rank == node_rank) { @@ -742,15 +731,15 @@ bool ngraph::is_valid_rank(const std::shared_ptr& node, std::vector f, - const std::string& filename, - std::function& attributes)> attributes) { - ngraph::pass::Manager pass_manager; - pass_manager.register_pass(filename, attributes); +void plot_graph(std::shared_ptr f, + const std::string& filename, + std::function& attributes)> attributes) { + ov::pass::Manager pass_manager; + pass_manager.register_pass(filename, attributes); pass_manager.run_passes(std::move(f)); } -std::vector> ngraph::get_inputs_from(Node& src, Node& dst) { +std::vector> 
get_inputs_from(Node& src, Node& dst) { std::vector> result; for (auto& input : dst.inputs()) { @@ -762,7 +751,7 @@ std::vector> ngraph::get_inputs_from(Node& src, Node return result; } -std::vector> ngraph::get_outputs_to(Node& src, Node& dst) { +std::vector> get_outputs_to(Node& src, Node& dst) { std::vector> result; for (auto& output : src.outputs()) { @@ -783,10 +772,10 @@ std::vector> ngraph::get_outputs_to(Node& src, Node return result; } -static bool check_for_cycles_bkwd(const std::shared_ptr& node, - std::deque>& path, - std::unordered_set>& path_set, - ngraph::NodeVector& cycle_nodes) { +static bool check_for_cycles_bkwd(const std::shared_ptr& node, + std::deque>& path, + std::unordered_set>& path_set, + ov::NodeVector& cycle_nodes) { path.push_back(node); path_set.insert(node); for (size_t i = 0; i < node->inputs().size(); i++) { @@ -808,10 +797,10 @@ static bool check_for_cycles_bkwd(const std::shared_ptr& node, return false; } -static bool check_for_cycles_fwd(const std::shared_ptr& node, - std::deque>& path, - std::unordered_set>& path_set, - ngraph::NodeVector& cycle_nodes) { +static bool check_for_cycles_fwd(const std::shared_ptr& node, + std::deque>& path, + std::unordered_set>& path_set, + ov::NodeVector& cycle_nodes) { path.push_back(node); path_set.insert(node); for (auto& arg : node->get_users()) { @@ -832,7 +821,7 @@ static bool check_for_cycles_fwd(const std::shared_ptr& node, return false; } -bool ngraph::check_for_cycles(const ngraph::Function* func, ngraph::NodeVector& cycle_nodes, bool& is_bkwd_cycle) { +bool check_for_cycles(const ov::Model* func, ov::NodeVector& cycle_nodes, bool& is_bkwd_cycle) { for (const auto& res : func->get_results()) { std::deque> path; // mirror of path stack for faster cycle check @@ -865,3 +854,5 @@ bool ngraph::check_for_cycles(const ngraph::Function* func, ngraph::NodeVector& // no cycles return false; } + +} // namespace ngraph diff --git a/src/core/src/opsets/opset.cpp b/src/core/src/opsets/opset.cpp index 1a61c91e7a1133..9adcd22a43cc35 100644 --- a/src/core/src/opsets/opset.cpp +++ b/src/core/src/opsets/opset.cpp @@ -5,17 +5,11 @@ #include "ngraph/opsets/opset.hpp" #include "itt.hpp" -#include "ngraph/deprecated.hpp" #include "ngraph/log.hpp" #include "openvino/op/ops.hpp" #include "openvino/opsets/opset.hpp" #include "openvino/util/log.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START -ngraph::OpSet::OpSet(const ov::OpSet& opset) : ov::OpSet(opset) {} - -ngraph::OpSet::OpSet(const ngraph::OpSet& opset) : ov::OpSet(opset) {} - ov::OpSet::OpSet(const std::string& name) : m_name(name) {} ov::OpSet::OpSet(const ov::OpSet& opset) { @@ -51,24 +45,6 @@ ov::Node* ov::OpSet::create_insensitive(const std::string& name) const { return m_factory_registry.create(type_info_it->second); } -const std::map>& ngraph::get_available_opsets() { -#define _NGRAPH_REG_OPSET(OPSET) \ - { #OPSET, ngraph::get_##OPSET } - const static std::map> opset_map = {_NGRAPH_REG_OPSET(opset1), - _NGRAPH_REG_OPSET(opset2), - _NGRAPH_REG_OPSET(opset3), - _NGRAPH_REG_OPSET(opset4), - _NGRAPH_REG_OPSET(opset5), - _NGRAPH_REG_OPSET(opset6), - _NGRAPH_REG_OPSET(opset7), - _NGRAPH_REG_OPSET(opset8), - _NGRAPH_REG_OPSET(opset9), - _NGRAPH_REG_OPSET(opset10), - _NGRAPH_REG_OPSET(opset11)}; -#undef _NGRAPH_REG_OPSET - return opset_map; -} - const std::map>& ov::get_available_opsets() { #define _OPENVINO_REG_OPSET(OPSET) \ { #OPSET, ov::get_##OPSET } @@ -232,57 +208,96 @@ const ov::OpSet& ov::get_opset13() { return opset; } -const ngraph::OpSet& ngraph::get_opset1() { 
+OPENVINO_SUPPRESS_DEPRECATED_START +namespace ngraph { + +OpSet::OpSet(const ov::OpSet& opset) : ov::OpSet(opset) {} + +OpSet::OpSet(const OpSet& opset) : ov::OpSet(opset) {} + +const std::map>& get_available_opsets() { +#define _REG_OPSET(OPSET) \ + { #OPSET, get_##OPSET } + const static std::map> opset_map = {_REG_OPSET(opset1), + _REG_OPSET(opset2), + _REG_OPSET(opset3), + _REG_OPSET(opset4), + _REG_OPSET(opset5), + _REG_OPSET(opset6), + _REG_OPSET(opset7), + _REG_OPSET(opset8), + _REG_OPSET(opset9), + _REG_OPSET(opset10), + _REG_OPSET(opset11), + _REG_OPSET(opset12), + _REG_OPSET(opset13)}; +#undef _REG_OPSET + return opset_map; +} + +const OpSet& get_opset1() { static OpSet opset(ov::get_opset1()); return opset; } -const ngraph::OpSet& ngraph::get_opset2() { +const OpSet& get_opset2() { static OpSet opset(ov::get_opset2()); return opset; } -const ngraph::OpSet& ngraph::get_opset3() { +const OpSet& get_opset3() { static OpSet opset(ov::get_opset3()); return opset; } -const ngraph::OpSet& ngraph::get_opset4() { +const OpSet& get_opset4() { static OpSet opset(ov::get_opset4()); return opset; } -const ngraph::OpSet& ngraph::get_opset5() { +const OpSet& get_opset5() { static OpSet opset(ov::get_opset5()); return opset; } -const ngraph::OpSet& ngraph::get_opset6() { +const OpSet& get_opset6() { static OpSet opset(ov::get_opset6()); return opset; } -const ngraph::OpSet& ngraph::get_opset7() { +const OpSet& get_opset7() { static OpSet opset(ov::get_opset7()); return opset; } -const ngraph::OpSet& ngraph::get_opset8() { +const OpSet& get_opset8() { static OpSet opset(ov::get_opset8()); return opset; } -const ngraph::OpSet& ngraph::get_opset9() { +const OpSet& get_opset9() { static OpSet opset(ov::get_opset9()); return opset; } -const ngraph::OpSet& ngraph::get_opset10() { +const OpSet& get_opset10() { static OpSet opset(ov::get_opset10()); return opset; } -const ngraph::OpSet& ngraph::get_opset11() { +const OpSet& get_opset11() { static OpSet opset(ov::get_opset11()); return opset; } + +const OpSet& get_opset12() { + static OpSet opset(ov::get_opset12()); + return opset; +} + +const OpSet& get_opset13() { + static OpSet opset(ov::get_opset13()); + return opset; +} + +} // namespace ngraph diff --git a/src/core/src/runtime/aligned_buffer.cpp b/src/core/src/runtime/aligned_buffer.cpp index 95ab3f44306eb8..d7c5229fcc0efa 100644 --- a/src/core/src/runtime/aligned_buffer.cpp +++ b/src/core/src/runtime/aligned_buffer.cpp @@ -8,17 +8,18 @@ #include #include "ngraph/util.hpp" +#include "openvino/util/log.hpp" -using namespace ngraph; -using namespace std; NGRAPH_SUPPRESS_DEPRECATED_START +namespace ngraph { + runtime::AlignedBuffer::AlignedBuffer() : m_allocated_buffer(nullptr), m_aligned_buffer(nullptr), m_byte_size(0) {} runtime::AlignedBuffer::AlignedBuffer(size_t byte_size, size_t alignment) : m_byte_size(byte_size) { m_byte_size = std::max(1, byte_size); size_t allocation_size = m_byte_size + alignment; - m_allocated_buffer = static_cast(ngraph_malloc(allocation_size)); + m_allocated_buffer = new char[allocation_size]; m_aligned_buffer = m_allocated_buffer; size_t mod = (alignment != 0) ? 
size_t(m_aligned_buffer) % alignment : 0; @@ -38,14 +39,14 @@ runtime::AlignedBuffer::AlignedBuffer(AlignedBuffer&& other) runtime::AlignedBuffer::~AlignedBuffer() { if (m_allocated_buffer != nullptr) { - free(m_allocated_buffer); + delete[] m_allocated_buffer; } } runtime::AlignedBuffer& runtime::AlignedBuffer::operator=(AlignedBuffer&& other) { if (this != &other) { if (m_allocated_buffer != nullptr) { - free(m_allocated_buffer); + delete[] m_allocated_buffer; } m_allocated_buffer = other.m_allocated_buffer; m_aligned_buffer = other.m_aligned_buffer; @@ -56,9 +57,10 @@ runtime::AlignedBuffer& runtime::AlignedBuffer::operator=(AlignedBuffer&& other) } return *this; } +} // namespace ngraph namespace ov { -AttributeAdapter>::AttributeAdapter( - shared_ptr& value) - : DirectValueAccessor>(value) {} +AttributeAdapter>::AttributeAdapter( + std::shared_ptr& value) + : DirectValueAccessor>(value) {} } // namespace ov diff --git a/src/core/src/runtime/tensor.cpp b/src/core/src/runtime/tensor.cpp index 7f0c51fa45fea0..f7f587d1a95e9d 100644 --- a/src/core/src/runtime/tensor.cpp +++ b/src/core/src/runtime/tensor.cpp @@ -4,10 +4,6 @@ #include "ngraph/runtime/tensor.hpp" -#include "ngraph/log.hpp" -#include "ngraph/runtime/aligned_buffer.hpp" -#include "ngraph/type/element_type.hpp" - using namespace ngraph; using namespace std; diff --git a/src/core/src/shape_util.cpp b/src/core/src/shape_util.cpp index 9ce8512d7a7797..72c72c39b68d4f 100644 --- a/src/core/src/shape_util.cpp +++ b/src/core/src/shape_util.cpp @@ -9,10 +9,9 @@ #include "openvino/core/partial_shape.hpp" #include "openvino/core/shape_util.hpp" -using namespace ngraph; - +namespace ngraph { template <> -PartialShape ngraph::project(const PartialShape& shape, const AxisSet& axes) { +PartialShape project(const PartialShape& shape, const AxisSet& axes) { if (shape.rank().is_dynamic()) { return shape; } else { @@ -29,7 +28,7 @@ PartialShape ngraph::project(const PartialShape& shape, const AxisSet& axes) { } template <> -PartialShape ngraph::reduce(const PartialShape& shape, const AxisSet& deleted_axes, bool keep_dims) { +PartialShape reduce(const PartialShape& shape, const AxisSet& deleted_axes, bool keep_dims) { if (shape.rank().is_dynamic()) { return shape; } else { @@ -49,8 +48,8 @@ PartialShape ngraph::reduce(const PartialShape& shape, const AxisSet& deleted_ax } template <> -PartialShape ngraph::inject_pairs(const PartialShape& shape, - std::vector> new_axis_pos_value_pairs) { +PartialShape inject_pairs(const PartialShape& shape, + std::vector> new_axis_pos_value_pairs) { if (shape.rank().is_dynamic()) { return shape; } else { @@ -76,6 +75,7 @@ PartialShape ngraph::inject_pairs(const PartialShape& shape, return PartialShape{result_dims}; } } +} // namespace ngraph namespace ov { template diff --git a/src/core/src/util.cpp b/src/core/src/util.cpp index 3c12bd5cb10881..49ae1575101e7b 100644 --- a/src/core/src/util.cpp +++ b/src/core/src/util.cpp @@ -13,21 +13,13 @@ #include #include -#include "ngraph/coordinate_diff.hpp" -#include "ngraph/function.hpp" -#include "ngraph/graph_util.hpp" -#include "ngraph/log.hpp" -#include "ngraph/node.hpp" -#include "ngraph/op/result.hpp" -#include "ngraph/partial_shape.hpp" -#include "ngraph/shape.hpp" #include "openvino/util/common_util.hpp" #include "openvino/util/log.hpp" -NGRAPH_SUPPRESS_DEPRECATED_START -using namespace std; +OPENVINO_SUPPRESS_DEPRECATED_START -void ngraph::dump(ostream& out, const void* _data, size_t _size) { +namespace ngraph { +void dump(std::ostream& out, const void* _data, 
size_t _size) { auto flags = out.flags(); const uint8_t* data = reinterpret_cast(_data); size_t len = _size; @@ -61,27 +53,27 @@ void ngraph::dump(ostream& out, const void* _data, size_t _size) { out.flags(flags); } -std::string ngraph::to_lower(const std::string& s) { +std::string to_lower(const std::string& s) { return ov::util::to_lower(s); } -std::string ngraph::to_upper(const std::string& s) { +std::string to_upper(const std::string& s) { return ov::util::to_upper(s); } -string ngraph::trim(const string& s) { +std::string trim(const std::string& s) { return ov::util::trim(s); } -vector ngraph::split(const string& src, char delimiter, bool do_trim) { +std::vector split(const std::string& src, char delimiter, bool do_trim) { return ov::util::split(src, delimiter, do_trim); } -size_t ngraph::hash_combine(const std::vector& list) { +size_t hash_combine(const std::vector& list) { return ov::util::hash_combine(list); } -void* ngraph::ngraph_malloc(size_t size) { +void* ngraph_malloc(size_t size) { auto ptr = malloc(size); if (size != 0 && !ptr) { OPENVINO_ERR << "malloc failed to allocate memory of size " << size; @@ -90,13 +82,13 @@ void* ngraph::ngraph_malloc(size_t size) { return ptr; } -void ngraph::ngraph_free(void* ptr) { +void ngraph_free(void* ptr) { if (ptr) { free(ptr); } } -size_t ngraph::round_up(size_t size, size_t alignment) { +size_t round_up(size_t size, size_t alignment) { if (alignment == 0) { return size; } @@ -109,27 +101,27 @@ size_t ngraph::round_up(size_t size, size_t alignment) { return size + alignment - remainder; } -size_t ngraph::stopwatch::get_call_count() const { +size_t stopwatch::get_call_count() const { return m_total_count; } -size_t ngraph::stopwatch::get_seconds() const { - return chrono::duration_cast(get_timer_value()).count(); +size_t stopwatch::get_seconds() const { + return std::chrono::duration_cast(get_timer_value()).count(); } -size_t ngraph::stopwatch::get_milliseconds() const { - return chrono::duration_cast(get_timer_value()).count(); +size_t stopwatch::get_milliseconds() const { + return std::chrono::duration_cast(get_timer_value()).count(); } -size_t ngraph::stopwatch::get_microseconds() const { - return chrono::duration_cast(get_timer_value()).count(); +size_t stopwatch::get_microseconds() const { + return std::chrono::duration_cast(get_timer_value()).count(); } -size_t ngraph::stopwatch::get_nanoseconds() const { +size_t stopwatch::get_nanoseconds() const { return get_timer_value().count(); } -chrono::nanoseconds ngraph::stopwatch::get_timer_value() const { +std::chrono::nanoseconds stopwatch::get_timer_value() const { if (m_active) { return (m_clock.now() - m_start_time); } else { @@ -137,23 +129,22 @@ chrono::nanoseconds ngraph::stopwatch::get_timer_value() const { } } -size_t ngraph::stopwatch::get_total_seconds() const { - return chrono::duration_cast(m_total_time).count(); +size_t stopwatch::get_total_seconds() const { + return std::chrono::duration_cast(m_total_time).count(); } -size_t ngraph::stopwatch::get_total_milliseconds() const { - return chrono::duration_cast(m_total_time).count(); +size_t stopwatch::get_total_milliseconds() const { + return std::chrono::duration_cast(m_total_time).count(); } -size_t ngraph::stopwatch::get_total_microseconds() const { - return chrono::duration_cast(m_total_time).count(); +size_t stopwatch::get_total_microseconds() const { + return std::chrono::duration_cast(m_total_time).count(); } -size_t ngraph::stopwatch::get_total_nanoseconds() const { +size_t stopwatch::get_total_nanoseconds() const { 
return m_total_time.count(); } -namespace ngraph { template <> float parse_string(const std::string& s) { const char* tmp = s.c_str(); @@ -201,40 +192,30 @@ uint8_t parse_string(const std::string& s) { return result; } -} // namespace ngraph -std::ostream& operator<<(std::ostream& os, const ngraph::NodeVector& nv) { - std::vector names; - for (auto n : nv) { - names.push_back(n->get_name()); - } - os << ngraph::vector_to_string(names); - return os; -} - -ngraph::AxisVector ngraph::get_default_order(const Shape& shape) { +AxisVector get_default_order(const Shape& shape) { return get_default_order(shape.size()); } -ngraph::AxisVector ngraph::get_default_order(const PartialShape& shape) { +AxisVector get_default_order(const PartialShape& shape) { return get_default_order(shape.rank()); } -ngraph::AxisVector ngraph::get_default_order(size_t rank) { +AxisVector get_default_order(size_t rank) { AxisVector default_order(rank); std::iota(begin(default_order), end(default_order), 0); return default_order; } -ngraph::AxisVector ngraph::get_default_order(const Rank& rank) { - NGRAPH_CHECK(rank.is_static(), "Can not calculate default order for dynamic rank"); +AxisVector get_default_order(const Rank& rank) { + OPENVINO_ASSERT(rank.is_static(), "Can not calculate default order for dynamic rank"); AxisVector default_order(rank.get_length()); std::iota(begin(default_order), end(default_order), 0); return default_order; } -void ngraph::parse_version_string(std::string version, size_t& major, size_t& minor, size_t& patch, string& extra) { +void parse_version_string(std::string version, size_t& major, size_t& minor, size_t& patch, std::string& extra) { // Since regex is broken in gcc 4.8 I will just manually parse the version string // Version strings look like `0.25.0-rc.0+7c32240` or `v0.25.0-rc.0+7c32240` size_t start; @@ -242,18 +223,18 @@ void ngraph::parse_version_string(std::string version, size_t& major, size_t& mi extra = ""; start = (version[0] == 'v' ? 
1 : 0); end = version.find_first_of('.', start); - string major_str = version.substr(start, end - start); + std::string major_str = version.substr(start, end - start); start = end + 1; end = version.find_first_of('.', start); - string minor_str = version.substr(start, end - start); + std::string minor_str = version.substr(start, end - start); start = end + 1; end = version.find_first_of("-+", start); - string patch_str = version.substr(start, end - start); + std::string patch_str = version.substr(start, end - start); start = end; - if (start != string::npos) { + if (start != std::string::npos) { extra = version.substr(start); } @@ -279,73 +260,74 @@ void ngraph::parse_version_string(std::string version, size_t& major, size_t& mi OPENVINO_THROW("Error parsing version string '", version, "'"); } } +} // namespace ngraph -vector read_float_vector(shared_ptr tv) { - vector float_vec; - ngraph::element::Type element_type = tv->get_element_type(); +std::vector read_float_vector(std::shared_ptr tv) { + std::vector float_vec; + ov::element::Type element_type = tv->get_element_type(); - if (element_type == ngraph::element::boolean) { - vector vec = read_vector(tv); + if (element_type == ov::element::boolean) { + std::vector vec = read_vector(tv); // Changed from vector ctor to explicit for loop to add static_cast // This silences MSVC warnings for (char value : vec) { float_vec.push_back(static_cast(value)); } - } else if (element_type == ngraph::element::bf16) { - vector vec = read_vector(tv); - float_vec = ngraph::bfloat16::to_float_vector(vec); - } else if (element_type == ngraph::element::f16) { - vector vec = read_vector(tv); - for (ngraph::float16 value : vec) { + } else if (element_type == ov::element::bf16) { + std::vector vec = read_vector(tv); + float_vec = ov::bfloat16::to_float_vector(vec); + } else if (element_type == ov::element::f16) { + std::vector vec = read_vector(tv); + for (ov::float16 value : vec) { float_vec.push_back(static_cast(value)); } - } else if (element_type == ngraph::element::f32) { - vector vec = read_vector(tv); + } else if (element_type == ov::element::f32) { + std::vector vec = read_vector(tv); for (float value : vec) { float_vec.push_back(static_cast(value)); } - } else if (element_type == ngraph::element::f64) { - vector vec = read_vector(tv); + } else if (element_type == ov::element::f64) { + std::vector vec = read_vector(tv); for (double value : vec) { float_vec.push_back(static_cast(value)); } - } else if (element_type == ngraph::element::i8) { - vector vec = read_vector(tv); + } else if (element_type == ov::element::i8) { + std::vector vec = read_vector(tv); for (int8_t value : vec) { float_vec.push_back(static_cast(value)); } - } else if (element_type == ngraph::element::i16) { - vector vec = read_vector(tv); + } else if (element_type == ov::element::i16) { + std::vector vec = read_vector(tv); for (int16_t value : vec) { float_vec.push_back(static_cast(value)); } - } else if (element_type == ngraph::element::i32) { - vector vec = read_vector(tv); + } else if (element_type == ov::element::i32) { + std::vector vec = read_vector(tv); for (int32_t value : vec) { float_vec.push_back(static_cast(value)); } - } else if (element_type == ngraph::element::i64) { - vector vec = read_vector(tv); + } else if (element_type == ov::element::i64) { + std::vector vec = read_vector(tv); for (int64_t value : vec) { float_vec.push_back(static_cast(value)); } - } else if (element_type == ngraph::element::u8) { - vector vec = read_vector(tv); + } else if (element_type == 
ov::element::u8) { + std::vector vec = read_vector(tv); for (uint8_t value : vec) { float_vec.push_back(static_cast(value)); } - } else if (element_type == ngraph::element::u16) { - vector vec = read_vector(tv); + } else if (element_type == ov::element::u16) { + std::vector vec = read_vector(tv); for (uint16_t value : vec) { float_vec.push_back(static_cast(value)); } - } else if (element_type == ngraph::element::u32) { - vector vec = read_vector(tv); + } else if (element_type == ov::element::u32) { + std::vector vec = read_vector(tv); for (uint32_t value : vec) { float_vec.push_back(static_cast(value)); } - } else if (element_type == ngraph::element::u64) { - vector vec = read_vector(tv); + } else if (element_type == ov::element::u64) { + std::vector vec = read_vector(tv); for (uint64_t value : vec) { float_vec.push_back(static_cast(value)); } @@ -356,72 +338,72 @@ vector read_float_vector(shared_ptr tv) { return float_vec; } -vector read_index_vector(shared_ptr tv) { - vector index_vec; - ngraph::element::Type element_type = tv->get_element_type(); +std::vector read_index_vector(std::shared_ptr tv) { + std::vector index_vec; + ov::element::Type element_type = tv->get_element_type(); - if (element_type == ngraph::element::boolean) { - vector vec = read_vector(tv); + if (element_type == ov::element::boolean) { + std::vector vec = read_vector(tv); // Changed from vector ctor to explicit for loop to add static_cast // This silences MSVC warnings for (char value : vec) { index_vec.push_back(static_cast(value)); } - } else if (element_type == ngraph::element::bf16) { - vector vec = read_vector(tv); - vector float_vec = ngraph::bfloat16::to_float_vector(vec); + } else if (element_type == ov::element::bf16) { + std::vector vec = read_vector(tv); + std::vector float_vec = ov::bfloat16::to_float_vector(vec); for (float value : float_vec) { index_vec.push_back(static_cast(value)); } - } else if (element_type == ngraph::element::f16) { - vector vec = read_vector(tv); - for (ngraph::float16 value : vec) { + } else if (element_type == ov::element::f16) { + std::vector vec = read_vector(tv); + for (ov::float16 value : vec) { index_vec.push_back(static_cast(static_cast(value))); } - } else if (element_type == ngraph::element::f32) { - vector vec = read_vector(tv); + } else if (element_type == ov::element::f32) { + std::vector vec = read_vector(tv); for (float value : vec) { index_vec.push_back(static_cast(value)); } - } else if (element_type == ngraph::element::f64) { - vector vec = read_vector(tv); + } else if (element_type == ov::element::f64) { + std::vector vec = read_vector(tv); for (double value : vec) { index_vec.push_back(static_cast(value)); } - } else if (element_type == ngraph::element::i8) { - vector vec = read_vector(tv); + } else if (element_type == ov::element::i8) { + std::vector vec = read_vector(tv); for (int8_t value : vec) { index_vec.push_back(static_cast(value)); } - } else if (element_type == ngraph::element::i16) { - vector vec = read_vector(tv); + } else if (element_type == ov::element::i16) { + std::vector vec = read_vector(tv); for (int16_t value : vec) { index_vec.push_back(static_cast(value)); } - } else if (element_type == ngraph::element::i32) { - vector vec = read_vector(tv); + } else if (element_type == ov::element::i32) { + std::vector vec = read_vector(tv); for (int32_t value : vec) { index_vec.push_back(static_cast(value)); } - } else if (element_type == ngraph::element::i64) { + } else if (element_type == ov::element::i64) { index_vec = read_vector(tv); - } else if 
(element_type == ngraph::element::u8) { - vector vec = read_vector(tv); + } else if (element_type == ov::element::u8) { + std::vector vec = read_vector(tv); for (uint8_t value : vec) { index_vec.push_back(static_cast(value)); } - } else if (element_type == ngraph::element::u16) { - vector vec = read_vector(tv); + } else if (element_type == ov::element::u16) { + std::vector vec = read_vector(tv); for (uint16_t value : vec) { index_vec.push_back(static_cast(value)); } - } else if (element_type == ngraph::element::u32) { - vector vec = read_vector(tv); + } else if (element_type == ov::element::u32) { + std::vector vec = read_vector(tv); for (uint32_t value : vec) { index_vec.push_back(static_cast(value)); } - } else if (element_type == ngraph::element::u64) { - vector vec = read_vector(tv); + } else if (element_type == ov::element::u64) { + std::vector vec = read_vector(tv); for (uint64_t value : vec) { index_vec.push_back(static_cast(value)); } diff --git a/src/core/src/validation_util.cpp b/src/core/src/validation_util.cpp index 3d2f72b8533825..2e1db9dd6864f1 100644 --- a/src/core/src/validation_util.cpp +++ b/src/core/src/validation_util.cpp @@ -5,36 +5,25 @@ #include "ngraph/validation_util.hpp" #include -#include -#include #include #include "bound_evaluate.hpp" #include "compare.hpp" #include "ngraph/evaluator.hpp" -#include "ngraph/op/concat.hpp" -#include "ngraph/op/convert.hpp" -#include "ngraph/op/gather.hpp" -#include "ngraph/op/min.hpp" -#include "ngraph/op/minimum.hpp" -#include "ngraph/op/reshape.hpp" -#include "ngraph/op/shape_of.hpp" -#include "ngraph/op/squeeze.hpp" -#include "ngraph/op/unsqueeze.hpp" -#include "ngraph/shape.hpp" -#include "ngraph/type/element_type_traits.hpp" -#include "ngraph/util.hpp" #include "openvino/core/dimension_tracker.hpp" +#include "openvino/op/concat.hpp" +#include "openvino/op/gather.hpp" #include "openvino/op/ops.hpp" #include "sequnce_generator.hpp" #include "validation_util.hpp" -NGRAPH_SUPPRESS_DEPRECATED_START -using namespace std; +OPENVINO_SUPPRESS_DEPRECATED_START -ngraph::Strides ngraph::conv_default_strides(const Node* /* node */, - const PartialShape& data_batch_shape, - const PartialShape& filters_shape) { +namespace ngraph { + +Strides conv_default_strides(const Node* /* node */, + const PartialShape& data_batch_shape, + const PartialShape& filters_shape) { size_t rank; if (data_batch_shape.rank().is_static() && data_batch_shape.rank().get_length() >= 2) { @@ -48,9 +37,9 @@ ngraph::Strides ngraph::conv_default_strides(const Node* /* node */, return Strides(rank, 1); } -ngraph::CoordinateDiff ngraph::conv_default_padding(const Node* /* node */, - const PartialShape& data_batch_shape, - const PartialShape& filters_shape) { +CoordinateDiff conv_default_padding(const Node* /* node */, + const PartialShape& data_batch_shape, + const PartialShape& filters_shape) { size_t rank; if (data_batch_shape.rank().is_static() && data_batch_shape.rank().get_length() >= 2) { @@ -71,16 +60,16 @@ ngraph::CoordinateDiff ngraph::conv_default_padding(const Node* /* node */, // TODO(amprocte): The messages here would be a bit friendlier if we didn't say "after // padding/after dilation" for cases where there is actually no padding/dilation. 
// -ngraph::PartialShape ngraph::infer_windowed_reduction_output_shape(const Node* node, - const PartialShape& data_shape, - const Strides& data_dilation, - const CoordinateDiff& data_padding_below, - const CoordinateDiff& data_padding_above, - const PartialShape& window_shape, - const Strides& window_strides, - const Strides& window_dilation, - bool is_window_all_in_padding_allowed, - bool ceil_mode) { +PartialShape infer_windowed_reduction_output_shape(const Node* node, + const PartialShape& data_shape, + const Strides& data_dilation, + const CoordinateDiff& data_padding_below, + const CoordinateDiff& data_padding_above, + const PartialShape& window_shape, + const Strides& window_strides, + const Strides& window_dilation, + bool is_window_all_in_padding_allowed, + bool ceil_mode) { PartialShape data_shape_merged{PartialShape::dynamic()}; NODE_VALIDATION_CHECK( @@ -203,13 +192,13 @@ ngraph::PartialShape ngraph::infer_windowed_reduction_output_shape(const Node* n return output_shape; } -void ngraph::validate_conv_params_spatial_dimensions(const Node* node, - const size_t num_spatial_dims, - const op::PadType auto_pad, - Strides& strides, - Strides& dilations, - CoordinateDiff& pads_begin, - CoordinateDiff& pads_end) { +void validate_conv_params_spatial_dimensions(const Node* node, + const size_t num_spatial_dims, + const op::PadType auto_pad, + Strides& strides, + Strides& dilations, + CoordinateDiff& pads_begin, + CoordinateDiff& pads_end) { if (strides.size() == 0) { strides = Strides(num_spatial_dims, 1); } @@ -233,15 +222,15 @@ void ngraph::validate_conv_params_spatial_dimensions(const Node* node, "Pads should be defined for all and only spatial features."); } -ngraph::PartialShape ngraph::validate_and_infer_convolution_forward_output_shape(const Node* node, - const Rank& result_ps_rank, - const PartialShape& data_batch_pshape, - const PartialShape& filters_pshape, - const op::PadType auto_pad, - Strides& strides, - Strides& dilations, - CoordinateDiff& pads_begin, - CoordinateDiff& pads_end) { +PartialShape validate_and_infer_convolution_forward_output_shape(const Node* node, + const Rank& result_ps_rank, + const PartialShape& data_batch_pshape, + const PartialShape& filters_pshape, + const op::PadType auto_pad, + Strides& strides, + Strides& dilations, + CoordinateDiff& pads_begin, + CoordinateDiff& pads_end) { PartialShape result_shape = PartialShape::dynamic(); if (result_ps_rank.is_static()) { const auto num_spatial_dims = result_ps_rank.get_length() - 2; @@ -267,7 +256,7 @@ ngraph::PartialShape ngraph::validate_and_infer_convolution_forward_output_shape pads_end.clear(); const PartialShape filter_spatial_shape = [filters_pshape]() { - vector filter_dims{filters_pshape}; + std::vector filter_dims{filters_pshape}; filter_dims.erase(filter_dims.begin(), filter_dims.begin() + 2); // Remove {C_OUT, C_IN} return PartialShape{filter_dims}; @@ -299,142 +288,18 @@ ngraph::PartialShape ngraph::validate_and_infer_convolution_forward_output_shape return result_shape; } -// -// Infers the output batch shape and element type for convolution fprop. 
-// -ngraph::PartialShape ov::infer_convolution_forward(const Node* node, - const PartialShape& data_batch_shape, - const Strides& data_dilation, - const CoordinateDiff& data_padding_below, - const CoordinateDiff& data_padding_above, - const PartialShape& filters_shape, - const Strides& filter_strides, - const Strides& filter_dilation) { - Rank data_batch_filters_rank{Rank::dynamic()}; - - NODE_VALIDATION_CHECK(node, - Rank::merge(data_batch_filters_rank, data_batch_shape.rank(), filters_shape.rank()), - "Data batch and filters rank do not match (data batch shape: ", - data_batch_shape, - ", filters shape: ", - filters_shape, - ")."); - - NODE_VALIDATION_CHECK(node, - data_batch_filters_rank.is_dynamic() || data_batch_filters_rank.get_length() >= 3, - "Data batch and filters must have rank of at least 3 (one batch axis, ", - "one input-channel axis, and at least one spatial dimension) ", - "(data batch shape: ", - data_batch_shape, - ", filters shape: ", - filters_shape, - ")."); - - Rank spatial_rank{Rank::dynamic()}; - NODE_VALIDATION_CHECK(node, - Rank::merge(spatial_rank, spatial_rank, data_batch_filters_rank - 2) && - Rank::merge(spatial_rank, spatial_rank, data_dilation.size()) && - Rank::merge(spatial_rank, spatial_rank, data_padding_below.size()) && - Rank::merge(spatial_rank, spatial_rank, data_padding_above.size()) && - Rank::merge(spatial_rank, spatial_rank, filter_strides.size()) && - Rank::merge(spatial_rank, spatial_rank, filter_dilation.size()), - "Ranks for data item shape/filters shape (data batch has shape ", - data_batch_shape, - ", so data item rank is ", - (data_batch_shape.rank() - 2), - " and filters have shape ", - filters_shape, - ", so filters spatial rank is ", - (filters_shape.rank() - 2), - "), data dilation (", - data_dilation, - "), padding below (", - data_padding_below, - "), padding above (", - data_padding_above, - "), filter strides (", - filter_strides, - "), and filter dilation (", - filter_dilation, - ") do not match."); - - Dimension batch_size = (data_batch_shape.rank().is_static() ? data_batch_shape[0] : Dimension::dynamic()); - Dimension data_channel_count = (data_batch_shape.rank().is_static() ? data_batch_shape[1] : Dimension::dynamic()); - PartialShape data_spatial_shape(PartialShape::dynamic(spatial_rank)); - - Dimension filter_output_channel_count = - (filters_shape.rank().is_static() ? filters_shape[0] : Dimension::dynamic()); - Dimension filter_input_channel_count = (filters_shape.rank().is_static() ? filters_shape[1] : Dimension::dynamic()); - PartialShape filter_spatial_shape(PartialShape::dynamic(spatial_rank)); - - // - // Note: spatial_rank is definitely static at this point. 
- // - - for (int64_t i = 0; i < spatial_rank.get_length(); i++) { - if (data_batch_shape.rank().is_static()) { - data_spatial_shape[i] = data_batch_shape[i + 2]; - } - - if (filters_shape.rank().is_static()) { - filter_spatial_shape[i] = filters_shape[i + 2]; - } - } - - NODE_VALIDATION_CHECK(node, batch_size.is_dynamic() || batch_size.get_length() > 0, "Batch size is zero."); - - Dimension merged_channel_count; - - NODE_VALIDATION_CHECK(node, - Dimension::merge(merged_channel_count, data_channel_count, filter_input_channel_count), - "Data batch channel count (", - data_channel_count, - ") does not match filter input ", - "channel count (", - filter_input_channel_count, - ")."); - - NODE_VALIDATION_CHECK(node, - merged_channel_count.is_dynamic() || merged_channel_count.get_length() > 0, - "Data batch channel count and/or filter input channel count is zero."); - - NODE_VALIDATION_CHECK(node, - filter_output_channel_count.is_dynamic() || filter_output_channel_count.get_length() > 0, - "Filter output channel count is zero."); - - PartialShape data_output_shape = ngraph::infer_windowed_reduction_output_shape(node, - data_spatial_shape, - data_dilation, - data_padding_below, - data_padding_above, - filter_spatial_shape, - filter_strides, - filter_dilation, - true); - - PartialShape batch_output_shape(PartialShape::dynamic(spatial_rank + 2)); - batch_output_shape[0] = batch_size; - batch_output_shape[1] = filter_output_channel_count; - - for (int64_t i = 0; i < spatial_rank.get_length(); i++) { - batch_output_shape[i + 2] = data_output_shape[i]; - } - - return batch_output_shape; -} - // // Infers the output batch shape and element type for batched pooling fprop. // -ngraph::PartialShape ngraph::infer_batched_pooling_forward(const Node* node, - const PartialShape& data_batch_shape, - const CoordinateDiff& data_padding_below, - const CoordinateDiff& data_padding_above, - const PartialShape& window_shape, - const Strides& window_strides, - bool is_window_all_in_padding_allowed, - bool ceil_mode, - const Strides& window_dilation) { +PartialShape infer_batched_pooling_forward(const Node* node, + const PartialShape& data_batch_shape, + const CoordinateDiff& data_padding_below, + const CoordinateDiff& data_padding_above, + const PartialShape& window_shape, + const Strides& window_strides, + bool is_window_all_in_padding_allowed, + bool ceil_mode, + const Strides& window_dilation) { NODE_VALIDATION_CHECK(node, data_batch_shape.rank().is_dynamic() || (data_batch_shape.rank().get_length() >= 3 && data_batch_shape.rank().get_length() <= 5), @@ -517,15 +382,15 @@ ngraph::PartialShape ngraph::infer_batched_pooling_forward(const Node* node, } struct ChannelShapedInputSpec { - ngraph::element::Type m_element_type; - ngraph::PartialShape m_shape; + element::Type m_element_type; + PartialShape m_shape; std::string m_input_name; }; -static std::tuple infer_batch_norm_forward_helper( - const ngraph::Node* node, - ngraph::element::Type input_element_type, - const ngraph::PartialShape& input_shape, +static std::tuple infer_batch_norm_forward_helper( + const Node* node, + element::Type input_element_type, + const PartialShape& input_shape, const std::vector& channel_shaped_inputs) { // Built up a slash-separated string naming all the channel-shaped inputs, for use in error // messages. 
@@ -541,11 +406,11 @@ static std::tuple= 2, @@ -570,11 +435,11 @@ static std::tuple ngraph::infer_batch_norm_forward( - const Node* node, - element::Type input_element_type, - element::Type gamma_element_type, - element::Type beta_element_type, - element::Type mean_element_type, - element::Type variance_element_type, - const PartialShape& input_shape, - const PartialShape& gamma_shape, - const PartialShape& beta_shape, - const PartialShape& mean_shape, - const PartialShape& variance_shape) { +std::tuple infer_batch_norm_forward(const Node* node, + element::Type input_element_type, + element::Type gamma_element_type, + element::Type beta_element_type, + element::Type mean_element_type, + element::Type variance_element_type, + const PartialShape& input_shape, + const PartialShape& gamma_shape, + const PartialShape& beta_shape, + const PartialShape& mean_shape, + const PartialShape& variance_shape) { return infer_batch_norm_forward_helper(node, input_element_type, input_shape, @@ -634,14 +498,13 @@ std::tuple ng {variance_element_type, variance_shape, "variance"}}); } -std::tuple ngraph::infer_batch_norm_forward( - const Node* node, - element::Type input_element_type, - element::Type gamma_element_type, - element::Type beta_element_type, - const PartialShape& input_shape, - const PartialShape& gamma_shape, - const PartialShape& beta_shape) { +std::tuple infer_batch_norm_forward(const Node* node, + element::Type input_element_type, + element::Type gamma_element_type, + element::Type beta_element_type, + const PartialShape& input_shape, + const PartialShape& gamma_shape, + const PartialShape& beta_shape) { return infer_batch_norm_forward_helper( node, input_element_type, @@ -649,64 +512,13 @@ std::tuple ng {{gamma_element_type, gamma_shape, "gamma"}, {beta_element_type, beta_shape, "beta"}}); } -void ov::infer_auto_padding(const Shape& image_shape, +bool try_apply_auto_padding(const PartialShape& image_shape, const Shape& filter_shape, const Strides& filter_strides, const Strides& filter_dilations, const op::PadType pad_type, CoordinateDiff& padding_above, CoordinateDiff& padding_below) { - const auto image_dims = std::vector(std::begin(image_shape), std::end(image_shape)); - // because image_shape is fully known result of try_apply_infer_auto_padding is ignored - ov::util::try_apply_auto_padding(image_dims, - filter_shape, - filter_strides, - filter_dilations, - pad_type, - padding_above, - padding_below); -} - -bool ov::util::try_apply_auto_padding(const PartialShape& image_shape, - const Shape& filter_shape, - const Strides& filter_strides, - const Strides& filter_dilations, - const op::PadType pad_type, - CoordinateDiff& padding_above, - CoordinateDiff& padding_below) { - OPENVINO_ASSERT(pad_type == op::PadType::SAME_UPPER || pad_type == op::PadType::SAME_LOWER); - - if (image_shape.rank().is_dynamic()) { - return false; - } - const auto image_dims = static_cast>(image_shape); - for (size_t i = 0; i < static_cast(filter_shape.size()); i++) { - if (image_dims[i + 2].is_static()) { - auto image_size = static_cast(image_dims[i + 2].get_length()); - int64_t filter_size = (static_cast(filter_shape[i]) - 1) * filter_dilations[i] + 1; - auto filter_stride = static_cast(filter_strides[i]); - auto output_size = (image_size + filter_stride - 1) / filter_stride; - - auto padding_needed = std::max(int64_t(0), (output_size - 1) * filter_stride + filter_size - image_size); - auto padding_lhs = padding_needed / 2; - auto padding_rhs = padding_needed - padding_lhs; - padding_below.push_back(pad_type == 
op::PadType::SAME_UPPER ? padding_lhs : padding_rhs); - padding_above.push_back(pad_type == op::PadType::SAME_UPPER ? padding_rhs : padding_lhs); - } else { - padding_below.push_back(0); - padding_above.push_back(0); - } - } - return true; -} - -bool ngraph::try_apply_auto_padding(const PartialShape& image_shape, - const Shape& filter_shape, - const Strides& filter_strides, - const Strides& filter_dilations, - const op::PadType pad_type, - CoordinateDiff& padding_above, - CoordinateDiff& padding_below) { return ov::util::try_apply_auto_padding(image_shape, filter_shape, filter_strides, @@ -716,16 +528,16 @@ bool ngraph::try_apply_auto_padding(const PartialShape& image_shape, padding_below); } -ngraph::PartialShape ngraph::infer_slice_shape(const Node* node, - const PartialShape& input_shape, - const std::vector& begin, - const std::vector& end, - const std::vector& strides, - const AxisSet& begin_mask, - const AxisSet& end_mask, - const AxisSet& new_axis_mask, - const AxisSet& shrink_axis_mask, - const AxisSet& ellipsis_mask) { +PartialShape infer_slice_shape(const Node* node, + const PartialShape& input_shape, + const std::vector& begin, + const std::vector& end, + const std::vector& strides, + const AxisSet& begin_mask, + const AxisSet& end_mask, + const AxisSet& new_axis_mask, + const AxisSet& shrink_axis_mask, + const AxisSet& ellipsis_mask) { if (begin.size() && end.size()) { NODE_VALIDATION_CHECK(node, begin.size() == end.size(), @@ -869,146 +681,58 @@ ngraph::PartialShape ngraph::infer_slice_shape(const Node* node, return dim; } -namespace { -const auto normalize_axis_to = [](const int64_t& tensor_rank) { - return [&tensor_rank](int64_t& axis) { - if (axis < 0) { - axis += tensor_rank; - } - }; -}; - -std::string normalize_axis_error_msg(const int64_t& axis, const int64_t& lower, const int64_t& upper) { - return std::string(" Parameter axis ") - .append(to_string(axis)) - .append(" out of the tensor rank range [") - .append(to_string(lower)) - .append(", ") - .append(to_string(upper)) - .append("]."); -} -} // namespace +void opset1::infer_conv_backprop_auto_padding(const Shape& input_data_shape, + const Shape& filters_shape, + const Shape& output_shape, + const Strides& strides, + const Strides& dilations, + const op::PadType auto_pad_type, + const CoordinateDiff& output_padding, + CoordinateDiff& pads_begin, + CoordinateDiff& pads_end) { + OPENVINO_ASSERT(auto_pad_type == op::PadType::SAME_UPPER || auto_pad_type == op::PadType::SAME_LOWER); -int64_t ov::util::normalize(const int64_t& value, const int64_t& max) { - return (value < 0) ? value + max : value; -}; + size_t num_spatial_dims = input_data_shape.size(); + OPENVINO_ASSERT(filters_shape.size() == num_spatial_dims && strides.size() == num_spatial_dims && + dilations.size() == num_spatial_dims && pads_begin.size() == num_spatial_dims && + pads_end.size() == num_spatial_dims && output_padding.size() == num_spatial_dims); -void ov::normalize_axes(const Node* node, const int64_t& tensor_rank, std::vector& axes) { - const auto axis_checker = cmp::Between(-tensor_rank, tensor_rank ? 
(tensor_rank - 1) : 0); - const auto invalid_axis = std::find_if_not(axes.cbegin(), axes.cend(), axis_checker); - NODE_VALIDATION_CHECK(node, - invalid_axis == axes.cend(), - normalize_axis_error_msg(*invalid_axis, axis_checker.lower(), axis_checker.upper())); - std::for_each(axes.begin(), axes.end(), normalize_axis_to(tensor_rank)); -} + pads_begin = CoordinateDiff(num_spatial_dims); + pads_end = CoordinateDiff(num_spatial_dims); -std::vector ov::normalize_axes(const std::string& node_description, - const std::vector& axes, - const Rank& tensor_rank) { - std::vector new_axes; - new_axes.reserve(axes.size()); - for (const auto& axis : axes) { - new_axes.push_back(normalize_axis(node_description, axis, tensor_rank)); + for (uint64_t i = 0; i < num_spatial_dims; ++i) { + int total_padding = std::max( + static_cast(strides[i] * (input_data_shape[i] - 1) + dilations[i] * (filters_shape[i] - 1) + 1 - + output_shape[i] + output_padding[i]), + 0); + if (auto_pad_type != op::PadType::SAME_UPPER) { + pads_begin[i] = total_padding / 2; + pads_end[i] = total_padding - pads_begin[i]; + } else { + pads_end[i] = total_padding / 2; + pads_begin[i] = total_padding - pads_end[i]; + } } - return new_axes; -} - -int64_t ov::normalize_axis(const Node* node, std::int64_t axis, const Rank& tensor_rank) { - return normalize_axis(node->description(), axis, tensor_rank); } -int64_t ov::normalize_axis(const std::string& node_description, std::int64_t axis, const Rank& tensor_rank) { - if (axis < 0) { - // Handling negative axis requires static tensor rank - NGRAPH_CHECK(tensor_rank.is_static(), - node_description, - " Rank must be static in order to normalize negative axis=", - axis); - } - if (tensor_rank.is_dynamic()) { - return axis; +namespace { +/// \brief Scalar variant describes value of an Output, for use in max shape determination +/// +/// For tensor values, we use the maximum value in the tensor +struct MaxValue { + /// \brief No information known about the output + MaxValue() = default; + /// \brief uint64_t assoiated with the output + MaxValue(uint64_t value) : m_value(value) {} + MaxValue(const std::vector& slices, int64_t slice_axis) : m_slices(slices), m_slice_axis(slice_axis) { + m_value = *max_element(m_slices.begin(), m_slices.end()); } + uint64_t m_value{std::numeric_limits::max()}; + std::vector m_slices; + int64_t m_slice_axis{-1}; +}; - const auto tensor_rank_value = tensor_rank.get_length(); - return normalize_axis(node_description, - axis, - tensor_rank_value, - -tensor_rank_value, - tensor_rank_value ? (tensor_rank_value - 1) : 0); -} - -int64_t ov::normalize_axis(const Node* node, - std::int64_t axis, - std::uint64_t tensor_rank, - std::int64_t axis_range_min, - std::int64_t axis_range_max) { - return normalize_axis(node->description(), axis, tensor_rank, axis_range_min, axis_range_max); -} - -int64_t ov::normalize_axis(const std::string& node_description, - std::int64_t axis, - std::uint64_t tensor_rank, - std::int64_t axis_range_min, - std::int64_t axis_range_max) { - // Accepted range of value for axis is [axis_range_min, axis_range_max]. 
- OPENVINO_ASSERT((axis_range_min <= axis) && (axis <= axis_range_max), - node_description, - normalize_axis_error_msg(axis, axis_range_min, axis_range_max)); - return util::normalize(axis, tensor_rank); -} - -void ngraph::opset1::infer_conv_backprop_auto_padding(const Shape& input_data_shape, - const Shape& filters_shape, - const Shape& output_shape, - const Strides& strides, - const Strides& dilations, - const op::PadType auto_pad_type, - const CoordinateDiff& output_padding, - CoordinateDiff& pads_begin, - CoordinateDiff& pads_end) { - NGRAPH_CHECK(auto_pad_type == op::PadType::SAME_UPPER || auto_pad_type == op::PadType::SAME_LOWER); - - size_t num_spatial_dims = input_data_shape.size(); - NGRAPH_CHECK(filters_shape.size() == num_spatial_dims && strides.size() == num_spatial_dims && - dilations.size() == num_spatial_dims && pads_begin.size() == num_spatial_dims && - pads_end.size() == num_spatial_dims && output_padding.size() == num_spatial_dims); - - pads_begin = CoordinateDiff(num_spatial_dims); - pads_end = CoordinateDiff(num_spatial_dims); - - for (uint64_t i = 0; i < num_spatial_dims; ++i) { - int total_padding = std::max( - static_cast(strides[i] * (input_data_shape[i] - 1) + dilations[i] * (filters_shape[i] - 1) + 1 - - output_shape[i] + output_padding[i]), - 0); - if (auto_pad_type != op::PadType::SAME_UPPER) { - pads_begin[i] = total_padding / 2; - pads_end[i] = total_padding - pads_begin[i]; - } else { - pads_end[i] = total_padding / 2; - pads_begin[i] = total_padding - pads_end[i]; - } - } -} - -namespace { -/// \brief Scalar variant describes value of an Output, for use in max shape determination -/// -/// For tensor values, we use the maximum value in the tensor -struct MaxValue { - /// \brief No information known about the output - MaxValue() = default; - /// \brief uint64_t assoiated with the output - MaxValue(uint64_t value) : m_value(value) {} - MaxValue(const vector& slices, int64_t slice_axis) : m_slices(slices), m_slice_axis(slice_axis) { - m_value = *max_element(m_slices.begin(), m_slices.end()); - } - uint64_t m_value{numeric_limits::max()}; - vector m_slices; - int64_t m_slice_axis{-1}; -}; - -vector exec_constant(ngraph::Node* node, vector& inputs) { +std::vector exec_constant(Node* node, std::vector& inputs) { auto result = MaxValue(); auto op = ov::as_type(node); auto element_type = op->get_output_element_type(0); @@ -1032,44 +756,44 @@ vector exec_constant(ngraph::Node* node, vector& inputs) { return {result}; } -vector exec_minimum(ngraph::Node* node, vector& inputs) { - uint64_t min_value = numeric_limits::max(); +std::vector exec_minimum(Node* node, std::vector& inputs) { + uint64_t min_value = std::numeric_limits::max(); switch (node->get_output_element_type(0)) { - case ngraph::element::Type_t::i8: - min_value = numeric_limits::max(); + case element::Type_t::i8: + min_value = std::numeric_limits::max(); break; - case ngraph::element::Type_t::i16: - min_value = numeric_limits::max(); + case element::Type_t::i16: + min_value = std::numeric_limits::max(); break; - case ngraph::element::Type_t::i32: - min_value = numeric_limits::max(); + case element::Type_t::i32: + min_value = std::numeric_limits::max(); break; - case ngraph::element::Type_t::i64: - min_value = numeric_limits::max(); + case element::Type_t::i64: + min_value = std::numeric_limits::max(); break; - case ngraph::element::Type_t::u8: - min_value = numeric_limits::max(); + case element::Type_t::u8: + min_value = std::numeric_limits::max(); break; - case ngraph::element::Type_t::u16: - min_value = 
numeric_limits::max(); + case element::Type_t::u16: + min_value = std::numeric_limits::max(); break; - case ngraph::element::Type_t::u32: - min_value = numeric_limits::max(); + case element::Type_t::u32: + min_value = std::numeric_limits::max(); break; - case ngraph::element::Type_t::u64: - min_value = numeric_limits::max(); + case element::Type_t::u64: + min_value = std::numeric_limits::max(); break; default: break; } - min_value = min(min_value, inputs.at(0).m_value); - min_value = min(min_value, inputs.at(1).m_value); + min_value = std::min(min_value, inputs.at(0).m_value); + min_value = std::min(min_value, inputs.at(1).m_value); return {MaxValue(min_value)}; } -vector exec_concat(ngraph::Node* node, vector& inputs) { - auto op = ov::as_type(node); - vector slice_maxen; +std::vector exec_concat(Node* node, std::vector& inputs) { + auto op = ov::as_type(node); + std::vector slice_maxen; for (const auto& input : inputs) { slice_maxen.push_back(input.m_value); } @@ -1077,13 +801,13 @@ vector exec_concat(ngraph::Node* node, vector& inputs) { return {MaxValue(slice_maxen, axis)}; } -vector exec_reduce_min(ngraph::Node* node, vector& inputs) { +std::vector exec_reduce_min(Node* node, std::vector& inputs) { auto data = inputs.at(0); if (data.m_slice_axis >= 0 && data.m_slices.size() > 1) { - if (auto indices_const = ov::as_type(node->get_input_node_ptr(1))) { + if (auto indices_const = ov::as_type(node->get_input_node_ptr(1))) { if (indices_const->get_output_element_type(0).is_integral()) { const auto& indices_shape = indices_const->get_output_shape(0); - if (indices_shape == ngraph::Shape{1}) { + if (indices_shape == Shape{1}) { auto indices = indices_const->cast_vector(); auto axis = indices.at(0); if (axis == data.m_slice_axis) { @@ -1097,7 +821,7 @@ vector exec_reduce_min(ngraph::Node* node, vector& inputs) { return {MaxValue(data.m_value)}; } -vector exec_shape_of(ngraph::Node* node, vector& inputs) { +std::vector exec_shape_of(Node* node, std::vector& inputs) { const auto& inputPS = node->get_input_partial_shape(0); std::vector shapeDims; for (int64_t i = 0; i < inputPS.rank().get_length(); i++) { @@ -1111,11 +835,11 @@ vector exec_shape_of(ngraph::Node* node, vector& inputs) { return {MaxValue(shapeDims, 0)}; } -vector exec_gather(ngraph::Node* node, vector& inputs) { - auto gather = ov::as_type(node); +std::vector exec_gather(Node* node, std::vector& inputs) { + auto gather = ov::as_type(node); - const auto& indices = ov::as_type_ptr(node->input_value(1).get_node_shared_ptr()); - const auto& axis = ov::as_type_ptr(node->input_value(2).get_node_shared_ptr()); + const auto& indices = ov::as_type_ptr(node->input_value(1).get_node_shared_ptr()); + const auto& axis = ov::as_type_ptr(node->input_value(2).get_node_shared_ptr()); if (!indices || !axis) { return {MaxValue()}; @@ -1133,32 +857,33 @@ vector exec_gather(ngraph::Node* node, vector& inputs) { return {MaxValue(inputs[0].m_slices[indicesVec[0]])}; } -vector exec_nop(ngraph::Node* node, vector& inputs) { +std::vector exec_nop(Node* node, std::vector& inputs) { return {inputs.at(0)}; } } // namespace -pair ngraph::maximum_value(const Output& value) { - static Evaluator::op_handler_map handlers = {{op::v0::Concat::get_type_info_static(), exec_concat}, - {op::v0::Constant::get_type_info_static(), exec_constant}, - {op::v0::Convert::get_type_info_static(), exec_nop}, - {op::v1::Gather::get_type_info_static(), exec_gather}, - {op::v1::Minimum::get_type_info_static(), exec_minimum}, - {op::v1::ReduceMin::get_type_info_static(), 
exec_reduce_min}, - {op::v1::Reshape::get_type_info_static(), exec_nop}, - {op::v3::ShapeOf::get_type_info_static(), exec_shape_of}, - {op::v0::Squeeze::get_type_info_static(), exec_nop}, - {op::v0::Unsqueeze::get_type_info_static(), exec_nop}}; +std::pair maximum_value(const Output& value) { + static ngraph::Evaluator::op_handler_map handlers = { + {ov::op::v0::Concat::get_type_info_static(), exec_concat}, + {ov::op::v0::Constant::get_type_info_static(), exec_constant}, + {ov::op::v0::Convert::get_type_info_static(), exec_nop}, + {ov::op::v1::Gather::get_type_info_static(), exec_gather}, + {ov::op::v1::Minimum::get_type_info_static(), exec_minimum}, + {ov::op::v1::ReduceMin::get_type_info_static(), exec_reduce_min}, + {ov::op::v1::Reshape::get_type_info_static(), exec_nop}, + {ov::op::v3::ShapeOf::get_type_info_static(), exec_shape_of}, + {ov::op::v0::Squeeze::get_type_info_static(), exec_nop}, + {ov::op::v0::Unsqueeze::get_type_info_static(), exec_nop}}; Evaluator::value_map value_map; Evaluator evaluator(handlers, value_map); auto val = evaluator.evaluate(value); - return pair(val.m_value < numeric_limits::max(), val.m_value); + return std::pair(val.m_value < std::numeric_limits::max(), val.m_value); } -void ngraph::evaluate_nodes(std::map& value_map, - std::map& output_tensor_map, - const OutputVector& outputs, - const EvaluationContext& evaluation_context) { +void evaluate_nodes(std::map& value_map, + std::map& output_tensor_map, + const OutputVector& outputs, + const EvaluationContext& evaluation_context) { Evaluator evaluator({}, value_map); evaluator.set_universal_handler( [&output_tensor_map, &evaluation_context](Node* node, @@ -1167,7 +892,7 @@ void ngraph::evaluate_nodes(std::map& value_map, for (const auto& v : node->outputs()) { auto it = output_tensor_map.find(v); if (it == output_tensor_map.end()) { - auto c = make_shared(v); + auto c = std::make_shared(v); output_tensors.push_back(c); } else { output_tensors.push_back(it->second); @@ -1176,7 +901,7 @@ void ngraph::evaluate_nodes(std::map& value_map, if (node->evaluate(output_tensors, input_tensors, evaluation_context)) { return output_tensors; } else { - NGRAPH_CHECK(false, "Evaluation failed on ", node); + OPENVINO_THROW("Evaluation failed on ", node); } }); for (const auto& value : outputs) { @@ -1184,45 +909,8 @@ void ngraph::evaluate_nodes(std::map& value_map, } } -bool ov::evaluate_as_partial_shape(const Output& output, PartialShape& pshape) { - Tensor lb, ub; - std::tie(lb, ub) = ov::evaluate_both_bounds(output); - bool shape_defined = false; - if (lb && ub) { - auto lower_bound = std::make_shared(lb.get_element_type(), lb.get_shape(), lb.data()) - ->cast_vector(); - auto upper_bound = std::make_shared(ub.get_element_type(), ub.get_shape(), ub.data()) - ->cast_vector(); - NGRAPH_CHECK(lower_bound.size() == upper_bound.size()); - const TensorLabel& labels = output.get_tensor().get_value_label(); - NGRAPH_CHECK(labels.empty() || lower_bound.size() == labels.size()); - - vector resulting_pshape(lower_bound.size()); - for (size_t i = 0; i < lower_bound.size(); ++i) { - auto low = lower_bound[i], up = upper_bound[i]; - NGRAPH_CHECK(low >= 0 && up >= 0, "Value for partial shape evaluation can't be lower than zero."); - if (output.get_element_type() == element::i32 && low != up) { - if (up == std::numeric_limits::max()) - up = std::numeric_limits::max(); - if (low == std::numeric_limits::max()) - low = std::numeric_limits::max(); - } - resulting_pshape[i] = {low, up}; - if (!labels.empty() && labels[i]) - 
ov::DimensionTracker::set_label(resulting_pshape[i], labels[i]); - } - pshape = PartialShape(resulting_pshape); - shape_defined = true; - } - return shape_defined; -} - -bool ov::default_label_evaluator(const Node* node, TensorLabelVector& output_labels) { - return default_label_evaluator(node, {0}, output_labels); -} - -shared_ptr ngraph::get_constant_max_of_type(element::Type_t t) { -#define NGRAPH_TYPE_TO_MAX_CONST(t) \ +std::shared_ptr get_constant_max_of_type(element::Type_t t) { +#define OPENVINO_TYPE_TO_MAX_CONST(t) \ case t: \ return ov::op::v0::Constant::create( \ t, \ @@ -1231,27 +919,27 @@ shared_ptr ngraph::get_constant_max_of_type(element::T break switch (t) { - NGRAPH_TYPE_TO_MAX_CONST(element::boolean); - NGRAPH_TYPE_TO_MAX_CONST(element::bf16); - NGRAPH_TYPE_TO_MAX_CONST(element::f16); - NGRAPH_TYPE_TO_MAX_CONST(element::f32); - NGRAPH_TYPE_TO_MAX_CONST(element::f64); - NGRAPH_TYPE_TO_MAX_CONST(element::i8); - NGRAPH_TYPE_TO_MAX_CONST(element::i16); - NGRAPH_TYPE_TO_MAX_CONST(element::i32); - NGRAPH_TYPE_TO_MAX_CONST(element::i64); - NGRAPH_TYPE_TO_MAX_CONST(element::u1); - NGRAPH_TYPE_TO_MAX_CONST(element::u8); - NGRAPH_TYPE_TO_MAX_CONST(element::u16); - NGRAPH_TYPE_TO_MAX_CONST(element::u32); - NGRAPH_TYPE_TO_MAX_CONST(element::u64); + OPENVINO_TYPE_TO_MAX_CONST(element::boolean); + OPENVINO_TYPE_TO_MAX_CONST(element::bf16); + OPENVINO_TYPE_TO_MAX_CONST(element::f16); + OPENVINO_TYPE_TO_MAX_CONST(element::f32); + OPENVINO_TYPE_TO_MAX_CONST(element::f64); + OPENVINO_TYPE_TO_MAX_CONST(element::i8); + OPENVINO_TYPE_TO_MAX_CONST(element::i16); + OPENVINO_TYPE_TO_MAX_CONST(element::i32); + OPENVINO_TYPE_TO_MAX_CONST(element::i64); + OPENVINO_TYPE_TO_MAX_CONST(element::u1); + OPENVINO_TYPE_TO_MAX_CONST(element::u8); + OPENVINO_TYPE_TO_MAX_CONST(element::u16); + OPENVINO_TYPE_TO_MAX_CONST(element::u32); + OPENVINO_TYPE_TO_MAX_CONST(element::u64); default: return nullptr; } } -shared_ptr ngraph::get_constant_min_of_type(element::Type_t t) { -#define NGRAPH_TYPE_TO_MIN_CONST(t) \ +std::shared_ptr get_constant_min_of_type(element::Type_t t) { +#define OPENVINO_TYPE_TO_MIN_CONST(t) \ case t: \ return ov::op::v0::Constant::create( \ t, \ @@ -1260,27 +948,27 @@ shared_ptr ngraph::get_constant_min_of_type(element::T break switch (t) { - NGRAPH_TYPE_TO_MIN_CONST(element::boolean); - NGRAPH_TYPE_TO_MIN_CONST(element::bf16); - NGRAPH_TYPE_TO_MIN_CONST(element::f16); - NGRAPH_TYPE_TO_MIN_CONST(element::f32); - NGRAPH_TYPE_TO_MIN_CONST(element::f64); - NGRAPH_TYPE_TO_MIN_CONST(element::i8); - NGRAPH_TYPE_TO_MIN_CONST(element::i16); - NGRAPH_TYPE_TO_MIN_CONST(element::i32); - NGRAPH_TYPE_TO_MIN_CONST(element::i64); - NGRAPH_TYPE_TO_MIN_CONST(element::u1); - NGRAPH_TYPE_TO_MIN_CONST(element::u8); - NGRAPH_TYPE_TO_MIN_CONST(element::u16); - NGRAPH_TYPE_TO_MIN_CONST(element::u32); - NGRAPH_TYPE_TO_MIN_CONST(element::u64); + OPENVINO_TYPE_TO_MIN_CONST(element::boolean); + OPENVINO_TYPE_TO_MIN_CONST(element::bf16); + OPENVINO_TYPE_TO_MIN_CONST(element::f16); + OPENVINO_TYPE_TO_MIN_CONST(element::f32); + OPENVINO_TYPE_TO_MIN_CONST(element::f64); + OPENVINO_TYPE_TO_MIN_CONST(element::i8); + OPENVINO_TYPE_TO_MIN_CONST(element::i16); + OPENVINO_TYPE_TO_MIN_CONST(element::i32); + OPENVINO_TYPE_TO_MIN_CONST(element::i64); + OPENVINO_TYPE_TO_MIN_CONST(element::u1); + OPENVINO_TYPE_TO_MIN_CONST(element::u8); + OPENVINO_TYPE_TO_MIN_CONST(element::u16); + OPENVINO_TYPE_TO_MIN_CONST(element::u32); + OPENVINO_TYPE_TO_MIN_CONST(element::u64); default: return nullptr; } } -std::shared_ptr 
ngraph::get_constant_lowest_of_type(element::Type_t t) { -#define NGRAPH_TYPE_TO_LOWEST_CONST(t) \ +std::shared_ptr get_constant_lowest_of_type(element::Type_t t) { +#define OPENVINO_TYPE_TO_LOWEST_CONST(t) \ case t: \ return op::v0::Constant::create(t, \ {}, \ @@ -1288,20 +976,20 @@ std::shared_ptr ngraph::get_constant_lowest_of_type(el break switch (t) { - NGRAPH_TYPE_TO_LOWEST_CONST(element::boolean); - NGRAPH_TYPE_TO_LOWEST_CONST(element::bf16); - NGRAPH_TYPE_TO_LOWEST_CONST(element::f16); - NGRAPH_TYPE_TO_LOWEST_CONST(element::f32); - NGRAPH_TYPE_TO_LOWEST_CONST(element::f64); - NGRAPH_TYPE_TO_LOWEST_CONST(element::i8); - NGRAPH_TYPE_TO_LOWEST_CONST(element::i16); - NGRAPH_TYPE_TO_LOWEST_CONST(element::i32); - NGRAPH_TYPE_TO_LOWEST_CONST(element::i64); - NGRAPH_TYPE_TO_LOWEST_CONST(element::u1); - NGRAPH_TYPE_TO_LOWEST_CONST(element::u8); - NGRAPH_TYPE_TO_LOWEST_CONST(element::u16); - NGRAPH_TYPE_TO_LOWEST_CONST(element::u32); - NGRAPH_TYPE_TO_LOWEST_CONST(element::u64); + OPENVINO_TYPE_TO_LOWEST_CONST(element::boolean); + OPENVINO_TYPE_TO_LOWEST_CONST(element::bf16); + OPENVINO_TYPE_TO_LOWEST_CONST(element::f16); + OPENVINO_TYPE_TO_LOWEST_CONST(element::f32); + OPENVINO_TYPE_TO_LOWEST_CONST(element::f64); + OPENVINO_TYPE_TO_LOWEST_CONST(element::i8); + OPENVINO_TYPE_TO_LOWEST_CONST(element::i16); + OPENVINO_TYPE_TO_LOWEST_CONST(element::i32); + OPENVINO_TYPE_TO_LOWEST_CONST(element::i64); + OPENVINO_TYPE_TO_LOWEST_CONST(element::u1); + OPENVINO_TYPE_TO_LOWEST_CONST(element::u8); + OPENVINO_TYPE_TO_LOWEST_CONST(element::u16); + OPENVINO_TYPE_TO_LOWEST_CONST(element::u32); + OPENVINO_TYPE_TO_LOWEST_CONST(element::u64); case element::undefined: case element::dynamic: @@ -1310,15 +998,193 @@ std::shared_ptr ngraph::get_constant_lowest_of_type(el } } -shared_ptr ov::get_constant_from_source(const Output& source) { - return ov::util::get_constant_from_source(source); -} - -bool ngraph::validate_host_tensor_vector(const HostTensorVector& tensor_vector, const size_t& size) { +bool validate_host_tensor_vector(const HostTensorVector& tensor_vector, const size_t& size) { return (tensor_vector.size() == size) && std::none_of(tensor_vector.cbegin(), tensor_vector.cend(), ov::cmp::Equal(nullptr)); } +} // namespace ngraph + +void ov::infer_auto_padding(const Shape& image_shape, + const Shape& filter_shape, + const Strides& filter_strides, + const Strides& filter_dilations, + const op::PadType pad_type, + CoordinateDiff& padding_above, + CoordinateDiff& padding_below) { + const auto image_dims = std::vector(std::begin(image_shape), std::end(image_shape)); + // because image_shape is fully known result of try_apply_infer_auto_padding is ignored + ov::util::try_apply_auto_padding(image_dims, + filter_shape, + filter_strides, + filter_dilations, + pad_type, + padding_above, + padding_below); +} + +bool ov::util::try_apply_auto_padding(const PartialShape& image_shape, + const Shape& filter_shape, + const Strides& filter_strides, + const Strides& filter_dilations, + const op::PadType pad_type, + CoordinateDiff& padding_above, + CoordinateDiff& padding_below) { + OPENVINO_ASSERT(pad_type == op::PadType::SAME_UPPER || pad_type == op::PadType::SAME_LOWER); + + if (image_shape.rank().is_dynamic()) { + return false; + } + const auto image_dims = static_cast>(image_shape); + for (size_t i = 0; i < static_cast(filter_shape.size()); i++) { + if (image_dims[i + 2].is_static()) { + auto image_size = static_cast(image_dims[i + 2].get_length()); + int64_t filter_size = (static_cast(filter_shape[i]) - 1) * 
filter_dilations[i] + 1; + auto filter_stride = static_cast(filter_strides[i]); + auto output_size = (image_size + filter_stride - 1) / filter_stride; + + auto padding_needed = std::max(int64_t(0), (output_size - 1) * filter_stride + filter_size - image_size); + auto padding_lhs = padding_needed / 2; + auto padding_rhs = padding_needed - padding_lhs; + padding_below.push_back(pad_type == op::PadType::SAME_UPPER ? padding_lhs : padding_rhs); + padding_above.push_back(pad_type == op::PadType::SAME_UPPER ? padding_rhs : padding_lhs); + } else { + padding_below.push_back(0); + padding_above.push_back(0); + } + } + return true; +} + +namespace { +const auto normalize_axis_to = [](const int64_t& tensor_rank) { + return [&tensor_rank](int64_t& axis) { + if (axis < 0) { + axis += tensor_rank; + } + }; +}; + +std::string normalize_axis_error_msg(const int64_t& axis, const int64_t& lower, const int64_t& upper) { + return std::string(" Parameter axis ") + .append(std::to_string(axis)) + .append(" out of the tensor rank range [") + .append(std::to_string(lower)) + .append(", ") + .append(std::to_string(upper)) + .append("]."); +} +} // namespace + +int64_t ov::util::normalize(const int64_t& value, const int64_t& max) { + return (value < 0) ? value + max : value; +}; + +void ov::normalize_axes(const Node* node, const int64_t& tensor_rank, std::vector& axes) { + const auto axis_checker = cmp::Between(-tensor_rank, tensor_rank ? (tensor_rank - 1) : 0); + const auto invalid_axis = std::find_if_not(axes.cbegin(), axes.cend(), axis_checker); + NODE_VALIDATION_CHECK(node, + invalid_axis == axes.cend(), + normalize_axis_error_msg(*invalid_axis, axis_checker.lower(), axis_checker.upper())); + std::for_each(axes.begin(), axes.end(), normalize_axis_to(tensor_rank)); +} + +std::vector ov::normalize_axes(const std::string& node_description, + const std::vector& axes, + const Rank& tensor_rank) { + std::vector new_axes; + new_axes.reserve(axes.size()); + for (const auto& axis : axes) { + new_axes.push_back(normalize_axis(node_description, axis, tensor_rank)); + } + return new_axes; +} + +int64_t ov::normalize_axis(const Node* node, std::int64_t axis, const Rank& tensor_rank) { + return normalize_axis(node->description(), axis, tensor_rank); +} + +int64_t ov::normalize_axis(const std::string& node_description, std::int64_t axis, const Rank& tensor_rank) { + if (axis < 0) { + // Handling negative axis requires static tensor rank + OPENVINO_ASSERT(tensor_rank.is_static(), + node_description, + " Rank must be static in order to normalize negative axis=", + axis); + } + if (tensor_rank.is_dynamic()) { + return axis; + } + + const auto tensor_rank_value = tensor_rank.get_length(); + return normalize_axis(node_description, + axis, + tensor_rank_value, + -tensor_rank_value, + tensor_rank_value ? (tensor_rank_value - 1) : 0); +} + +int64_t ov::normalize_axis(const Node* node, + std::int64_t axis, + std::uint64_t tensor_rank, + std::int64_t axis_range_min, + std::int64_t axis_range_max) { + return normalize_axis(node->description(), axis, tensor_rank, axis_range_min, axis_range_max); +} + +int64_t ov::normalize_axis(const std::string& node_description, + std::int64_t axis, + std::uint64_t tensor_rank, + std::int64_t axis_range_min, + std::int64_t axis_range_max) { + // Accepted range of value for axis is [axis_range_min, axis_range_max]. 
+ OPENVINO_ASSERT((axis_range_min <= axis) && (axis <= axis_range_max), + node_description, + normalize_axis_error_msg(axis, axis_range_min, axis_range_max)); + return util::normalize(axis, tensor_rank); +} + +bool ov::evaluate_as_partial_shape(const Output& output, PartialShape& pshape) { + Tensor lb, ub; + std::tie(lb, ub) = ov::evaluate_both_bounds(output); + bool shape_defined = false; + if (lb && ub) { + auto lower_bound = std::make_shared(lb.get_element_type(), lb.get_shape(), lb.data()) + ->cast_vector(); + auto upper_bound = std::make_shared(ub.get_element_type(), ub.get_shape(), ub.data()) + ->cast_vector(); + OPENVINO_ASSERT(lower_bound.size() == upper_bound.size()); + const TensorLabel& labels = output.get_tensor().get_value_label(); + OPENVINO_ASSERT(labels.empty() || lower_bound.size() == labels.size()); + + std::vector resulting_pshape(lower_bound.size()); + for (size_t i = 0; i < lower_bound.size(); ++i) { + auto low = lower_bound[i], up = upper_bound[i]; + OPENVINO_ASSERT(low >= 0 && up >= 0, "Value for partial shape evaluation can't be lower than zero."); + if (output.get_element_type() == element::i32 && low != up) { + if (up == std::numeric_limits::max()) + up = std::numeric_limits::max(); + if (low == std::numeric_limits::max()) + low = std::numeric_limits::max(); + } + resulting_pshape[i] = {low, up}; + if (!labels.empty() && labels[i]) + ov::DimensionTracker::set_label(resulting_pshape[i], labels[i]); + } + pshape = PartialShape(resulting_pshape); + shape_defined = true; + } + return shape_defined; +} + +bool ov::default_label_evaluator(const Node* node, TensorLabelVector& output_labels) { + return default_label_evaluator(node, {0}, output_labels); +} + +std::shared_ptr ov::get_constant_from_source(const Output& source) { + return ov::util::get_constant_from_source(source); +} + bool ov::has_no_labels(const ov::TensorLabel& labels) { return std::all_of(labels.cbegin(), labels.cend(), cmp::Equal(no_label)); } @@ -1381,6 +1247,130 @@ std::shared_ptr ov::util::constantfold_subgraph(const Outp return ov::as_type_ptr(outputs[subgraph_sink.get_index()].get_node_shared_ptr()); } +// +// Infers the output batch shape and element type for convolution fprop. 
+// +ov::PartialShape ov::infer_convolution_forward(const Node* node, + const PartialShape& data_batch_shape, + const Strides& data_dilation, + const CoordinateDiff& data_padding_below, + const CoordinateDiff& data_padding_above, + const PartialShape& filters_shape, + const Strides& filter_strides, + const Strides& filter_dilation) { + Rank data_batch_filters_rank{Rank::dynamic()}; + + NODE_VALIDATION_CHECK(node, + Rank::merge(data_batch_filters_rank, data_batch_shape.rank(), filters_shape.rank()), + "Data batch and filters rank do not match (data batch shape: ", + data_batch_shape, + ", filters shape: ", + filters_shape, + ")."); + + NODE_VALIDATION_CHECK(node, + data_batch_filters_rank.is_dynamic() || data_batch_filters_rank.get_length() >= 3, + "Data batch and filters must have rank of at least 3 (one batch axis, ", + "one input-channel axis, and at least one spatial dimension) ", + "(data batch shape: ", + data_batch_shape, + ", filters shape: ", + filters_shape, + ")."); + + Rank spatial_rank{Rank::dynamic()}; + NODE_VALIDATION_CHECK(node, + Rank::merge(spatial_rank, spatial_rank, data_batch_filters_rank - 2) && + Rank::merge(spatial_rank, spatial_rank, data_dilation.size()) && + Rank::merge(spatial_rank, spatial_rank, data_padding_below.size()) && + Rank::merge(spatial_rank, spatial_rank, data_padding_above.size()) && + Rank::merge(spatial_rank, spatial_rank, filter_strides.size()) && + Rank::merge(spatial_rank, spatial_rank, filter_dilation.size()), + "Ranks for data item shape/filters shape (data batch has shape ", + data_batch_shape, + ", so data item rank is ", + (data_batch_shape.rank() - 2), + " and filters have shape ", + filters_shape, + ", so filters spatial rank is ", + (filters_shape.rank() - 2), + "), data dilation (", + data_dilation, + "), padding below (", + data_padding_below, + "), padding above (", + data_padding_above, + "), filter strides (", + filter_strides, + "), and filter dilation (", + filter_dilation, + ") do not match."); + + Dimension batch_size = (data_batch_shape.rank().is_static() ? data_batch_shape[0] : Dimension::dynamic()); + Dimension data_channel_count = (data_batch_shape.rank().is_static() ? data_batch_shape[1] : Dimension::dynamic()); + PartialShape data_spatial_shape(PartialShape::dynamic(spatial_rank)); + + Dimension filter_output_channel_count = + (filters_shape.rank().is_static() ? filters_shape[0] : Dimension::dynamic()); + Dimension filter_input_channel_count = (filters_shape.rank().is_static() ? filters_shape[1] : Dimension::dynamic()); + PartialShape filter_spatial_shape(PartialShape::dynamic(spatial_rank)); + + // + // Note: spatial_rank is definitely static at this point. 
+ // + + for (int64_t i = 0; i < spatial_rank.get_length(); i++) { + if (data_batch_shape.rank().is_static()) { + data_spatial_shape[i] = data_batch_shape[i + 2]; + } + + if (filters_shape.rank().is_static()) { + filter_spatial_shape[i] = filters_shape[i + 2]; + } + } + + NODE_VALIDATION_CHECK(node, batch_size.is_dynamic() || batch_size.get_length() > 0, "Batch size is zero."); + + Dimension merged_channel_count; + + NODE_VALIDATION_CHECK(node, + Dimension::merge(merged_channel_count, data_channel_count, filter_input_channel_count), + "Data batch channel count (", + data_channel_count, + ") does not match filter input ", + "channel count (", + filter_input_channel_count, + ")."); + + NODE_VALIDATION_CHECK(node, + merged_channel_count.is_dynamic() || merged_channel_count.get_length() > 0, + "Data batch channel count and/or filter input channel count is zero."); + + NODE_VALIDATION_CHECK(node, + filter_output_channel_count.is_dynamic() || filter_output_channel_count.get_length() > 0, + "Filter output channel count is zero."); + + PartialShape data_output_shape = ngraph::infer_windowed_reduction_output_shape(node, + data_spatial_shape, + data_dilation, + data_padding_below, + data_padding_above, + filter_spatial_shape, + filter_strides, + filter_dilation, + true); + + PartialShape batch_output_shape(PartialShape::dynamic(spatial_rank + 2)); + batch_output_shape[0] = batch_size; + batch_output_shape[1] = filter_output_channel_count; + + for (int64_t i = 0; i < spatial_rank.get_length(); i++) { + batch_output_shape[i + 2] = data_output_shape[i]; + } + + return batch_output_shape; +} + namespace ov { namespace util { using ov::op::v0::Constant; diff --git a/src/inference/src/ie_core.cpp b/src/inference/src/ie_core.cpp index 97dc6382aaccea..a5babd9192768c 100644 --- a/src/inference/src/ie_core.cpp +++ b/src/inference/src/ie_core.cpp @@ -31,10 +31,6 @@ #include "ie_plugin_config.hpp" #include "ie_remote_context.hpp" #include "itt.hpp" -#include "ngraph/graph_util.hpp" -#include "ngraph/ngraph.hpp" -#include "ngraph/opsets/opset.hpp" -#include "ngraph/pass/constant_folding.hpp" #include "openvino/core/except.hpp" #include "openvino/core/so_extension.hpp" #include "openvino/op/parameter.hpp" From ba5878ed2ffe79f5a6cbc71f89308d4b06bb471f Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Wed, 11 Oct 2023 10:36:00 +0400 Subject: [PATCH 144/257] Removed np.int usage (#20378) --- tools/mo/openvino/tools/mo/front/common/partial_infer/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/mo/openvino/tools/mo/front/common/partial_infer/utils.py b/tools/mo/openvino/tools/mo/front/common/partial_infer/utils.py index ef788469ba4574..6f87eebcb41a04 100644 --- a/tools/mo/openvino/tools/mo/front/common/partial_infer/utils.py +++ b/tools/mo/openvino/tools/mo/front/common/partial_infer/utils.py @@ -296,7 +296,7 @@ def get_shape_from_slice(input_shape: np.ndarray, slices: List) -> np.ndarray: in_idx += 1 elif s is np.newaxis: output_shape.append(1) - elif type(s) in [int, np.int, np.int32, np.int64]: # shrink_axis + elif type(s) in [int, np.int32, np.int64]: # shrink_axis in_idx += 1 elif s is Ellipsis: for idx in range(num_ellipsis_inserts): From ec644b9a732d73ab266007bfb30d32d3f3be9e49 Mon Sep 17 00:00:00 2001 From: Sergey Shlyapnikov Date: Wed, 11 Oct 2023 10:47:33 +0400 Subject: [PATCH 145/257] [GPU] Fix device tensors reallocation in case of host user's tensors (#20306) --- .../intel_gpu/plugin/remote_tensor.hpp | 2 +- .../intel_gpu/src/plugin/remote_tensor.cpp | 2 +- 
.../src/plugin/sync_infer_request.cpp | 25 +++++++++++++++++-- .../functional/behavior/infer_request.cpp | 23 +++++++++++++++++ 4 files changed, 48 insertions(+), 4 deletions(-) diff --git a/src/plugins/intel_gpu/include/intel_gpu/plugin/remote_tensor.hpp b/src/plugins/intel_gpu/include/intel_gpu/plugin/remote_tensor.hpp index 939c7b89784fc9..74a07bbcbf38bf 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/plugin/remote_tensor.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/plugin/remote_tensor.hpp @@ -52,6 +52,7 @@ class RemoteTensorImpl : public ov::IRemoteTensor { bool is_allocated() const noexcept; bool is_surface() const noexcept; + bool is_shared() const noexcept; cldnn::memory::ptr get_memory() const; cldnn::memory::ptr get_original_memory() const; @@ -74,7 +75,6 @@ class RemoteTensorImpl : public ov::IRemoteTensor { uint32_t m_plane; size_t m_hash = 0; - bool is_shared() const; bool supports_caching() const; void update_strides(); void init_properties(); diff --git a/src/plugins/intel_gpu/src/plugin/remote_tensor.cpp b/src/plugins/intel_gpu/src/plugin/remote_tensor.cpp index cd164940027be7..a7c68cd8f81107 100644 --- a/src/plugins/intel_gpu/src/plugin/remote_tensor.cpp +++ b/src/plugins/intel_gpu/src/plugin/remote_tensor.cpp @@ -169,7 +169,7 @@ const std::string& RemoteTensorImpl::get_device_name() const { return m_context->get_device_name(); } -bool RemoteTensorImpl::is_shared() const { +bool RemoteTensorImpl::is_shared() const noexcept { return m_mem_type == TensorType::BT_BUF_SHARED || m_mem_type == TensorType::BT_USM_SHARED || m_mem_type == TensorType::BT_IMG_SHARED || diff --git a/src/plugins/intel_gpu/src/plugin/sync_infer_request.cpp b/src/plugins/intel_gpu/src/plugin/sync_infer_request.cpp index 5e564f3b9a3ec5..6e9e8bbf353803 100644 --- a/src/plugins/intel_gpu/src/plugin/sync_infer_request.cpp +++ b/src/plugins/intel_gpu/src/plugin/sync_infer_request.cpp @@ -270,10 +270,31 @@ void SyncInferRequest::set_tensor(const ov::Output& port, const bool is_input = ov::op::util::is_parameter(port.get_node()); + auto update_tensors_maps = [](const std::string& name, + std::unordered_map& user_tensors, + std::unordered_map& plugin_tensors, + const ov::SoPtr& tensor) { + auto current_tensor_owner = user_tensors[name].owner; + auto is_same_tensor = user_tensors[name].ptr == tensor._ptr; + + // Keep PLUGIN as a tensor owner if current user's tensor owner is PLUGIN and underlying tensor pointer is not changed + auto new_tensor_owner = current_tensor_owner == TensorOwner::PLUGIN && is_same_tensor ? 
TensorOwner::PLUGIN + : TensorOwner::USER; + + user_tensors[name] = { tensor._ptr, new_tensor_owner }; + + // We need to properly handle PLUGIN -> USER ownership change to prevent invalid PLUGIN's ush_host buffer sharing, + // so remove plugin's tensor to reallocate it in prepare_input() mehtod + if (current_tensor_owner == TensorOwner::PLUGIN && new_tensor_owner == TensorOwner::USER) { + if (plugin_tensors.count(name) && std::dynamic_pointer_cast(plugin_tensors[name].ptr)->is_shared()) + plugin_tensors.erase(plugin_tensors.find(name)); + } + }; + if (is_input) { - m_user_inputs[name] = { tensor._ptr, TensorOwner::USER }; + update_tensors_maps(name, m_user_inputs, m_plugin_inputs, tensor); } else { - m_user_outputs[name] = { tensor._ptr, TensorOwner::USER }; + update_tensors_maps(name, m_user_outputs, m_plugin_outputs, tensor); } ov::ISyncInferRequest::set_tensor(port, tensor); diff --git a/src/plugins/intel_gpu/tests/functional/behavior/infer_request.cpp b/src/plugins/intel_gpu/tests/functional/behavior/infer_request.cpp index 1d64aa74232d7c..af0229d5e81d8b 100644 --- a/src/plugins/intel_gpu/tests/functional/behavior/infer_request.cpp +++ b/src/plugins/intel_gpu/tests/functional/behavior/infer_request.cpp @@ -199,3 +199,26 @@ TEST(TensorTest, smoke_canSetTensorForDynamicInput) { ASSERT_NO_THROW(inf_req.set_input_tensor(t2)); ASSERT_NO_THROW(inf_req.infer()); } + +TEST(TensorTest, smoke_canReallocateDeviceInputForHostTensor) { + auto ov = ov::Core(); + using namespace ov::preprocess; + auto p = PrePostProcessor(ngraph::builder::subgraph::makeSplitMultiConvConcat()); + p.input().tensor().set_element_type(ov::element::i8); + p.input().preprocess().convert_element_type(ov::element::f32); + auto function = p.build(); + + auto compiled_model = ov.compile_model(function, ov::test::utils::DEVICE_GPU); + auto inf_req = compiled_model.create_infer_request(); + + auto input = function->input(); + ov::Tensor host_tensor(input.get_element_type(), input.get_shape()); + + // Infer with pre-allocated input tensor + ASSERT_NO_THROW(inf_req.infer()); + + // Infer with host_tensor + ASSERT_NO_THROW(inf_req.set_input_tensor(host_tensor)); + ASSERT_NO_THROW(inf_req.infer()); +} + From 346893fe6fe4c4c85e8447a96d957e31611e74d9 Mon Sep 17 00:00:00 2001 From: Ilya Churaev Date: Wed, 11 Oct 2023 11:43:27 +0400 Subject: [PATCH 146/257] Move cpu subgraph to new api (#20335) * Move ConvEltwiseFuse to new API * Move subgraph tests till LSTM to new API * Fixed GPU initialization * Remove unsupported GNA tests --- .../subgraph_tests/conv_strides_opt.cpp | 40 +++++----- .../convert_pad_to_group_conv.cpp | 67 +++++++++-------- .../get_output_before_activation.cpp | 44 +++++------ .../matmul_const_transposes_extraction.cpp | 40 +++++----- .../subgraph_tests/matmul_multiply_fusion.cpp | 52 ++++++------- .../subgraph_tests/matmul_squeeze_add.cpp | 57 +++++---------- .../get_output_before_activation.cpp | 26 +++---- .../subgraph_tests/matmul_squeeze_add.cpp | 12 ++- .../get_output_before_activation.cpp | 40 +++++----- .../subgraph_tests/matmul_squeeze_add.cpp | 57 +++++---------- .../subgraph_tests/conv_strides_opt.hpp | 9 ++- .../convert_pad_to_group_conv.hpp | 9 ++- .../get_output_before_activation.hpp | 8 +- .../matmul_const_transposes_extraction.hpp | 11 ++- .../subgraph_tests/matmul_multiply_fusion.hpp | 12 ++- .../subgraph_tests/matmul_squeeze_add.hpp | 8 +- .../subgraph/conv_eltwise_fusion.hpp | 1 - .../subgraph/conv_strides_opt.hpp | 32 ++++---- .../subgraph/convert_pad_to_group_conv.hpp | 44 +++++------ 
.../subgraph/get_output_before_activation.hpp | 37 +++++----- .../matmul_const_transposes_extraction.hpp | 38 +++++----- .../subgraph/matmul_multiply_fusion.hpp | 42 +++++------ .../subgraph/matmul_squeeze_add.hpp | 26 +++---- .../src/subgraph/conv_eltwise_fusion.cpp | 2 + .../src/subgraph/conv_strides_opt.cpp | 22 +++--- .../subgraph/convert_pad_to_group_conv.cpp | 33 +++++---- .../subgraph/get_output_before_activation.cpp | 52 +++++++------ .../matmul_const_transposes_extraction.cpp | 70 ++++++++++-------- .../src/subgraph/matmul_multiply_fusion.cpp | 73 ++++++++++--------- .../src/subgraph/matmul_squeeze_add.cpp | 57 +++++++++------ 30 files changed, 515 insertions(+), 506 deletions(-) diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/conv_strides_opt.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/conv_strides_opt.cpp index 6cedcfbbf88241..f33b57d639138f 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/conv_strides_opt.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/conv_strides_opt.cpp @@ -2,28 +2,30 @@ // SPDX-License-Identifier: Apache-2.0 // -#include - #include "subgraph_tests/conv_strides_opt.hpp" -#include "common_test_utils/test_constants.hpp" -using namespace SubgraphTestsDefinitions; +#include + +using namespace ov::test; namespace { - std::vector input_shapes{ - ngraph::Shape{1, 1, 4, 4}, - ngraph::Shape{1, 64, 56, 56}, - }; - std::vector pads{ - ngraph::op::PadType::SAME_UPPER, - ngraph::op::PadType::SAME_LOWER, - ngraph::op::PadType::EXPLICIT, - }; - INSTANTIATE_TEST_SUITE_P(smoke_Convolution_StridesOpt, ConvStridesOpt, - ::testing::Combine( - ::testing::ValuesIn(input_shapes), - ::testing::ValuesIn(pads), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ConvStridesOpt::getTestCaseName); + +std::vector input_shapes{ + ov::Shape{1, 1, 4, 4}, + ov::Shape{1, 64, 56, 56}, +}; + +std::vector pads{ + ov::op::PadType::SAME_UPPER, + ov::op::PadType::SAME_LOWER, + ov::op::PadType::EXPLICIT, +}; + +INSTANTIATE_TEST_SUITE_P(smoke_Convolution_StridesOpt, + ConvStridesOpt, + ::testing::Combine(::testing::ValuesIn(input_shapes), + ::testing::ValuesIn(pads), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ConvStridesOpt::getTestCaseName); } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/convert_pad_to_group_conv.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/convert_pad_to_group_conv.cpp index 627094f4e717ad..d23a35201b9db7 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/convert_pad_to_group_conv.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/convert_pad_to_group_conv.cpp @@ -2,42 +2,41 @@ // SPDX-License-Identifier: Apache-2.0 // -#include - #include "subgraph_tests/convert_pad_to_group_conv.hpp" -#include "common_test_utils/test_constants.hpp" -using namespace SubgraphTestsDefinitions; +#include + +using namespace ov::test; namespace { - const std::vector> pads_1d{ - {0, 0, 0}, {0, 0, 1}, {0, 2, 0}, {3, 0, 0} - }; - - const std::vector values{0., 1.}; - - INSTANTIATE_TEST_SUITE_P(smoke_Pad_1D, ConvertPadToConvTests, - ::testing::Combine( - ::testing::Values(ngraph::Shape{1, 8, 64}), - ::testing::ValuesIn(pads_1d), - ::testing::ValuesIn(pads_1d), - ::testing::ValuesIn(values), - ::testing::Values(ngraph::op::PadMode::CONSTANT), - 
::testing::Values(ov::test::utils::DEVICE_CPU)), - ConvertPadToConvTests::getTestCaseName); - - const std::vector> pads_2d{ - {0, 0, 0, 0}, {0, 0, 1, 2}, {0, 0, 2, 1}, - {0, 0, 10, 10}, {0, 0, 0, 4}, {0, 0, 4, 0} - }; - - INSTANTIATE_TEST_SUITE_P(smoke_Pad_2D, ConvertPadToConvTests, - ::testing::Combine( - ::testing::Values(ngraph::Shape{1, 8, 64, 16}), - ::testing::ValuesIn(pads_2d), - ::testing::ValuesIn(pads_2d), - ::testing::ValuesIn(values), - ::testing::Values(ngraph::op::PadMode::CONSTANT), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ConvertPadToConvTests::getTestCaseName); +const std::vector> pads_1d{{0, 0, 0}, {0, 0, 1}, {0, 2, 0}, {3, 0, 0}}; + +const std::vector values{0., 1.}; + +INSTANTIATE_TEST_SUITE_P(smoke_Pad_1D, + ConvertPadToConvTests, + ::testing::Combine(::testing::Values(ov::Shape{1, 8, 64}), + ::testing::ValuesIn(pads_1d), + ::testing::ValuesIn(pads_1d), + ::testing::ValuesIn(values), + ::testing::Values(ov::op::PadMode::CONSTANT), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ConvertPadToConvTests::getTestCaseName); + +const std::vector> pads_2d{{0, 0, 0, 0}, + {0, 0, 1, 2}, + {0, 0, 2, 1}, + {0, 0, 10, 10}, + {0, 0, 0, 4}, + {0, 0, 4, 0}}; + +INSTANTIATE_TEST_SUITE_P(smoke_Pad_2D, + ConvertPadToConvTests, + ::testing::Combine(::testing::Values(ov::Shape{1, 8, 64, 16}), + ::testing::ValuesIn(pads_2d), + ::testing::ValuesIn(pads_2d), + ::testing::ValuesIn(values), + ::testing::Values(ov::op::PadMode::CONSTANT), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ConvertPadToConvTests::getTestCaseName); } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/get_output_before_activation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/get_output_before_activation.cpp index ca731518e9feea..4b9feeed52d5b3 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/get_output_before_activation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/get_output_before_activation.cpp @@ -2,33 +2,27 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include "common_test_utils/test_constants.hpp" +#include "subgraph_tests/get_output_before_activation.hpp" + +namespace ov { +namespace test { -namespace SubgraphTestsDefinitions { namespace { - std::vector input_sizes = { - 80, - 32, - 64, - 100 - }; +std::vector input_sizes = {80, 32, 64, 100}; + +std::vector midLayerTypes{midOutputType::Mul, midOutputType::Sub, midOutputType::Sum}; - std::vector midLayerTypes { - midOutputType::Mul, - midOutputType::Sub, - midOutputType::Sum - }; +ov::AnyMap additional_config = {}; +} // namespace - std::map additional_config = {}; -} // namespace +INSTANTIATE_TEST_SUITE_P(OutputBeforeActivation, + OutputBeforeActivation, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::Values(ov::element::f32), + ::testing::ValuesIn(input_sizes), + ::testing::ValuesIn(midLayerTypes), + ::testing::Values(additional_config)), + OutputBeforeActivation::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(OutputBeforeActivation, OutputBeforeActivation, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::Values(InferenceEngine::Precision::FP32), - ::testing::ValuesIn(input_sizes), - ::testing::ValuesIn(midLayerTypes), - ::testing::Values(additional_config)), - OutputBeforeActivation::getTestCaseName); -} // namespace SubgraphTestsDefinitions +} // namespace test +} // namespace ov diff --git 
a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/matmul_const_transposes_extraction.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/matmul_const_transposes_extraction.cpp index 41ef64232302f2..1851fd30789f05 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/matmul_const_transposes_extraction.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/matmul_const_transposes_extraction.cpp @@ -4,7 +4,7 @@ #include "subgraph_tests/matmul_const_transposes_extraction.hpp" -using namespace SubgraphTestsDefinitions; +using namespace ov::test; namespace { std::vector shape_params = { @@ -21,12 +21,12 @@ std::vector shape_params = { {{2, 3, 5, 10}, {1, 1, 10, 1}, false}, }; -INSTANTIATE_TEST_SUITE_P(smoke_MatMulConstTransposesExtractionTest, MatMulConstTransposesExtractionTest, - ::testing::Combine( - ::testing::ValuesIn(shape_params), - ::testing::Values(true), // can be fused - ::testing::Values(ov::test::utils::DEVICE_CPU)), - MatMulConstTransposesExtractionTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_MatMulConstTransposesExtractionTest, + MatMulConstTransposesExtractionTest, + ::testing::Combine(::testing::ValuesIn(shape_params), + ::testing::Values(true), // can be fused + ::testing::Values(ov::test::utils::DEVICE_CPU)), + MatMulConstTransposesExtractionTest::getTestCaseName); std::vector negative_shape_params = { {{5}, {5}, false}, @@ -46,12 +46,12 @@ std::vector negative_shape_param {{2, 3, 5, 10}, {2, 3, 10, 7}, false}, }; -INSTANTIATE_TEST_SUITE_P(smoke_NegativeMatMulConstTransposesExtractionTest, MatMulConstTransposesExtractionTest, - ::testing::Combine( - ::testing::ValuesIn(negative_shape_params), - ::testing::Values(false), // cannot be fused - ::testing::Values(ov::test::utils::DEVICE_CPU)), - MatMulConstTransposesExtractionTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_NegativeMatMulConstTransposesExtractionTest, + MatMulConstTransposesExtractionTest, + ::testing::Combine(::testing::ValuesIn(negative_shape_params), + ::testing::Values(false), // cannot be fused + ::testing::Values(ov::test::utils::DEVICE_CPU)), + MatMulConstTransposesExtractionTest::getTestCaseName); std::vector shape_params2 = { {{2, 2}, {2, 2}, false}, @@ -63,11 +63,11 @@ std::vector shape_params2 = { {{2, 3, 5, 10}, {1, 1, 10, 7}, false}, }; -INSTANTIATE_TEST_SUITE_P(smoke_QuantizedMatMulConstTransposesExtractionTest, QuantizedMatMulConstTransposesExtractionTest, - ::testing::Combine( - ::testing::ValuesIn(shape_params2), - ::testing::Values(true), // can be fused - ::testing::Values(ov::test::utils::DEVICE_CPU)), - QuantizedMatMulConstTransposesExtractionTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_QuantizedMatMulConstTransposesExtractionTest, + QuantizedMatMulConstTransposesExtractionTest, + ::testing::Combine(::testing::ValuesIn(shape_params2), + ::testing::Values(true), // can be fused + ::testing::Values(ov::test::utils::DEVICE_CPU)), + QuantizedMatMulConstTransposesExtractionTest::getTestCaseName); -} // namespace +} // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/matmul_multiply_fusion.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/matmul_multiply_fusion.cpp index 1f10491ad5bf90..026a7595ed9381 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/matmul_multiply_fusion.cpp +++ 
b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/matmul_multiply_fusion.cpp @@ -4,7 +4,7 @@ #include "subgraph_tests/matmul_multiply_fusion.hpp" -using namespace SubgraphTestsDefinitions; +using namespace ov::test; namespace { std::vector shape_params = { @@ -67,12 +67,12 @@ std::vector shape_params = { {{2, 3, 5, 10}, {2, 3, 7, 10}, true, {2, 3, 1, 7}}, }; -INSTANTIATE_TEST_SUITE_P(smoke_MatMulMultiplyFusion, MatMulMultiplyFusion, - ::testing::Combine( - ::testing::ValuesIn(shape_params), - ::testing::Values(true), // can be fused - ::testing::Values(ov::test::utils::DEVICE_CPU)), - MatMulMultiplyFusion::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_MatMulMultiplyFusion, + MatMulMultiplyFusion, + ::testing::Combine(::testing::ValuesIn(shape_params), + ::testing::Values(true), // can be fused + ::testing::Values(ov::test::utils::DEVICE_CPU)), + MatMulMultiplyFusion::getTestCaseName); std::vector negative_shape_params = { {{5}, {5}, false, {1}}, @@ -108,12 +108,12 @@ std::vector negative_shape_params = { {{2, 3, 5, 10}, {2, 3, 10, 7}, false, {1, 1, 1, 1, 7}}, }; -INSTANTIATE_TEST_SUITE_P(smoke_NegativeMatMulMultiplyFusion, MatMulMultiplyFusion, - ::testing::Combine( - ::testing::ValuesIn(negative_shape_params), - ::testing::Values(false), // cannot be fused - ::testing::Values(ov::test::utils::DEVICE_CPU)), - MatMulMultiplyFusion::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_NegativeMatMulMultiplyFusion, + MatMulMultiplyFusion, + ::testing::Combine(::testing::ValuesIn(negative_shape_params), + ::testing::Values(false), // cannot be fused + ::testing::Values(ov::test::utils::DEVICE_CPU)), + MatMulMultiplyFusion::getTestCaseName); std::vector shape_params2 = { {{2, 2}, {2, 2}, false, {}}, @@ -158,12 +158,12 @@ std::vector shape_params2 = { {{2, 3, 5, 10}, {2, 3, 7, 10}, true, {2, 3, 1, 7}}, }; -INSTANTIATE_TEST_SUITE_P(smoke_QuantizedMatMulMultiplyFusion, QuantizedMatMulMultiplyFusion, - ::testing::Combine( - ::testing::ValuesIn(shape_params2), - ::testing::Values(true), // can be fused - ::testing::Values(ov::test::utils::DEVICE_CPU)), - QuantizedMatMulMultiplyFusion::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_QuantizedMatMulMultiplyFusion, + QuantizedMatMulMultiplyFusion, + ::testing::Combine(::testing::ValuesIn(shape_params2), + ::testing::Values(true), // can be fused + ::testing::Values(ov::test::utils::DEVICE_CPU)), + QuantizedMatMulMultiplyFusion::getTestCaseName); std::vector negative_shape_params2 = { {{2, 2}, {2, 2}, false, {2, 2}}, @@ -198,11 +198,11 @@ std::vector negative_shape_params2 = { {{2, 3, 5, 10}, {3, 7, 10}, true, {2, 3, 5, 7}}, }; -INSTANTIATE_TEST_SUITE_P(smoke_NegativeQuantizedMatMulMultiplyFusion, QuantizedMatMulMultiplyFusion, - ::testing::Combine( - ::testing::ValuesIn(negative_shape_params2), - ::testing::Values(false), // cannot be fused - ::testing::Values(ov::test::utils::DEVICE_CPU)), - QuantizedMatMulMultiplyFusion::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_NegativeQuantizedMatMulMultiplyFusion, + QuantizedMatMulMultiplyFusion, + ::testing::Combine(::testing::ValuesIn(negative_shape_params2), + ::testing::Values(false), // cannot be fused + ::testing::Values(ov::test::utils::DEVICE_CPU)), + QuantizedMatMulMultiplyFusion::getTestCaseName); -} // namespace +} // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/matmul_squeeze_add.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/matmul_squeeze_add.cpp index 
db6bdc22104958..97319465428a5c 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/matmul_squeeze_add.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/matmul_squeeze_add.cpp @@ -2,46 +2,27 @@ // SPDX-License-Identifier: Apache-2.0 // -#include - -#include "common_test_utils/test_constants.hpp" #include "subgraph_tests/matmul_squeeze_add.hpp" -using namespace SubgraphTestsDefinitions; +#include + +using namespace ov::test; namespace { -const std::vector netPrecisions = { - InferenceEngine::Precision::FP32, - InferenceEngine::Precision::FP16 -}; - -const std::vector> configs = { - { } -}; - -std::vector> input_shapes = { - {1, 8}, - {1, 42}, - {1, 100}, - {1, 128}, - {1, 512} -}; - -std::vector output_sizes = { - 1000, - 512, - 128, - 42, - 16, - 8 -}; - -INSTANTIATE_TEST_SUITE_P(smoke_MatmulSqueezeAdd, MatmulSqueezeAddTest, - ::testing::Combine( - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(configs), - ::testing::ValuesIn(input_shapes), - ::testing::ValuesIn(output_sizes)), - MatmulSqueezeAddTest::getTestCaseName); +const std::vector netPrecisions = {ov::element::f32, ov::element::f16}; + +const std::vector configs = {{}}; + +std::vector input_shapes = {{1, 8}, {1, 42}, {1, 100}, {1, 128}, {1, 512}}; + +std::vector output_sizes = {1000, 512, 128, 42, 16, 8}; + +INSTANTIATE_TEST_SUITE_P(smoke_MatmulSqueezeAdd, + MatmulSqueezeAddTest, + ::testing::Combine(::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::ValuesIn(configs), + ::testing::ValuesIn(input_shapes), + ::testing::ValuesIn(output_sizes)), + MatmulSqueezeAddTest::getTestCaseName); } // namespace diff --git a/src/plugins/intel_gna/tests/functional/shared_tests_instances/subgraph_tests/get_output_before_activation.cpp b/src/plugins/intel_gna/tests/functional/shared_tests_instances/subgraph_tests/get_output_before_activation.cpp index ef6c7eb8257725..07cbd0dd905afc 100644 --- a/src/plugins/intel_gna/tests/functional/shared_tests_instances/subgraph_tests/get_output_before_activation.cpp +++ b/src/plugins/intel_gna/tests/functional/shared_tests_instances/subgraph_tests/get_output_before_activation.cpp @@ -2,31 +2,31 @@ // SPDX-License-Identifier: Apache-2.0 // -#include +#include "subgraph_tests/get_output_before_activation.hpp" -#include "common_test_utils/test_constants.hpp" - -namespace SubgraphTestsDefinitions { +namespace ov { +namespace test { namespace { std::vector input_sizes = {80, 32, 64, 100}; std::vector midLayerTypes{midOutputType::Mul, midOutputType::Sub, midOutputType::Sum}; -std::vector> configs = {{ - {"GNA_COMPACT_MODE", "NO"}, - {"GNA_DEVICE_MODE", "GNA_SW_EXACT"}, - {"GNA_SCALE_FACTOR_0", "1638.4"}, - {"GNA_SCALE_FACTOR_1", "1638.4"}, - }, - {{"GNA_DEVICE_MODE", "GNA_SW_FP32"}}}; +std::vector configs = {{ + {"GNA_COMPACT_MODE", "NO"}, + {"GNA_DEVICE_MODE", "GNA_SW_EXACT"}, + {"GNA_SCALE_FACTOR_0", "1638.4"}, + {"GNA_SCALE_FACTOR_1", "1638.4"}, + }, + {{"GNA_DEVICE_MODE", "GNA_SW_FP32"}}}; } // namespace INSTANTIATE_TEST_SUITE_P(OutputBeforeActivation, OutputBeforeActivation, ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_GNA), - ::testing::Values(InferenceEngine::Precision::FP32), + ::testing::Values(ov::element::f32), ::testing::ValuesIn(input_sizes), ::testing::ValuesIn(midLayerTypes), ::testing::ValuesIn(configs)), OutputBeforeActivation::getTestCaseName); -} // namespace SubgraphTestsDefinitions +} // namespace test 
+} // namespace ov diff --git a/src/plugins/intel_gna/tests/functional/shared_tests_instances/subgraph_tests/matmul_squeeze_add.cpp b/src/plugins/intel_gna/tests/functional/shared_tests_instances/subgraph_tests/matmul_squeeze_add.cpp index b5ece0ac7f71b6..aafc875bc5866d 100644 --- a/src/plugins/intel_gna/tests/functional/shared_tests_instances/subgraph_tests/matmul_squeeze_add.cpp +++ b/src/plugins/intel_gna/tests/functional/shared_tests_instances/subgraph_tests/matmul_squeeze_add.cpp @@ -8,17 +8,15 @@ #include "common_test_utils/test_constants.hpp" -using namespace SubgraphTestsDefinitions; +using namespace ov::test; namespace { -const std::vector netPrecisions = {InferenceEngine::Precision::FP32, - InferenceEngine::Precision::FP16}; +const std::vector netPrecisions = {ov::element::f32}; -const std::vector> configs = { - {{"GNA_DEVICE_MODE", "GNA_SW_EXACT"}, {"GNA_SCALE_FACTOR_0", "81.9175"}}, - {{"GNA_DEVICE_MODE", "GNA_SW_FP32"}}}; +const std::vector configs = {{{"GNA_DEVICE_MODE", "GNA_SW_EXACT"}, {"GNA_SCALE_FACTOR_0", "81.9175"}}, + {{"GNA_DEVICE_MODE", "GNA_SW_FP32"}}}; -std::vector> input_shapes = {{1, 8}, {1, 42}, {1, 100}, {1, 128}, {1, 512}}; +std::vector input_shapes = {{1, 8}, {1, 42}, {1, 100}, {1, 128}, {1, 512}}; std::vector output_sizes = {1000, 512, 128, 42, 16, 8}; diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/get_output_before_activation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/get_output_before_activation.cpp index 1f3ae389ce87ef..f96f89f19124e8 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/get_output_before_activation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/get_output_before_activation.cpp @@ -3,32 +3,26 @@ // #include + #include "common_test_utils/test_constants.hpp" -namespace SubgraphTestsDefinitions { +namespace ov { +namespace test { namespace { - std::vector input_sizes = { - 80, - 32, - 64, - 100 - }; +std::vector input_sizes = {80, 32, 64, 100}; - std::vector midLayerTypes { - midOutputType::Mul, - midOutputType::Sub, - midOutputType::Sum - }; +std::vector midLayerTypes{midOutputType::Mul, midOutputType::Sub, midOutputType::Sum}; - std::map additional_config = {}; -} // namespace +ov::AnyMap additional_config = {}; +} // namespace -INSTANTIATE_TEST_SUITE_P(OutputBeforeActivation, OutputBeforeActivation, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_GPU), - ::testing::Values(InferenceEngine::Precision::FP32), - ::testing::ValuesIn(input_sizes), - ::testing::ValuesIn(midLayerTypes), - ::testing::Values(additional_config)), - OutputBeforeActivation::getTestCaseName); -} // namespace SubgraphTestsDefinitions +INSTANTIATE_TEST_SUITE_P(OutputBeforeActivation, + OutputBeforeActivation, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_GPU), + ::testing::Values(ov::element::f32), + ::testing::ValuesIn(input_sizes), + ::testing::ValuesIn(midLayerTypes), + ::testing::Values(additional_config)), + OutputBeforeActivation::getTestCaseName); +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/matmul_squeeze_add.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/matmul_squeeze_add.cpp index 04a19a95a61ba7..bfeed0cab84a30 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/matmul_squeeze_add.cpp +++ 
b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/matmul_squeeze_add.cpp @@ -2,46 +2,27 @@ // SPDX-License-Identifier: Apache-2.0 // -#include - -#include "common_test_utils/test_constants.hpp" #include "subgraph_tests/matmul_squeeze_add.hpp" -using namespace SubgraphTestsDefinitions; +#include + +using namespace ov::test; namespace { -const std::vector netPrecisions = { - InferenceEngine::Precision::FP32, - InferenceEngine::Precision::FP16 -}; - -const std::vector> configs = { - { } -}; - -std::vector> input_shapes = { - {1, 8}, - {1, 42}, - {1, 100}, - {1, 128}, - {1, 512} -}; - -std::vector output_sizes = { - 1000, - 512, - 128, - 42, - 16, - 8 -}; - -INSTANTIATE_TEST_SUITE_P(MatmulSqueezeAdd, MatmulSqueezeAddTest, - ::testing::Combine( - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_GPU), - ::testing::ValuesIn(configs), - ::testing::ValuesIn(input_shapes), - ::testing::ValuesIn(output_sizes)), - MatmulSqueezeAddTest::getTestCaseName); +const std::vector netPrecisions = {ov::element::f32, ov::element::f16}; + +const std::vector configs = {{}}; + +std::vector input_shapes = {{1, 8}, {1, 42}, {1, 100}, {1, 128}, {1, 512}}; + +std::vector output_sizes = {1000, 512, 128, 42, 16, 8}; + +INSTANTIATE_TEST_SUITE_P(MatmulSqueezeAdd, + MatmulSqueezeAddTest, + ::testing::Combine(::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::test::utils::DEVICE_GPU), + ::testing::ValuesIn(configs), + ::testing::ValuesIn(input_shapes), + ::testing::ValuesIn(output_sizes)), + MatmulSqueezeAddTest::getTestCaseName); } // namespace diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/conv_strides_opt.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/conv_strides_opt.hpp index 67ae6cda1320aa..9d46654ebd743e 100644 --- a/src/tests/functional/plugin/shared/include/subgraph_tests/conv_strides_opt.hpp +++ b/src/tests/functional/plugin/shared/include/subgraph_tests/conv_strides_opt.hpp @@ -6,9 +6,12 @@ #include "shared_test_classes/subgraph/conv_strides_opt.hpp" -namespace SubgraphTestsDefinitions { +namespace ov { +namespace test { TEST_P(ConvStridesOpt, CompareWithRefs) { - Run(); + run(); } -} // namespace SubgraphTestsDefinitions + +} // namespace test +} // namespace ov diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/convert_pad_to_group_conv.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/convert_pad_to_group_conv.hpp index 4d9ce3770aba9a..8547d6b17436a4 100644 --- a/src/tests/functional/plugin/shared/include/subgraph_tests/convert_pad_to_group_conv.hpp +++ b/src/tests/functional/plugin/shared/include/subgraph_tests/convert_pad_to_group_conv.hpp @@ -6,9 +6,12 @@ #include "shared_test_classes/subgraph/convert_pad_to_group_conv.hpp" -namespace SubgraphTestsDefinitions { +namespace ov { +namespace test { TEST_P(ConvertPadToConvTests, CompareWithRefs) { - Run(); + run(); } -} // namespace SubgraphTestsDefinitions + +} // namespace test +} // namespace ov diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/get_output_before_activation.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/get_output_before_activation.hpp index 996a42e26cd2b4..eca03aab8e56ca 100644 --- a/src/tests/functional/plugin/shared/include/subgraph_tests/get_output_before_activation.hpp +++ b/src/tests/functional/plugin/shared/include/subgraph_tests/get_output_before_activation.hpp @@ -6,10 +6,12 @@ #include "shared_test_classes/subgraph/get_output_before_activation.hpp" 
-namespace SubgraphTestsDefinitions { +namespace ov { +namespace test { TEST_P(OutputBeforeActivation, CompareWithRefs) { - Run(); + run(); }; -} // namespace SubgraphTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/matmul_const_transposes_extraction.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/matmul_const_transposes_extraction.hpp index 48e0755fa65d3c..e16847f17105d5 100644 --- a/src/tests/functional/plugin/shared/include/subgraph_tests/matmul_const_transposes_extraction.hpp +++ b/src/tests/functional/plugin/shared/include/subgraph_tests/matmul_const_transposes_extraction.hpp @@ -4,18 +4,21 @@ #pragma once +#include "functional_test_utils/skip_tests_config.hpp" #include "shared_test_classes/subgraph/matmul_const_transposes_extraction.hpp" -namespace SubgraphTestsDefinitions { +namespace ov { +namespace test { TEST_P(MatMulConstTransposesExtractionTest, CompareWithRefs) { SKIP_IF_CURRENT_TEST_IS_DISABLED(); - Run(); + run(); } TEST_P(QuantizedMatMulConstTransposesExtractionTest, CompareWithRefs) { SKIP_IF_CURRENT_TEST_IS_DISABLED(); - Run(); + run(); } -} // namespace SubgraphTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/matmul_multiply_fusion.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/matmul_multiply_fusion.hpp index e2db9bee578207..77ff3497cb2e03 100644 --- a/src/tests/functional/plugin/shared/include/subgraph_tests/matmul_multiply_fusion.hpp +++ b/src/tests/functional/plugin/shared/include/subgraph_tests/matmul_multiply_fusion.hpp @@ -4,17 +4,21 @@ #pragma once +#include "functional_test_utils/skip_tests_config.hpp" #include "shared_test_classes/subgraph/matmul_multiply_fusion.hpp" -namespace SubgraphTestsDefinitions { +namespace ov { +namespace test { TEST_P(MatMulMultiplyFusion, CompareWithRefs) { SKIP_IF_CURRENT_TEST_IS_DISABLED(); - Run(); + run(); } TEST_P(QuantizedMatMulMultiplyFusion, CompareWithRefs) { SKIP_IF_CURRENT_TEST_IS_DISABLED(); - Run(); + run(); } -} // namespace SubgraphTestsDefinitions + +} // namespace test +} // namespace ov diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/matmul_squeeze_add.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/matmul_squeeze_add.hpp index b745f70da4e238..267053d695162e 100644 --- a/src/tests/functional/plugin/shared/include/subgraph_tests/matmul_squeeze_add.hpp +++ b/src/tests/functional/plugin/shared/include/subgraph_tests/matmul_squeeze_add.hpp @@ -6,10 +6,12 @@ #include "shared_test_classes/subgraph/matmul_squeeze_add.hpp" -namespace SubgraphTestsDefinitions { +namespace ov { +namespace test { TEST_P(MatmulSqueezeAddTest, CompareWithRefImpl) { - Run(); + run(); }; -} // namespace SubgraphTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/conv_eltwise_fusion.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/conv_eltwise_fusion.hpp index eff28f7d7f2574..c156ab395cfe29 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/conv_eltwise_fusion.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/conv_eltwise_fusion.hpp @@ -9,7 +9,6 @@ #include #include "ov_models/builders.hpp" -#include "shared_test_classes/base/layer_test_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" namespace ov { diff 
--git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/conv_strides_opt.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/conv_strides_opt.hpp index 9df042c72de523..ca35c527b6d32a 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/conv_strides_opt.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/conv_strides_opt.hpp @@ -4,29 +4,29 @@ #pragma once -#include #include -#include -#include "shared_test_classes/base/layer_test_utils.hpp" +#include + #include "ov_models/builders.hpp" -#include -#include +#include "shared_test_classes/base/ov_subgraph.hpp" -namespace SubgraphTestsDefinitions { +namespace ov { +namespace test { -typedef std::tuple< - ngraph::Shape, // input shape - ngraph::op::PadType, - std::string // Device name - > ConvStridesOptParams; +typedef std::tuple + ConvStridesOptParams; -class ConvStridesOpt - : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { +class ConvStridesOpt : public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseStaticTest { public: - static std::string getTestCaseName(const testing::TestParamInfo &obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); protected: void SetUp() override; }; -} // namespace SubgraphTestsDefinitions + +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/convert_pad_to_group_conv.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/convert_pad_to_group_conv.hpp index 53bcd5d850e1c5..108c0086f04b07 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/convert_pad_to_group_conv.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/convert_pad_to_group_conv.hpp @@ -4,32 +4,32 @@ #pragma once -#include #include +#include #include -#include "shared_test_classes/base/layer_test_utils.hpp" -#include "ov_models/builders.hpp" -#include -#include - -namespace SubgraphTestsDefinitions { - -typedef std::tuple< - ngraph::Shape, // input shape - std::vector, // pad_begin - std::vector, // pad_end - float, // pad_value - ngraph::op::PadMode, // pad_mode - std::string // Device name - > PadParams; - -class ConvertPadToConvTests - : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { + +#include "shared_test_classes/base/ov_subgraph.hpp" + +namespace ov { +namespace test { + +typedef std::tuple, // pad_begin + std::vector, // pad_end + float, // pad_value + ov::op::PadMode, // pad_mode + std::string // Device name + > + PadParams; + +class ConvertPadToConvTests : public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseStaticTest { public: - static std::string getTestCaseName(const testing::TestParamInfo &obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); protected: void SetUp() override; }; -} // namespace SubgraphTestsDefinitions + +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/get_output_before_activation.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/get_output_before_activation.hpp index b6241ee0a049b3..5aac351fe7d01c 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/get_output_before_activation.hpp +++ 
b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/get_output_before_activation.hpp @@ -4,33 +4,34 @@ #pragma once -#include "common_test_utils/test_common.hpp" -#include "shared_test_classes/base/layer_test_utils.hpp" -#include - -namespace SubgraphTestsDefinitions { +#include "shared_test_classes/base/ov_subgraph.hpp" +namespace ov { +namespace test { enum class midOutputType { Sum, Sub, Mul, }; -typedef std::tuple< - std::string, // Target device name - InferenceEngine::Precision, // Network precision - size_t, // Input size - midOutputType, // Type of layer that will be an output - std::map // Configuration -> outputBeforeActivationParams; +typedef std::tuple + outputBeforeActivationParams; -std::ostream& operator<< (std::ostream& os, const midOutputType& oType); +std::ostream& operator<<(std::ostream& os, const midOutputType& oType); -class OutputBeforeActivation : virtual public LayerTestsUtils::LayerTestsCommon, - public testing::WithParamInterface { +class OutputBeforeActivation : virtual public ov::test::SubgraphBaseStaticTest, + public testing::WithParamInterface { protected: void SetUp() override; + public: - static std::string getTestCaseName(const testing::TestParamInfo &obj); - InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo &info) const override; + static std::string getTestCaseName(const testing::TestParamInfo& obj); + // void generate_inputs(const std::vector& targetInputStaticShapes) override; }; -} // namespace SubgraphTestsDefinitions + +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/matmul_const_transposes_extraction.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/matmul_const_transposes_extraction.hpp index ab345a20167618..e67acee0208017 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/matmul_const_transposes_extraction.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/matmul_const_transposes_extraction.hpp @@ -4,44 +4,46 @@ #pragma once -#include #include -#include "shared_test_classes/base/layer_test_utils.hpp" -#include +#include + +#include "shared_test_classes/base/ov_subgraph.hpp" -namespace SubgraphTestsDefinitions { +namespace ov { +namespace test { struct MatMulConstTransposesExtractionTestShapeParams { - ngraph::Shape input_shape; - ngraph::Shape weights_shape; + ov::Shape input_shape; + ov::Shape weights_shape; bool trans_b; }; -typedef std::tuple< - MatMulConstTransposesExtractionTestShapeParams, - bool, // whether Mul can be fused to MatMul in this case - std::string // Device name - > MatMulConstTransposesExtractionTestParams; +typedef std::tuple + MatMulConstTransposesExtractionTestParams; class MatMulConstTransposesExtractionTest - : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { + : public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseStaticTest { public: - static std::string getTestCaseName(const testing::TestParamInfo &obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); protected: void SetUp() override; }; class QuantizedMatMulConstTransposesExtractionTest - : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { + : public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseStaticTest { public: - static std::string getTestCaseName(const testing::TestParamInfo 
&obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); protected: void SetUp() override; void TearDown() override; }; -} // namespace SubgraphTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/matmul_multiply_fusion.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/matmul_multiply_fusion.hpp index ad65d51366276b..3dd24f50746bac 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/matmul_multiply_fusion.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/matmul_multiply_fusion.hpp @@ -4,45 +4,45 @@ #pragma once -#include #include -#include "shared_test_classes/base/layer_test_utils.hpp" -#include +#include + +#include "shared_test_classes/base/ov_subgraph.hpp" -namespace SubgraphTestsDefinitions { +namespace ov { +namespace test { struct MatMulMultiplyFusionShapeParams { - ngraph::Shape input_shape; - ngraph::Shape weights_shape; + ov::Shape input_shape; + ov::Shape weights_shape; bool trans_b; - ngraph::Shape const_shape; + ov::Shape const_shape; }; -typedef std::tuple< - MatMulMultiplyFusionShapeParams, - bool, // whether Mul can be fused to MatMul in this case - std::string // Device name - > MatMulMultiplyFusionParams; +typedef std::tuple + MatMulMultiplyFusionParams; -class MatMulMultiplyFusion - : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { +class MatMulMultiplyFusion : public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseStaticTest { public: - static std::string getTestCaseName(const testing::TestParamInfo &obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); protected: void SetUp() override; }; -class QuantizedMatMulMultiplyFusion - : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { +class QuantizedMatMulMultiplyFusion : public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseStaticTest { public: - static std::string getTestCaseName(const testing::TestParamInfo &obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); protected: void SetUp() override; void TearDown() override; }; -} // namespace SubgraphTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/matmul_squeeze_add.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/matmul_squeeze_add.hpp index 2aa4039e0cd9a3..fc5270096f0d52 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/matmul_squeeze_add.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/matmul_squeeze_add.hpp @@ -9,22 +9,21 @@ #include #include -#include "shared_test_classes/base/layer_test_utils.hpp" -#include "ov_models/builders.hpp" -#include "ov_models/utils/ov_helpers.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" -namespace SubgraphTestsDefinitions { +namespace ov { +namespace test { -typedef std::tuple< - InferenceEngine::Precision, // Network Precision - std::string, // Target Device - std::map, // Configuration - std::vector, // Input Shapes - size_t // Output Size -> matmulSqueezeAddParams; +typedef std::tuple + matmulSqueezeAddParams; class MatmulSqueezeAddTest : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon 
{ + virtual public ov::test::SubgraphBaseStaticTest { public: static std::string getTestCaseName(const testing::TestParamInfo& obj); @@ -32,4 +31,5 @@ class MatmulSqueezeAddTest : public testing::WithParamInterface &obj) { +std::string ConvStridesOpt::getTestCaseName(const testing::TestParamInfo& obj) { Shape input_shape; op::PadType pad; std::string targetName; @@ -25,20 +24,21 @@ void ConvStridesOpt::SetUp() { Shape input_shape; op::PadType pad_type; std::tie(input_shape, pad_type, targetDevice) = this->GetParam(); - auto param = std::make_shared(element::f32, input_shape); + auto param = std::make_shared(element::f32, input_shape); auto C = input_shape[1]; auto weights1 = ngraph::builder::makeConstant(element::f32, {C, C, 3, 3}, {}, true); auto spatial_dims = input_shape.size() - 2; Strides strides1(spatial_dims, 1); Strides dilations(spatial_dims, 1); CoordinateDiff pad_begin1(spatial_dims, 1), pad_end1(spatial_dims, 1); - auto conv1 = std::make_shared(param, weights1, strides1, pad_begin1, pad_end1, - dilations, pad_type); + auto conv1 = + std::make_shared(param, weights1, strides1, pad_begin1, pad_end1, dilations, pad_type); auto weights2 = ngraph::builder::makeConstant(element::f32, {C, C, 1, 1}, {}, true); CoordinateDiff pad_begin2(spatial_dims, 0), pad_end2(spatial_dims, 0); Strides strides2(spatial_dims, 2); - auto conv2 = std::make_shared(conv1, weights2, strides2, pad_begin2, pad_end2, - dilations); - function = std::make_shared(OutputVector{conv2}, ParameterVector{param}); + auto conv2 = std::make_shared(conv1, weights2, strides2, pad_begin2, pad_end2, dilations); + function = std::make_shared(OutputVector{conv2}, ParameterVector{param}); } -} // namespace SubgraphTestsDefinitions + +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/src/subgraph/convert_pad_to_group_conv.cpp b/src/tests/functional/shared_test_classes/src/subgraph/convert_pad_to_group_conv.cpp index c8f96576dc3761..5db0177f8afb57 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/convert_pad_to_group_conv.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/convert_pad_to_group_conv.cpp @@ -4,13 +4,14 @@ #include "shared_test_classes/subgraph/convert_pad_to_group_conv.hpp" -namespace SubgraphTestsDefinitions { +namespace ov { +namespace test { -std::string ConvertPadToConvTests::getTestCaseName(const testing::TestParamInfo &obj) { - ngraph::Shape input_shape; +std::string ConvertPadToConvTests::getTestCaseName(const testing::TestParamInfo& obj) { + ov::Shape input_shape; std::string targetName; std::vector pad_begin, pad_end; - ngraph::op::PadMode mode; + ov::op::PadMode mode; float value; std::tie(input_shape, pad_begin, pad_end, value, mode, targetName) = obj.param; std::ostringstream results; @@ -25,20 +26,24 @@ std::string ConvertPadToConvTests::getTestCaseName(const testing::TestParamInfo< } void ConvertPadToConvTests::SetUp() { - ngraph::Shape input_shape; + ov::Shape input_shape; std::vector pad_begin, pad_end; - ngraph::op::PadMode mode; + ov::op::PadMode mode; float value; std::tie(input_shape, pad_begin, pad_end, value, mode, targetDevice) = this->GetParam(); { - auto param = std::make_shared(ngraph::element::f32, input_shape); - auto pad = std::make_shared(param, - ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{pad_begin.size()}, pad_begin), - ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{pad_end.size()}, pad_end), - ngraph::opset4::Constant::create(ngraph::element::f32, ngraph::Shape{}, 
{value}), mode); - auto relu = std::make_shared(pad); - function = std::make_shared(ngraph::OutputVector{relu}, ngraph::ParameterVector{param}, "pad"); + auto param = std::make_shared(ov::element::f32, input_shape); + auto pad = std::make_shared( + param, + ov::op::v0::Constant::create(ov::element::i64, ov::Shape{pad_begin.size()}, pad_begin), + ov::op::v0::Constant::create(ov::element::i64, ov::Shape{pad_end.size()}, pad_end), + ov::op::v0::Constant::create(ov::element::f32, ov::Shape{}, {value}), + mode); + auto relu = std::make_shared(pad); + function = std::make_shared(ov::OutputVector{relu}, ov::ParameterVector{param}, "pad"); } } -} // namespace SubgraphTestsDefinitions + +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/src/subgraph/get_output_before_activation.cpp b/src/tests/functional/shared_test_classes/src/subgraph/get_output_before_activation.cpp index 8a84303f79acfb..7a566ae58a801c 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/get_output_before_activation.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/get_output_before_activation.cpp @@ -2,10 +2,14 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ov_models/builders.hpp" #include "shared_test_classes/subgraph/get_output_before_activation.hpp" -namespace SubgraphTestsDefinitions { +#include "ov_models/builders.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" + +namespace ov { +namespace test { + std::ostream& operator<<(std::ostream& os, const midOutputType& oType) { switch (oType) { case midOutputType::Sub: @@ -21,51 +25,50 @@ std::ostream& operator<<(std::ostream& os, const midOutputType& oType) { std::string OutputBeforeActivation::getTestCaseName(const testing::TestParamInfo& obj) { std::string targetDevice; - InferenceEngine::Precision netPrecision; + ov::element::Type element_type; size_t inputSize; midOutputType outputType; - std::map config; - std::tie(targetDevice, netPrecision, inputSize, outputType, config) = obj.param; + ov::AnyMap config; + std::tie(targetDevice, element_type, inputSize, outputType, config) = obj.param; std::ostringstream result; - result << "netPrecision=" << netPrecision.name() << "_"; + result << "InputType=" << element_type << "_"; result << "IS=" << inputSize << "_"; result << "OutputType=" << outputType << "_"; result << "targetDevice=" << targetDevice; for (auto const& configItem : config) { - result << "_configItem=" << configItem.first << "_" << configItem.second; + result << "_configItem=" << configItem.first << "_" << configItem.second.as(); } return result.str(); } void OutputBeforeActivation::SetUp() { - InferenceEngine::Precision netPrecision; - std::map config; + ov::element::Type element_type; + ov::AnyMap config; size_t inputSize; midOutputType outputType; - std::tie(targetDevice, netPrecision, inputSize, outputType, config) = this->GetParam(); + std::tie(targetDevice, element_type, inputSize, outputType, config) = this->GetParam(); configuration.insert(config.begin(), config.end()); - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - std::vector input_dims { 1, inputSize }; + std::vector input_dims{1, inputSize}; - ov::ParameterVector input_parameter {std::make_shared(ngPrc, ov::Shape(input_dims)), - std::make_shared(ngPrc, ov::Shape(input_dims))}; + ov::ParameterVector input_parameter{std::make_shared(element_type, ov::Shape(input_dims)), + std::make_shared(element_type, ov::Shape(input_dims))}; auto input0 = input_parameter[0]; auto input1 = 
input_parameter[1]; ngraph::OutputVector outputs; std::shared_ptr midLayer; switch (outputType) { - case SubgraphTestsDefinitions::midOutputType::Sum: { + case ov::test::midOutputType::Sum: { midLayer = ngraph::builder::makeEltwise(input0, input1, ngraph::helpers::EltwiseTypes::ADD); break; } - case SubgraphTestsDefinitions::midOutputType::Sub: { + case ov::test::midOutputType::Sub: { midLayer = ngraph::builder::makeEltwise(input0, input1, ngraph::helpers::EltwiseTypes::SUBTRACT); break; } - case SubgraphTestsDefinitions::midOutputType::Mul: { + case ov::test::midOutputType::Mul: { midLayer = ngraph::builder::makeEltwise(input0, input1, ngraph::helpers::EltwiseTypes::MULTIPLY); break; } @@ -73,12 +76,17 @@ void OutputBeforeActivation::SetUp() { GTEST_FAIL() << "Unknown midOutputType"; } - auto act = ngraph::builder::makeActivation(midLayer, ngPrc, ngraph::helpers::ActivationTypes::Tanh); + auto act = ngraph::builder::makeActivation(midLayer, element_type, ngraph::helpers::ActivationTypes::Tanh); outputs.insert(outputs.end(), {midLayer, act}); function = std::make_shared(outputs, input_parameter, "output_before_activation"); } -InferenceEngine::Blob::Ptr OutputBeforeActivation::GenerateInput(const InferenceEngine::InputInfo &info) const { - return FuncTestUtils::createAndFillBlob(info.getTensorDesc(), 2, -1, 100); -} -} // namespace SubgraphTestsDefinitions +// void OutputBeforeActivation::generate_inputs(const std::vector& targetInputStaticShapes) { +// ov::test::SubgraphBaseTest::generate_inputs(targetInputStaticShapes); +// } +// InferenceEngine::Blob::Ptr OutputBeforeActivation::GenerateInput(const InferenceEngine::InputInfo& info) const { +// return FuncTestUtils::createAndFillBlob(info.getTensorDesc(), 2, -1, 100); +// } + +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/src/subgraph/matmul_const_transposes_extraction.cpp b/src/tests/functional/shared_test_classes/src/subgraph/matmul_const_transposes_extraction.cpp index 59e5d4e397df17..05e434a1307b15 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/matmul_const_transposes_extraction.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/matmul_const_transposes_extraction.cpp @@ -3,15 +3,19 @@ // #include "transformations/common_optimizations/matmul_const_transposes_extraction.hpp" -#include "shared_test_classes/subgraph/matmul_const_transposes_extraction.hpp" -#include "ov_models/builders.hpp" -#include -namespace SubgraphTestsDefinitions { +#include "common_test_utils/graph_comparator.hpp" +#include "functional_test_utils/skip_tests_config.hpp" +#include "openvino/pass/manager.hpp" +#include "openvino/runtime/exec_model_info.hpp" +#include "ov_models/builders.hpp" +#include "shared_test_classes/subgraph/matmul_const_transposes_extraction.hpp" -using namespace ngraph; +namespace ov { +namespace test { -std::string MatMulConstTransposesExtractionTest::getTestCaseName(const testing::TestParamInfo &obj) { +std::string MatMulConstTransposesExtractionTest::getTestCaseName( + const testing::TestParamInfo& obj) { MatMulConstTransposesExtractionTestShapeParams shape_params; std::string device; std::tie(shape_params, std::ignore, device) = obj.param; @@ -33,18 +37,18 @@ void MatMulConstTransposesExtractionTest::SetUp() { const auto& input_shape = shape_params.input_shape; const auto& weights_shape = shape_params.weights_shape; - auto param = std::make_shared(type, input_shape); - auto weights = opset8::Constant::create(type, weights_shape, {0.5}); - auto matmul = 
std::make_shared(param, weights, false, shape_params.trans_b); - function = std::make_shared(matmul, ParameterVector{param}); + auto param = std::make_shared(type, input_shape); + auto weights = ov::op::v0::Constant::create(type, weights_shape, {0.5}); + auto matmul = std::make_shared(param, weights, false, shape_params.trans_b); + function = std::make_shared(matmul, ParameterVector{param}); - auto transformed_function = clone_function(*function); + auto transformed_function = function->clone(); pass::Manager manager; manager.register_pass(); manager.run_passes(transformed_function); bool functions_equal; - auto orig_function = clone_function(*function); + auto orig_function = function->clone(); std::tie(functions_equal, std::ignore) = compare_functions(transformed_function, orig_function, true); if (can_be_fused) { ASSERT_FALSE(functions_equal); @@ -54,15 +58,19 @@ void MatMulConstTransposesExtractionTest::SetUp() { } std::string QuantizedMatMulConstTransposesExtractionTest::getTestCaseName( - const testing::TestParamInfo &obj) { + const testing::TestParamInfo& obj) { MatMulConstTransposesExtractionTestShapeParams params; std::string device; std::tie(params, std::ignore, device) = obj.param; std::ostringstream results; - results << "input=" << params.input_shape << "_" - "weights=" << params.weights_shape << "_" - "dev=" << device; + results << "input=" << params.input_shape + << "_" + "weights=" + << params.weights_shape + << "_" + "dev=" + << device; return results.str(); } @@ -75,23 +83,23 @@ void QuantizedMatMulConstTransposesExtractionTest::SetUp() { auto weights_shape = params.weights_shape; element::Type type = element::f32; - auto param = std::make_shared(type, input_shape); + auto param = std::make_shared(type, input_shape); std::shared_ptr input; - std::shared_ptr weights = opset8::Constant::create(type, weights_shape, {0.5}); - auto low = opset8::Constant::create(type, {1}, {-2}); - auto high = opset8::Constant::create(type, {1}, {2}); - input = std::make_shared(param, low, high, low, high, 256); - weights = std::make_shared(weights, low, high, low, high, 255); - auto matmul = std::make_shared(input, weights, false, false); - function = std::make_shared(matmul, ParameterVector{param}); - - auto transformed_function = clone_function(*function); + std::shared_ptr weights = ov::op::v0::Constant::create(type, weights_shape, {0.5}); + auto low = ov::op::v0::Constant::create(type, {1}, {-2}); + auto high = ov::op::v0::Constant::create(type, {1}, {2}); + input = std::make_shared(param, low, high, low, high, 256); + weights = std::make_shared(weights, low, high, low, high, 255); + auto matmul = std::make_shared(input, weights, false, false); + function = std::make_shared(matmul, ParameterVector{param}); + + auto transformed_function = function->clone(); pass::Manager manager; manager.register_pass(); manager.run_passes(transformed_function); bool functions_equal; - auto orig_function = clone_function(*function); + auto orig_function = function->clone(); std::tie(functions_equal, std::ignore) = compare_functions(transformed_function, orig_function, true); if (can_be_fused) { ASSERT_FALSE(functions_equal); @@ -102,10 +110,10 @@ void QuantizedMatMulConstTransposesExtractionTest::SetUp() { void QuantizedMatMulConstTransposesExtractionTest::TearDown() { SKIP_IF_CURRENT_TEST_IS_DISABLED(); - auto runtime_function = executableNetwork.GetExecGraphInfo().getFunction(); + auto runtime_function = compiledModel.get_runtime_model(); int ops_found = 0; for (const auto& node : 
runtime_function->get_ordered_ops()) { - const auto& layer_type = node->get_rt_info().at(ExecGraphInfoSerialization::LAYER_TYPE).as(); + const auto& layer_type = node->get_rt_info().at(ov::exec_model_info::LAYER_TYPE).as(); if (layer_type == "FullyConnected" || layer_type == "MatMul") { ops_found++; auto inputs = node->input_values(); @@ -115,4 +123,6 @@ void QuantizedMatMulConstTransposesExtractionTest::TearDown() { } ASSERT_GT(ops_found, 0); } -} // namespace SubgraphTestsDefinitions + +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/src/subgraph/matmul_multiply_fusion.cpp b/src/tests/functional/shared_test_classes/src/subgraph/matmul_multiply_fusion.cpp index 02252c96fdf4d1..1764223d930f0f 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/matmul_multiply_fusion.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/matmul_multiply_fusion.cpp @@ -2,16 +2,18 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "transformations/common_optimizations/matmul_multiply_fusion.hpp" #include "shared_test_classes/subgraph/matmul_multiply_fusion.hpp" -#include "ov_models/builders.hpp" -#include -namespace SubgraphTestsDefinitions { +#include "common_test_utils/graph_comparator.hpp" +#include "functional_test_utils/skip_tests_config.hpp" +#include "openvino/pass/manager.hpp" +#include "openvino/runtime/exec_model_info.hpp" +#include "transformations/common_optimizations/matmul_multiply_fusion.hpp" -using namespace ngraph; +namespace ov { +namespace test { -std::string MatMulMultiplyFusion::getTestCaseName(const testing::TestParamInfo &obj) { +std::string MatMulMultiplyFusion::getTestCaseName(const testing::TestParamInfo& obj) { MatMulMultiplyFusionShapeParams shape_params; std::string device; std::tie(shape_params, std::ignore, device) = obj.param; @@ -35,20 +37,20 @@ void MatMulMultiplyFusion::SetUp() { const auto& weights_shape = shape_params.weights_shape; const auto& const_shape = shape_params.const_shape; - auto param = std::make_shared(precision, input_shape); - auto weights = opset8::Constant::create(precision, weights_shape, {0.5}); - auto matmul = std::make_shared(param, weights, false, shape_params.trans_b); - auto mul_const = opset8::Constant::create(precision, const_shape, {2.0}); - auto mul = std::make_shared(matmul, mul_const); - function = std::make_shared(OutputVector{mul}, ParameterVector{param}); + auto param = std::make_shared(precision, input_shape); + auto weights = ov::op::v0::Constant::create(precision, weights_shape, {0.5}); + auto matmul = std::make_shared(param, weights, false, shape_params.trans_b); + auto mul_const = ov::op::v0::Constant::create(precision, const_shape, {2.0}); + auto mul = std::make_shared(matmul, mul_const); + function = std::make_shared(OutputVector{mul}, ParameterVector{param}); - auto transformed_function = clone_function(*function); + auto transformed_function = function->clone(); pass::Manager manager; manager.register_pass(); manager.run_passes(transformed_function); bool functions_equal; - auto orig_function = clone_function(*function); + auto orig_function = function->clone(); std::tie(functions_equal, std::ignore) = compare_functions(transformed_function, orig_function, true); if (can_be_fused) { ASSERT_FALSE(functions_equal); @@ -57,7 +59,8 @@ void MatMulMultiplyFusion::SetUp() { } } -std::string QuantizedMatMulMultiplyFusion::getTestCaseName(const testing::TestParamInfo &obj) { +std::string QuantizedMatMulMultiplyFusion::getTestCaseName( + const testing::TestParamInfo& 
obj) { MatMulMultiplyFusionShapeParams shape_params; std::string device; std::tie(shape_params, std::ignore, device) = obj.param; @@ -81,31 +84,31 @@ void QuantizedMatMulMultiplyFusion::SetUp() { auto weights_shape = shape_params.weights_shape; const auto& const_shape = shape_params.const_shape; - auto param = std::make_shared(precision, input_shape); - auto low = opset8::Constant::create(precision, {1}, {-2}); - auto high = opset8::Constant::create(precision, {1}, {2}); - auto input_fq = std::make_shared(param, low, high, low, high, 256); - std::shared_ptr weights = opset8::Constant::create(precision, weights_shape, {0.5}); - weights = std::make_shared(weights, low, high, low, high, 255); + auto param = std::make_shared(precision, input_shape); + auto low = ov::op::v0::Constant::create(precision, {1}, {-2}); + auto high = ov::op::v0::Constant::create(precision, {1}, {2}); + auto input_fq = std::make_shared(param, low, high, low, high, 256); + std::shared_ptr weights = ov::op::v0::Constant::create(precision, weights_shape, {0.5}); + weights = std::make_shared(weights, low, high, low, high, 255); if (shape_params.trans_b) { std::vector perm(weights_shape.size(), 0); std::iota(perm.begin(), perm.end(), 0); std::swap(*(perm.end() - 2), *(perm.end() - 1)); - auto perm_const = opset8::Constant::create(element::i32, {perm.size()}, perm); - weights = std::make_shared(weights, perm_const); + auto perm_const = ov::op::v0::Constant::create(element::i32, {perm.size()}, perm); + weights = std::make_shared(weights, perm_const); } - auto matmul = std::make_shared(input_fq, weights); - auto mul_const = opset8::Constant::create(precision, const_shape, {2}); - auto mul = std::make_shared(matmul, mul_const); - function = std::make_shared(OutputVector{mul}, ParameterVector{param}); + auto matmul = std::make_shared(input_fq, weights); + auto mul_const = ov::op::v0::Constant::create(precision, const_shape, {2}); + auto mul = std::make_shared(matmul, mul_const); + function = std::make_shared(OutputVector{mul}, ParameterVector{param}); - auto transformed_function = clone_function(*function); + auto transformed_function = function->clone(); pass::Manager manager; manager.register_pass(); manager.run_passes(transformed_function); bool functions_equal; - auto orig_function = clone_function(*function); + auto orig_function = function->clone(); std::tie(functions_equal, std::ignore) = compare_functions(transformed_function, orig_function, true); if (can_be_fused) { ASSERT_FALSE(functions_equal); @@ -116,14 +119,14 @@ void QuantizedMatMulMultiplyFusion::SetUp() { void QuantizedMatMulMultiplyFusion::TearDown() { SKIP_IF_CURRENT_TEST_IS_DISABLED(); - auto get_layer_type = [] (const std::shared_ptr& node) -> const std::string& { + auto get_layer_type = [](const std::shared_ptr& node) -> const std::string& { const auto& rt_info = node->get_rt_info(); - auto it = rt_info.find(ExecGraphInfoSerialization::LAYER_TYPE); - IE_ASSERT(it != rt_info.end()); + auto it = rt_info.find(ov::exec_model_info::LAYER_TYPE); + OPENVINO_ASSERT(it != rt_info.end()); return it->second.as(); }; - auto runtime_function = executableNetwork.GetExecGraphInfo().getFunction(); + auto runtime_function = compiledModel.get_runtime_model(); int ops_found = 0; for (const auto& node : runtime_function->get_ordered_ops()) { const auto& layer_type = get_layer_type(node); @@ -136,4 +139,6 @@ void QuantizedMatMulMultiplyFusion::TearDown() { } ASSERT_GT(ops_found, 0); } -} // namespace SubgraphTestsDefinitions + +} // namespace test +} // namespace ov diff 
--git a/src/tests/functional/shared_test_classes/src/subgraph/matmul_squeeze_add.cpp b/src/tests/functional/shared_test_classes/src/subgraph/matmul_squeeze_add.cpp index b91b50bd8a9457..01b628d63cf8fd 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/matmul_squeeze_add.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/matmul_squeeze_add.cpp @@ -3,57 +3,68 @@ // #include "shared_test_classes/subgraph/matmul_squeeze_add.hpp" + +#include "common_test_utils/data_utils.hpp" #include "ov_models/builders.hpp" -namespace SubgraphTestsDefinitions { +namespace ov { +namespace test { std::string MatmulSqueezeAddTest::getTestCaseName(const testing::TestParamInfo& obj) { - InferenceEngine::Precision netPrecision; - std::vector inputShape; + ov::element::Type element_type; + ov::Shape input_shape; std::size_t outputSize; std::string targetDevice; - std::map configuration; - std::tie(netPrecision, targetDevice, configuration, inputShape, outputSize) = obj.param; + ov::AnyMap configuration; + std::tie(element_type, targetDevice, configuration, input_shape, outputSize) = obj.param; std::ostringstream result; - result << "IS=" << ov::test::utils::vec2str(inputShape) << "_"; + result << "IS=" << ov::test::utils::vec2str(input_shape) << "_"; result << "OS=" << outputSize << "_"; - result << "netPRC=" << netPrecision.name() << "_"; + result << "IT=" << element_type << "_"; result << "targetDevice=" << targetDevice; for (auto const& configItem : configuration) { - result << "_configItem=" << configItem.first << "_" << configItem.second; + result << "_configItem=" << configItem.first << "_" << configItem.second.as(); } return result.str(); } void MatmulSqueezeAddTest::SetUp() { auto seed = std::chrono::high_resolution_clock::now().time_since_epoch().count(); - InferenceEngine::Precision netPrecision; - std::map tempConfig; - std::vector inputShape; + ov::element::Type element_type; + ov::AnyMap tempConfig; + ov::Shape inputShape; size_t outputSize; - std::tie(netPrecision, targetDevice, tempConfig, inputShape, outputSize) = this->GetParam(); + std::tie(element_type, targetDevice, tempConfig, inputShape, outputSize) = this->GetParam(); configuration.insert(tempConfig.begin(), tempConfig.end()); - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - - ov::ParameterVector params {std::make_shared(ngPrc, ov::Shape(inputShape))}; + ov::ParameterVector params{std::make_shared(element_type, ov::Shape(inputShape))}; - auto constant_0 = ngraph::builder::makeConstant(ngPrc, { outputSize, inputShape[1] }, - ov::test::utils::generate_float_numbers(outputSize * inputShape[1], 0, 1, seed), false); + auto constant_0 = ngraph::builder::makeConstant( + element_type, + {outputSize, inputShape[1]}, + ov::test::utils::generate_float_numbers(outputSize * inputShape[1], 0, 1, seed), + false); auto matmul_0 = std::make_shared(params[0], constant_0, false, true); - auto constant_1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 1 }, std::vector{0}); + auto constant_1 = + std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{1}, std::vector{0}); auto unsqueeze_0 = std::make_shared(matmul_0, constant_1); - auto constant_2 = ngraph::builder::makeConstant(ngPrc, { 1, inputShape[0], outputSize }, - ov::test::utils::generate_float_numbers(inputShape[0] * outputSize, 0, 1, seed), false); + auto constant_2 = ngraph::builder::makeConstant( + element_type, + {1, inputShape[0], outputSize}, + ov::test::utils::generate_float_numbers(inputShape[0] * outputSize, 0, 1, 
seed), + false); auto add_0 = std::make_shared(unsqueeze_0, constant_2); - auto constant_3 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 1 }, std::vector{0}); + auto constant_3 = + std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{1}, std::vector{0}); auto squeeze_0 = std::make_shared(add_0, constant_3); - ngraph::ResultVector results {std::make_shared(squeeze_0)}; + ngraph::ResultVector results{std::make_shared(squeeze_0)}; function = std::make_shared(results, params, "MatmulSqueezeAddTest"); } -} // namespace SubgraphTestsDefinitions + +} // namespace test +} // namespace ov From 6a326455b964359c034a2568942501834b13268e Mon Sep 17 00:00:00 2001 From: Jan Iwaszkiewicz Date: Wed, 11 Oct 2023 09:53:34 +0200 Subject: [PATCH 147/257] [PyOV] Propagate errors on imports from runtime module (#20273) Co-authored-by: Michal Lukaszewski --- src/bindings/python/CMakeLists.txt | 21 ++--- .../src/compatibility/openvino/__init__.py | 82 +++++++++---------- src/bindings/python/src/openvino/__init__.py | 82 +++++++++---------- tools/benchmark_tool/openvino/__init__.py | 82 +++++++++---------- tools/openvino_dev/CMakeLists.txt | 22 ++--- tools/ovc/openvino/__init__.py | 82 +++++++++---------- 6 files changed, 181 insertions(+), 190 deletions(-) diff --git a/src/bindings/python/CMakeLists.txt b/src/bindings/python/CMakeLists.txt index 2093c315a06691..8a572f89a0f47e 100644 --- a/src/bindings/python/CMakeLists.txt +++ b/src/bindings/python/CMakeLists.txt @@ -123,17 +123,8 @@ ov_check_python_build_conditions() # check __init__.py files alignment -function(ov_check_init_files_alignment) +function(ov_check_init_files_alignment init_files) # check the files in pairs - list(APPEND init_files - "${OpenVINOPython_SOURCE_DIR}/src/openvino/__init__.py" - "${OpenVINOPython_SOURCE_DIR}/src/compatibility/openvino/__init__.py" - "${OpenVINO_SOURCE_DIR}/tools/mo/openvino/__init__.py" - "${OpenVINO_SOURCE_DIR}/tools/pot/openvino/__init__.py" - "${OpenVINO_SOURCE_DIR}/tools/ovc/openvino/__init__.py" - "${OpenVINO_SOURCE_DIR}/tools/benchmark_tool/openvino/__init__.py" - "${OpenVINO_SOURCE_DIR}/tools/openvino_dev/src/openvino/__init__.py") - list(LENGTH init_files init_files_count) math(EXPR file_loop_range "${init_files_count}-2") foreach(init_file_idx RANGE 0 ${file_loop_range}) @@ -145,12 +136,18 @@ function(ov_check_init_files_alignment) RESULT_VARIABLE compare_result ) if(compare_result EQUAL 1) - message(FATAL_ERROR "The __init__.py files are misaligned: ${file1} and ${file2}") + message(FATAL_ERROR "The runtime __init__.py files are misaligned: ${file1} and ${file2}") endif() endforeach() endfunction() -ov_check_init_files_alignment() +set(INIT_FILES_RUNTIME +"${OpenVINOPython_SOURCE_DIR}/src/openvino/__init__.py" +"${OpenVINOPython_SOURCE_DIR}/src/compatibility/openvino/__init__.py" +"${OpenVINO_SOURCE_DIR}/tools/ovc/openvino/__init__.py" +"${OpenVINO_SOURCE_DIR}/tools/benchmark_tool/openvino/__init__.py") + +ov_check_init_files_alignment("${INIT_FILES_RUNTIME}") ov_option(ENABLE_PYTHON "Enables OpenVINO Python API build" ${ENABLE_PYTHON_DEFAULT}) diff --git a/src/bindings/python/src/compatibility/openvino/__init__.py b/src/bindings/python/src/compatibility/openvino/__init__.py index 90552e0befed68..b7dc434f3148cc 100644 --- a/src/bindings/python/src/compatibility/openvino/__init__.py +++ b/src/bindings/python/src/compatibility/openvino/__init__.py @@ -12,47 +12,47 @@ except ImportError: pass -# API 2.0 -try: - # Import all public modules - from openvino import runtime as runtime - from 
openvino import frontend as frontend - from openvino import helpers as helpers - from openvino import preprocess as preprocess - from openvino import utils as utils - from openvino.runtime import properties as properties - - # Import most important classes and functions from openvino.runtime - from openvino.runtime import Model - from openvino.runtime import Core - from openvino.runtime import CompiledModel - from openvino.runtime import InferRequest - from openvino.runtime import AsyncInferQueue - - from openvino.runtime import Dimension - from openvino.runtime import Strides - from openvino.runtime import PartialShape - from openvino.runtime import Shape - from openvino.runtime import Layout - from openvino.runtime import Type - from openvino.runtime import Tensor - from openvino.runtime import OVAny - - from openvino.runtime import compile_model - from openvino.runtime import get_batch - from openvino.runtime import set_batch - from openvino.runtime import serialize - from openvino.runtime import shutdown - from openvino.runtime import tensor_from_file - from openvino.runtime import save_model - from openvino.runtime import layout_helpers - - # Set version for openvino package - from openvino.runtime import get_version - __version__ = get_version() -except ImportError: - import warnings - warnings.warn("openvino package has problems with imports!", ImportWarning, stacklevel=2) +# # +# # API 2.0 +# # This __init__.py forces checking of runtime modules to propagate errors. +# # It is not compared with init files from openvino-dev package. +# # +# Import all public modules +from openvino import runtime as runtime +from openvino import frontend as frontend +from openvino import helpers as helpers +from openvino import preprocess as preprocess +from openvino import utils as utils +from openvino.runtime import properties as properties + +# Import most important classes and functions from openvino.runtime +from openvino.runtime import Model +from openvino.runtime import Core +from openvino.runtime import CompiledModel +from openvino.runtime import InferRequest +from openvino.runtime import AsyncInferQueue + +from openvino.runtime import Dimension +from openvino.runtime import Strides +from openvino.runtime import PartialShape +from openvino.runtime import Shape +from openvino.runtime import Layout +from openvino.runtime import Type +from openvino.runtime import Tensor +from openvino.runtime import OVAny + +from openvino.runtime import compile_model +from openvino.runtime import get_batch +from openvino.runtime import set_batch +from openvino.runtime import serialize +from openvino.runtime import shutdown +from openvino.runtime import tensor_from_file +from openvino.runtime import save_model +from openvino.runtime import layout_helpers + +# Set version for openvino package +from openvino.runtime import get_version +__version__ = get_version() # Tools try: diff --git a/src/bindings/python/src/openvino/__init__.py b/src/bindings/python/src/openvino/__init__.py index 90552e0befed68..b7dc434f3148cc 100644 --- a/src/bindings/python/src/openvino/__init__.py +++ b/src/bindings/python/src/openvino/__init__.py @@ -12,47 +12,47 @@ except ImportError: pass -# API 2.0 -try: - # Import all public modules - from openvino import runtime as runtime - from openvino import frontend as frontend - from openvino import helpers as helpers - from openvino import preprocess as preprocess - from openvino import utils as utils - from openvino.runtime import properties as properties - - # Import most important classes 
and functions from openvino.runtime - from openvino.runtime import Model - from openvino.runtime import Core - from openvino.runtime import CompiledModel - from openvino.runtime import InferRequest - from openvino.runtime import AsyncInferQueue - - from openvino.runtime import Dimension - from openvino.runtime import Strides - from openvino.runtime import PartialShape - from openvino.runtime import Shape - from openvino.runtime import Layout - from openvino.runtime import Type - from openvino.runtime import Tensor - from openvino.runtime import OVAny - - from openvino.runtime import compile_model - from openvino.runtime import get_batch - from openvino.runtime import set_batch - from openvino.runtime import serialize - from openvino.runtime import shutdown - from openvino.runtime import tensor_from_file - from openvino.runtime import save_model - from openvino.runtime import layout_helpers - - # Set version for openvino package - from openvino.runtime import get_version - __version__ = get_version() -except ImportError: - import warnings - warnings.warn("openvino package has problems with imports!", ImportWarning, stacklevel=2) +# # +# # API 2.0 +# # This __init__.py forces checking of runtime modules to propagate errors. +# # It is not compared with init files from openvino-dev package. +# # +# Import all public modules +from openvino import runtime as runtime +from openvino import frontend as frontend +from openvino import helpers as helpers +from openvino import preprocess as preprocess +from openvino import utils as utils +from openvino.runtime import properties as properties + +# Import most important classes and functions from openvino.runtime +from openvino.runtime import Model +from openvino.runtime import Core +from openvino.runtime import CompiledModel +from openvino.runtime import InferRequest +from openvino.runtime import AsyncInferQueue + +from openvino.runtime import Dimension +from openvino.runtime import Strides +from openvino.runtime import PartialShape +from openvino.runtime import Shape +from openvino.runtime import Layout +from openvino.runtime import Type +from openvino.runtime import Tensor +from openvino.runtime import OVAny + +from openvino.runtime import compile_model +from openvino.runtime import get_batch +from openvino.runtime import set_batch +from openvino.runtime import serialize +from openvino.runtime import shutdown +from openvino.runtime import tensor_from_file +from openvino.runtime import save_model +from openvino.runtime import layout_helpers + +# Set version for openvino package +from openvino.runtime import get_version +__version__ = get_version() # Tools try: diff --git a/tools/benchmark_tool/openvino/__init__.py b/tools/benchmark_tool/openvino/__init__.py index 90552e0befed68..b7dc434f3148cc 100644 --- a/tools/benchmark_tool/openvino/__init__.py +++ b/tools/benchmark_tool/openvino/__init__.py @@ -12,47 +12,47 @@ except ImportError: pass -# API 2.0 -try: - # Import all public modules - from openvino import runtime as runtime - from openvino import frontend as frontend - from openvino import helpers as helpers - from openvino import preprocess as preprocess - from openvino import utils as utils - from openvino.runtime import properties as properties - - # Import most important classes and functions from openvino.runtime - from openvino.runtime import Model - from openvino.runtime import Core - from openvino.runtime import CompiledModel - from openvino.runtime import InferRequest - from openvino.runtime import AsyncInferQueue - - from openvino.runtime 
import Dimension - from openvino.runtime import Strides - from openvino.runtime import PartialShape - from openvino.runtime import Shape - from openvino.runtime import Layout - from openvino.runtime import Type - from openvino.runtime import Tensor - from openvino.runtime import OVAny - - from openvino.runtime import compile_model - from openvino.runtime import get_batch - from openvino.runtime import set_batch - from openvino.runtime import serialize - from openvino.runtime import shutdown - from openvino.runtime import tensor_from_file - from openvino.runtime import save_model - from openvino.runtime import layout_helpers - - # Set version for openvino package - from openvino.runtime import get_version - __version__ = get_version() -except ImportError: - import warnings - warnings.warn("openvino package has problems with imports!", ImportWarning, stacklevel=2) +# # +# # API 2.0 +# # This __init__.py forces checking of runtime modules to propagate errors. +# # It is not compared with init files from openvino-dev package. +# # +# Import all public modules +from openvino import runtime as runtime +from openvino import frontend as frontend +from openvino import helpers as helpers +from openvino import preprocess as preprocess +from openvino import utils as utils +from openvino.runtime import properties as properties + +# Import most important classes and functions from openvino.runtime +from openvino.runtime import Model +from openvino.runtime import Core +from openvino.runtime import CompiledModel +from openvino.runtime import InferRequest +from openvino.runtime import AsyncInferQueue + +from openvino.runtime import Dimension +from openvino.runtime import Strides +from openvino.runtime import PartialShape +from openvino.runtime import Shape +from openvino.runtime import Layout +from openvino.runtime import Type +from openvino.runtime import Tensor +from openvino.runtime import OVAny + +from openvino.runtime import compile_model +from openvino.runtime import get_batch +from openvino.runtime import set_batch +from openvino.runtime import serialize +from openvino.runtime import shutdown +from openvino.runtime import tensor_from_file +from openvino.runtime import save_model +from openvino.runtime import layout_helpers + +# Set version for openvino package +from openvino.runtime import get_version +__version__ = get_version() # Tools try: diff --git a/tools/openvino_dev/CMakeLists.txt b/tools/openvino_dev/CMakeLists.txt index 12a24082a83a8e..494ac86c725acf 100644 --- a/tools/openvino_dev/CMakeLists.txt +++ b/tools/openvino_dev/CMakeLists.txt @@ -56,17 +56,8 @@ endforeach() # check __init__.py files alignment -function(ov_check_init_files_alignment) +function(ov_check_init_files_alignment init_files) # check the files in pairs - list(APPEND init_files - "${OpenVINO_SOURCE_DIR}/src/bindings/python/src/openvino/__init__.py" - "${OpenVINO_SOURCE_DIR}/src/bindings/python/src/compatibility/openvino/__init__.py" - "${OpenVINO_SOURCE_DIR}/tools/mo/openvino/__init__.py" - "${OpenVINO_SOURCE_DIR}/tools/pot/openvino/__init__.py" - "${OpenVINO_SOURCE_DIR}/tools/ovc/openvino/__init__.py" - "${OpenVINO_SOURCE_DIR}/tools/benchmark_tool/openvino/__init__.py" - "${OpenVINO_SOURCE_DIR}/tools/openvino_dev/src/openvino/__init__.py") - list(LENGTH init_files init_files_count) math(EXPR file_loop_range "${init_files_count}-2") foreach(init_file_idx RANGE 0 ${file_loop_range}) @@ -78,14 +69,17 @@ function(ov_check_init_files_alignment) RESULT_VARIABLE compare_result ) if(compare_result EQUAL 1) - message(STATUS 
${file1}) - message(STATUS ${file2}) - message(FATAL_ERROR "The __init__.py files are misaligned: ${file1} and ${file2}") + message(FATAL_ERROR "The tools __init__.py files are misaligned: ${file1} and ${file2}") endif() endforeach() endfunction() -ov_check_init_files_alignment() +set(INIT_FILES_TOOLS +"${OpenVINO_SOURCE_DIR}/tools/mo/openvino/__init__.py" +"${OpenVINO_SOURCE_DIR}/tools/pot/openvino/__init__.py" +"${OpenVINO_SOURCE_DIR}/tools/openvino_dev/src/openvino/__init__.py") + +ov_check_init_files_alignment("${INIT_FILES_TOOLS}") # openvino_dev build diff --git a/tools/ovc/openvino/__init__.py b/tools/ovc/openvino/__init__.py index 90552e0befed68..b7dc434f3148cc 100644 --- a/tools/ovc/openvino/__init__.py +++ b/tools/ovc/openvino/__init__.py @@ -12,47 +12,47 @@ except ImportError: pass -# API 2.0 -try: - # Import all public modules - from openvino import runtime as runtime - from openvino import frontend as frontend - from openvino import helpers as helpers - from openvino import preprocess as preprocess - from openvino import utils as utils - from openvino.runtime import properties as properties - - # Import most important classes and functions from openvino.runtime - from openvino.runtime import Model - from openvino.runtime import Core - from openvino.runtime import CompiledModel - from openvino.runtime import InferRequest - from openvino.runtime import AsyncInferQueue - - from openvino.runtime import Dimension - from openvino.runtime import Strides - from openvino.runtime import PartialShape - from openvino.runtime import Shape - from openvino.runtime import Layout - from openvino.runtime import Type - from openvino.runtime import Tensor - from openvino.runtime import OVAny - - from openvino.runtime import compile_model - from openvino.runtime import get_batch - from openvino.runtime import set_batch - from openvino.runtime import serialize - from openvino.runtime import shutdown - from openvino.runtime import tensor_from_file - from openvino.runtime import save_model - from openvino.runtime import layout_helpers - - # Set version for openvino package - from openvino.runtime import get_version - __version__ = get_version() -except ImportError: - import warnings - warnings.warn("openvino package has problems with imports!", ImportWarning, stacklevel=2) +# # +# # API 2.0 +# # This __init__.py forces checking of runtime modules to propagate errors. +# # It is not compared with init files from openvino-dev package. 
+# # +# Import all public modules +from openvino import runtime as runtime +from openvino import frontend as frontend +from openvino import helpers as helpers +from openvino import preprocess as preprocess +from openvino import utils as utils +from openvino.runtime import properties as properties + +# Import most important classes and functions from openvino.runtime +from openvino.runtime import Model +from openvino.runtime import Core +from openvino.runtime import CompiledModel +from openvino.runtime import InferRequest +from openvino.runtime import AsyncInferQueue + +from openvino.runtime import Dimension +from openvino.runtime import Strides +from openvino.runtime import PartialShape +from openvino.runtime import Shape +from openvino.runtime import Layout +from openvino.runtime import Type +from openvino.runtime import Tensor +from openvino.runtime import OVAny + +from openvino.runtime import compile_model +from openvino.runtime import get_batch +from openvino.runtime import set_batch +from openvino.runtime import serialize +from openvino.runtime import shutdown +from openvino.runtime import tensor_from_file +from openvino.runtime import save_model +from openvino.runtime import layout_helpers + +# Set version for openvino package +from openvino.runtime import get_version +__version__ = get_version() # Tools try: From 1ca2f9c6de17e33d0f4c0db17faeee172a5163a5 Mon Sep 17 00:00:00 2001 From: Maciej Smyk Date: Wed, 11 Oct 2023 11:03:14 +0200 Subject: [PATCH 148/257] Update openvino_intro.md (#20383) --- docs/articles_en/openvino_workflow/openvino_intro.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/articles_en/openvino_workflow/openvino_intro.md b/docs/articles_en/openvino_workflow/openvino_intro.md index 40db0d15b52bd5..2937189c136a01 100644 --- a/docs/articles_en/openvino_workflow/openvino_intro.md +++ b/docs/articles_en/openvino_workflow/openvino_intro.md @@ -33,7 +33,7 @@ OpenVINO Runtime is a set of C++ libraries with C and Python bindings providing Note that TensorFlow models can be run using the :doc:`torch.compile feature `, as well as the standard ways of :doc:`converting TensorFlow ` - or reading them directly. + or reading them directly. OpenVINO Runtime uses a plugin architecture. Its plugins are software components that contain complete implementation for inference on a particular Intel® hardware device: CPU, GPU, GNA, etc. Each plugin implements the unified API and provides additional hardware-specific APIs for configuring devices or API interoperability between OpenVINO Runtime and underlying plugin backend. 
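For context, a minimal sketch of how the API 2.0 surface re-exported by the __init__.py files above is typically consumed; the model path below is a placeholder and is not part of any patch in this series:

    # Sketch only: exercises names re-exported by the openvino package __init__.py
    import openvino as ov

    print(ov.get_version())                    # __version__ comes from openvino.runtime.get_version()

    core = ov.Core()                           # re-exported openvino.runtime.Core
    model = core.read_model("model.xml")       # placeholder model path
    compiled = ov.compile_model(model, "CPU")  # re-exported openvino.runtime.compile_model
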
From ac11751e9c751417a74e6af587134ebb79fac0c3 Mon Sep 17 00:00:00 2001 From: Pawel Raasz Date: Wed, 11 Oct 2023 11:50:29 +0200 Subject: [PATCH 149/257] [core]Migrate Eye to new API (#20258) * Migrate Eye to new API * Fix `matrix_offset` initialization * get_tensors_shapes -> get_tensors_partial_shapes --- src/core/include/openvino/op/eye.hpp | 4 +- .../openvino/op/util/evaluate_helpers.hpp | 23 +++ .../include/openvino/reference/eye.hpp | 32 ++-- src/core/src/op/eye.cpp | 152 ++++++++---------- src/core/src/op/util/evaluate_helpers.cpp | 17 ++ 5 files changed, 127 insertions(+), 101 deletions(-) create mode 100644 src/core/include/openvino/op/util/evaluate_helpers.hpp diff --git a/src/core/include/openvino/op/eye.hpp b/src/core/include/openvino/op/eye.hpp index feaebafca82264..1096e488aa13eb 100644 --- a/src/core/include/openvino/op/eye.hpp +++ b/src/core/include/openvino/op/eye.hpp @@ -55,9 +55,7 @@ class OPENVINO_API Eye : public Op { m_output_type = output_type; } - OPENVINO_SUPPRESS_DEPRECATED_START - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - OPENVINO_SUPPRESS_DEPRECATED_END + bool evaluate(TensorVector& outputs, const TensorVector& inputs) const override; bool has_evaluate() const override; protected: diff --git a/src/core/include/openvino/op/util/evaluate_helpers.hpp b/src/core/include/openvino/op/util/evaluate_helpers.hpp new file mode 100644 index 00000000000000..616528adf60d08 --- /dev/null +++ b/src/core/include/openvino/op/util/evaluate_helpers.hpp @@ -0,0 +1,23 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/core/partial_shape.hpp" +#include "openvino/runtime/tensor.hpp" + +namespace ov { +namespace op { +namespace util { + +/** + * @brief Get the tensors shapes as ov::PartialShape. + * + * @param tensors Input tensors vector to get its shapes. + * @return Vector of partial shapes sam size as input tensor vector. + */ +std::vector get_tensors_partial_shapes(const TensorVector& tensors); +} // namespace util +} // namespace op +} // namespace ov diff --git a/src/core/reference/include/openvino/reference/eye.hpp b/src/core/reference/include/openvino/reference/eye.hpp index 0991637031538f..2cb997c03f0817 100644 --- a/src/core/reference/include/openvino/reference/eye.hpp +++ b/src/core/reference/include/openvino/reference/eye.hpp @@ -7,31 +7,41 @@ #include #include "openvino/core/shape.hpp" -#include "utils/span.hpp" namespace ov { namespace reference { + +/** + * @brief Reference implementation of Eye operator + * + * @param data Pointer to output data. + * @param out_shape Output data size. 
+ * @param diagonal_index Eye diagonal index to populate matrix with ones + */ template void eye(T* data, const Shape& out_shape, const int64_t diagonal_index) { - const int64_t num_matrices = shape_size(span(out_shape).subspan(0, out_shape.size() - 2)); - const int64_t num_rows = out_shape[out_shape.size() - 2]; - const int64_t num_columns = out_shape[out_shape.size() - 1]; + const auto spatial_dims_offset = out_shape.size() - 2; + const int64_t num_columns = out_shape.back(); + const int64_t num_rows = out_shape[spatial_dims_offset]; const int64_t matrix_size = num_rows * num_columns; + const int64_t out_size = shape_size(out_shape); // fill tensor by zero - std::fill(data, data + num_matrices * matrix_size, T(0)); + std::fill(data, std::next(data, out_size), T(0)); // set ones on diagonal - const int64_t shift_by_columns = std::max(diagonal_index, int64_t(0)); - const int64_t count_by_columns = std::max(num_columns - std::abs(diagonal_index), int64_t(0)); - const int64_t count_by_rows = std::max(num_rows - std::abs(diagonal_index), int64_t(0)); + constexpr int64_t zero{0}; + const auto abs_diag_idx = static_cast(std::abs(diagonal_index)); + const int64_t shift_by_columns = std::max(diagonal_index, zero); + const int64_t count_by_columns = std::max(num_columns - abs_diag_idx, zero); + const int64_t count_by_rows = std::max(num_rows - abs_diag_idx, zero); const int64_t count = diagonal_index > 0 ? std::min(count_by_columns, num_rows) : std::min(count_by_rows, num_columns); - for (auto i = 0; i < num_matrices; i++) { - for (auto j = 0; j < count; j++) { + for (auto matrix_offset = zero; matrix_offset < out_size; matrix_offset += matrix_size) { + for (auto j = 0; j < count; ++j) { const int64_t index = (j + shift_by_columns - diagonal_index) * num_columns + j + shift_by_columns; - data[index + i * matrix_size] = static_cast(1); + data[matrix_offset + index] = T{1}; } } } diff --git a/src/core/src/op/eye.cpp b/src/core/src/op/eye.cpp index 77e4082792e2f6..edf9abbb06f4c4 100644 --- a/src/core/src/op/eye.cpp +++ b/src/core/src/op/eye.cpp @@ -4,62 +4,49 @@ #include "openvino/op/eye.hpp" +#include "element_visitor.hpp" #include "eye_shape_inference.hpp" #include "itt.hpp" -#include "ngraph/validation_util.hpp" +#include "openvino/core/validation_util.hpp" +#include "openvino/op/util/evaluate_helpers.hpp" #include "openvino/reference/eye.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START namespace ov { namespace op { namespace eye { -namespace { -template -bool evaluate(const ngraph::HostTensorPtr& out, const int64_t diagonal_index) { - ov::reference::eye(out->get_data_ptr(), out->get_shape(), diagonal_index); - return true; -} -bool evaluate_eye(const ngraph::HostTensorPtr& out, const int64_t diagonal_index) { - bool rc = true; - switch (out->get_element_type()) { - OPENVINO_TYPE_CASE(evaluate, i8, out, diagonal_index); - OPENVINO_TYPE_CASE(evaluate, u8, out, diagonal_index); - OPENVINO_TYPE_CASE(evaluate, f16, out, diagonal_index); - OPENVINO_TYPE_CASE(evaluate, bf16, out, diagonal_index); - OPENVINO_TYPE_CASE(evaluate, i32, out, diagonal_index); - OPENVINO_TYPE_CASE(evaluate, f32, out, diagonal_index); - OPENVINO_TYPE_CASE(evaluate, f64, out, diagonal_index); - OPENVINO_TYPE_CASE(evaluate, i64, out, diagonal_index); - default: - rc = false; - break; +struct Evaluate : element::NoAction { + using element::NoAction::visit; + + template > + static result_type visit(Tensor& out, const Shape& out_shape, const int64_t diagonal_idx) { + reference::eye(out.data(), out_shape, diagonal_idx); + return true; } - 
return rc; -} -} // namespace +}; } // namespace eye -ov::op::v9::Eye::Eye(const Output& num_rows, - const Output& num_columns, - const Output& diagonal_index, - const Output& batch_shape, - const ov::element::Type& out_type) +namespace v9 { +Eye::Eye(const Output& num_rows, + const Output& num_columns, + const Output& diagonal_index, + const Output& batch_shape, + const ov::element::Type& out_type) : Op({num_rows, num_columns, diagonal_index, batch_shape}), m_output_type(out_type) { constructor_validate_and_infer_types(); } -ov::op::v9::Eye::Eye(const Output& num_rows, - const Output& num_columns, - const Output& diagonal_index, - const ov::element::Type& out_type) +Eye::Eye(const Output& num_rows, + const Output& num_columns, + const Output& diagonal_index, + const ov::element::Type& out_type) : Op({num_rows, num_columns, diagonal_index}), m_output_type(out_type) { constructor_validate_and_infer_types(); } -void ov::op::v9::Eye::validate_and_infer_types() { +void Eye::validate_and_infer_types() { OV_OP_SCOPE(v9_Eye_validate_and_infer_types); for (size_t i = 0; i < get_input_size(); ++i) { @@ -78,81 +65,72 @@ void ov::op::v9::Eye::validate_and_infer_types() { set_output_type(0, get_out_type(), output_shape); } -bool ov::op::v9::Eye::visit_attributes(ov::AttributeVisitor& visitor) { +bool Eye::visit_attributes(ov::AttributeVisitor& visitor) { OV_OP_SCOPE(v9_Eye_visit_attributes); visitor.on_attribute("output_type", m_output_type); return true; } -std::shared_ptr ov::op::v9::Eye::clone_with_new_inputs(const ov::OutputVector& new_args) const { +std::shared_ptr Eye::clone_with_new_inputs(const ov::OutputVector& new_args) const { OV_OP_SCOPE(v9_Eye_clone_with_new_inputs); check_new_args_count(this, new_args); - if (new_args.size() == 3) { - return std::make_shared(new_args[0], new_args[1], new_args[2], m_output_type); - } else if (new_args.size() == 4) { - return std::make_shared(new_args[0], new_args[1], new_args[2], new_args[3], m_output_type); - } else { + + switch (new_args.size()) { + case 3: + return std::make_shared(new_args[0], new_args[1], new_args[2], m_output_type); + case 4: + return std::make_shared(new_args[0], new_args[1], new_args[2], new_args[3], m_output_type); + default: OPENVINO_THROW("Eye has incorrect input number: ", new_args.size()); } } -bool ov::op::v9::Eye::has_evaluate() const { +bool Eye::has_evaluate() const { OV_OP_SCOPE(v9_Eye_has_evaluate); switch (m_output_type) { - case ov::element::i8: - case ov::element::u8: - case ov::element::f16: - case ov::element::bf16: - case ov::element::i32: - case ov::element::f32: - case ov::element::i64: + case element::bf16: + case element::f16: + case element::f32: + case element::f64: + case element::i8: + case element::i32: + case element::i64: + case element::u8: return true; default: - break; + return false; } - return false; } -bool ov::op::v9::Eye::evaluate(const ngraph::HostTensorVector& outputs, const ngraph::HostTensorVector& inputs) const { +bool Eye::evaluate(TensorVector& outputs, const TensorVector& inputs) const { OV_OP_SCOPE(v9_Eye_evaluate); - OPENVINO_SUPPRESS_DEPRECATED_START - OPENVINO_ASSERT(ngraph::validate_host_tensor_vector(inputs, get_input_size()), "Invalid Eye input TensorVector."); - OPENVINO_ASSERT(ngraph::validate_host_tensor_vector(outputs, 1), "Invalid Eye output TensorVector."); - OPENVINO_SUPPRESS_DEPRECATED_END - - int64_t diagonal_index; - - if (get_input_size() > 1) { - const auto& diagonal_index_data = inputs[2]; - - switch (diagonal_index_data->get_element_type()) { - case element::i32: - 
diagonal_index = diagonal_index_data->get_data_ptr()[0]; - break; - case element::i64: - diagonal_index = diagonal_index_data->get_data_ptr()[0]; - break; - default: - OPENVINO_THROW("Unsupported type of input `diagonal_index` in Eye operation: ", - diagonal_index_data->get_element_type().to_string()); - } - } else { - diagonal_index = 0; - } - - std::vector input_shapes; - input_shapes.reserve(inputs.size()); - - for (size_t i = 0; i < inputs.size(); ++i) { - input_shapes.push_back(inputs[i]->get_partial_shape()); - } + OPENVINO_ASSERT(outputs.size() == 1); + // Inputs size and shapes checked by shape_infer + const auto input_shapes = util::get_tensors_partial_shapes(inputs); const auto output_shape = shape_infer(this, input_shapes, make_tensor_accessor(inputs)).front().to_shape(); - outputs[0]->set_element_type(get_out_type()); - outputs[0]->set_shape(output_shape); + int64_t diagonal_index; + const auto& diagonal_tensor = inputs[2]; + switch (diagonal_tensor.get_element_type()) { + case element::i32: + diagonal_index = diagonal_tensor.data>()[0]; + break; + case element::i64: + diagonal_index = diagonal_tensor.data>()[0]; + break; + default: + OPENVINO_THROW("Unsupported type of input `diagonal_index` in Eye operation: ", + diagonal_tensor.get_element_type().to_string()); + } - return eye::evaluate_eye(outputs[0], diagonal_index); + outputs[0].set_shape(output_shape); + using namespace ov::element; + return IfTypeOf::apply(outputs[0].get_element_type(), + outputs[0], + output_shape, + diagonal_index); } +} // namespace v9 } // namespace op } // namespace ov diff --git a/src/core/src/op/util/evaluate_helpers.cpp b/src/core/src/op/util/evaluate_helpers.cpp index cffc57e6fbd87c..4e21da40bfe013 100644 --- a/src/core/src/op/util/evaluate_helpers.cpp +++ b/src/core/src/op/util/evaluate_helpers.cpp @@ -4,6 +4,8 @@ #include "ngraph/op/util/evaluate_helpers.hpp" +#include "openvino/op/util/evaluate_helpers.hpp" + namespace ngraph { AxisSet get_normalized_axes_from_tensor(const HostTensorPtr tensor, const ngraph::Rank& rank, @@ -15,3 +17,18 @@ AxisSet get_normalized_axes_from_tensor(const HostTensorPtr tensor, return AxisSet{normalized_axes}; } } // namespace ngraph + +namespace ov { +namespace op { +namespace util { +std::vector get_tensors_partial_shapes(const TensorVector& tensors) { + std::vector shapes; + shapes.reserve(tensors.size()); + for (const auto& t : tensors) { + shapes.emplace_back(t.get_shape()); + } + return shapes; +} +} // namespace util +} // namespace op +} // namespace ov From a3d6d0bca952d206bb1c89eaf8d9114e2bc1a28a Mon Sep 17 00:00:00 2001 From: Andrey Kashchikhin Date: Wed, 11 Oct 2023 11:05:12 +0100 Subject: [PATCH 150/257] [CI] [GHA] Skip `test_div_uint8_cpu` on macOS only; unskip `test_onnx/test_backend.py` in GHA workflows (#20367) * only skip test if mac * unskip * unskip trigger * skip for onnx fe as well * do not skip * return skips and unskip test_backend in Python API 1.0 * rm pr trigger --------- Co-authored-by: Ilya Lavrenov --- .github/workflows/linux.yml | 3 +-- .github/workflows/mac.yml | 5 ++--- .github/workflows/windows.yml | 2 +- .../python/tests_compatibility/test_onnx/test_backend.py | 9 +++++++++ src/frontends/onnx/tests/tests_python/test_backend.py | 9 +++++++++ 5 files changed, 22 insertions(+), 6 deletions(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index a3c7e9a4e1c250..6bd6ef2342afbf 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -896,8 +896,7 @@ jobs: run: | python3 -m pytest -s 
${INSTALL_TEST_DIR}/pyngraph \ --junitxml=${INSTALL_TEST_DIR}/TEST-Pyngraph.xml \ - --ignore=${INSTALL_TEST_DIR}/pyngraph/tests_compatibility/test_onnx/test_zoo_models.py \ - --ignore=${INSTALL_TEST_DIR}/pyngraph/tests_compatibility/test_onnx/test_backend.py + --ignore=${INSTALL_TEST_DIR}/pyngraph/tests_compatibility/test_onnx/test_zoo_models.py - name: Python API 2.0 Tests run: | diff --git a/.github/workflows/mac.yml b/.github/workflows/mac.yml index 5097a6bb006b87..5f9658fd303f52 100644 --- a/.github/workflows/mac.yml +++ b/.github/workflows/mac.yml @@ -485,12 +485,11 @@ jobs: python3 -m pip install $ov_dev_wheel_name[mxnet,caffe,kaldi,onnx,tensorflow2] popd - - name: nGraph and IE Python Bindings Tests + - name: Python API 1.0 Tests run: | python3 -m pytest -s ${{ env.INSTALL_TEST_DIR }}/pyngraph \ --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-Pyngraph.xml \ - --ignore=${{ env.INSTALL_TEST_DIR }}/pyngraph/tests/test_onnx/test_zoo_models.py \ - --ignore=${{ env.INSTALL_TEST_DIR }}/pyngraph/tests/test_onnx/test_backend.py + --ignore=${{ env.INSTALL_TEST_DIR }}/pyngraph/tests/test_onnx/test_zoo_models.py - name: Python API 2.0 Tests run: | diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 70c7ac216121dc..c23419a4463a47 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -310,7 +310,7 @@ jobs: shell: cmd run: | set PYTHONPATH=${{ env.OPENVINO_REPO }}\tools\mo;${{ env.LAYER_TESTS_INSTALL_DIR }};%PYTHONPATH% - call "${{ env.INSTALL_DIR }}\\setupvars.bat" && python3 -m pytest -s ${{ env.INSTALL_TEST_DIR }}/pyngraph ${{ env.PYTHON_STATIC_ARGS }} --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-Pyngraph.xml --ignore=${{ env.INSTALL_TEST_DIR }}/pyngraph/tests_compatibility/test_onnx/test_zoo_models.py --ignore=${{ env.INSTALL_TEST_DIR }}/pyngraph/tests_compatibility/test_onnx/test_backend.py + call "${{ env.INSTALL_DIR }}\\setupvars.bat" && python3 -m pytest -s ${{ env.INSTALL_TEST_DIR }}/pyngraph ${{ env.PYTHON_STATIC_ARGS }} --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-Pyngraph.xml --ignore=${{ env.INSTALL_TEST_DIR }}/pyngraph/tests_compatibility/test_onnx/test_zoo_models.py - name: Python API 2.0 Tests shell: cmd diff --git a/src/bindings/python/tests_compatibility/test_onnx/test_backend.py b/src/bindings/python/tests_compatibility/test_onnx/test_backend.py index 87f53223c2d672..396cddb80a598f 100644 --- a/src/bindings/python/tests_compatibility/test_onnx/test_backend.py +++ b/src/bindings/python/tests_compatibility/test_onnx/test_backend.py @@ -3,6 +3,8 @@ import logging +from sys import platform + import onnx.backend.test from tests_compatibility import ( BACKEND_NAME, @@ -32,6 +34,7 @@ xfail_issue_48052, xfail_issue_52463, xfail_issue_58033, + xfail_issue_58676, xfail_issue_63033, xfail_issue_63036, xfail_issue_63043, @@ -809,6 +812,12 @@ def expect_fail(test_case_path, xfail): # type: (str) -> None ), ] +if platform == 'darwin': + tests_expected_to_fail.append(( + xfail_issue_58676, + "OnnxBackendNodeModelTest.test_div_uint8_cpu" + )) + for test_group in tests_expected_to_fail: for test_case in test_group[1:]: expect_fail("{}".format(test_case), test_group[0]) diff --git a/src/frontends/onnx/tests/tests_python/test_backend.py b/src/frontends/onnx/tests/tests_python/test_backend.py index d1ef686bdd4124..d75cfcf77aeefd 100644 --- a/src/frontends/onnx/tests/tests_python/test_backend.py +++ b/src/frontends/onnx/tests/tests_python/test_backend.py @@ -4,6 +4,8 @@ import logging +from sys import platform + import onnx.backend.test from tests 
import ( BACKEND_NAME, @@ -32,6 +34,7 @@ xfail_issue_48052, xfail_issue_52463, xfail_issue_58033, + xfail_issue_58676, xfail_issue_63033, xfail_issue_63036, xfail_issue_63043, @@ -683,6 +686,12 @@ def expect_fail(test_case_path, xfail): # type: (str) -> None ), ] +if platform == 'darwin': + tests_expected_to_fail.append(( + xfail_issue_58676, + "OnnxBackendNodeModelTest.test_div_uint8_cpu" + )) + for test_group in tests_expected_to_fail: for test_case in test_group[1:]: expect_fail(f"{test_case}", test_group[0]) From b345f3c324ae700d55211d4a4d071152a47c4449 Mon Sep 17 00:00:00 2001 From: Roman Lyamin Date: Wed, 11 Oct 2023 14:42:33 +0400 Subject: [PATCH 151/257] [GPU] Fix high latency for LLMs on dGPU (#20328) --- .../prepare_primitive_fusing.cpp | 31 +++++++++++++++++-- .../passes/prepare_primitive_fusing_test.cpp | 13 +++++++- 2 files changed, 41 insertions(+), 3 deletions(-) diff --git a/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_primitive_fusing.cpp b/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_primitive_fusing.cpp index d50d306700b902..45739c78a1e36a 100644 --- a/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_primitive_fusing.cpp +++ b/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_primitive_fusing.cpp @@ -1221,8 +1221,9 @@ void prepare_primitive_fusing::fuse_constant_transposes(program& p) { return format::find_format(new_order, fmt.block_sizes()); }; - auto itr = p.get_processing_order().begin(); - while (itr != p.get_processing_order().end()) { + auto& proc_order = p.get_processing_order(); + auto itr = proc_order.begin(); + while (itr != proc_order.end()) { auto& node = *itr++; if (!node->is_type()) @@ -1271,6 +1272,32 @@ void prepare_primitive_fusing::fuse_constant_transposes(program& p) { p.replace(prev_const, new_const_node); new_const_node.recalc_output_layout(false); + + // Add format reorder in case of onednn to avoid overhead during execution on weights memory allocation + if (_lo.get_preferred_impl_type(const_cast(*weightable_node), format::any /*dummy*/) == impl_types::onednn) { + auto next_node = new_const_node.get_users().front(); + bool can_be_fused = next_node->is_type() && + next_node->as().is_simple_reorder() && + next_node->get_users().size() == 1; + if (can_be_fused) { + layout reorder_layout = next_node->get_output_layout(); + reorder_layout.format = format::bfyx; + + auto new_reorder = std::make_shared(next_node->id() + "_reorder_fmt", new_const_node.id(), reorder_layout); + auto& new_reorder_node = p.get_or_create(new_reorder); + p.replace(*next_node, new_reorder_node); + new_reorder_node.recalc_output_layout(false); + itr = std::find(proc_order.begin(), proc_order.end(), &new_reorder_node); + } else { + layout reorder_layout = new_const_node.get_output_layout(); + reorder_layout.format = format::bfyx; + + auto new_reorder = std::make_shared(new_const_node.id() + "_reorder_fmt", new_const_node.id(), reorder_layout); + auto& new_reorder_node = p.get_or_create(std::move(new_reorder)); + p.add_intermediate(new_reorder_node, *new_const_node.get_users().front(), new_const_node); + new_reorder_node.recalc_output_layout(false); + } + } } } diff --git a/src/plugins/intel_gpu/tests/unit/passes/prepare_primitive_fusing_test.cpp b/src/plugins/intel_gpu/tests/unit/passes/prepare_primitive_fusing_test.cpp index f19140d24593ff..a5f1d9e4706b36 100644 --- a/src/plugins/intel_gpu/tests/unit/passes/prepare_primitive_fusing_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/passes/prepare_primitive_fusing_test.cpp @@ -528,7 +528,7 @@ 
TEST(prepare_primitive_fusing, fuse_constant_transposes_removal_check) { input_layout("input", input->get_layout()), data("weights", weights), permute("permute", input_info("weights"), {1, 0}), - reorder("reorder_dt", input_info("permute"), format::bfyx, data_types::f16), + reorder("reorder_dt", input_info("permute"), format::fbyx, data_types::f16), fully_connected("fc", input_info("input"), { "reorder_dt" }, "", data_types::f16) ); @@ -536,13 +536,24 @@ TEST(prepare_primitive_fusing, fuse_constant_transposes_removal_check) { config.set_property(ov::intel_gpu::optimize_data(true)); config.set_property(ov::intel_gpu::allow_new_shape_infer(true)); + if (engine.get_device_info().supports_immad) { + ov::intel_gpu::ImplementationDesc fc_impl = { format::bfyx, "", impl_types::onednn }; + config.set_property(ov::intel_gpu::force_implementations(ov::intel_gpu::ImplForcingMap{ {"fc", fc_impl} })); + } + auto prog = program::build_program(engine, topology, config, false, true); layout_optimizer lo(true); + lo.set_implementation_forcing(config.get_property(ov::intel_gpu::force_implementations)); program_wrapper::apply_opt_pass(*prog, lo); ASSERT_TRUE(!has_node(*prog, "permute")); ASSERT_EQ(prog->get_node("weights").get_output_layout().format, format::fbyx); + + if (engine.get_device_info().supports_immad) { + ASSERT_TRUE(has_node(*prog, "reorder_dt")); + ASSERT_EQ(prog->get_node("reorder_dt").get_output_layout().format, format::bfyx); + } } TEST(prepare_primitive_fusing, fuse_constant_transposes_accuracy_test) { From 0bb645039850eb5a11a1a8977592879ebd0ee165 Mon Sep 17 00:00:00 2001 From: Roman Kazantsev Date: Wed, 11 Oct 2023 15:24:32 +0400 Subject: [PATCH 152/257] [TF FE] Support TF 2.14 and add OnesLike translator (#20385) * [TF FE] Support TF 2.14 and add OnesLike translator Signed-off-by: Kazantsev, Roman * Update tests constraints * Update open_model_zoo * Adopt TF Lite test to 2.14 TF Signed-off-by: Kazantsev, Roman * Support TF Lite layer tests for diffrent TF versions --------- Signed-off-by: Kazantsev, Roman --- src/bindings/python/constraints.txt | 2 +- src/frontends/tensorflow/src/op_table.cpp | 1 + .../include/common_op_table.hpp | 1 + .../tensorflow_common/src/op/ones_like.cpp | 46 +++++++++++++++++++ tests/constraints.txt | 2 +- .../common/tflite_layer_test_class.py | 11 ++++- .../test_tfl_FullyConnected.py | 2 +- .../tensorflow_tests/test_tf_OnesLike.py | 45 ++++++++++++++++++ thirdparty/open_model_zoo | 2 +- tools/mo/requirements_tf.txt | 2 +- tools/mo/requirements_tf2.txt | 2 +- 11 files changed, 109 insertions(+), 7 deletions(-) create mode 100644 src/frontends/tensorflow_common/src/op/ones_like.cpp create mode 100644 tests/layer_tests/tensorflow_tests/test_tf_OnesLike.py diff --git a/src/bindings/python/constraints.txt b/src/bindings/python/constraints.txt index 9178eef451d033..9db99017681f4f 100644 --- a/src/bindings/python/constraints.txt +++ b/src/bindings/python/constraints.txt @@ -17,7 +17,7 @@ patchelf<=0.17.2.1 # Frontends docopt~=0.6.2 paddlepaddle==2.5.1 -tensorflow>=1.15.5,<2.14.0 +tensorflow>=1.15.5,<2.15.0 six~=1.16.0 protobuf>=3.18.1,<4.0.0 onnx==1.14.1 diff --git a/src/frontends/tensorflow/src/op_table.cpp b/src/frontends/tensorflow/src/op_table.cpp index b1313ec07ba826..fce3af3f0a235b 100644 --- a/src/frontends/tensorflow/src/op_table.cpp +++ b/src/frontends/tensorflow/src/op_table.cpp @@ -211,6 +211,7 @@ const std::map get_supported_ops() { {"NoOp", CreatorFunction(translate_no_op)}, // do nothing {"OneHot", CreatorFunction(translate_one_hot_op)}, 
{"OneShotIterator", CreatorFunction(translate_iterator_op)}, + {"OnesLike", CreatorFunction(translate_ones_like_op)}, {"Pack", CreatorFunction(translate_pack_op)}, {"Pad", CreatorFunction(translate_pad_op)}, {"PadV2", CreatorFunction(translate_padv2_op)}, diff --git a/src/frontends/tensorflow_common/include/common_op_table.hpp b/src/frontends/tensorflow_common/include/common_op_table.hpp index 457a99de302d53..17a865acfb0e99 100644 --- a/src/frontends/tensorflow_common/include/common_op_table.hpp +++ b/src/frontends/tensorflow_common/include/common_op_table.hpp @@ -97,6 +97,7 @@ OP_CONVERTER(translate_placeholder_op); OP_CONVERTER(translate_placeholder_with_default_op); OP_CONVERTER(translate_no_op); OP_CONVERTER(translate_one_hot_op); +OP_CONVERTER(translate_ones_like_op); OP_CONVERTER(translate_pack_op); OP_CONVERTER(translate_pad_op); OP_CONVERTER(translate_padv2_op); diff --git a/src/frontends/tensorflow_common/src/op/ones_like.cpp b/src/frontends/tensorflow_common/src/op/ones_like.cpp new file mode 100644 index 00000000000000..2084f3db1919cd --- /dev/null +++ b/src/frontends/tensorflow_common/src/op/ones_like.cpp @@ -0,0 +1,46 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "common_op_table.hpp" +#include "openvino/op/broadcast.hpp" +#include "openvino/op/concat.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/shape_of.hpp" +#include "openvino/op/squeeze.hpp" +#include "utils.hpp" + +using namespace std; +using namespace ov::op; + +namespace ov { +namespace frontend { +namespace tensorflow { +namespace op { + +OutputVector translate_ones_like_op(const NodeContext& node) { + default_op_checks(node, 1, {"OnesLike"}); + auto x = node.get_input(0); + Output shape_of = make_shared(x, element::i32); + auto one_const = create_same_type_const_scalar(x, 1); + + // in case of x to be scalar, we need handle it more specifically + // since Broadcast supports only broadcasting to rank greater 0 + // we have to introduce extra dimension for input scalar case + auto one_dim = make_shared(element::i32, Shape{1}, 1); + shape_of = make_shared(OutputVector{one_dim, shape_of}, 0); + + // create a tensor of zeros of shape with extra dimension + Output ones_like = make_shared(one_const, shape_of); + // remove extra dimension by squeezing + auto zero_dim_ind = make_shared(element::i32, Shape{1}, 0); + ones_like = make_shared(ones_like, zero_dim_ind); + + set_node_name(node.get_name(), ones_like.get_node_shared_ptr()); + return {ones_like}; +} + +} // namespace op +} // namespace tensorflow +} // namespace frontend +} // namespace ov diff --git a/tests/constraints.txt b/tests/constraints.txt index 7abffee14c8ce2..3d6aac51540044 100644 --- a/tests/constraints.txt +++ b/tests/constraints.txt @@ -11,7 +11,7 @@ scipy>=1.11.1; python_version >= "3.9" wheel>=0.38.1 defusedxml>=0.7.1 fastjsonschema~=2.17.1 -tensorflow>=2.5,<2.14.0 +tensorflow>=2.5,<2.15.0 test-generator==0.1.2 requests>=2.25.1 opencv-python>=4.5 diff --git a/tests/layer_tests/common/tflite_layer_test_class.py b/tests/layer_tests/common/tflite_layer_test_class.py index b7d6a2043b9366..8ff5122d8d43bc 100644 --- a/tests/layer_tests/common/tflite_layer_test_class.py +++ b/tests/layer_tests/common/tflite_layer_test_class.py @@ -1,6 +1,7 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import os + os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' import tensorflow as tf from tensorflow.lite.tools import flatbuffer_utils as utils @@ -62,7 +63,15 @@ def 
check_tflite_model_has_only_allowed_ops(self): else: op_names.append(builtin_operators[op.builtinCode]) op_names = sorted(op_names) - assert op_names == self.allowed_ops, "TFLite model is not as you expect it to be: " + ", ".join(op_names) + if isinstance(self.allowed_ops, tuple): + passed = False + for allowed_ops_var in self.allowed_ops: + if op_names == allowed_ops_var: + passed = True + break + assert passed, "TFLite model is not as you expect it to be: " + ", ".join(op_names) + else: + assert op_names == self.allowed_ops, "TFLite model is not as you expect it to be: " + ", ".join(op_names) def _test(self, ie_device, precision, temp_dir, params): model = self.make_model(params) diff --git a/tests/layer_tests/tensorflow_lite_tests/test_tfl_FullyConnected.py b/tests/layer_tests/tensorflow_lite_tests/test_tfl_FullyConnected.py index a8119813dd4304..67f0e699eb2963 100644 --- a/tests/layer_tests/tensorflow_lite_tests/test_tfl_FullyConnected.py +++ b/tests/layer_tests/tensorflow_lite_tests/test_tfl_FullyConnected.py @@ -13,7 +13,7 @@ class TestTFLiteFullyConnectedLayerTest(TFLiteLayerTest): inputs = ["Input_x", "Input_y"] outputs = ["FullyConnected"] - allowed_ops = ['FULLY_CONNECTED'] + allowed_ops = (['FULLY_CONNECTED'], ['BATCH_MATMUL']) def make_model(self, params): assert len(set(params.keys()).intersection({'shape_x', 'shape_y'})) == 2, \ diff --git a/tests/layer_tests/tensorflow_tests/test_tf_OnesLike.py b/tests/layer_tests/tensorflow_tests/test_tf_OnesLike.py new file mode 100644 index 00000000000000..71b146edc9c335 --- /dev/null +++ b/tests/layer_tests/tensorflow_tests/test_tf_OnesLike.py @@ -0,0 +1,45 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import pytest +import tensorflow as tf +from common.tf_layer_test_class import CommonTFLayerTest + + +class TestOnesLike(CommonTFLayerTest): + def _prepare_input(self, inputs_info): + assert 'x' in inputs_info + x_shape = inputs_info['x'] + inputs_data = {} + rng = np.random.default_rng() + inputs_data['x'] = rng.integers(-10, 10, x_shape).astype(self.x_type) + return inputs_data + + def create_ones_like_net(self, x_shape, x_type): + self.x_type = x_type + tf.compat.v1.reset_default_graph() + # Create the graph and model + with tf.compat.v1.Session() as sess: + x = tf.compat.v1.placeholder(tf.dtypes.as_dtype(x_type), x_shape, 'x') + tf.raw_ops.OnesLike(x=x) + tf.compat.v1.global_variables_initializer() + tf_net = sess.graph_def + + return tf_net, None + + test_data_basic = [ + dict(x_shape=[], x_type=np.float32), + dict(x_shape=[2], x_type=np.int32), + dict(x_shape=[2, 3, 4], x_type=np.float32), + dict(x_shape=[1, 4, 3, 1], x_type=np.int32), + ] + + @pytest.mark.parametrize("params", test_data_basic) + @pytest.mark.precommit_tf_fe + @pytest.mark.nightly + def test_ones_like(self, params, ie_device, precision, ir_version, temp_dir, + use_new_frontend, use_old_api): + self._test(*self.create_ones_like_net(**params), + ie_device, precision, ir_version, temp_dir=temp_dir, + use_new_frontend=use_new_frontend, use_old_api=use_old_api) diff --git a/thirdparty/open_model_zoo b/thirdparty/open_model_zoo index d831efe10e7af4..e0e434f64a4da0 160000 --- a/thirdparty/open_model_zoo +++ b/thirdparty/open_model_zoo @@ -1 +1 @@ -Subproject commit d831efe10e7af426ceba0b6ad65dbb5e5fc82beb +Subproject commit e0e434f64a4da07274c31c1aae48fbdcfa087fb0 diff --git a/tools/mo/requirements_tf.txt b/tools/mo/requirements_tf.txt index 240b60351a6cad..f5891c136b2830 100644 --- 
a/tools/mo/requirements_tf.txt +++ b/tools/mo/requirements_tf.txt @@ -1,5 +1,5 @@ -c ../constraints.txt -tensorflow>=1.15.5,<2.14.0 +tensorflow>=1.15.5,<2.15.0 numpy>=1.16.6,<1.26 networkx defusedxml diff --git a/tools/mo/requirements_tf2.txt b/tools/mo/requirements_tf2.txt index 1b955f23d0feea..7992cc93a3a8b1 100644 --- a/tools/mo/requirements_tf2.txt +++ b/tools/mo/requirements_tf2.txt @@ -1,5 +1,5 @@ -c ../constraints.txt -tensorflow>=2.5,<2.14.0 +tensorflow>=2.5,<2.15.0 numpy>=1.16.6,<1.26 networkx defusedxml From 5894fbe69d5b05377471bf7364d5ec77f6d90e5f Mon Sep 17 00:00:00 2001 From: Vladislav Golubev Date: Wed, 11 Oct 2023 13:25:00 +0200 Subject: [PATCH 153/257] [CPU] Group & NF4 decompression transformation support (#20039) --- src/inference/dev_api/ie_ngraph_utils.hpp | 2 + src/inference/include/ie/ie_precision.hpp | 37 ++- .../intel_cpu/src/dnnl_postops_composer.cpp | 31 ++- .../intel_cpu/src/dnnl_postops_composer.h | 6 +- src/plugins/intel_cpu/src/graph_optimizer.cpp | 52 +++- .../intel_cpu/src/nodes/fullyconnected.cpp | 36 +-- .../intel_cpu/src/nodes/fullyconnected.h | 9 +- .../transformation_pipeline.cpp | 23 +- .../src/matmul_weights_decompression.cpp | 225 ++++++++++++------ .../ov_models/include/ov_models/builders.hpp | 1 + 10 files changed, 271 insertions(+), 151 deletions(-) diff --git a/src/inference/dev_api/ie_ngraph_utils.hpp b/src/inference/dev_api/ie_ngraph_utils.hpp index 57ea7a00036d51..aeaa0b4d9b0ba2 100644 --- a/src/inference/dev_api/ie_ngraph_utils.hpp +++ b/src/inference/dev_api/ie_ngraph_utils.hpp @@ -101,6 +101,8 @@ INFERENCE_ENGINE_1_0_DEPRECATED inline Precision convertPrecision(const ::ngraph return Precision(Precision::BIN); case ::ngraph::element::Type_t::boolean: return Precision(Precision::BOOL); + case ::ngraph::element::Type_t::nf4: + return Precision(Precision::NF4); case ::ngraph::element::Type_t::dynamic: return Precision(Precision::UNSPECIFIED); default: diff --git a/src/inference/include/ie/ie_precision.hpp b/src/inference/include/ie/ie_precision.hpp index 099a1d9881f593..17b995244e253b 100644 --- a/src/inference/include/ie/ie_precision.hpp +++ b/src/inference/include/ie/ie_precision.hpp @@ -41,6 +41,7 @@ class INFERENCE_ENGINE_1_0_DEPRECATED Precision { FP16 = 11, /**< 16bit floating point value, 5 bit for exponent, 10 bit for mantisa */ BF16 = 12, /**< 16bit floating point value, 8 bit for exponent, 7 bit for mantisa*/ FP64 = 13, /**< 64bit floating point value */ + NF4 = 14, /**< 4bit normalized float value */ Q78 = 20, /**< 16bit specific signed fixed point precision */ I16 = 30, /**< 16bit signed integer value */ U4 = 39, /**< 4bit unsigned integer value */ @@ -131,6 +132,7 @@ class INFERENCE_ENGINE_1_0_DEPRECATED Precision { CASE(FP64, double); CASE2(FP16, int16_t, uint16_t); CASE2(BF16, int16_t, uint16_t); + CASE(NF4, int8_t); CASE2(I4, int8_t, uint8_t); CASE(I8, int8_t); CASE(I16, int16_t); @@ -249,24 +251,11 @@ class INFERENCE_ENGINE_1_0_DEPRECATED Precision { static Precision FromStr(const std::string& str) { static const std::unordered_map names = { #define PRECISION_NAME(s) {#s, s} - PRECISION_NAME(Q78), - PRECISION_NAME(BOOL), - PRECISION_NAME(BF16), - PRECISION_NAME(I4), - PRECISION_NAME(I8), - PRECISION_NAME(I16), - PRECISION_NAME(I32), - PRECISION_NAME(I64), - PRECISION_NAME(U4), - PRECISION_NAME(U8), - PRECISION_NAME(U16), - PRECISION_NAME(U32), - PRECISION_NAME(U64), - PRECISION_NAME(FP32), - PRECISION_NAME(FP64), - PRECISION_NAME(FP16), - PRECISION_NAME(MIXED), - PRECISION_NAME(BIN), + PRECISION_NAME(Q78), PRECISION_NAME(BOOL), 
PRECISION_NAME(BF16), PRECISION_NAME(I4), + PRECISION_NAME(I8), PRECISION_NAME(I16), PRECISION_NAME(I32), PRECISION_NAME(I64), + PRECISION_NAME(U4), PRECISION_NAME(U8), PRECISION_NAME(U16), PRECISION_NAME(U32), + PRECISION_NAME(U64), PRECISION_NAME(FP32), PRECISION_NAME(FP64), PRECISION_NAME(FP16), + PRECISION_NAME(MIXED), PRECISION_NAME(NF4), PRECISION_NAME(BIN), #undef PRECISION_NAME }; auto i = names.find(str); @@ -311,7 +300,8 @@ class INFERENCE_ENGINE_1_0_DEPRECATED Precision { (precisionInfo.value == Precision::I16) || (precisionInfo.value == Precision::I8) || (precisionInfo.value == Precision::I32) || (precisionInfo.value == Precision::I64) || (precisionInfo.value == Precision::BIN) || (precisionInfo.value == Precision::BF16) || - (precisionInfo.value == Precision::CUSTOM) || (precisionInfo.value == Precision::I4); + (precisionInfo.value == Precision::CUSTOM) || (precisionInfo.value == Precision::I4) || + (precisionInfo.value == Precision::NF4); } protected: @@ -359,6 +349,7 @@ class INFERENCE_ENGINE_1_0_DEPRECATED Precision { CASE(FP64); CASE(FP16); CASE(BF16); + CASE(NF4); CASE(I4); CASE(I8); CASE(I16); @@ -475,6 +466,12 @@ struct INFERENCE_ENGINE_1_0_DEPRECATED PrecisionTrait { enum { is_float = false }; }; +template <> +struct INFERENCE_ENGINE_1_0_DEPRECATED PrecisionTrait { + using value_type = int8_t; + enum { is_float = false }; +}; + template INFERENCE_ENGINE_1_0_DEPRECATED inline uint8_t type_size_or_zero() { return sizeof(T); @@ -499,7 +496,7 @@ INFERENCE_ENGINE_1_0_DEPRECATED inline Precision::PrecisionInfo Precision::makeP Precision::PrecisionInfo info; info.name = name; - size_t nBits = precision == BIN ? 1 : (precision == U4 || precision == I4) ? 4 : 8; + size_t nBits = precision == BIN ? 1 : (precision == U4 || precision == I4 || precision == NF4) ? 
4 : 8; info.bitsSize = nBits * type_size_or_zero::value_type>(); info.isFloat = PrecisionTrait::is_float; info.value = precision; diff --git a/src/plugins/intel_cpu/src/dnnl_postops_composer.cpp b/src/plugins/intel_cpu/src/dnnl_postops_composer.cpp index c5e368ebd5efc0..1f85dd1f3ffbc6 100644 --- a/src/plugins/intel_cpu/src/dnnl_postops_composer.cpp +++ b/src/plugins/intel_cpu/src/dnnl_postops_composer.cpp @@ -251,41 +251,48 @@ void DnnlPostOpsComposer::appendClip(const std::vector& low, const std::v } } -MemoryPtr DnnlPostOpsComposer::prepackDecompressionParams(const std::vector& params, size_t icBlock) { +MemoryPtr DnnlPostOpsComposer::prepackDecompressionParams(const MemoryCPtr& params_ptr, size_t icBlock) { // Prepacking params from [oc] to [oc, icBlock] layout, where for each icBlock corresponding parameter is duplicated - DnnlBlockedMemoryDesc memoryDesc(InferenceEngine::Precision::FP32, Shape({icBlock * params.size()})); + const auto shape = params_ptr->getShape().getStaticDims(); + const size_t elements_count = std::accumulate(shape.begin(), shape.end(), 1, std::multiplies()); + DnnlBlockedMemoryDesc memoryDesc(InferenceEngine::Precision::FP32, Shape({icBlock * elements_count})); auto mem = std::make_shared(engine, memoryDesc); size_t dstIdx = 0; + auto decomp_scales_data = static_cast(params_ptr->getData()); auto decomp_scales_buf = static_cast(mem->getData()); - for (size_t oc = 0; oc < params.size(); oc++) { + for (size_t oc = 0; oc < elements_count; oc++) { for (size_t intIdx = 0; intIdx < icBlock; intIdx++) { - decomp_scales_buf[dstIdx] = params[oc]; + decomp_scales_buf[dstIdx] = decomp_scales_data[oc]; dstIdx++; } } return mem; } -void DnnlPostOpsComposer::appendDecompressionScales(const std::vector& scales, size_t icBlock) { - if (scales.empty()) +void DnnlPostOpsComposer::appendDecompressionScales(const MemoryCPtr& scales_ptr, size_t icBlock) { + if (scales_ptr == nullptr) return; - int mask = scales.size() > 1 ? weightScaleMaskPerChannel : 0; + const auto shape = scales_ptr->getShape().getStaticDims(); + const auto elements_count = std::accumulate(shape.begin(), shape.end(), 1, std::multiplies()); + int mask = elements_count > 1 ? weightScaleMaskPerChannel : 0; DEBUG_LOG("Set weights scales mask ", "DNNL_ARG: ", DNNL_ARG_WEIGHTS, " mask: ", mask); attr.set_scales_mask(DNNL_ARG_WEIGHTS, mask); - args[DNNL_ARG_ATTR_SCALES | DNNL_ARG_WEIGHTS] = prepackDecompressionParams(scales, icBlock); + args[DNNL_ARG_ATTR_SCALES | DNNL_ARG_WEIGHTS] = prepackDecompressionParams(scales_ptr, icBlock); } -void DnnlPostOpsComposer::appendDecompressionZeroPoints(const std::vector& zero_points, size_t icBlock) { - if (zero_points.empty()) +void DnnlPostOpsComposer::appendDecompressionZeroPoints(const MemoryCPtr& zero_points_ptr, size_t icBlock) { + if (zero_points_ptr == nullptr) return; - int mask = zero_points.size() > 1 ? weightScaleMaskPerChannel : 0; + const auto shape = zero_points_ptr->getShape().getStaticDims(); + const auto elements_count = std::accumulate(shape.begin(), shape.end(), 1, std::multiplies()); + int mask = elements_count > 1 ? 
weightScaleMaskPerChannel : 0; DEBUG_LOG("Set weights zero points mask ", "DNNL_ARG: ", DNNL_ARG_WEIGHTS, " mask: ", mask); attr.set_zero_points_mask(DNNL_ARG_WEIGHTS, mask); - args[DNNL_ARG_ATTR_ZERO_POINTS | DNNL_ARG_WEIGHTS] = prepackDecompressionParams(zero_points, icBlock); + args[DNNL_ARG_ATTR_ZERO_POINTS | DNNL_ARG_WEIGHTS] = prepackDecompressionParams(zero_points_ptr, icBlock); } } // namespace intel_cpu diff --git a/src/plugins/intel_cpu/src/dnnl_postops_composer.h b/src/plugins/intel_cpu/src/dnnl_postops_composer.h index 365ab0a6f32239..fd52863ed7a9bd 100644 --- a/src/plugins/intel_cpu/src/dnnl_postops_composer.h +++ b/src/plugins/intel_cpu/src/dnnl_postops_composer.h @@ -42,8 +42,8 @@ class DnnlPostOpsComposer { bool appendLinear(const std::vector& scale, const std::vector& shift, bool isLastPostOp, bool allowBinary = true); void appendClip(const std::vector& low, const std::vector& high); - void appendDecompressionScales(const std::vector& scales, size_t icBlock); - void appendDecompressionZeroPoints(const std::vector& zero_points, size_t icBlock); + void appendDecompressionScales(const MemoryCPtr& scales_ptr, size_t icBlock); + void appendDecompressionZeroPoints(const MemoryCPtr& zero_points_ptr, size_t icBlock); const VectorDims& getOutputDims() { return outputDims; @@ -69,7 +69,7 @@ class DnnlPostOpsComposer { void updateWeiScales(); void updateDestScales(); - MemoryPtr prepackDecompressionParams(const std::vector& params, size_t icBlock); + MemoryPtr prepackDecompressionParams(const MemoryCPtr& params_ptr, size_t icBlock); }; } // namespace intel_cpu diff --git a/src/plugins/intel_cpu/src/graph_optimizer.cpp b/src/plugins/intel_cpu/src/graph_optimizer.cpp index 791ff04021737c..770527c09aa334 100644 --- a/src/plugins/intel_cpu/src/graph_optimizer.cpp +++ b/src/plugins/intel_cpu/src/graph_optimizer.cpp @@ -286,7 +286,7 @@ void GraphOptimizer::FuseConvMatmulFCDeconvAndDQScales(Graph &graph) { } void GraphOptimizer::FuseFCAndWeightsDecompression(Graph &graph) { - const std::set supportedWeightsPrecisions{InferenceEngine::Precision::U8}; + std::set supportedWeightsPrecisions{InferenceEngine::Precision::U8, InferenceEngine::Precision::NF4}; const std::set supportedDataPrecisions{InferenceEngine::Precision::FP32, InferenceEngine::Precision::BF16}; auto expectedNode = [](NodePtr node, Type expectedType) { return node->getType() == expectedType && node->getChildEdges().size() == 1; @@ -301,11 +301,19 @@ void GraphOptimizer::FuseFCAndWeightsDecompression(Graph &graph) { if (fcNode == nullptr) continue; - const auto parent = fcNode->getParentEdgesAtPort(1)[0]->getParent(); + auto parent = fcNode->getParentEdgesAtPort(1)[0]->getParent(); const bool withTranspose = parent->getType() == Type::Transpose; const NodePtr transposeNode = withTranspose ? parent : nullptr; + if (transposeNode) + parent = transposeNode->getParentEdgesAtPort(0)[0]->getParent(); - const auto multiplyNode = withTranspose ? parent->getParentEdgesAtPort(0)[0]->getParent() : parent; + const bool withReshape = parent->getType() == Type::Reshape; + const auto reshapeNode = withReshape ? 
parent : nullptr; + if (reshapeNode) { + parent = reshapeNode->getParentEdgesAtPort(0)[0]->getParent(); + } + + const auto multiplyNode = parent; if (!expectedNode(multiplyNode, Type::Eltwise) || multiplyNode->getAlgorithm() != Algorithm::EltwiseMultiply || !multiplyNode->isConstant()) continue; @@ -346,23 +354,41 @@ void GraphOptimizer::FuseFCAndWeightsDecompression(Graph &graph) { // Shape limitations const auto weightsShape = weightsNode->getOutputShapeAtPort(0); - const auto fcInputWeightsShape = multiplyNode->getOutputShapeAtPort(0); - if (weightsShape != fcInputWeightsShape) + if (weightsShape != multiplyNode->getOutputShapeAtPort(0)) + continue; + if (reshapeNode && (reshapeNode->getInputShapeAtPort(0).getRank() != 3 || reshapeNode->getOutputShapeAtPort(0).getRank() != 2)) continue; - const auto expectedDims = withTranspose ? VectorDims{1, weightsShape.getDims()[1]} - : VectorDims{weightsShape.getDims()[0], 1}; - if (multiplyConstNode->getOutputShapeAtPort(0).getDims() != expectedDims) + VectorDims decompressionConstShape; + const auto fcInputWeightsShape = fcNode->getInputShapeAtPort(1); + // Ordinary case: one decompression group + if (fcInputWeightsShape.getRank() == weightsShape.getRank()) { + const auto& out_channels = fcInputWeightsShape.getDims()[0]; + decompressionConstShape = withTranspose ? VectorDims{1, out_channels} : VectorDims{out_channels, 1}; + } else { + // Group decompression case: last 3 dimension (there could be also prepending '1's in the beginning) of weights shape must be: + // [N, G, O], if transpose = true + // [O, N, G], otherwise. + // O - output channels + // N - number of groups + // G - group size + const auto& weights_dims = weightsShape.getStaticDims(); + const auto& N = withTranspose ? *(weights_dims.rbegin() + 2) : *(weights_dims.rbegin() + 1); + const auto& O = withTranspose ? *weights_dims.rbegin() : *(weights_dims.rbegin() + 2); + // Group decompression is applied by O and N dims + decompressionConstShape = withTranspose ? VectorDims{N, 1, O} : VectorDims{O, N, 1}; + } + if (multiplyConstNode->getOutputShapeAtPort(0).getDims() != decompressionConstShape) continue; - if (withSubtract && subtractConstNode->getOutputShapeAtPort(0).getDims() != expectedDims) + if (withSubtract && subtractConstNode->getOutputShapeAtPort(0).getDims() != decompressionConstShape) continue; // HW specific shape limitations if (impl::cpu::x64::mayiuse(impl::cpu::x64::avx512_core_amx)) { // OneDNN AMX IP implementation has limited shapes support due to performance considerations. As a current solution conditions below are copied // from OneDNN to make sure correct IP impl will be used since fallback one doesn't support weights decompression feature. - size_t OC = withTranspose ? weightsShape.getDims()[1] : weightsShape.getDims()[0]; - size_t IC = withTranspose ? 
weightsShape.getDims()[0] : weightsShape.getDims()[1]; + size_t OC = fcInputWeightsShape.getDims()[0]; + size_t IC = fcInputWeightsShape.getDims()[1]; size_t simdWidth = 16; size_t vnniFactor = 2; size_t maxSize = 512; @@ -398,6 +424,10 @@ void GraphOptimizer::FuseFCAndWeightsDecompression(Graph &graph) { transposeNode->setOriginalInputPrecisionAtPort(0, weightsPrecision); transposeNode->setOriginalOutputPrecisionAtPort(0, weightsPrecision); } + if (withReshape) { + reshapeNode->setOriginalInputPrecisionAtPort(0, weightsPrecision); + reshapeNode->setOriginalOutputPrecisionAtPort(0, weightsPrecision); + } fcNode->setOriginalInputPrecisionAtPort(1, weightsPrecision); } } diff --git a/src/plugins/intel_cpu/src/nodes/fullyconnected.cpp b/src/plugins/intel_cpu/src/nodes/fullyconnected.cpp index 117290ebcb0e52..3add267195ae34 100644 --- a/src/plugins/intel_cpu/src/nodes/fullyconnected.cpp +++ b/src/plugins/intel_cpu/src/nodes/fullyconnected.cpp @@ -729,10 +729,10 @@ void FullyConnected::setPostOps(dnnl::primitive_attr& attr, const VectorDims& di // and prepack runtime attributes accordingly for better performance bool withAMX = selected_pd->getImplementationType() & impl_desc_type::amx; int icBlock = withAMX ? 2 : 1; - if (!decompressionMultiply.empty()) - dnnlpoc.appendDecompressionScales(decompressionMultiply, icBlock); - if (!decompressionSubtract.empty()) - dnnlpoc.appendDecompressionZeroPoints(decompressionSubtract, icBlock); + if (decompressionMultiplyPtr) + dnnlpoc.appendDecompressionScales(decompressionMultiplyPtr, icBlock); + if (decompressionSubtractPtr) + dnnlpoc.appendDecompressionZeroPoints(decompressionSubtractPtr, icBlock); for (size_t i = 0; i < fusedWith.size(); ++i) { auto& node = fusedWith[i]; @@ -1133,26 +1133,32 @@ bool FullyConnected::useSparseWeightsDecompression() { } void FullyConnected::fuseDecompressionMultiply(const NodePtr& constData) { - fuseDecompressionConstant(constData, decompressionMultiply); + fuseDecompressionConstant(constData, decompressionMultiplyPtr); } void FullyConnected::fuseDecompressionSubtract(const NodePtr& constData) { - fuseDecompressionConstant(constData, decompressionSubtract); + fuseDecompressionConstant(constData, decompressionSubtractPtr); } -void FullyConnected::fuseDecompressionConstant(const NodePtr& constData, std::vector& decompressionValues) { +void FullyConnected::fuseDecompressionConstant(const NodePtr& constData, MemoryCPtr& decompressionValuesPtr) { auto *constInputNode = dynamic_cast(constData.get()); if (!constInputNode) { IE_THROW() << "Cannot cast " << constData->getName() << " to Input"; } - auto constBlob = constInputNode->getMemoryPtr(); - const auto elementsCount = constBlob->getDescWithType()->getPaddedElementsCount(); - decompressionValues.resize(elementsCount); - cpu_convert(constBlob->getData(), - &decompressionValues[0], - DnnlExtensionUtils::DataTypeToIEPrecision(constBlob->getDataType()), - Precision::FP32, - elementsCount); + const auto decompression_prc = InferenceEngine::Precision::FP32; + if (constInputNode->getOriginalOutputPrecisionAtPort(0) == decompression_prc) { + decompressionValuesPtr = constInputNode->getMemoryPtr(); + } else { + const auto constBlob = constInputNode->getMemoryPtr(); + DnnlBlockedMemoryDesc memoryDesc(decompression_prc, constBlob->getShape()); + decompressionValuesPtr = std::make_shared(getEngine(), memoryDesc, nullptr, false); + const auto elementsCount = constBlob->getDescWithType()->getPaddedElementsCount(); + cpu_convert(constBlob->getData(), + decompressionValuesPtr->getData(), + 
DnnlExtensionUtils::DataTypeToIEPrecision(constBlob->getDataType()), + Precision::FP32, + elementsCount); + } } DnnlMemoryDescPtr FullyConnected::makeTransposedWeightDescriptor(DnnlMemoryDescPtr desc) { diff --git a/src/plugins/intel_cpu/src/nodes/fullyconnected.h b/src/plugins/intel_cpu/src/nodes/fullyconnected.h index 8c25927ca06f36..956767bcea1219 100644 --- a/src/plugins/intel_cpu/src/nodes/fullyconnected.h +++ b/src/plugins/intel_cpu/src/nodes/fullyconnected.h @@ -61,10 +61,7 @@ class FullyConnected : public Node { } void fuseDecompressionMultiply(const NodePtr& constData); - const std::vector& getDecompressionMultiply() const { return decompressionMultiply; } - void fuseDecompressionSubtract(const NodePtr& constData); - const std::vector& getDecompressionSubtract() const { return decompressionSubtract; } private: void createDescriptorInternal(const dnnl::memory::desc &inputDesc, @@ -102,7 +99,7 @@ class FullyConnected : public Node { const dnnl::engine& engine); bool canBeExecutedInConv1x1() const; - void fuseDecompressionConstant(const NodePtr& constData, std::vector& decompressionValues); + void fuseDecompressionConstant(const NodePtr& constData, MemoryCPtr& decompressionValuesPtr); // sparse weights bool useSparseWeights = false; @@ -121,8 +118,8 @@ class FullyConnected : public Node { void prepareWeightsUsingDummyShape(); #endif bool useWeightsDecompressionImpl = false; - std::vector decompressionSubtract; - std::vector decompressionMultiply; + MemoryCPtr decompressionSubtractPtr = nullptr; + MemoryCPtr decompressionMultiplyPtr = nullptr; // FC with transposed weights bool weightsNonTransposed = false; diff --git a/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp b/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp index bff7379e4aa684..d8bb3454918f8c 100644 --- a/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp +++ b/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp @@ -207,9 +207,16 @@ void Transformations::PreLpt(const std::vector& defaultPrecis if (useLpt) { CPU_REGISTER_PASS_COMMON(manager, ov::pass::MarkDequantizationSubgraph, defaultPrecisions); } else { + // We need to fuse Transpose to MatMul to have a simpler callback for the next transformation + CPU_REGISTER_PASS_COMMON(manager, ov::pass::TransposeMatMul); + const ov::element::TypeVector decompression_precisions{ + ov::element::u8, + // TODO: Uncomment when group decompression is supported + // ov::element::nf4 + }; // MarkDequantizationSubgraph is used even in non-LPT pipeline on X64 platforms - // in order to keep compressed u8 MatMul weights with decompression operations as is - CPU_REGISTER_PASS_X64(manager, ov::pass::MarkDequantizationSubgraph, ov::element::TypeVector{ov::element::u8}, true); + // in order to keep compressed MatMul weights with decompression operations as is + CPU_REGISTER_PASS_X64(manager, ov::pass::MarkDequantizationSubgraph, decompression_precisions, true); CPU_SET_CALLBACK_X64(manager, [](const_node_ptr &node) -> bool { auto get_single_consumer = [](const_node_ptr &node) -> std::shared_ptr { const auto consumers = node->get_output_target_inputs(0); @@ -224,12 +231,14 @@ void Transformations::PreLpt(const std::vector& defaultPrecis if (ov::is_type(consumer)) { return false; - } else if (ov::is_type(consumer)) { - consumer = get_single_consumer(consumer); - if (consumer != nullptr && ov::is_type(consumer)) { - return false; - } } + // TODO: Uncomment when group decompression is supported + // else if 
(ov::is_type(consumer)) { + // consumer = get_single_consumer(consumer); + // if (consumer != nullptr && ov::is_type(consumer)) { + // return false; + // } + // } return true; }, ov::pass::MarkDequantizationSubgraph); } diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/matmul_weights_decompression.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/matmul_weights_decompression.cpp index 9fcd4d58689399..e29ed1f4bfce94 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/matmul_weights_decompression.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/matmul_weights_decompression.cpp @@ -14,9 +14,9 @@ using namespace ov::test; namespace SubgraphTestsDefinitions { /* - * Subtract_const(U8) + * Subtract_const(U8/NF4) * / - * Weights(U8) Convert(F32) + * Weights(U8/NF4) Convert(F32) * | / * Convert(F32) Reshape * \ / Multiply_const(F32) @@ -31,21 +31,34 @@ namespace SubgraphTestsDefinitions { * | * Bias */ -using MatmulWeightsDecompressionParams = std::tuple, // input shapes - ov::test::ElementType, // weights precision - bool, // transpose on weights - bool, // decompression subtract - bool, // reshape on decompression constants + +struct ShapeParams { + ShapeParams() = default; + ShapeParams(InputShape data_shape, ov::Shape weights_shape, int weights_group_size = -1) + : data_shape(std::move(data_shape)), + weights_shape(std::move(weights_shape)), + weights_group_size(weights_group_size) {} + + InputShape data_shape; + ov::Shape weights_shape; + // Decompression group size. If the value is equal to -1, ordinary decompression is used + int weights_group_size; +}; +using MatmulWeightsDecompressionParams = std::tuple, // additional config fusingSpecificParams, - bool>; // should use decompression implementation + bool>; // should use decompression implementation class MatmulWeightsDecompression : public testing::WithParamInterface, virtual public SubgraphBaseTest, public CpuTestWithFusing { public: static std::string getTestCaseName(testing::TestParamInfo obj) { - std::vector inputShapes; + ShapeParams shape_params; ov::test::ElementType weights_precision; bool transpose; bool decompression_sub; @@ -54,7 +67,7 @@ class MatmulWeightsDecompression : public testing::WithParamInterface initSubgraph(std::vector& inputShapes, - const ov::element::Type data_precision, - const ov::element::Type weights_precision, - const bool transpose_weights, - const bool add_subtract, - const bool reshape_on_decompression) { - ov::ParameterVector params{std::make_shared(data_precision, inputShapes[0])}; + std::shared_ptr initDecompressionWeights(const ov::Shape& weights_shape, + const int group_size, + const ov::element::Type data_precision, + const ov::element::Type weights_precision, + const bool transpose_weights, + const bool add_subtract, + const bool reshape_on_decompression_constant) { auto transpose_if_necessary = [&](const ov::Shape& shape) { - if (!transpose_weights) - return shape; - auto transposed_shape = shape; - std::swap(*transposed_shape.rbegin(), *(transposed_shape.rbegin() + 1)); - return transposed_shape; + auto result_shape = shape; + if (transpose_weights) + std::swap(*result_shape.rbegin(), *(result_shape.rbegin() + 1)); + return result_shape; }; - auto weights_shape = transpose_if_necessary(inputShapes[1].to_shape()); - auto weights = ngraph::builder::makeConstant(weights_precision, weights_shape, {}, true); + const bool group_decompression = group_size != -1; + // Weights has shape [I, O], where + // I - input channels + // O - 
output channels + // In case of group decompression, input channels dimension is split into 2: I -> [N, G], where + // N - number of groups + // G - group size + auto transformed_weights_shape = transpose_if_necessary(weights_shape); + if (group_decompression) { + OPENVINO_ASSERT(weights_shape[0] % group_size == 0, + "Weights output channels count (", + weights_shape[0], + ") must be divisible by decompression group size (", + group_size, + ")."); + auto in_channel_idx = transpose_weights ? transformed_weights_shape.size() - 1 : transformed_weights_shape.size() - 2; + transformed_weights_shape[in_channel_idx] = weights_shape[0] / group_size; + transformed_weights_shape.insert(transformed_weights_shape.begin() + in_channel_idx + 1, group_size); + } + auto weights = ngraph::builder::makeConstant(weights_precision, transformed_weights_shape, {}, true); weights->set_friendly_name("Compressed_weights"); auto weights_convert = std::make_shared(weights, data_precision); std::shared_ptr mul_parent = weights_convert; - auto output_channels = transpose_weights ? *(weights_shape.rbegin() + 1) : *weights_shape.rbegin(); - auto scaleshift_target_shape = transpose_if_necessary(ov::Shape{1, output_channels}); - auto scaleshift_const_shape = reshape_on_decompression ? ov::Shape{output_channels} : scaleshift_target_shape; + auto output_channels = *weights_shape.rbegin(); + + // Decompression constants shape: + // Ordinary decompression: [O, 1] + // Group decompression: [O, N, 1] + ov::Shape scaleshift_target_shape{output_channels}; + scaleshift_target_shape.insert(scaleshift_target_shape.begin(), group_decompression ? weights_shape[0] / group_size : 1); + scaleshift_target_shape = transpose_if_necessary(scaleshift_target_shape); + if (group_decompression) { + auto in_channel_idx = transpose_weights ? scaleshift_target_shape.size() - 1 : scaleshift_target_shape.size() - 2; + scaleshift_target_shape.insert(scaleshift_target_shape.begin() + in_channel_idx + 1, 1); + } + + auto scaleshift_const_shape = scaleshift_target_shape; + if (reshape_on_decompression_constant) + scaleshift_const_shape.erase(std::remove(scaleshift_const_shape.begin(), scaleshift_const_shape.end(), 1), scaleshift_const_shape.end()); if (add_subtract) { auto shift_const = ngraph::builder::makeConstant(weights_precision, scaleshift_const_shape, {}, true); std::shared_ptr shift_convert = std::make_shared(shift_const, data_precision); - if (reshape_on_decompression) { + if (reshape_on_decompression_constant) { auto shift_reshape_const = ov::opset10::Constant::create(ov::element::i32, {scaleshift_target_shape.size()}, scaleshift_target_shape); auto shift_reshape = std::make_shared(shift_convert, shift_reshape_const, false); shift_convert = shift_reshape; @@ -130,31 +162,54 @@ class MatmulWeightsDecompression : public testing::WithParamInterface scale_const = ngraph::builder::makeConstant(data_precision, scaleshift_const_shape, {}, true); - if (reshape_on_decompression) { + if (reshape_on_decompression_constant) { auto scale_reshape_const = ov::opset10::Constant::create(ov::element::i32, {scaleshift_target_shape.size()}, scaleshift_target_shape); auto scale_reshape = std::make_shared(scale_const, scale_reshape_const, false); scale_const = scale_reshape; } - auto multiply = std::make_shared(mul_parent, scale_const); + std::shared_ptr last_node = std::make_shared(mul_parent, scale_const); - std::shared_ptr matmul_weights = multiply; + if (group_decompression) { + auto reshape_target_shape = transpose_weights ? 
std::vector{-1, static_cast(weights_shape[0])} + : std::vector{static_cast(weights_shape[0]), -1}; + auto target_shape_node = ov::opset10::Constant::create(ov::element::i32, {reshape_target_shape.size()}, reshape_target_shape); + last_node = std::make_shared(last_node, target_shape_node, false); + } if (transpose_weights) { - const size_t rank = matmul_weights->get_output_partial_shape(0).size(); + const size_t rank = last_node->get_output_partial_shape(0).size(); std::vector order(rank); std::iota(order.begin(), order.end(), 0); std::swap(*order.rbegin(), *(order.rbegin() + 1)); auto transpose_constant = ov::opset10::Constant::create(ov::element::i32, {rank}, order); - auto transpose = std::make_shared(matmul_weights, transpose_constant); - matmul_weights = transpose; + last_node = std::make_shared(last_node, transpose_constant); } - auto matMul = builder::makeMatMul(params[0], matmul_weights); + return last_node; + } + + std::shared_ptr initSubgraph(const ov::PartialShape& data_shape, + const ov::Shape& weights_shape, + const int group_size, + const ov::element::Type data_precision, + const ov::element::Type weights_precision, + const bool transpose_weights, + const bool add_subtract, + const bool reshape_on_decompression) { + ov::ParameterVector params{std::make_shared(data_precision, data_shape)}; + const auto weights_subgraph = initDecompressionWeights(weights_shape, + group_size, + data_precision, + weights_precision, + transpose_weights, + add_subtract, + reshape_on_decompression); + auto matMul = builder::makeMatMul(params[0], weights_subgraph); return makeNgraphFunction(data_precision, params, matMul, "MatmulWeightsDecompression"); } void SetUp() override { targetDevice = ov::test::utils::DEVICE_CPU; - std::vector inputShapes; + ShapeParams shape_params; ov::test::ElementType weights_precision; bool transpose_weights; bool decompression_sub; @@ -163,7 +218,7 @@ class MatmulWeightsDecompression : public testing::WithParamInterface(test_param); - bool should_fuse = std::get<7>(test_param); + const auto& weights_precision = std::get<1>(test_param); + // TODO: remove this condition when group decompression is supported + if (weights_precision == ov::element::nf4 || std::get<0>(test_param).weights_group_size != -1) { + return; + } + bool weights_found = false; for (const auto& n : compiledModel.get_runtime_model()->get_ordered_ops()) { if (n->get_friendly_name() == "Compressed_weights") { ASSERT_EQ(n->get_output_element_type(0), weights_precision); + weights_found = true; } } + ASSERT_TRUE(weights_found); - std::map additional_config = std::get<5>(test_param); + const bool should_fuse = std::get<7>(test_param); const size_t expected_count = should_fuse ? 
0 : 1; CheckNumberOfNodesWithType(compiledModel, "Convert", expected_count); CheckNumberOfNodesWithType(compiledModel, "Eltwise", expected_count); @@ -235,22 +303,24 @@ bool shouldUseDecompressionKernelBasic() { return shouldUseDecompressionKernelBig(); } -const std::vector weights_precisions = {ov::element::u8}; -const std::vector> input_shapes_basic = { - {{{-1, -1, -1}, {{1, 4, 16}, {10, 16, 16}}}, {{}, {{16, 32}}}}, - {{{}, {{1, 4, 16}}}, {{}, {{1, 16, 32}}}}, - {{{}, {{10, 40, 496}}}, {{}, {{1, 496, 240}}}}, - {{{}, {{1, 4, 48}}}, {{}, {{48, 256}}}}, - {{{}, {{11, 339, 377}}}, {{}, {{377, 335}}}}, +const std::vector weights_precisions = {ov::element::u8, ov::element::nf4}; +const std::vector input_shapes_basic = { + {{{-1, -1, -1}, {{1, 4, 16}, {10, 16, 16}}}, {16, 32}}, + {{{}, {{1, 4, 16}}}, {16, 32}, 2ul}, + {{{}, {{1, 4, 16}}}, {1, 16, 32}}, + {{{}, {{10, 40, 496}}}, {1, 496, 240}}, + {{{}, {{1, 4, 48}}}, {48, 256}}, + {{{}, {{11, 339, 377}}}, {377, 335}}, }; -const std::vector> input_shapes_big = { - {{{-1, -1, -1}, {{10, 40, 480}, {11, 40, 480}}}, {{}, {{1, 480, 256}}}}, - {{{}, {{1, 4, 32}}}, {{}, {{32, 256}}}}, - {{{}, {{1, 4, 512}}}, {{}, {{512, 256}}}}, - {{{}, {{1, 16, 32}}}, {{}, {{32, 64}}}}, - {{{}, {{2, 4, 32}}}, {{}, {{32, 65}}}}, - {{{}, {{3, 12, 768}}}, {{}, {{768, 1024}}}}, - {{{}, {{11, 339, 577}}}, {{}, {{577, 335}}}}, +const std::vector input_shapes_big = { + {{{-1, -1, -1}, {{10, 40, 480}, {11, 40, 480}}}, {1, 480, 256}}, + {{{-1, 1, 4096}, {{1, 1, 4096}}}, {4096, 3840}, 128ul}, + {{{}, {{1, 4, 32}}}, {32, 256}}, + {{{}, {{1, 4, 512}}}, {512, 256}}, + {{{}, {{1, 16, 32}}}, {32, 64}}, + {{{}, {{2, 4, 32}}}, {32, 65}}, + {{{}, {{3, 12, 768}}}, {768, 1024}}, + {{{}, {{11, 339, 577}}}, {577, 335}}, }; const std::vector fusingParamsSet { emptyFusingSpec, @@ -281,12 +351,14 @@ INSTANTIATE_TEST_SUITE_P(smoke_MatMulCompressedWeights_big, ::testing::Values(shouldUseDecompressionKernelBig())), MatmulWeightsDecompression::getTestCaseName); -const std::vector> input_shapes_corner_cases_basic = { - {{{-1, -1, -1}, {{1, 4, 16}}}, {{}, {{1, 16, 32}}}}, - {{{-1, -1, -1}, {{1, 4, 16}}}, {{}, {{16, 32}}}}, +const std::vector input_shapes_corner_cases_basic = { + {{{-1, -1, -1}, {{1, 4, 16}}}, {1, 16, 32}}, + {{{-1, -1, -1}, {{1, 4, 16}}}, {16, 32}}, + {{{-1, -1, -1}, {{1, 4, 16}}}, {16, 32}, 4ul}, }; -const std::vector> input_shapes_corner_cases_big = { - {{{-1, -1, -1}, {{10, 40, 480}, {11, 40, 480}}}, {{}, {{1, 480, 256}}}}, +const std::vector input_shapes_corner_cases_big = { + {{{-1, -1, -1}, {{10, 40, 480}, {11, 40, 480}}}, {1, 480, 256}}, + {{{-1, -1, -1}, {{1, 1, 4096}}}, {4096, 4096}, 128ul}, }; const std::vector transpose_weights = {true, false}; @@ -317,5 +389,4 @@ INSTANTIATE_TEST_SUITE_P(smoke_MatMulCompressedWeights_corner_cases_big, ::testing::Values(shouldUseDecompressionKernelBig())), MatmulWeightsDecompression::getTestCaseName); } // namespace - } // namespace SubgraphTestsDefinitions diff --git a/src/tests/ov_helpers/ov_models/include/ov_models/builders.hpp b/src/tests/ov_helpers/ov_models/include/ov_models/builders.hpp index 664147ae1b7eb6..7fa2e675372f26 100644 --- a/src/tests/ov_helpers/ov_models/include/ov_models/builders.hpp +++ b/src/tests/ov_helpers/ov_models/include/ov_models/builders.hpp @@ -70,6 +70,7 @@ std::shared_ptr makeConstant(const ov::element::Type& type, makeNode(ov::element::Type_t::u32); makeNode(ov::element::Type_t::u64); makeNode(ov::element::Type_t::boolean); + makeNode(ov::element::Type_t::nf4); #undef makeNode default: throw 
std::runtime_error("Unhandled precision"); From 3403e6c02873bb0fa9f0cf410d24bbb79991e93c Mon Sep 17 00:00:00 2001 From: Przemyslaw Wysocki Date: Wed, 11 Oct 2023 14:22:26 +0200 Subject: [PATCH 154/257] Add torch lower bound (#20362) --- tests/constraints.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/constraints.txt b/tests/constraints.txt index 3d6aac51540044..fca15ab7cbe228 100644 --- a/tests/constraints.txt +++ b/tests/constraints.txt @@ -23,4 +23,4 @@ pytest-html==3.2.0 pytest-timeout==2.1.0 jax<=0.4.14 jaxlib<=0.4.14 -torch<2.1.0 \ No newline at end of file +torch<2.1.0,>=1.13 \ No newline at end of file From 9bedafb560fd4bc0e4627c8cac67cc0236a72e86 Mon Sep 17 00:00:00 2001 From: Ekaterina Aidova Date: Wed, 11 Oct 2023 17:33:32 +0400 Subject: [PATCH 155/257] [PT FE]: support aten::erf and aten::adaptive_avg_pool1d (#20350) * [PT FE]: support aten::erf and aten::adaptive_avg_pool1d * align adaptive avg pools for different sizes * refactor adaptive max pool --- .../pytorch/src/op/adaptive_avg_pool3d.cpp | 47 ------ .../pytorch/src/op/adaptive_max_pool2d.cpp | 25 --- .../pytorch/src/op/adaptive_poolnd.cpp | 123 +++++++++++++++ src/frontends/pytorch/src/op/erf.cpp | 37 +++++ src/frontends/pytorch/src/op_table.cpp | 12 +- .../pytorch_tests/test_adaptive_avg_pool.py | 101 ++++++++++++ .../pytorch_tests/test_adaptive_avg_pool3d.py | 39 ----- .../pytorch_tests/test_adaptive_max_pool.py | 144 ++++++++++++++++++ .../test_adaptive_max_pool_2d.py | 53 ------- tests/layer_tests/pytorch_tests/test_erf.py | 57 +++++++ 10 files changed, 473 insertions(+), 165 deletions(-) delete mode 100644 src/frontends/pytorch/src/op/adaptive_avg_pool3d.cpp delete mode 100644 src/frontends/pytorch/src/op/adaptive_max_pool2d.cpp create mode 100644 src/frontends/pytorch/src/op/adaptive_poolnd.cpp create mode 100644 src/frontends/pytorch/src/op/erf.cpp create mode 100644 tests/layer_tests/pytorch_tests/test_adaptive_avg_pool.py delete mode 100644 tests/layer_tests/pytorch_tests/test_adaptive_avg_pool3d.py create mode 100644 tests/layer_tests/pytorch_tests/test_adaptive_max_pool.py delete mode 100644 tests/layer_tests/pytorch_tests/test_adaptive_max_pool_2d.py create mode 100644 tests/layer_tests/pytorch_tests/test_erf.py diff --git a/src/frontends/pytorch/src/op/adaptive_avg_pool3d.cpp b/src/frontends/pytorch/src/op/adaptive_avg_pool3d.cpp deleted file mode 100644 index 42aa3da1d8586b..00000000000000 --- a/src/frontends/pytorch/src/op/adaptive_avg_pool3d.cpp +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "openvino/frontend/pytorch/node_context.hpp" -#include "openvino/op/adaptive_avg_pool.hpp" -#include "openvino/op/concat.hpp" -#include "openvino/op/constant.hpp" -#include "openvino/op/reshape.hpp" -#include "openvino/op/shape_of.hpp" -#include "openvino/op/slice.hpp" -#include "openvino/op/tile.hpp" -#include "utils.hpp" - -namespace ov { -namespace frontend { -namespace pytorch { -namespace op { - -using namespace ov::op; - -OutputVector translate_adaptive_avg_pool3d(const NodeContext& context) { - num_inputs_check(context, 2, 2); - auto const_tile_params = context.mark_node(v0::Constant::create(element::i32, Shape{5}, {1, 1, 1, 1, 1})); - auto const_0 = context.mark_node(v0::Constant::create(element::i32, Shape{1}, {0})); - auto const_1 = context.mark_node(v0::Constant::create(element::i32, Shape{1}, {1})); - auto const_neg_3 = context.mark_node(v0::Constant::create(element::i32, Shape{1}, {-3})); - - 
auto input_tensor = context.get_input(0); - auto given_shape = context.get_input(1); - - auto input_shape = context.mark_node(std::make_shared(input_tensor, element::i32)); - auto shape_begin = - context.mark_node(std::make_shared(input_shape, const_0, const_neg_3, const_1, const_0)); - auto output_shape = context.mark_node(std::make_shared(OutputVector{shape_begin, given_shape}, 0)); - - auto tile = context.mark_node(std::make_shared(input_tensor, const_tile_params)); - auto adaptive_avg_pool = context.mark_node(std::make_shared(tile, given_shape)); - auto reshape = context.mark_node(std::make_shared(adaptive_avg_pool, output_shape, false)); - - return {reshape}; -}; - -} // namespace op -} // namespace pytorch -} // namespace frontend -} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/op/adaptive_max_pool2d.cpp b/src/frontends/pytorch/src/op/adaptive_max_pool2d.cpp deleted file mode 100644 index 5705fd22e70f47..00000000000000 --- a/src/frontends/pytorch/src/op/adaptive_max_pool2d.cpp +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "openvino/frontend/pytorch/node_context.hpp" -#include "openvino/op/adaptive_max_pool.hpp" -#include "utils.hpp" - -namespace ov { -namespace frontend { -namespace pytorch { -namespace op { - -OutputVector translate_adaptive_max_pool2d(const NodeContext& context) { - num_inputs_check(context, 2, 2); - auto x = context.get_input(0); - auto y = context.get_input(1); - auto adaptive_max_pool = context.mark_node(std::make_shared(x, y, ov::element::i32)); - return {adaptive_max_pool->output(0), adaptive_max_pool->output(1)}; -}; - -} // namespace op -} // namespace pytorch -} // namespace frontend -} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/op/adaptive_poolnd.cpp b/src/frontends/pytorch/src/op/adaptive_poolnd.cpp new file mode 100644 index 00000000000000..9c349e50c8ed86 --- /dev/null +++ b/src/frontends/pytorch/src/op/adaptive_poolnd.cpp @@ -0,0 +1,123 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/op/adaptive_avg_pool.hpp" +#include "openvino/op/adaptive_max_pool.hpp" +#include "openvino/op/concat.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/convert.hpp" +#include "openvino/op/reshape.hpp" +#include "openvino/op/shape_of.hpp" +#include "openvino/op/slice.hpp" +#include "openvino/op/tile.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +using namespace ov::op; + +namespace { + +std::tuple, Output> get_tile_input_and_output_shape(const NodeContext& context, + const Output& input_tensor, + const Output& given_shape, + const Output& tile_shape, + const Output& slice_end) { + auto const_0 = context.mark_node(v0::Constant::create(element::i32, Shape{1}, {0})); + auto const_1 = context.mark_node(v0::Constant::create(element::i32, Shape{1}, {1})); + auto input_shape = context.mark_node(std::make_shared(input_tensor, element::i32)); + auto shape_begin = + context.mark_node(std::make_shared(input_shape, const_0, slice_end, const_1, const_0)); + Output output_shape = + context.mark_node(std::make_shared(OutputVector{shape_begin, given_shape}, 0)); + Output tile = context.mark_node(std::make_shared(input_tensor, tile_shape)); + return std::make_tuple(tile, output_shape); +}; + +OutputVector 
translate_adaptive_avg_pool_base(const NodeContext& context, + const Output& tile_shape, + const Output& slice_end) { + num_inputs_check(context, 2, 2); + + auto input_tensor = context.get_input(0); + auto given_shape = context.get_input(1); + Output tile_input; + Output output_shape; + std::tie(tile_input, output_shape) = + get_tile_input_and_output_shape(context, input_tensor, given_shape, tile_shape, slice_end); + auto adaptive_avg_pool = context.mark_node(std::make_shared(tile_input, given_shape)); + auto reshape = context.mark_node(std::make_shared(adaptive_avg_pool, output_shape, false)); + return {reshape}; +}; + +OutputVector translate_adaptive_max_pool_base(const NodeContext& context, + const Output& tile_shape, + const Output& slice_end) { + num_inputs_check(context, 2, 2); + + auto input_tensor = context.get_input(0); + auto given_shape = context.get_input(1); + Output tile_input; + Output output_shape; + std::tie(tile_input, output_shape) = + get_tile_input_and_output_shape(context, input_tensor, given_shape, tile_shape, slice_end); + + auto adaptive_max_pool = + context.mark_node(std::make_shared(tile_input, given_shape, element::i32)); + auto pooled_tensor = adaptive_max_pool->output(0); + auto pooled_indices = adaptive_max_pool->output(1); + // adaptive max pool in torch returns indices in i64; indices_element_type i64 is not implemented on ov runtime side + pooled_indices = context.mark_node(std::make_shared(pooled_indices, element::i64)); + pooled_tensor = context.mark_node(std::make_shared(pooled_tensor, output_shape, false)); + pooled_indices = context.mark_node(std::make_shared(pooled_indices, output_shape, false)); + // aten::adaptive_max_pool{n}d always returns a tuple with 2 tensors: pooled tensor and indices + // selecting only the first output or preserving both is done outside of the operation by the return_indices flag + return {pooled_tensor, pooled_indices}; +}; +} // namespace + +OutputVector translate_adaptive_avg_pool3d(const NodeContext& context) { + auto const_tile_params = context.mark_node(v0::Constant::create(element::i32, Shape{5}, {1, 1, 1, 1, 1})); + auto const_neg_3 = context.mark_node(v0::Constant::create(element::i32, Shape{1}, {-3})); + return translate_adaptive_avg_pool_base(context, const_tile_params, const_neg_3); +}; + +OutputVector translate_adaptive_avg_pool2d(const NodeContext& context) { + auto const_tile_params = context.mark_node(v0::Constant::create(element::i32, Shape{4}, {1, 1, 1, 1})); + auto const_neg_2 = context.mark_node(v0::Constant::create(element::i32, Shape{1}, {-2})); + return translate_adaptive_avg_pool_base(context, const_tile_params, const_neg_2); +}; + +OutputVector translate_adaptive_avg_pool1d(const NodeContext& context) { + auto const_tile_params = context.mark_node(v0::Constant::create(element::i32, Shape{3}, {1, 1, 1})); + auto const_neg_1 = context.mark_node(v0::Constant::create(element::i32, Shape{1}, {-1})); + return translate_adaptive_avg_pool_base(context, const_tile_params, const_neg_1); +}; + +OutputVector translate_adaptive_max_pool3d(const NodeContext& context) { + auto const_tile_params = context.mark_node(v0::Constant::create(element::i32, Shape{5}, {1, 1, 1, 1, 1})); + auto const_neg_3 = context.mark_node(v0::Constant::create(element::i32, Shape{1}, {-3})); + return translate_adaptive_max_pool_base(context, const_tile_params, const_neg_3); +}; + +OutputVector translate_adaptive_max_pool2d(const NodeContext& context) { + auto const_tile_params = context.mark_node(v0::Constant::create(element::i32, Shape{4}, {1, 1, 1, 1})); +
auto const_neg_2 = context.mark_node(v0::Constant::create(element::i32, Shape{1}, {-2})); + return translate_adaptive_max_pool_base(context, const_tile_params, const_neg_2); +}; + +OutputVector translate_adaptive_max_pool1d(const NodeContext& context) { + auto const_tile_params = context.mark_node(v0::Constant::create(element::i32, Shape{3}, {1, 1, 1})); + auto const_neg_1 = context.mark_node(v0::Constant::create(element::i32, Shape{1}, {-1})); + return translate_adaptive_max_pool_base(context, const_tile_params, const_neg_1); +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/op/erf.cpp b/src/frontends/pytorch/src/op/erf.cpp new file mode 100644 index 00000000000000..0525035bd43e24 --- /dev/null +++ b/src/frontends/pytorch/src/op/erf.cpp @@ -0,0 +1,37 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/op/erf.hpp" + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/op/convert.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +OutputVector translate_erf(const NodeContext& context) { + // aten::erf(Tensor self) -> Tensor + // aten::erf.out(Tensor self, Tensor(!a) out) -> Tensor(!a) + num_inputs_check(context, 1, 2); + auto x = context.get_input(0); + auto xdtype = x.get_element_type(); + // in torch, erf return always float dtype, while ov cast to input dtype + if (xdtype.is_dynamic() || !xdtype.is_real()) { + x = context.mark_node(std::make_shared(x, element::f32)); + } + + auto y = context.mark_node(std::make_shared(x)); + if (!context.input_is_none(1)) { + context.mutate_input(1, y); + } + return {y}; +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/op_table.cpp b/src/frontends/pytorch/src/op_table.cpp index 20c53dbe52bc9f..47969ddb57d1c6 100644 --- a/src/frontends/pytorch/src/op_table.cpp +++ b/src/frontends/pytorch/src/op_table.cpp @@ -17,7 +17,11 @@ namespace op { // TorchScript translations OP_CONVERTER(translate_adaptive_avg_pool3d); +OP_CONVERTER(translate_adaptive_avg_pool2d); +OP_CONVERTER(translate_adaptive_avg_pool1d); +OP_CONVERTER(translate_adaptive_max_pool3d); OP_CONVERTER(translate_adaptive_max_pool2d); +OP_CONVERTER(translate_adaptive_max_pool1d); OP_CONVERTER(translate_add); OP_CONVERTER(translate_addcmul); OP_CONVERTER(translate_addmm); @@ -56,6 +60,7 @@ OP_CONVERTER(translate_elu); OP_CONVERTER(translate_embedding); OP_CONVERTER(translate_embedding_bag); OP_CONVERTER(translate_empty); +OP_CONVERTER(translate_erf); OP_CONVERTER(translate_expand); OP_CONVERTER(translate_expand_as); OP_CONVERTER(translate_eye); @@ -232,9 +237,12 @@ const std::map get_supported_ops_ts() { {"aten::acos_", op::inplace_op>}, {"aten::acosh", op::translate_1to1_match_1_inputs_with_fp32_type_alignment}, {"aten::acosh_", op::inplace_op>}, - {"aten::adaptive_avg_pool2d", op::quantizable_op>}, + {"aten::adaptive_avg_pool1d", op::quantizable_op}, + {"aten::adaptive_avg_pool2d", op::quantizable_op}, {"aten::adaptive_avg_pool3d", op::quantizable_op}, + {"aten::adaptive_max_pool1d", op::quantizable_op}, {"aten::adaptive_max_pool2d", op::quantizable_op}, + {"aten::adaptive_max_pool3d", op::quantizable_op}, {"aten::add", op::translate_add}, {"aten::add_", op::inplace_op}, {"aten::addcmul", op::translate_addcmul}, @@ -305,6 +313,8 @@ const std::map 
get_supported_ops_ts() { {"aten::embedding_bag", op::translate_embedding_bag}, {"aten::empty", op::translate_empty}, {"aten::eq", op::translate_1to1_match_2_inputs_align_types}, + {"aten::erf", op::translate_erf}, + {"aten::erf_", op::inplace_op}, {"aten::exp", op::translate_1to1_match_1_inputs_with_fp32_type_alignment}, {"aten::exp_", op::inplace_op>}, {"aten::expand", op::translate_expand}, diff --git a/tests/layer_tests/pytorch_tests/test_adaptive_avg_pool.py b/tests/layer_tests/pytorch_tests/test_adaptive_avg_pool.py new file mode 100644 index 00000000000000..f50dc8a4051519 --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_adaptive_avg_pool.py @@ -0,0 +1,101 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import pytest +import torch + +from pytorch_layer_test_class import PytorchLayerTest + + +@pytest.mark.parametrize('input_tensor', (np.random.randn(1, 2, 8, 9, 10).astype(np.float32), + np.random.randn(2, 8, 9, 10).astype(np.float32))) +@pytest.mark.parametrize('output_size', ([5, 7, 9], 7)) +class TestAdaptiveAvgPool3D(PytorchLayerTest): + + def _prepare_input(self): + return (self.input_tensor,) + + def create_model(self, output_size): + class aten_adaptive_avg_pool3d(torch.nn.Module): + + def __init__(self, output_size) -> None: + super().__init__() + self.output_size = output_size + + def forward(self, input_tensor): + return torch.nn.functional.adaptive_avg_pool3d(input_tensor, self.output_size) + + ref_net = None + + return aten_adaptive_avg_pool3d(output_size), ref_net, "aten::adaptive_avg_pool3d" + + @pytest.mark.nightly + @pytest.mark.precommit + @pytest.mark.precommit_ts_backend + @pytest.mark.precommit_fx_backend + def test_adaptive_avg_pool3d(self, ie_device, precision, ir_version, input_tensor, output_size): + self.input_tensor = input_tensor + self._test(*self.create_model(output_size), ie_device, precision, ir_version) + + +@pytest.mark.parametrize('input_tensor', [np.random.randn(2, 8, 9, 10).astype(np.float32), np.random.randn(8, 9, 10).astype(np.float32)]) +@pytest.mark.parametrize('output_size', ([7, 9], 7)) +class TestAdaptiveAvgPool2D(PytorchLayerTest): + + def _prepare_input(self): + return (self.input_tensor,) + + def create_model(self, output_size): + class aten_adaptive_avg_pool2d(torch.nn.Module): + + def __init__(self, output_size) -> None: + super().__init__() + self.output_size = output_size + + def forward(self, input_tensor): + return torch.nn.functional.adaptive_avg_pool2d(input_tensor, self.output_size) + + ref_net = None + + return aten_adaptive_avg_pool2d(output_size), ref_net, "aten::adaptive_avg_pool2d" + + @pytest.mark.nightly + @pytest.mark.precommit + @pytest.mark.precommit_ts_backend + @pytest.mark.precommit_fx_backend + def test_adaptive_avg_pool2d(self, ie_device, precision, ir_version, input_tensor, output_size): + self.input_tensor = input_tensor + self._test(*self.create_model(output_size), ie_device, precision, ir_version) + + +@pytest.mark.parametrize('input_tensor', [np.random.randn(8, 9, 10).astype(np.float32), np.random.randn(9, 10).astype(np.float32)] ) +@pytest.mark.parametrize('output_size', ( 7, )) +class TestAdaptiveAvgPool1D(PytorchLayerTest): + + def _prepare_input(self): + return (self.input_tensor,) + + def create_model(self, output_size): + class aten_adaptive_avg_pool1d(torch.nn.Module): + + def __init__(self, output_size) -> None: + super().__init__() + self.output_size = output_size + + def forward(self, input_tensor): + return 
torch.nn.functional.adaptive_avg_pool1d(input_tensor, self.output_size) + + ref_net = None + + return aten_adaptive_avg_pool1d(output_size), ref_net, "aten::adaptive_avg_pool1d" + + @pytest.mark.nightly + @pytest.mark.precommit + @pytest.mark.precommit_ts_backend + @pytest.mark.precommit_fx_backend + def test_adaptive_avg_pool1d(self, ie_device, precision, ir_version, input_tensor, output_size): + self.input_tensor = input_tensor + self._test(*self.create_model(output_size), ie_device, precision, ir_version) + + diff --git a/tests/layer_tests/pytorch_tests/test_adaptive_avg_pool3d.py b/tests/layer_tests/pytorch_tests/test_adaptive_avg_pool3d.py deleted file mode 100644 index 2f2dffa15ebcf0..00000000000000 --- a/tests/layer_tests/pytorch_tests/test_adaptive_avg_pool3d.py +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np -import pytest -import torch - -from pytorch_layer_test_class import PytorchLayerTest - - -@pytest.mark.parametrize('input_tensor', (np.random.randn(1, 2, 8, 9, 10).astype(np.float32), - np.random.randn(2, 8, 9, 10).astype(np.float32))) -@pytest.mark.parametrize('output_size', ([5, 7, 9], 7)) -class TestAdaptiveAvgPool3D(PytorchLayerTest): - - def _prepare_input(self): - return (self.input_tensor,) - - def create_model(self, output_size): - class aten_adaptive_avg_pool3d(torch.nn.Module): - - def __init__(self, output_size) -> None: - super().__init__() - self.output_size = output_size - - def forward(self, input_tensor): - return torch.nn.functional.adaptive_avg_pool3d(input_tensor, self.output_size) - - ref_net = None - - return aten_adaptive_avg_pool3d(output_size), ref_net, "aten::adaptive_avg_pool3d" - - @pytest.mark.nightly - @pytest.mark.precommit - @pytest.mark.precommit_ts_backend - @pytest.mark.precommit_fx_backend - def test_adaptive_avg_pool3d(self, ie_device, precision, ir_version, input_tensor, output_size): - self.input_tensor = input_tensor - self._test(*self.create_model(output_size), ie_device, precision, ir_version) diff --git a/tests/layer_tests/pytorch_tests/test_adaptive_max_pool.py b/tests/layer_tests/pytorch_tests/test_adaptive_max_pool.py new file mode 100644 index 00000000000000..c01e58c2107eec --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_adaptive_max_pool.py @@ -0,0 +1,144 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import pytest +import torch +import torch.nn.functional as F + +from pytorch_layer_test_class import PytorchLayerTest + + +class TestAdaptiveMaxPool3D(PytorchLayerTest): + + def _prepare_input(self): + return (self.input_tensor,) + + def create_model(self, output_size=None, return_indices=False): + class aten_adaptive_max_pool3d(torch.nn.Module): + + def __init__(self, output_size=None, return_indices=False) -> None: + super().__init__() + self.output_size = output_size + self.return_indices = return_indices + + def forward(self, input_tensor): + if self.return_indices: + output, indices = F.adaptive_max_pool3d(input_tensor, self.output_size, True) + return output, indices + return F.adaptive_max_pool3d(input_tensor, self.output_size, False), input_tensor.to(torch.int64) + + ref_net = None + + return aten_adaptive_max_pool3d(output_size, return_indices), ref_net, "aten::adaptive_max_pool3d" + + @pytest.mark.parametrize('input_tensor', ([ + np.random.randn(2, 1, 1, 4, 4).astype(np.float32), + np.random.randn(4, 1, 3, 32, 32).astype(np.float32), + np.random.randn(1, 3, 32, 
32).astype(np.float32) + ])) + @pytest.mark.parametrize('output_size', ([ + [2, 2, 2], + [4, 4, 4], + ])) + @pytest.mark.parametrize('return_indices', ([ + False, + True, + ])) + @pytest.mark.nightly + @pytest.mark.precommit + @pytest.mark.precommit_ts_backend + @pytest.mark.precommit_fx_backend + def test_adaptive_max_pool3d(self, ie_device, precision, ir_version, input_tensor, output_size, return_indices): + self.input_tensor = input_tensor + self._test(*self.create_model(output_size, return_indices), ie_device, precision, ir_version) + + +class TestAdaptiveMaxPool2D(PytorchLayerTest): + + def _prepare_input(self): + return (self.input_tensor,) + + def create_model(self, output_size=None, return_indices=False): + class aten_adaptive_max_pool2d(torch.nn.Module): + + def __init__(self, output_size=None, return_indices=False) -> None: + super().__init__() + self.output_size = output_size + self.return_indices = return_indices + + def forward(self, input_tensor): + if self.return_indices: + output, indices = F.adaptive_max_pool2d(input_tensor, self.output_size, True) + return output, indices + return F.adaptive_max_pool2d(input_tensor, self.output_size, False), input_tensor.to(torch.int64) + + ref_net = None + + return aten_adaptive_max_pool2d(output_size, return_indices), ref_net, "aten::adaptive_max_pool2d" + + @pytest.mark.parametrize('input_tensor', ([ + np.random.randn(2, 1, 4, 4).astype(np.float32), + np.random.randn(1, 3, 32, 32).astype(np.float32), + np.random.randn(3, 32, 32).astype(np.float32) + ])) + @pytest.mark.parametrize('output_size', ([ + [2, 2], + [4, 4], + ])) + @pytest.mark.parametrize('return_indices', ([ + False, + True, + ])) + @pytest.mark.nightly + @pytest.mark.precommit + @pytest.mark.precommit_ts_backend + @pytest.mark.precommit_fx_backend + def test_adaptive_max_pool2d(self, ie_device, precision, ir_version, input_tensor, output_size, return_indices): + self.input_tensor = input_tensor + self._test(*self.create_model(output_size, return_indices), ie_device, precision, ir_version) + + +class TestAdaptiveMaxPool1D(PytorchLayerTest): + + def _prepare_input(self): + return (self.input_tensor,) + + def create_model(self, output_size=None, return_indices=False): + class aten_adaptive_max_pool1d(torch.nn.Module): + + def __init__(self, output_size=None, return_indices=False) -> None: + super().__init__() + self.output_size = output_size + self.return_indices = return_indices + + def forward(self, input_tensor): + if self.return_indices: + output, indices = F.adaptive_max_pool1d(input_tensor, self.output_size, True) + return output, indices + return F.adaptive_max_pool1d(input_tensor, self.output_size, False), input_tensor.to(torch.int64) + + ref_net = None + + return aten_adaptive_max_pool1d(output_size, return_indices), ref_net, "aten::adaptive_max_pool1d" + + @pytest.mark.parametrize('input_tensor', ([ + np.random.randn(1, 4, 4).astype(np.float32), + np.random.randn(3, 32, 32).astype(np.float32), + np.random.randn(16, 8).astype(np.float32), + ])) + @pytest.mark.parametrize('output_size', ([ + 2, + 4, + ])) + @pytest.mark.parametrize('return_indices', ([ + False, + True, + ])) + @pytest.mark.nightly + @pytest.mark.precommit + @pytest.mark.precommit_ts_backend + @pytest.mark.precommit_fx_backend + def test_adaptive_max_pool1d(self, ie_device, precision, ir_version, input_tensor, output_size, return_indices): + self.input_tensor = input_tensor + self._test(*self.create_model(output_size, return_indices), ie_device, precision, ir_version) \ No newline at end of file diff 
--git a/tests/layer_tests/pytorch_tests/test_adaptive_max_pool_2d.py b/tests/layer_tests/pytorch_tests/test_adaptive_max_pool_2d.py deleted file mode 100644 index 661c8e1788163b..00000000000000 --- a/tests/layer_tests/pytorch_tests/test_adaptive_max_pool_2d.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np -import pytest -import torch -import torch.nn.functional as F - -from pytorch_layer_test_class import PytorchLayerTest - - -class TestAdaptiveMaxPool2D(PytorchLayerTest): - - def _prepare_input(self): - return (self.input_tensor,) - - def create_model(self, output_size=None, return_indices=False): - class aten_adaptive_max_pool2d(torch.nn.Module): - - def __init__(self, output_size=None, return_indices=False) -> None: - super().__init__() - self.output_size = output_size - self.return_indices = return_indices - - def forward(self, input_tensor): - if self.return_indices: - output, indices = F.adaptive_max_pool2d(input_tensor, self.output_size, True) - return output - return F.adaptive_max_pool2d(input_tensor, self.output_size, False) - - ref_net = None - - return aten_adaptive_max_pool2d(output_size, return_indices), ref_net, "aten::adaptive_max_pool2d" - - @pytest.mark.parametrize('input_tensor', ([ - np.random.randn(1, 1, 4, 4).astype(np.float32), - np.random.randn(1, 3, 32, 32).astype(np.float32) - ])) - @pytest.mark.parametrize('output_size', ([ - [2, 2], - [4, 4], - ])) - @pytest.mark.parametrize('return_indices', ([ - False, - True, - ])) - @pytest.mark.nightly - @pytest.mark.precommit - @pytest.mark.precommit_ts_backend - @pytest.mark.precommit_fx_backend - def test_adaptive_max_pool2d(self, ie_device, precision, ir_version, input_tensor, output_size, return_indices): - self.input_tensor = input_tensor - self._test(*self.create_model(output_size, return_indices), ie_device, precision, ir_version) diff --git a/tests/layer_tests/pytorch_tests/test_erf.py b/tests/layer_tests/pytorch_tests/test_erf.py new file mode 100644 index 00000000000000..ab4d1b8d9d7ae7 --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_erf.py @@ -0,0 +1,57 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import pytest + +from pytorch_layer_test_class import PytorchLayerTest + + +class TestErf(PytorchLayerTest): + def _prepare_input(self, input_dtype, out=False): + import numpy as np + x = np.linspace(-3, 3).astype(input_dtype) + if not out: + return (x, ) + return (x, np.zeros_like(x).astype(input_dtype)) + + def create_model(self, mode="", input_dtype="float32"): + import torch + dtypes = { + "float32": torch.float32, + "float64": torch.float64, + "int32": torch.int32 + } + + dtype = dtypes[input_dtype] + class aten_erf(torch.nn.Module): + def __init__(self, mode, dtype): + super(aten_erf, self).__init__() + self.dtype = dtype + if mode == "out": + self.forward = self.forward_out + elif mode == "inplace": + self.forward = self.forward_inplace + + def forward(self, x): + return torch.special.erf(x.to(self.dtype)) + + def forward_out(self, x, y): + return torch.special.erf(x.to(self.dtype), out=y), y + + def forward_inplace(self, x): + x = x.to(self.dtype) + return x.erf_(), x + + ref_net = None + + return aten_erf(mode, dtype), ref_net, "aten::erf" if mode != "inplace" else "aten::erf_" + + @pytest.mark.nightly + @pytest.mark.precommit + @pytest.mark.parametrize("mode,input_dtype", [ + ("", "float32"), ("", "float64"), ("", "int32"), + ("out", "float32"), ("out", "float64"), + 
("inplace", "float32"), ("inplace", "float64")]) + def test_erf(self, mode, input_dtype, ie_device, precision, ir_version): + self._test(*self.create_model(mode, input_dtype), ie_device, precision, ir_version, + kwargs_to_prepare_input={"input_dtype": input_dtype, "out": mode == "out"} ) \ No newline at end of file From a844e597e82a35d0a71b45a6e8a1ea2a47e2fa74 Mon Sep 17 00:00:00 2001 From: Alexandra Sidorova Date: Wed, 11 Oct 2023 18:17:26 +0400 Subject: [PATCH 156/257] [Snippets][CPU] Disabled MHA tokenization with infer precision f16 (#20308) --- .../src/transformations/transformation_pipeline.cpp | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp b/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp index d8bb3454918f8c..28756ba21664e1 100644 --- a/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp +++ b/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp @@ -598,8 +598,12 @@ void Transformations::MainSnippets(void) { CPU_REGISTER_PASS_X64(snippetsManager, SnippetsMarkSkipped, inferencePrecision != ov::element::f32); CPU_REGISTER_PASS_X64(snippetsManager, snippets::pass::SnippetsTokenization, tokenization_config); + // - MHA has BRGEMM that is supported only on AVX512 platforms + // - CPU Plugin Subgraph supports only f32, bf16 (and quantized) BRGEMM + // [122494] Need to add support of f16 const bool isMHASupported = - dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx512_core); // MHA has BRGEMM that is supported only on AVX512 platforms + dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx512_core) && + one_of(inferencePrecision, ov::element::bf16, ov::element::f32); if (!isMHASupported) { CPU_DISABLE_PASS_X64(snippetsManager, snippets::pass::TokenizeMHASnippets); CPU_DISABLE_PASS_X64(snippetsManager, snippets::pass::ExtractReshapesFromMHA); From 10d1862715b9fcfbfd81c8e209e1725f884accc6 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Wed, 11 Oct 2023 23:38:00 +0400 Subject: [PATCH 157/257] Updated ade submodule (#20392) --- .github/workflows/windows.yml | 1 - thirdparty/ade | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index c23419a4463a47..a32810a514305c 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -110,7 +110,6 @@ jobs: run: | cmake -G "${{ env.CMAKE_GENERATOR }}" ` -DENABLE_CPPLINT=OFF ` - -DCMAKE_CXX_STANDARD=14 ` -DBUILD_nvidia_plugin=OFF ` -DBUILD_SHARED_LIBS=OFF ` -DENABLE_TESTS=ON ` diff --git a/thirdparty/ade b/thirdparty/ade index 50ff2423e234d8..0e8a2ccdd34f29 160000 --- a/thirdparty/ade +++ b/thirdparty/ade @@ -1 +1 @@ -Subproject commit 50ff2423e234d8e340de7ce959b920f6c8c7fe79 +Subproject commit 0e8a2ccdd34f29dba55894f5f3c5179809888b9e From aa7405fd0e32bc8e4a61a8abf878218c77f5e9cb Mon Sep 17 00:00:00 2001 From: Tomasz Jankowski Date: Wed, 11 Oct 2023 21:44:41 +0200 Subject: [PATCH 158/257] [Template plugin] Unify headers inclusion (#20326) * Use precise op version - Parameter * Use precise op version - Constant * Use precise op version - Result * Use precise op version - Add, Concat, Subtract * Use precise op version - NMS * Use precise op version - TopK, Broadcast * Use precise op version * Include openvino headers in quotes * Use precise op version - LSTMCell * Fix code style --- src/plugins/template/src/itt.hpp | 2 +- .../op_reference/batch_to_space.cpp | 17 +- 
.../op_reference/binary_convolution.cpp | 4 +- .../functional/op_reference/broadcast.cpp | 51 +-- .../tests/functional/op_reference/concat.cpp | 175 ++++---- .../op_reference/convert_color_i420.cpp | 6 +- .../op_reference/convert_color_nv12.cpp | 6 +- .../op_reference/deformable_psroi_pooling.cpp | 7 +- .../op_reference/depth_to_space.cpp | 13 +- .../tests/functional/op_reference/einsum.cpp | 9 +- .../op_reference/embedding_segments_sum.cpp | 3 +- .../op_reference/embeddingbag_offsetssum.cpp | 3 +- .../op_reference/embeddingbag_packedsum.cpp | 3 +- .../op_reference/extract_image_patches.cpp | 8 +- .../tests/functional/op_reference/gather.cpp | 31 +- .../functional/op_reference/gather_tree.cpp | 14 +- .../group_convolution_backprop.cpp | 4 +- .../tests/functional/op_reference/if.cpp | 4 +- .../tests/functional/op_reference/loop.cpp | 61 +-- .../functional/op_reference/lstm_cell.cpp | 91 ++-- .../functional/op_reference/matrix_nms.cpp | 71 ++-- .../functional/op_reference/nms_rotated.cpp | 30 +- .../op_reference/non_max_suppression.cpp | 192 ++++----- .../tests/functional/op_reference/pad.cpp | 3 +- .../op_reference/prior_box_clustered.cpp | 8 +- .../op_reference/random_uniform.cpp | 13 +- .../functional/op_reference/roi_align.cpp | 29 +- .../tests/functional/op_reference/roll.cpp | 14 +- .../op_reference/shuffle_channels.cpp | 8 +- .../tests/functional/op_reference/slice.cpp | 26 +- .../op_reference/space_to_batch.cpp | 15 +- .../op_reference/space_to_depth.cpp | 13 +- .../op_reference/tensor_iterator.cpp | 94 +++-- .../tests/functional/op_reference/tile.cpp | 11 +- .../tests/functional/op_reference/topk.cpp | 387 +++++++++--------- .../ov_exec_net_import_export.cpp | 3 +- .../subgraph_reference/preprocess_opencv.cpp | 8 +- .../disable_transformations_test.cpp | 15 +- 38 files changed, 750 insertions(+), 702 deletions(-) diff --git a/src/plugins/template/src/itt.hpp b/src/plugins/template/src/itt.hpp index d8153bcff90ab6..a48f85ef747165 100644 --- a/src/plugins/template/src/itt.hpp +++ b/src/plugins/template/src/itt.hpp @@ -9,7 +9,7 @@ #pragma once -#include +#include "openvino/itt.hpp" namespace ov { namespace template_plugin { diff --git a/src/plugins/template/tests/functional/op_reference/batch_to_space.cpp b/src/plugins/template/tests/functional/op_reference/batch_to_space.cpp index 10583280efd619..96ac6fd261717f 100644 --- a/src/plugins/template/tests/functional/op_reference/batch_to_space.cpp +++ b/src/plugins/template/tests/functional/op_reference/batch_to_space.cpp @@ -2,11 +2,12 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/batch_to_space.hpp" + #include #include "base_reference_test.hpp" -#include "openvino/opsets/opset1.hpp" -#include "openvino/opsets/opset2.hpp" +#include "openvino/op/parameter.hpp" using namespace reference_tests; using namespace ov; @@ -47,7 +48,7 @@ class ReferenceBatchToSpaceLayerTest : public testing::TestWithParam& obj) { - auto param = obj.param; + const auto& param = obj.param; std::ostringstream result; result << "dType=" << param.dataTensor.type; result << "_dShape=" << param.dataTensor.shape; @@ -69,11 +70,11 @@ class ReferenceBatchToSpaceLayerTest : public testing::TestWithParam CreateFunction(const BatchToSpaceParams& params) { - const auto data = std::make_shared(params.dataTensor.type, params.dataTensor.shape); - const auto blockShape = std::make_shared(element::i64, params.blockShapeTensor.shape); - const auto cropsBegin = std::make_shared(element::i64, params.cropsBeginTensor.shape); - const auto cropsEnd = 
std::make_shared(element::i64, params.cropsEndTensor.shape); - const auto batchToSpace = std::make_shared(data, blockShape, cropsBegin, cropsEnd); + const auto data = std::make_shared(params.dataTensor.type, params.dataTensor.shape); + const auto blockShape = std::make_shared(element::i64, params.blockShapeTensor.shape); + const auto cropsBegin = std::make_shared(element::i64, params.cropsBeginTensor.shape); + const auto cropsEnd = std::make_shared(element::i64, params.cropsEndTensor.shape); + const auto batchToSpace = std::make_shared(data, blockShape, cropsBegin, cropsEnd); return std::make_shared(NodeVector{batchToSpace}, ParameterVector{data, blockShape, cropsBegin, cropsEnd}); } diff --git a/src/plugins/template/tests/functional/op_reference/binary_convolution.cpp b/src/plugins/template/tests/functional/op_reference/binary_convolution.cpp index 1917163d832f4e..6b6089911a197c 100644 --- a/src/plugins/template/tests/functional/op_reference/binary_convolution.cpp +++ b/src/plugins/template/tests/functional/op_reference/binary_convolution.cpp @@ -7,7 +7,7 @@ #include #include "base_reference_test.hpp" -#include "openvino/opsets/opset8.hpp" +#include "openvino/op/constant.hpp" using namespace reference_tests; using namespace ov; @@ -95,7 +95,7 @@ class ReferenceBinaryConvolutionLayerTest : public testing::TestWithParam& filterData) { const op::PadType auto_pad{op::PadType::EXPLICIT}; const auto in = std::make_shared(params.inType, params.inputShape); - auto filter = std::make_shared(ov::element::u1, params.filterShape, &filterData[0]); + auto filter = std::make_shared(ov::element::u1, params.filterShape, &filterData[0]); const auto BinaryConvolution = std::make_shared(in, filter, params.strides, diff --git a/src/plugins/template/tests/functional/op_reference/broadcast.cpp b/src/plugins/template/tests/functional/op_reference/broadcast.cpp index 5cb9712c8b2b2d..415116a39af6cf 100644 --- a/src/plugins/template/tests/functional/op_reference/broadcast.cpp +++ b/src/plugins/template/tests/functional/op_reference/broadcast.cpp @@ -2,11 +2,14 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/broadcast.hpp" + #include #include "base_reference_test.hpp" -#include "openvino/opsets/opset1.hpp" -#include "openvino/opsets/opset3.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/parameter.hpp" +#include "openvino/op/reverse.hpp" using namespace reference_tests; using namespace ov; @@ -56,10 +59,10 @@ class ReferenceBroadcastTest : public testing::TestWithParam, p private: static std::shared_ptr CreateFunction(const BroadcastParams& params) { - const auto A = std::make_shared(params.dataTensor.type, params.dataTensor.shape); + const auto A = std::make_shared(params.dataTensor.type, params.dataTensor.shape); const auto f = std::make_shared( - std::make_shared(A, - opset1::Constant::create(params.targetShapeTensor.type, + std::make_shared(A, + op::v0::Constant::create(params.targetShapeTensor.type, params.targetShapeTensor.shape, params.targetShapeTensor.data.data())), ParameterVector{A}); @@ -74,10 +77,10 @@ TEST_P(ReferenceBroadcastTest, CompareWithRefs) { class ReferenceBroadcastTestV3 : public ReferenceBroadcastTest { private: static std::shared_ptr CreateFunction(const BroadcastParams& params) { - const auto A = std::make_shared(params.dataTensor.type, params.dataTensor.shape); + const auto A = std::make_shared(params.dataTensor.type, params.dataTensor.shape); const auto f = std::make_shared( - std::make_shared(A, - 
opset1::Constant::create(params.targetShapeTensor.type, + std::make_shared(A, + op::v0::Constant::create(params.targetShapeTensor.type, params.targetShapeTensor.shape, params.targetShapeTensor.data.data())), ParameterVector{A}); @@ -132,13 +135,13 @@ class ReferenceBroadcastTestExplicitAxis : public testing::TestWithParam CreateFunction(const BroadcastParamsExplicitAxis& params) { - const auto A = std::make_shared(params.dataTensor.type, params.dataTensor.shape); + const auto A = std::make_shared(params.dataTensor.type, params.dataTensor.shape); const auto f = std::make_shared( - std::make_shared(A, - opset1::Constant::create(params.targetShapeTensor.type, + std::make_shared(A, + op::v0::Constant::create(params.targetShapeTensor.type, params.targetShapeTensor.shape, params.targetShapeTensor.data.data()), - opset1::Constant::create(params.axesMappingTensor.type, + op::v0::Constant::create(params.axesMappingTensor.type, params.axesMappingTensor.shape, params.axesMappingTensor.data.data())), ParameterVector{A}); @@ -203,15 +206,15 @@ class ReferenceBroadcastTestTestHelper : public testing::TestWithParam CreateFunction(const BroadcastParamsTestHelper& params) { - const auto A = std::make_shared(element::f32, params.shapeA); - const auto shape_const = opset1::Constant::create(element::u64, Shape{params.shapeR.size()}, params.shapeR); + const auto A = std::make_shared(element::f32, params.shapeA); + const auto shape_const = op::v0::Constant::create(element::u64, Shape{params.shapeR.size()}, params.shapeR); std::shared_ptr broadcast; if (params.axes.size() > 0) { auto axes_const = - opset1::Constant::create(element::i64, Shape{params.axes.size()}, params.axes.to_vector()); - broadcast = std::make_shared(A, shape_const, axes_const); + op::v0::Constant::create(element::i64, Shape{params.axes.size()}, params.axes.to_vector()); + broadcast = std::make_shared(A, shape_const, axes_const); } else { - broadcast = std::make_shared(A, shape_const); + broadcast = std::make_shared(A, shape_const); } auto f = std::make_shared(broadcast, ParameterVector{A}); return f; @@ -239,18 +242,18 @@ TEST_P(ReferenceBroadcastTestTestHelper, CompareWithRefs) { class ReferenceBroadcastTestExplicitAxisReversed : public ReferenceBroadcastTestExplicitAxis { private: static std::shared_ptr CreateFunction(const BroadcastParamsExplicitAxis& params) { - const auto A = std::make_shared(params.dataTensor.type, params.dataTensor.shape); + const auto A = std::make_shared(params.dataTensor.type, params.dataTensor.shape); auto broadcast = - std::make_shared(A, - opset1::Constant::create(params.targetShapeTensor.type, + std::make_shared(A, + op::v0::Constant::create(params.targetShapeTensor.type, params.targetShapeTensor.shape, params.targetShapeTensor.data.data()), - opset1::Constant::create(params.axesMappingTensor.type, + op::v0::Constant::create(params.axesMappingTensor.type, params.axesMappingTensor.shape, params.axesMappingTensor.data.data())); - auto reverse = std::make_shared(broadcast, - opset1::Constant::create(element::i64, {1}, {1}), - opset1::Reverse::Mode::INDEX); + auto reverse = std::make_shared(broadcast, + op::v0::Constant::create(element::i64, {1}, {1}), + op::v1::Reverse::Mode::INDEX); auto f = std::make_shared(NodeVector{reverse}, ParameterVector{A}); return f; } diff --git a/src/plugins/template/tests/functional/op_reference/concat.cpp b/src/plugins/template/tests/functional/op_reference/concat.cpp index b11a3d1e4a4658..da3a141b160a18 100644 --- a/src/plugins/template/tests/functional/op_reference/concat.cpp +++ 
b/src/plugins/template/tests/functional/op_reference/concat.cpp @@ -2,10 +2,15 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/concat.hpp" + #include #include "base_reference_test.hpp" -#include "openvino/opsets/opset1.hpp" +#include "openvino/op/add.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/parameter.hpp" +#include "openvino/op/subtract.hpp" using namespace reference_tests; using namespace ov; @@ -68,17 +73,17 @@ class ReferenceConcatTest : public testing::TestWithParam, public private: static std::shared_ptr CreateFunction(const ConcatParams& params) { - std::shared_ptr A, B, C; + std::shared_ptr A, B, C; if (params.dynamicShape.is_dynamic()) { - A = std::make_shared(params.A.type, params.dynamicShape); - B = std::make_shared(params.B.type, params.dynamicShape); - C = std::make_shared(params.C.type, params.dynamicShape); + A = std::make_shared(params.A.type, params.dynamicShape); + B = std::make_shared(params.B.type, params.dynamicShape); + C = std::make_shared(params.C.type, params.dynamicShape); } else { - A = std::make_shared(params.A.type, params.A.shape); - B = std::make_shared(params.B.type, params.B.shape); - C = std::make_shared(params.C.type, params.C.shape); + A = std::make_shared(params.A.type, params.A.shape); + B = std::make_shared(params.B.type, params.B.shape); + C = std::make_shared(params.C.type, params.C.shape); } - auto f = std::make_shared(std::make_shared(NodeVector{A, B, C}, params.axis), + auto f = std::make_shared(std::make_shared(NodeVector{A, B, C}, params.axis), ParameterVector{A, B, C}); return f; } @@ -190,11 +195,11 @@ class ReferenceConcatTestVectorLarge : public testing::TestWithParam(element::f32, shape_a); + auto A = std::make_shared(element::f32, shape_a); inputs_param.push_back(A); inputs.push_back(A); } - function = std::make_shared(std::make_shared(inputs, 0), inputs_param); + function = std::make_shared(std::make_shared(inputs, 0), inputs_param); std::vector ref_result; for (uint32_t i = 0; i < params.numInputs; i++) { @@ -309,15 +314,15 @@ class ReferenceConcatTestInPlace2dTensor : public testing::TestWithParam CreateFunction(const ConcatParamsInPlace2dTensor& params) { - const auto A = std::make_shared(params.A.type, params.A.shape); - const auto B = std::make_shared(params.B.type, params.B.shape); - const auto add1 = std::make_shared(A, B); - const auto C = std::make_shared(params.C.type, params.C.shape); - const auto D = std::make_shared(params.D.type, params.D.shape); - const auto add2 = std::make_shared(C, D); - const auto subtract = std::make_shared(C, A); + const auto A = std::make_shared(params.A.type, params.A.shape); + const auto B = std::make_shared(params.B.type, params.B.shape); + const auto add1 = std::make_shared(A, B); + const auto C = std::make_shared(params.C.type, params.C.shape); + const auto D = std::make_shared(params.D.type, params.D.shape); + const auto add2 = std::make_shared(C, D); + const auto subtract = std::make_shared(C, A); const auto f = - std::make_shared(std::make_shared(NodeVector{add1, add2, subtract}, params.axis), + std::make_shared(std::make_shared(NodeVector{add1, add2, subtract}, params.axis), ParameterVector{A, B, C, D}); return f; } @@ -426,16 +431,16 @@ class ReferenceConcatTestInPlacePropagate2dTensor : public testing::TestWithPara private: static std::shared_ptr CreateFunction(const ConcatParamsInPlacePropagate2dTensor& params) { - const auto A = std::make_shared(params.A.type, params.A.shape); - const auto B = std::make_shared(params.B.type, params.B.shape); - 
const auto add1 = std::make_shared(A, B); - const auto C = std::make_shared(params.C.type, params.C.shape); - const auto D = std::make_shared(params.D.type, params.D.shape); - const auto add2 = std::make_shared(C, D); - const auto concat1 = std::make_shared(NodeVector{add1, add2}, params.axis); - const auto subtract = std::make_shared(C, A); + const auto A = std::make_shared(params.A.type, params.A.shape); + const auto B = std::make_shared(params.B.type, params.B.shape); + const auto add1 = std::make_shared(A, B); + const auto C = std::make_shared(params.C.type, params.C.shape); + const auto D = std::make_shared(params.D.type, params.D.shape); + const auto add2 = std::make_shared(C, D); + const auto concat1 = std::make_shared(NodeVector{add1, add2}, params.axis); + const auto subtract = std::make_shared(C, A); const auto f = - std::make_shared(std::make_shared(NodeVector{concat1, subtract}, params.axis), + std::make_shared(std::make_shared(NodeVector{concat1, subtract}, params.axis), ParameterVector{A, B, C, D}); return f; } @@ -534,12 +539,12 @@ class ReferenceConcatTestInPlaceTree1 : public testing::TestWithParam CreateFunction(const ConcatParamsInPlaceTree1& params) { - const auto A = std::make_shared(params.A.type, params.A.shape); - const auto B = std::make_shared(params.B.type, params.B.shape); - const auto add1 = std::make_shared(A, B); - const auto add2 = std::make_shared(A, B); - const auto concat = std::make_shared(NodeVector{add1, add2}, params.axis); - const auto f = std::make_shared(std::make_shared(concat, concat), ParameterVector{A, B}); + const auto A = std::make_shared(params.A.type, params.A.shape); + const auto B = std::make_shared(params.B.type, params.B.shape); + const auto add1 = std::make_shared(A, B); + const auto add2 = std::make_shared(A, B); + const auto concat = std::make_shared(NodeVector{add1, add2}, params.axis); + const auto f = std::make_shared(std::make_shared(concat, concat), ParameterVector{A, B}); return f; } }; @@ -635,15 +640,15 @@ class ReferenceConcatTestInPlaceTree2 : public testing::TestWithParam CreateFunction(const ConcatParamsInPlaceTree2& params) { - const auto A = std::make_shared(params.A.type, params.A.shape); - const auto B = std::make_shared(params.B.type, params.B.shape); - const auto add1 = std::make_shared(A, B); - const auto add2 = std::make_shared(A, B); - const auto concat1 = std::make_shared(NodeVector{add1, add2}, params.axis); - const auto concat2 = std::make_shared(NodeVector{add1, add2}, params.axis); - const auto concat12 = std::make_shared(NodeVector{concat1, concat2}, params.axis); + const auto A = std::make_shared(params.A.type, params.A.shape); + const auto B = std::make_shared(params.B.type, params.B.shape); + const auto add1 = std::make_shared(A, B); + const auto add2 = std::make_shared(A, B); + const auto concat1 = std::make_shared(NodeVector{add1, add2}, params.axis); + const auto concat2 = std::make_shared(NodeVector{add1, add2}, params.axis); + const auto concat12 = std::make_shared(NodeVector{concat1, concat2}, params.axis); const auto f = - std::make_shared(std::make_shared(concat12, concat12), ParameterVector{A, B}); + std::make_shared(std::make_shared(concat12, concat12), ParameterVector{A, B}); return f; } }; @@ -740,17 +745,17 @@ class ReferenceConcatTestInPlaceTree3 : public testing::TestWithParam CreateFunction(const ConcatParamsInPlaceTree3& params) { - const auto A = std::make_shared(params.A.type, params.A.shape); - const auto B = std::make_shared(params.B.type, params.B.shape); - const auto concat1 = 
std::make_shared(NodeVector{A, B}, params.axis); - const auto concat2 = std::make_shared(NodeVector{A, B}, params.axis); - const auto concat3 = std::make_shared(NodeVector{A, B}, params.axis); - const auto concat4 = std::make_shared(NodeVector{A, B}, params.axis); - const auto concat12 = std::make_shared(NodeVector{concat1, concat2}, params.axis); - const auto concat34 = std::make_shared(NodeVector{concat3, concat4}, params.axis); - const auto concat14 = std::make_shared(NodeVector{concat12, concat34}, params.axis); + const auto A = std::make_shared(params.A.type, params.A.shape); + const auto B = std::make_shared(params.B.type, params.B.shape); + const auto concat1 = std::make_shared(NodeVector{A, B}, params.axis); + const auto concat2 = std::make_shared(NodeVector{A, B}, params.axis); + const auto concat3 = std::make_shared(NodeVector{A, B}, params.axis); + const auto concat4 = std::make_shared(NodeVector{A, B}, params.axis); + const auto concat12 = std::make_shared(NodeVector{concat1, concat2}, params.axis); + const auto concat34 = std::make_shared(NodeVector{concat3, concat4}, params.axis); + const auto concat14 = std::make_shared(NodeVector{concat12, concat34}, params.axis); const auto f = - std::make_shared(std::make_shared(concat14, concat14), ParameterVector{A, B}); + std::make_shared(std::make_shared(concat14, concat14), ParameterVector{A, B}); return f; } }; @@ -848,12 +853,12 @@ class ReferenceConcatTestInPlaceAddConcat : public testing::TestWithParam CreateFunction(const ConcatParamsInPlaceAddConcat& params) { - const auto A = std::make_shared(params.A.type, params.A.shape); - const auto B = std::make_shared(params.B.type, params.B.shape); - const auto add1 = std::make_shared(A, B); - const auto add2 = std::make_shared(add1, add1); - const auto concat = std::make_shared(NodeVector{add1, add2}, params.axis); - const auto add3 = std::make_shared(concat, concat); + const auto A = std::make_shared(params.A.type, params.A.shape); + const auto B = std::make_shared(params.B.type, params.B.shape); + const auto add1 = std::make_shared(A, B); + const auto add2 = std::make_shared(add1, add1); + const auto concat = std::make_shared(NodeVector{add1, add2}, params.axis); + const auto add3 = std::make_shared(concat, concat); const auto f = std::make_shared(add3, ParameterVector{A, B}); return f; } @@ -950,16 +955,16 @@ class ReferenceConcatTestInPlaceAddConcat2 : public testing::TestWithParam CreateFunction(const ConcatParamsInPlaceAddConcat2& params) { - const auto A = std::make_shared(params.A.type, params.A.shape); - const auto B = std::make_shared(params.B.type, params.B.shape); - const auto add1 = std::make_shared(A, B); - const auto add2 = std::make_shared(A, B); - const auto add3 = std::make_shared(A, B); - const auto add4 = std::make_shared(A, B); - const auto add5 = std::make_shared(A, B); - const auto concat1 = std::make_shared(NodeVector{add1, add2, add3}, params.axis); - const auto concat2 = std::make_shared(NodeVector{add4, add2, add5}, params.axis); - const auto add6 = std::make_shared(concat1, concat2); + const auto A = std::make_shared(params.A.type, params.A.shape); + const auto B = std::make_shared(params.B.type, params.B.shape); + const auto add1 = std::make_shared(A, B); + const auto add2 = std::make_shared(A, B); + const auto add3 = std::make_shared(A, B); + const auto add4 = std::make_shared(A, B); + const auto add5 = std::make_shared(A, B); + const auto concat1 = std::make_shared(NodeVector{add1, add2, add3}, params.axis); + const auto concat2 = 
std::make_shared(NodeVector{add4, add2, add5}, params.axis); + const auto add6 = std::make_shared(concat1, concat2); const auto f = std::make_shared(add6, ParameterVector{A, B}); return f; } @@ -1061,10 +1066,10 @@ class ReferenceConcatTest5d : public testing::TestWithParam, pub private: static std::shared_ptr CreateFunction(const ConcatParams5d& params) { - const auto A = std::make_shared(params.A.type, params.A.shape); - const auto B = std::make_shared(params.B.type, params.B.shape); - const auto C = std::make_shared(params.C.type, params.C.shape); - const auto concat = std::make_shared(NodeVector{A, B, C}, params.axis); + const auto A = std::make_shared(params.A.type, params.A.shape); + const auto B = std::make_shared(params.B.type, params.B.shape); + const auto C = std::make_shared(params.C.type, params.C.shape); + const auto concat = std::make_shared(NodeVector{A, B, C}, params.axis); const auto f = std::make_shared(concat, ParameterVector{A, B, C}); return f; } @@ -1210,9 +1215,9 @@ class ReferenceConcatTestZeroLength1dLast : public testing::TestWithParam CreateFunction(const ConcatParamsZeroLength1dLast& params) { - const auto A = std::make_shared(params.A.type, params.A.shape); - const auto B = std::make_shared(params.B.type, params.B.shape); - const auto concat = std::make_shared(NodeVector{A, B}, params.axis); + const auto A = std::make_shared(params.A.type, params.A.shape); + const auto B = std::make_shared(params.B.type, params.B.shape); + const auto concat = std::make_shared(NodeVector{A, B}, params.axis); const auto f = std::make_shared(concat, ParameterVector{A, B}); return f; } @@ -1314,10 +1319,10 @@ class ReferenceConcatTestZeroLength1dMiddle : public testing::TestWithParam CreateFunction(const ConcatParamsZeroLength1dMiddle& params) { - const auto A = std::make_shared(params.A.type, params.A.shape); - const auto B = std::make_shared(params.B.type, params.B.shape); - const auto C = std::make_shared(params.C.type, params.C.shape); - const auto concat = std::make_shared(NodeVector{A, B, C}, params.axis); + const auto A = std::make_shared(params.A.type, params.A.shape); + const auto B = std::make_shared(params.B.type, params.B.shape); + const auto C = std::make_shared(params.C.type, params.C.shape); + const auto concat = std::make_shared(NodeVector{A, B, C}, params.axis); const auto f = std::make_shared(concat, ParameterVector{A, B, C}); return f; } @@ -1409,8 +1414,8 @@ class ReferenceConcatTestZeroZero : public testing::TestWithParam CreateFunction(const ConcatParamsZeroZero& params) { - const auto constant_1 = std::make_shared(params.A.type, params.A.shape, params.A.data.data()); - const auto concat_1 = std::make_shared(NodeVector{constant_1, constant_1}, params.axis); + const auto constant_1 = std::make_shared(params.A.type, params.A.shape, params.A.data.data()); + const auto concat_1 = std::make_shared(NodeVector{constant_1, constant_1}, params.axis); const auto f = std::make_shared(concat_1, ParameterVector{}); return f; } @@ -1511,10 +1516,10 @@ class ReferenceConcatTestZeroLength4dMiddle : public testing::TestWithParam CreateFunction(const ConcatParamsZeroLength4dMiddle& params) { - const auto A = std::make_shared(params.A.type, params.A.shape); - const auto B = std::make_shared(params.B.type, params.B.shape); - const auto C = std::make_shared(params.C.type, params.C.shape); - const auto concat = std::make_shared(NodeVector{A, B, C}, params.axis); + const auto A = std::make_shared(params.A.type, params.A.shape); + const auto B = std::make_shared(params.B.type, 
params.B.shape); + const auto C = std::make_shared(params.C.type, params.C.shape); + const auto concat = std::make_shared(NodeVector{A, B, C}, params.axis); const auto f = std::make_shared(concat, ParameterVector{A, B, C}); return f; } diff --git a/src/plugins/template/tests/functional/op_reference/convert_color_i420.cpp b/src/plugins/template/tests/functional/op_reference/convert_color_i420.cpp index 844fdeb116344c..2e9747596965b6 100644 --- a/src/plugins/template/tests/functional/op_reference/convert_color_i420.cpp +++ b/src/plugins/template/tests/functional/op_reference/convert_color_i420.cpp @@ -4,13 +4,13 @@ #include -#include -#include -#include #include #include "base_reference_test.hpp" #include "functional_test_utils/skip_tests_config.hpp" +#include "openvino/core/model.hpp" +#include "openvino/op/i420_to_bgr.hpp" +#include "openvino/op/i420_to_rgb.hpp" using namespace ov; using namespace InferenceEngine; diff --git a/src/plugins/template/tests/functional/op_reference/convert_color_nv12.cpp b/src/plugins/template/tests/functional/op_reference/convert_color_nv12.cpp index 26d9c31bae3f1e..77781766478765 100644 --- a/src/plugins/template/tests/functional/op_reference/convert_color_nv12.cpp +++ b/src/plugins/template/tests/functional/op_reference/convert_color_nv12.cpp @@ -4,13 +4,13 @@ #include -#include -#include -#include #include #include "base_reference_test.hpp" #include "functional_test_utils/skip_tests_config.hpp" +#include "openvino/core/model.hpp" +#include "openvino/op/nv12_to_bgr.hpp" +#include "openvino/op/nv12_to_rgb.hpp" using namespace ov; using namespace InferenceEngine; diff --git a/src/plugins/template/tests/functional/op_reference/deformable_psroi_pooling.cpp b/src/plugins/template/tests/functional/op_reference/deformable_psroi_pooling.cpp index 858c5d287f4206..36f3e46b8d38ad 100644 --- a/src/plugins/template/tests/functional/op_reference/deformable_psroi_pooling.cpp +++ b/src/plugins/template/tests/functional/op_reference/deformable_psroi_pooling.cpp @@ -2,13 +2,14 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/deformable_psroi_pooling.hpp" + #include #include #include "base_reference_test.hpp" #include "openvino/op/psroi_pooling.hpp" -#include "openvino/opsets/opset1.hpp" using namespace reference_tests; using namespace ov; @@ -186,7 +187,7 @@ class ReferenceDeformablePSROIPoolingLayerTest : public testing::TestWithParam(params.roisType, params.roisShape); if (params.offsetsShape.size() != 0) { const auto offsets = std::make_shared(params.offsetsType, params.offsetsShape); - const auto DeformablePSROIPooling = std::make_shared(input, + const auto DeformablePSROIPooling = std::make_shared(input, rois, offsets, params.outputDim, @@ -200,7 +201,7 @@ class ReferenceDeformablePSROIPoolingLayerTest : public testing::TestWithParam(NodeVector{DeformablePSROIPooling}, ParameterVector{input, rois, offsets}); } else { - const auto DeformablePSROIPooling = std::make_shared(input, + const auto DeformablePSROIPooling = std::make_shared(input, rois, params.outputDim, params.spatialScale, diff --git a/src/plugins/template/tests/functional/op_reference/depth_to_space.cpp b/src/plugins/template/tests/functional/op_reference/depth_to_space.cpp index 6f683ed5452765..83c91f93f92a11 100644 --- a/src/plugins/template/tests/functional/op_reference/depth_to_space.cpp +++ b/src/plugins/template/tests/functional/op_reference/depth_to_space.cpp @@ -2,10 +2,12 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/depth_to_space.hpp" + #include #include 
"base_reference_test.hpp" -#include "openvino/opsets/opset1.hpp" +#include "openvino/op/parameter.hpp" using namespace reference_tests; using namespace ov; @@ -56,11 +58,10 @@ class ReferenceDepthToSpaceLayerTest : public testing::TestWithParam CreateFunction(const DepthToSpaceParams& params) { - opset1::DepthToSpace::DepthToSpaceMode mode = params.mode == "DEPTH_FIRST" - ? opset1::DepthToSpace::DepthToSpaceMode::DEPTH_FIRST - : opset1::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST; - const auto data = std::make_shared(params.dataTensor.type, params.dataTensor.shape); - const auto depthToSpace = std::make_shared(data, mode, params.blockSize); + const auto mode = params.mode == "DEPTH_FIRST" ? op::v0::DepthToSpace::DepthToSpaceMode::DEPTH_FIRST + : op::v0::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST; + const auto data = std::make_shared(params.dataTensor.type, params.dataTensor.shape); + const auto depthToSpace = std::make_shared(data, mode, params.blockSize); return std::make_shared(NodeVector{depthToSpace}, ParameterVector{data}); } }; diff --git a/src/plugins/template/tests/functional/op_reference/einsum.cpp b/src/plugins/template/tests/functional/op_reference/einsum.cpp index 400fdc445c5387..e01dc16293e17e 100644 --- a/src/plugins/template/tests/functional/op_reference/einsum.cpp +++ b/src/plugins/template/tests/functional/op_reference/einsum.cpp @@ -2,11 +2,12 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/einsum.hpp" + #include #include "base_reference_test.hpp" -#include "openvino/opsets/opset1.hpp" -#include "openvino/opsets/opset7.hpp" +#include "openvino/op/parameter.hpp" using namespace reference_tests; using namespace ov; @@ -56,11 +57,11 @@ class ReferenceEinsumTest : public testing::TestWithParam, public OutputVector output_vector; ParameterVector param_vector; for (const auto& input_tensor : params.inputs) { - auto param = std::make_shared(input_tensor.type, input_tensor.shape); + auto param = std::make_shared(input_tensor.type, input_tensor.shape); output_vector.push_back(param); param_vector.push_back(param); } - const auto einsum = std::make_shared(output_vector, params.equation); + const auto einsum = std::make_shared(output_vector, params.equation); const auto f = std::make_shared(OutputVector{einsum}, param_vector); return f; } diff --git a/src/plugins/template/tests/functional/op_reference/embedding_segments_sum.cpp b/src/plugins/template/tests/functional/op_reference/embedding_segments_sum.cpp index d308d2c2a5d69f..4726ccdffd6a51 100644 --- a/src/plugins/template/tests/functional/op_reference/embedding_segments_sum.cpp +++ b/src/plugins/template/tests/functional/op_reference/embedding_segments_sum.cpp @@ -4,9 +4,8 @@ #include -#include - #include "base_reference_test.hpp" +#include "shared_test_classes/base/layer_test_utils.hpp" using namespace reference_tests; using namespace ov; diff --git a/src/plugins/template/tests/functional/op_reference/embeddingbag_offsetssum.cpp b/src/plugins/template/tests/functional/op_reference/embeddingbag_offsetssum.cpp index f7e7bdcafaa7f0..9967e99b8e2317 100644 --- a/src/plugins/template/tests/functional/op_reference/embeddingbag_offsetssum.cpp +++ b/src/plugins/template/tests/functional/op_reference/embeddingbag_offsetssum.cpp @@ -4,9 +4,8 @@ #include -#include - #include "base_reference_test.hpp" +#include "shared_test_classes/base/layer_test_utils.hpp" using namespace reference_tests; using namespace ov; diff --git a/src/plugins/template/tests/functional/op_reference/embeddingbag_packedsum.cpp 
b/src/plugins/template/tests/functional/op_reference/embeddingbag_packedsum.cpp index 80af12d4053c8f..ea9dfe34decb22 100644 --- a/src/plugins/template/tests/functional/op_reference/embeddingbag_packedsum.cpp +++ b/src/plugins/template/tests/functional/op_reference/embeddingbag_packedsum.cpp @@ -4,9 +4,8 @@ #include -#include - #include "base_reference_test.hpp" +#include "shared_test_classes/base/layer_test_utils.hpp" using namespace reference_tests; using namespace ov; diff --git a/src/plugins/template/tests/functional/op_reference/extract_image_patches.cpp b/src/plugins/template/tests/functional/op_reference/extract_image_patches.cpp index 62fc41dbea9b2c..af82d7e3c74872 100644 --- a/src/plugins/template/tests/functional/op_reference/extract_image_patches.cpp +++ b/src/plugins/template/tests/functional/op_reference/extract_image_patches.cpp @@ -5,8 +5,8 @@ #include #include "base_reference_test.hpp" -#include "openvino/opsets/opset1.hpp" -#include "openvino/opsets/opset3.hpp" +#include "openvino/op/extractimagepatches.hpp" +#include "openvino/op/parameter.hpp" using namespace reference_tests; using namespace ov; @@ -61,8 +61,8 @@ class ReferenceExtractImagePatchesTest : public testing::TestWithParam CreateModel(const ExtractImagePatchesParams& params) { - const auto data = std::make_shared(params.data.type, params.data.shape); - const auto extrace_image_patches = std::make_shared(data, + const auto data = std::make_shared(params.data.type, params.data.shape); + const auto extrace_image_patches = std::make_shared(data, params.sizes, params.strides, params.rates, diff --git a/src/plugins/template/tests/functional/op_reference/gather.cpp b/src/plugins/template/tests/functional/op_reference/gather.cpp index 916cee50407836..99b1c82a8d91d8 100644 --- a/src/plugins/template/tests/functional/op_reference/gather.cpp +++ b/src/plugins/template/tests/functional/op_reference/gather.cpp @@ -2,12 +2,13 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/gather.hpp" + #include #include "base_reference_test.hpp" -#include "openvino/opsets/opset1.hpp" -#include "openvino/opsets/opset7.hpp" -#include "openvino/opsets/opset8.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/parameter.hpp" using namespace reference_tests; using namespace ov; @@ -62,11 +63,11 @@ class ReferenceGatherTest : public testing::TestWithParam, public private: static std::shared_ptr CreateFunction(const GatherParams& params) { - const auto P = std::make_shared(params.dataTensor.type, params.dataTensor.shape); - const auto I = std::make_shared(params.indicesTensor.type, params.indicesTensor.shape); + const auto P = std::make_shared(params.dataTensor.type, params.dataTensor.shape); + const auto I = std::make_shared(params.indicesTensor.type, params.indicesTensor.shape); const auto A = - opset1::Constant::create(params.axisTensor.type, params.axisTensor.shape, params.axisTensor.data.data()); - const auto G = std::make_shared(P, I, A); + op::v0::Constant::create(params.axisTensor.type, params.axisTensor.shape, params.axisTensor.data.data()); + const auto G = std::make_shared(P, I, A); const auto f = std::make_shared(G, ParameterVector{P, I}); return f; } @@ -129,11 +130,11 @@ class ReferenceGatherTestV7 : public testing::TestWithParam, pub private: static std::shared_ptr CreateFunction(const GatherParamsV7& params) { - const auto P = std::make_shared(params.dataTensor.type, params.dataTensor.shape); - const auto I = std::make_shared(params.indicesTensor.type, params.indicesTensor.shape); + const auto P = 
std::make_shared(params.dataTensor.type, params.dataTensor.shape); + const auto I = std::make_shared(params.indicesTensor.type, params.indicesTensor.shape); const auto A = - opset1::Constant::create(params.axisTensor.type, params.axisTensor.shape, params.axisTensor.data.data()); - const auto G = std::make_shared(P, I, A, params.batchDims); + op::v0::Constant::create(params.axisTensor.type, params.axisTensor.shape, params.axisTensor.data.data()); + const auto G = std::make_shared(P, I, A, params.batchDims); const auto f = std::make_shared(G, ParameterVector{P, I}); return f; } @@ -146,11 +147,11 @@ TEST_P(ReferenceGatherTestV7, CompareWithRefs) { class ReferenceGatherTestV8 : public ReferenceGatherTestV7 { private: static std::shared_ptr CreateFunction(const GatherParamsV7& params) { - const auto P = std::make_shared(params.dataTensor.type, params.dataTensor.shape); - const auto I = std::make_shared(params.indicesTensor.type, params.indicesTensor.shape); + const auto P = std::make_shared(params.dataTensor.type, params.dataTensor.shape); + const auto I = std::make_shared(params.indicesTensor.type, params.indicesTensor.shape); const auto A = - opset1::Constant::create(params.axisTensor.type, params.axisTensor.shape, params.axisTensor.data.data()); - const auto G = std::make_shared(P, I, A, params.batchDims); + op::v0::Constant::create(params.axisTensor.type, params.axisTensor.shape, params.axisTensor.data.data()); + const auto G = std::make_shared(P, I, A, params.batchDims); const auto f = std::make_shared(G, ParameterVector{P, I}); return f; } diff --git a/src/plugins/template/tests/functional/op_reference/gather_tree.cpp b/src/plugins/template/tests/functional/op_reference/gather_tree.cpp index 696820760e8fea..57a8b796431fc2 100644 --- a/src/plugins/template/tests/functional/op_reference/gather_tree.cpp +++ b/src/plugins/template/tests/functional/op_reference/gather_tree.cpp @@ -2,10 +2,12 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/gather_tree.hpp" + #include #include "base_reference_test.hpp" -#include "openvino/opsets/opset1.hpp" +#include "openvino/op/parameter.hpp" using namespace reference_tests; using namespace ov; @@ -59,11 +61,11 @@ class ReferenceGatherTreeTest : public testing::TestWithParam, private: static std::shared_ptr CreateFunction(const GatherTreeParams& params) { - const auto step_ids = std::make_shared(params.stepIds.type, params.stepIds.shape); - const auto parent_idx = std::make_shared(params.parentIdx.type, params.parentIdx.shape); - const auto max_seq_len = std::make_shared(params.maxSeqLen.type, params.maxSeqLen.shape); - const auto end_token = std::make_shared(params.endToken.type, params.endToken.shape); - const auto gather_tree = std::make_shared(step_ids, parent_idx, max_seq_len, end_token); + const auto step_ids = std::make_shared(params.stepIds.type, params.stepIds.shape); + const auto parent_idx = std::make_shared(params.parentIdx.type, params.parentIdx.shape); + const auto max_seq_len = std::make_shared(params.maxSeqLen.type, params.maxSeqLen.shape); + const auto end_token = std::make_shared(params.endToken.type, params.endToken.shape); + const auto gather_tree = std::make_shared(step_ids, parent_idx, max_seq_len, end_token); const auto f = std::make_shared(gather_tree, ParameterVector{step_ids, parent_idx, max_seq_len, end_token}); return f; diff --git a/src/plugins/template/tests/functional/op_reference/group_convolution_backprop.cpp b/src/plugins/template/tests/functional/op_reference/group_convolution_backprop.cpp index 
6cc21eb9cb4f04..b378fe18cc27cf 100644 --- a/src/plugins/template/tests/functional/op_reference/group_convolution_backprop.cpp +++ b/src/plugins/template/tests/functional/op_reference/group_convolution_backprop.cpp @@ -5,8 +5,8 @@ #include #include "base_reference_test.hpp" +#include "openvino/op/constant.hpp" #include "openvino/op/group_conv.hpp" -#include "openvino/opsets/opset8.hpp" using namespace reference_tests; using namespace ov; @@ -187,7 +187,7 @@ class ReferenceGroupConvolutionBackpropDataLayerOutShapeTest const auto in = std::make_shared(params.inType, params.inputShape); const auto filter = std::make_shared(params.inType, params.filterShape); - auto output_shape = std::make_shared(element::i64, + auto output_shape = std::make_shared(element::i64, params.constantOutputShape, params.constantOutputShapeData); const auto GroupConvolutionBackpropData = diff --git a/src/plugins/template/tests/functional/op_reference/if.cpp b/src/plugins/template/tests/functional/op_reference/if.cpp index 81b454c01484cf..eef66bb3e5ad61 100644 --- a/src/plugins/template/tests/functional/op_reference/if.cpp +++ b/src/plugins/template/tests/functional/op_reference/if.cpp @@ -5,11 +5,11 @@ #include #include -#include #include -#include #include "base_reference_test.hpp" +#include "ie_core.hpp" +#include "shared_test_classes/base/layer_test_utils.hpp" using namespace reference_tests; using namespace ov; diff --git a/src/plugins/template/tests/functional/op_reference/loop.cpp b/src/plugins/template/tests/functional/op_reference/loop.cpp index 7ff49b7173f9ab..ffdbc0b8dc6ee2 100644 --- a/src/plugins/template/tests/functional/op_reference/loop.cpp +++ b/src/plugins/template/tests/functional/op_reference/loop.cpp @@ -2,14 +2,19 @@ // SPDX-License-Identifier: Apache-2.0 // -#include +#include "openvino/op/loop.hpp" -#include -#include +#include #include "base_reference_test.hpp" #include "common_test_utils/common_utils.hpp" #include "functional_test_utils/skip_tests_config.hpp" +#include "openvino/core/model.hpp" +#include "openvino/op/add.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/multiply.hpp" +#include "openvino/op/parameter.hpp" +#include "openvino/op/result.hpp" namespace { enum LOOP_IN_TYPE { INVARIANT, MERGED }; @@ -30,36 +35,36 @@ struct LoopDynamicInputs : public LoopFunctionalBase { const int64_t& trip_count_value, const std::vector& loop_in_type, const ov::element::Type& net_type) override { - auto X = std::make_shared(ov::element::f32, ov::PartialShape::dynamic()); - auto Y = std::make_shared(ov::element::f32, ov::PartialShape::dynamic()); - auto M = std::make_shared(ov::element::f32, ov::PartialShape::dynamic()); + auto X = std::make_shared(ov::element::f32, ov::PartialShape::dynamic()); + auto Y = std::make_shared(ov::element::f32, ov::PartialShape::dynamic()); + auto M = std::make_shared(ov::element::f32, ov::PartialShape::dynamic()); // Set up the cell body, a function from (Xi, Yi) -> (Zo) // Body parameters - auto Xi = std::make_shared(ov::element::f32, ov::PartialShape::dynamic()); - auto Yi = std::make_shared(ov::element::f32, ov::PartialShape::dynamic()); - auto M_body = std::make_shared(ov::element::f32, ov::PartialShape::dynamic()); - auto body_condition = std::make_shared(ov::element::boolean, ov::Shape{1}, true); + auto Xi = std::make_shared(ov::element::f32, ov::PartialShape::dynamic()); + auto Yi = std::make_shared(ov::element::f32, ov::PartialShape::dynamic()); + auto M_body = std::make_shared(ov::element::f32, ov::PartialShape::dynamic()); + auto body_condition 
= std::make_shared(ov::element::boolean, ov::Shape{1}, true); - auto trip_count = std::make_shared(ov::element::i64, ov::Shape{1}, 3); - auto exec_condition = std::make_shared(ov::element::boolean, ov::Shape{1}, true); + auto trip_count = std::make_shared(ov::element::i64, ov::Shape{1}, 3); + auto exec_condition = std::make_shared(ov::element::boolean, ov::Shape{1}, true); // Body - auto sum = std::make_shared(Xi, Yi); - auto Zo = std::make_shared(sum, M_body); + auto sum = std::make_shared(Xi, Yi); + auto Zo = std::make_shared(sum, M_body); auto body = std::make_shared(ov::OutputVector{body_condition, Zo}, ov::ParameterVector{Xi, Yi, M_body}); - auto loop = std::make_shared(trip_count, exec_condition); + auto loop = std::make_shared(trip_count, exec_condition); loop->set_function(body); loop->set_invariant_input(Xi, X); loop->set_invariant_input(Yi, Y); loop->set_merged_input(M_body, M, Zo); - loop->set_special_body_ports(ov::opset8::Loop::SpecialBodyPorts{-1, 0}); + loop->set_special_body_ports(ov::op::v5::Loop::SpecialBodyPorts{-1, 0}); // Output is last Zo - auto result = std::make_shared(loop->get_iter_value(Zo, -1)); + auto result = std::make_shared(loop->get_iter_value(Zo, -1)); return std::make_shared(ov::ResultVector{result}, ov::ParameterVector{X, Y, M}); } }; @@ -131,7 +136,7 @@ struct LoopStaticInputs : public LoopFunctionalBase { const ov::element::Type& net_type) override { ov::ParameterVector loop_params; for (auto&& input : loop_inputs) { - loop_params.emplace_back(std::make_shared(input.type, input.shape)); + loop_params.emplace_back(std::make_shared(input.type, input.shape)); } // Set up the cell body, a function from (Xi, Yi) -> (Zo) @@ -139,26 +144,26 @@ struct LoopStaticInputs : public LoopFunctionalBase { const std::vector body_params_shapes(loop_inputs.size(), ov::PartialShape::dynamic()); ov::ParameterVector body_params; for (const auto& pshape : body_params_shapes) { - body_params.emplace_back(std::make_shared(net_type, pshape)); + body_params.emplace_back(std::make_shared(net_type, pshape)); } const auto body_condition_const = - std::make_shared(ov::element::boolean, ov::Shape{1}, true); - const auto exec_condition = std::make_shared(ov::element::boolean, ov::Shape{1}, true); + std::make_shared(ov::element::boolean, ov::Shape{1}, true); + const auto exec_condition = std::make_shared(ov::element::boolean, ov::Shape{1}, true); std::shared_ptr trip_count_input; - trip_count_input = std::make_shared(ov::element::i64, ov::Shape{1}, trip_count); + trip_count_input = std::make_shared(ov::element::i64, ov::Shape{1}, trip_count); // Body std::shared_ptr Zo = body_params[0]; for (size_t i = 1; i < body_params.size(); ++i) { - Zo = std::make_shared(body_params[i], Zo); + Zo = std::make_shared(body_params[i], Zo); } const auto body = std::make_shared(ov::OutputVector{body_condition_const, Zo}, body_params); - const auto loop = std::make_shared(trip_count_input, exec_condition); + const auto loop = std::make_shared(trip_count_input, exec_condition); loop->set_function(body); - loop->set_special_body_ports(ov::opset8::Loop::SpecialBodyPorts{-1, 0}); + loop->set_special_body_ports(ov::op::v5::Loop::SpecialBodyPorts{-1, 0}); for (size_t i = 0; i < body_params.size(); ++i) { if (loop_in_type[i] == LOOP_IN_TYPE::INVARIANT) { @@ -177,9 +182,9 @@ struct LoopStaticInputs : public LoopFunctionalBase { // start=0, stride=1, part_size=1, end=-1, axis=1 const auto out2 = loop->get_concatenated_slices(Zo, 0, 1, 1, -1, 1); - const auto result0 = std::make_shared(out0); - const auto result1 
= std::make_shared(out1); - const auto result2 = std::make_shared(out2); + const auto result0 = std::make_shared(out0); + const auto result1 = std::make_shared(out1); + const auto result2 = std::make_shared(out2); const auto function = std::make_shared(ov::ResultVector{result0, result1, result2}, loop_params, "loop"); return function; diff --git a/src/plugins/template/tests/functional/op_reference/lstm_cell.cpp b/src/plugins/template/tests/functional/op_reference/lstm_cell.cpp index 8df731e9f6f459..7b8686dcaf8c4b 100644 --- a/src/plugins/template/tests/functional/op_reference/lstm_cell.cpp +++ b/src/plugins/template/tests/functional/op_reference/lstm_cell.cpp @@ -2,11 +2,12 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/lstm_cell.hpp" + #include #include "base_reference_test.hpp" -#include "openvino/opsets/opset1.hpp" -#include "openvino/opsets/opset4.hpp" +#include "openvino/op/parameter.hpp" using namespace reference_tests; using namespace ov; @@ -84,15 +85,15 @@ class ReferenceLSTMCellTest : public testing::TestWithParam, pub private: static std::shared_ptr CreateFunction(const LSTMCellParams& params) { - const auto X = std::make_shared(params.X.type, params.X.shape); - const auto W = std::make_shared(params.W.type, params.W.shape); - const auto R = std::make_shared(params.R.type, params.R.shape); - const auto H_t = std::make_shared(params.H_t.type, params.H_t.shape); - const auto C_t = std::make_shared(params.C_t.type, params.C_t.shape); - const auto B = std::make_shared(params.B.type, params.B.shape); + const auto X = std::make_shared(params.X.type, params.X.shape); + const auto W = std::make_shared(params.W.type, params.W.shape); + const auto R = std::make_shared(params.R.type, params.R.shape); + const auto H_t = std::make_shared(params.H_t.type, params.H_t.shape); + const auto C_t = std::make_shared(params.C_t.type, params.C_t.shape); + const auto B = std::make_shared(params.B.type, params.B.shape); const auto lstm_cell = - std::make_shared(X, + std::make_shared(X, H_t, C_t, op::util::convert_lstm_node_format(W, op::util::LSTMWeightsFormat::IOFC), @@ -117,15 +118,15 @@ class ReferenceLSTMCellTestBiasDefaultAttrs : public ReferenceLSTMCellTest { private: static std::shared_ptr CreateFunction(const LSTMCellParams& params) { - const auto X = std::make_shared(params.X.type, params.X.shape); - const auto W = std::make_shared(params.W.type, params.W.shape); - const auto R = std::make_shared(params.R.type, params.R.shape); - const auto H_t = std::make_shared(params.H_t.type, params.H_t.shape); - const auto C_t = std::make_shared(params.C_t.type, params.C_t.shape); - const auto B = std::make_shared(params.B.type, params.B.shape); + const auto X = std::make_shared(params.X.type, params.X.shape); + const auto W = std::make_shared(params.W.type, params.W.shape); + const auto R = std::make_shared(params.R.type, params.R.shape); + const auto H_t = std::make_shared(params.H_t.type, params.H_t.shape); + const auto C_t = std::make_shared(params.C_t.type, params.C_t.shape); + const auto B = std::make_shared(params.B.type, params.B.shape); const auto lstm_cell = - std::make_shared(X, + std::make_shared(X, H_t, C_t, op::util::convert_lstm_node_format(W, op::util::LSTMWeightsFormat::IOFC), @@ -152,14 +153,14 @@ class ReferenceLSTMCellTestBiasClip : public ReferenceLSTMCellTest { static std::shared_ptr CreateFunction(const LSTMCellParams& params) { const float clip_threshold = 3.5f; - const auto X = std::make_shared(params.X.type, params.X.shape); - const auto W = 
std::make_shared(params.W.type, params.W.shape); - const auto R = std::make_shared(params.R.type, params.R.shape); - const auto H_t = std::make_shared(params.H_t.type, params.H_t.shape); - const auto C_t = std::make_shared(params.C_t.type, params.C_t.shape); - const auto B = std::make_shared(params.B.type, params.B.shape); + const auto X = std::make_shared(params.X.type, params.X.shape); + const auto W = std::make_shared(params.W.type, params.W.shape); + const auto R = std::make_shared(params.R.type, params.R.shape); + const auto H_t = std::make_shared(params.H_t.type, params.H_t.shape); + const auto C_t = std::make_shared(params.C_t.type, params.C_t.shape); + const auto B = std::make_shared(params.B.type, params.B.shape); - const auto lstm_cell = std::make_shared(X, + const auto lstm_cell = std::make_shared(X, H_t, C_t, W, @@ -191,15 +192,15 @@ TEST_P(ReferenceLSTMCellTestBiasClip, CompareWithRefs) { class ReferenceLSTMCellV1Test : public ReferenceLSTMCellTest { private: static std::shared_ptr CreateFunction(const LSTMCellParams& params) { - const auto X = std::make_shared(params.X.type, params.X.shape); - const auto W = std::make_shared(params.W.type, params.W.shape); - const auto R = std::make_shared(params.R.type, params.R.shape); - const auto H_t = std::make_shared(params.H_t.type, params.H_t.shape); - const auto C_t = std::make_shared(params.C_t.type, params.C_t.shape); - const auto B = std::make_shared(params.B.type, params.B.shape); + const auto X = std::make_shared(params.X.type, params.X.shape); + const auto W = std::make_shared(params.W.type, params.W.shape); + const auto R = std::make_shared(params.R.type, params.R.shape); + const auto H_t = std::make_shared(params.H_t.type, params.H_t.shape); + const auto C_t = std::make_shared(params.C_t.type, params.C_t.shape); + const auto B = std::make_shared(params.B.type, params.B.shape); const auto lstm_cell = - std::make_shared(X, + std::make_shared(X, H_t, C_t, op::util::convert_lstm_node_format(W, op::util::LSTMWeightsFormat::IOFC), @@ -215,15 +216,15 @@ class ReferenceLSTMCellV1Test : public ReferenceLSTMCellTest { class ReferenceLSTMCellV1TestBiasDefaultAttrs : public ReferenceLSTMCellTestBiasDefaultAttrs { private: static std::shared_ptr CreateFunction(const LSTMCellParams& params) { - const auto X = std::make_shared(params.X.type, params.X.shape); - const auto W = std::make_shared(params.W.type, params.W.shape); - const auto R = std::make_shared(params.R.type, params.R.shape); - const auto H_t = std::make_shared(params.H_t.type, params.H_t.shape); - const auto C_t = std::make_shared(params.C_t.type, params.C_t.shape); - const auto B = std::make_shared(params.B.type, params.B.shape); + const auto X = std::make_shared(params.X.type, params.X.shape); + const auto W = std::make_shared(params.W.type, params.W.shape); + const auto R = std::make_shared(params.R.type, params.R.shape); + const auto H_t = std::make_shared(params.H_t.type, params.H_t.shape); + const auto C_t = std::make_shared(params.C_t.type, params.C_t.shape); + const auto B = std::make_shared(params.B.type, params.B.shape); const auto lstm_cell = - std::make_shared(X, + std::make_shared(X, H_t, C_t, op::util::convert_lstm_node_format(W, op::util::LSTMWeightsFormat::IOFC), @@ -256,15 +257,15 @@ class ReferenceLSTMCellV1TestBiasClip : public ReferenceLSTMCellTestBiasClip { static std::shared_ptr CreateFunction(const LSTMCellParams& params) { const float clip_threshold = 3.5f; - const auto X = std::make_shared(params.X.type, params.X.shape); - const auto W = 
std::make_shared(params.W.type, params.W.shape); - const auto R = std::make_shared(params.R.type, params.R.shape); - const auto H_t = std::make_shared(params.H_t.type, params.H_t.shape); - const auto C_t = std::make_shared(params.C_t.type, params.C_t.shape); - const auto B = std::make_shared(params.B.type, params.B.shape); - const auto P = std::make_shared(params.P.type, params.P.shape); + const auto X = std::make_shared(params.X.type, params.X.shape); + const auto W = std::make_shared(params.W.type, params.W.shape); + const auto R = std::make_shared(params.R.type, params.R.shape); + const auto H_t = std::make_shared(params.H_t.type, params.H_t.shape); + const auto C_t = std::make_shared(params.C_t.type, params.C_t.shape); + const auto B = std::make_shared(params.B.type, params.B.shape); + const auto P = std::make_shared(params.P.type, params.P.shape); - const auto lstm_cell = std::make_shared(X, + const auto lstm_cell = std::make_shared(X, H_t, C_t, W, diff --git a/src/plugins/template/tests/functional/op_reference/matrix_nms.cpp b/src/plugins/template/tests/functional/op_reference/matrix_nms.cpp index 4c2fd3ee50755c..f0adb506f6fa21 100644 --- a/src/plugins/template/tests/functional/op_reference/matrix_nms.cpp +++ b/src/plugins/template/tests/functional/op_reference/matrix_nms.cpp @@ -2,18 +2,19 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/matrix_nms.hpp" + #include #include "base_reference_test.hpp" -#include "openvino/opsets/opset1.hpp" -#include "openvino/opsets/opset8.hpp" +#include "openvino/op/parameter.hpp" using namespace reference_tests; using namespace ov; namespace { struct MatrixNmsParams { - MatrixNmsParams(const opset8::MatrixNms::Attributes& attrs, + MatrixNmsParams(const op::v8::MatrixNms::Attributes& attrs, const reference_tests::Tensor& boxes, const reference_tests::Tensor& scores, const reference_tests::Tensor& expectedSelectedScores, @@ -28,7 +29,7 @@ struct MatrixNmsParams { expectedValidOutputs(expectedValidOutputs), testcaseName(testcaseName) {} - opset8::MatrixNms::Attributes attrs; + op::v8::MatrixNms::Attributes attrs; reference_tests::Tensor boxes; reference_tests::Tensor scores; reference_tests::Tensor expectedSelectedScores; @@ -71,9 +72,9 @@ class ReferenceMatrixNmsTest : public testing::TestWithParam, p private: static std::shared_ptr CreateFunction(const MatrixNmsParams& params) { - const auto boxes = std::make_shared(params.boxes.type, PartialShape::dynamic()); - const auto scores = std::make_shared(params.scores.type, PartialShape::dynamic()); - const auto nms = std::make_shared(boxes, scores, params.attrs); + const auto boxes = std::make_shared(params.boxes.type, PartialShape::dynamic()); + const auto scores = std::make_shared(params.scores.type, PartialShape::dynamic()); + const auto nms = std::make_shared(boxes, scores, params.attrs); const auto f = std::make_shared(nms->outputs(), ParameterVector{boxes, scores}); return f; } @@ -91,14 +92,14 @@ std::vector generateParams() { std::vector params{ MatrixNmsParams( { - opset8::MatrixNms::SortResultType::SCORE, // sort_result_type + op::v8::MatrixNms::SortResultType::SCORE, // sort_result_type false, // sort_result_across_batch ET_IND, // output_type 0.0f, // score_threshold 3, // nms_top_k -1, // keep_top_k 0, // background_class - opset8::MatrixNms::DecayFunction::LINEAR, // decay_function + op::v8::MatrixNms::DecayFunction::LINEAR, // decay_function 2.0f, // gaussian_sigma 0.0f, // post_threshold true, // normalized @@ -135,14 +136,14 @@ std::vector generateParams() { 
"matrix_nms_output_type_i64"), MatrixNmsParams( { - opset8::MatrixNms::SortResultType::SCORE, // sort_result_type + op::v8::MatrixNms::SortResultType::SCORE, // sort_result_type false, // sort_result_across_batch ET_IND, // output_type 0.0f, // score_threshold 3, // nms_top_k -1, // keep_top_k 0, // background_class - opset8::MatrixNms::DecayFunction::LINEAR, // decay_function + op::v8::MatrixNms::DecayFunction::LINEAR, // decay_function 2.0f, // gaussian_sigma 0.0f, // post_threshold true, // normalized @@ -179,14 +180,14 @@ std::vector generateParams() { "matrix_nms_output_type_i32"), MatrixNmsParams( { - opset8::MatrixNms::SortResultType::SCORE, // sort_result_type + op::v8::MatrixNms::SortResultType::SCORE, // sort_result_type false, // sort_result_across_batch ET_IND, // output_type 0.0f, // score_threshold 3, // nms_top_k -1, // keep_top_k 0, // background_class - opset8::MatrixNms::DecayFunction::GAUSSIAN, // decay_function + op::v8::MatrixNms::DecayFunction::GAUSSIAN, // decay_function 2.0f, // gaussian_sigma 0.0f, // post_threshold true, // normalized @@ -223,14 +224,14 @@ std::vector generateParams() { "matrix_nms_gaussian"), MatrixNmsParams( { - opset8::MatrixNms::SortResultType::SCORE, // sort_result_type + op::v8::MatrixNms::SortResultType::SCORE, // sort_result_type false, // sort_result_across_batch ET_IND, // output_type 0.0f, // score_threshold 3, // nms_top_k -1, // keep_top_k 0, // background_class - opset8::MatrixNms::DecayFunction::LINEAR, // decay_function + op::v8::MatrixNms::DecayFunction::LINEAR, // decay_function 2.0f, // gaussian_sigma 0.0f, // post_threshold true, // normalized @@ -258,14 +259,14 @@ std::vector generateParams() { "matrix_nms_two_batches_two_classes"), MatrixNmsParams( { - opset8::MatrixNms::SortResultType::SCORE, // sort_result_type + op::v8::MatrixNms::SortResultType::SCORE, // sort_result_type true, // sort_result_across_batch ET_IND, // output_type 0.0f, // score_threshold 3, // nms_top_k -1, // keep_top_k -1, // background_class - opset8::MatrixNms::DecayFunction::LINEAR, // decay_function + op::v8::MatrixNms::DecayFunction::LINEAR, // decay_function 2.0f, // gaussian_sigma 0.5f, // post_threshold true, // normalized @@ -296,14 +297,14 @@ std::vector generateParams() { "matrix_nms_two_batches_two_classes_by_score_cross_batch"), MatrixNmsParams( { - opset8::MatrixNms::SortResultType::CLASSID, // sort_result_type + op::v8::MatrixNms::SortResultType::CLASSID, // sort_result_type true, // sort_result_across_batch ET_IND, // output_type 0.0f, // score_threshold 3, // nms_top_k -1, // keep_top_k -1, // background_class - opset8::MatrixNms::DecayFunction::LINEAR, // decay_function + op::v8::MatrixNms::DecayFunction::LINEAR, // decay_function 2.0f, // gaussian_sigma 0.5f, // post_threshold true, // normalized @@ -334,14 +335,14 @@ std::vector generateParams() { "matrix_nms_two_batches_two_classes_by_classid_cross_batch"), MatrixNmsParams( { - opset8::MatrixNms::SortResultType::CLASSID, // sort_result_type + op::v8::MatrixNms::SortResultType::CLASSID, // sort_result_type false, // sort_result_across_batch ET_IND, // output_type 0.0f, // score_threshold 3, // nms_top_k 3, // keep_top_k 0, // background_class - opset8::MatrixNms::DecayFunction::LINEAR, // decay_function + op::v8::MatrixNms::DecayFunction::LINEAR, // decay_function 2.0f, // gaussian_sigma 0.0f, // post_threshold true, // normalized @@ -369,14 +370,14 @@ std::vector generateParams() { "matrix_nms_by_keep_top_k"), MatrixNmsParams( { - opset8::MatrixNms::SortResultType::SCORE, // 
sort_result_type + op::v8::MatrixNms::SortResultType::SCORE, // sort_result_type false, // sort_result_across_batch ET_IND, // output_type 0.0f, // score_threshold 3, // nms_top_k -1, // keep_top_k -1, // background_class - opset8::MatrixNms::DecayFunction::LINEAR, // decay_function + op::v8::MatrixNms::DecayFunction::LINEAR, // decay_function 2.0f, // gaussian_sigma 0.0f, // post_threshold true, // normalized @@ -400,14 +401,14 @@ std::vector generateParams() { "matrix_nms_background"), MatrixNmsParams( { - opset8::MatrixNms::SortResultType::SCORE, // sort_result_type + op::v8::MatrixNms::SortResultType::SCORE, // sort_result_type false, // sort_result_across_batch ET_IND, // output_type 0.0f, // score_threshold 3, // nms_top_k -1, // keep_top_k -1, // background_class - opset8::MatrixNms::DecayFunction::LINEAR, // decay_function + op::v8::MatrixNms::DecayFunction::LINEAR, // decay_function 2.0f, // gaussian_sigma 0.0f, // post_threshold true, // normalized @@ -442,14 +443,14 @@ std::vector generateParams() { "matrix_nms_flipped_coordinates"), MatrixNmsParams( { - opset8::MatrixNms::SortResultType::SCORE, // sort_result_type + op::v8::MatrixNms::SortResultType::SCORE, // sort_result_type false, // sort_result_across_batch ET_IND, // output_type 0.0f, // score_threshold 3, // nms_top_k -1, // keep_top_k -1, // background_class - opset8::MatrixNms::DecayFunction::LINEAR, // decay_function + op::v8::MatrixNms::DecayFunction::LINEAR, // decay_function 2.0f, // gaussian_sigma 0.8f, // post_threshold true, // normalized @@ -468,14 +469,14 @@ std::vector generateParams() { "matrix_nms_post_threshold"), MatrixNmsParams( { - opset8::MatrixNms::SortResultType::SCORE, // sort_result_type + op::v8::MatrixNms::SortResultType::SCORE, // sort_result_type false, // sort_result_across_batch ET_IND, // output_type 0.0f, // score_threshold 3, // nms_top_k -1, // keep_top_k -1, // background_class - opset8::MatrixNms::DecayFunction::LINEAR, // decay_function + op::v8::MatrixNms::DecayFunction::LINEAR, // decay_function 2.0f, // gaussian_sigma 0.3f, // post_threshold true, // normalized @@ -497,14 +498,14 @@ std::vector generateParams() { "matrix_nms_identical_boxes"), MatrixNmsParams( { - opset8::MatrixNms::SortResultType::SCORE, // sort_result_type + op::v8::MatrixNms::SortResultType::SCORE, // sort_result_type false, // sort_result_across_batch ET_IND, // output_type 0.0f, // score_threshold 2, // nms_top_k -1, // keep_top_k -1, // background_class - opset8::MatrixNms::DecayFunction::LINEAR, // decay_function + op::v8::MatrixNms::DecayFunction::LINEAR, // decay_function 2.0f, // gaussian_sigma 0.0f, // post_threshold true, // normalized @@ -523,14 +524,14 @@ std::vector generateParams() { "matrix_nms_nms_top_k"), MatrixNmsParams( { - opset8::MatrixNms::SortResultType::SCORE, // sort_result_type + op::v8::MatrixNms::SortResultType::SCORE, // sort_result_type false, // sort_result_across_batch ET_IND, // output_type 0.0f, // score_threshold 3, // nms_top_k -1, // keep_top_k -1, // background_class - opset8::MatrixNms::DecayFunction::LINEAR, // decay_function + op::v8::MatrixNms::DecayFunction::LINEAR, // decay_function 2.0f, // gaussian_sigma 0.0f, // post_threshold true, // normalized @@ -545,14 +546,14 @@ std::vector generateParams() { "matrix_nms_single_box"), MatrixNmsParams( { - opset8::MatrixNms::SortResultType::SCORE, // sort_result_type + op::v8::MatrixNms::SortResultType::SCORE, // sort_result_type false, // sort_result_across_batch ET_IND, // output_type 2.0f, // score_threshold 3, // nms_top_k -1, 
// keep_top_k -1, // background_class - opset8::MatrixNms::DecayFunction::LINEAR, // decay_function + op::v8::MatrixNms::DecayFunction::LINEAR, // decay_function 2.0f, // gaussian_sigma 0.0f, // post_threshold true, // normalized diff --git a/src/plugins/template/tests/functional/op_reference/nms_rotated.cpp b/src/plugins/template/tests/functional/op_reference/nms_rotated.cpp index d22aa82911830d..34f0bc074ec2f1 100644 --- a/src/plugins/template/tests/functional/op_reference/nms_rotated.cpp +++ b/src/plugins/template/tests/functional/op_reference/nms_rotated.cpp @@ -2,11 +2,13 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/nms_rotated.hpp" + #include #include "base_reference_test.hpp" -#include "openvino/opsets/opset1.hpp" -#include "openvino/opsets/opset13.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/parameter.hpp" using namespace reference_tests; using namespace ov; @@ -72,19 +74,19 @@ class ReferenceNMSRotatedTest : public testing::TestWithParam, private: static std::shared_ptr CreateModel(const NMSRotatedParams& params) { - const auto boxes = std::make_shared(params.boxes.type, params.boxes.shape); - const auto scores = std::make_shared(params.scores.type, params.scores.shape); + const auto boxes = std::make_shared(params.boxes.type, params.boxes.shape); + const auto scores = std::make_shared(params.scores.type, params.scores.shape); const auto max_output_boxes_per_class = - std::make_shared(params.maxOutputBoxesPerClass.type, + std::make_shared(params.maxOutputBoxesPerClass.type, params.maxOutputBoxesPerClass.shape, params.maxOutputBoxesPerClass.data.data()); - const auto iou_threshold = std::make_shared(params.iouThreshold.type, + const auto iou_threshold = std::make_shared(params.iouThreshold.type, params.iouThreshold.shape, params.iouThreshold.data.data()); - const auto score_threshold = std::make_shared(params.scoreThreshold.type, + const auto score_threshold = std::make_shared(params.scoreThreshold.type, params.scoreThreshold.shape, params.scoreThreshold.data.data()); - const auto nms = std::make_shared(boxes, + const auto nms = std::make_shared(boxes, scores, max_output_boxes_per_class, iou_threshold, @@ -113,16 +115,16 @@ class ReferenceNMSRotatedTestWithoutConstants : public ReferenceNMSRotatedTest { private: static std::shared_ptr CreateModel(const NMSRotatedParams& params) { - const auto boxes = std::make_shared(params.boxes.type, params.boxes.shape); - const auto scores = std::make_shared(params.scores.type, params.scores.shape); + const auto boxes = std::make_shared(params.boxes.type, params.boxes.shape); + const auto scores = std::make_shared(params.scores.type, params.scores.shape); const auto max_output_boxes_per_class = - std::make_shared(params.maxOutputBoxesPerClass.type, + std::make_shared(params.maxOutputBoxesPerClass.type, params.maxOutputBoxesPerClass.shape); const auto iou_threshold = - std::make_shared(params.iouThreshold.type, params.iouThreshold.shape); + std::make_shared(params.iouThreshold.type, params.iouThreshold.shape); const auto score_threshold = - std::make_shared(params.scoreThreshold.type, params.scoreThreshold.shape); - const auto nms = std::make_shared(boxes, + std::make_shared(params.scoreThreshold.type, params.scoreThreshold.shape); + const auto nms = std::make_shared(boxes, scores, max_output_boxes_per_class, iou_threshold, diff --git a/src/plugins/template/tests/functional/op_reference/non_max_suppression.cpp b/src/plugins/template/tests/functional/op_reference/non_max_suppression.cpp index 
333ea4cf0d2845..7a3e8f28b03550 100644 --- a/src/plugins/template/tests/functional/op_reference/non_max_suppression.cpp +++ b/src/plugins/template/tests/functional/op_reference/non_max_suppression.cpp @@ -2,13 +2,13 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/non_max_suppression.hpp" + #include #include "base_reference_test.hpp" -#include "openvino/opsets/opset1.hpp" -#include "openvino/opsets/opset3.hpp" -#include "openvino/opsets/opset4.hpp" -#include "openvino/opsets/opset5.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/parameter.hpp" using namespace reference_tests; using namespace ov; @@ -21,7 +21,7 @@ struct NonMaxSuppressionParams { reference_tests::Tensor iouThreshold; reference_tests::Tensor scoreThreshold; reference_tests::Tensor softNmsSigma; - opset5::NonMaxSuppression::BoxEncodingType boxEncoding; + op::v5::NonMaxSuppression::BoxEncodingType boxEncoding; reference_tests::Tensor expectedSelectedIndices; reference_tests::Tensor expectedSelectedScores; reference_tests::Tensor expectedValidOutputs; @@ -75,22 +75,22 @@ class ReferenceNonMaxSuppressionTest : public testing::TestWithParam CreateFunction(const NonMaxSuppressionParams& params) { - const auto boxes = std::make_shared(params.boxes.type, params.boxes.shape); - const auto scores = std::make_shared(params.scores.type, params.scores.shape); + const auto boxes = std::make_shared(params.boxes.type, params.boxes.shape); + const auto scores = std::make_shared(params.scores.type, params.scores.shape); const auto max_output_boxes_per_class = - std::make_shared(params.maxOutputBoxesPerClass.type, + std::make_shared(params.maxOutputBoxesPerClass.type, params.maxOutputBoxesPerClass.shape, params.maxOutputBoxesPerClass.data.data()); - const auto iou_threshold = std::make_shared(params.iouThreshold.type, + const auto iou_threshold = std::make_shared(params.iouThreshold.type, params.iouThreshold.shape, params.iouThreshold.data.data()); - const auto score_threshold = std::make_shared(params.scoreThreshold.type, + const auto score_threshold = std::make_shared(params.scoreThreshold.type, params.scoreThreshold.shape, params.scoreThreshold.data.data()); - const auto soft_nms_sigma = std::make_shared(params.softNmsSigma.type, + const auto soft_nms_sigma = std::make_shared(params.softNmsSigma.type, params.softNmsSigma.shape, params.softNmsSigma.data.data()); - const auto nms = std::make_shared(boxes, + const auto nms = std::make_shared(boxes, scores, max_output_boxes_per_class, iou_threshold, @@ -122,18 +122,18 @@ class ReferenceNonMaxSuppressionTestWithoutConstants : public ReferenceNonMaxSup private: static std::shared_ptr CreateFunction(const NonMaxSuppressionParams& params) { - const auto boxes = std::make_shared(params.boxes.type, params.boxes.shape); - const auto scores = std::make_shared(params.scores.type, params.scores.shape); + const auto boxes = std::make_shared(params.boxes.type, params.boxes.shape); + const auto scores = std::make_shared(params.scores.type, params.scores.shape); const auto max_output_boxes_per_class = - std::make_shared(params.maxOutputBoxesPerClass.type, + std::make_shared(params.maxOutputBoxesPerClass.type, params.maxOutputBoxesPerClass.shape); const auto iou_threshold = - std::make_shared(params.iouThreshold.type, params.iouThreshold.shape); + std::make_shared(params.iouThreshold.type, params.iouThreshold.shape); const auto score_threshold = - std::make_shared(params.scoreThreshold.type, params.scoreThreshold.shape); + std::make_shared(params.scoreThreshold.type, 
params.scoreThreshold.shape); const auto soft_nms_sigma = - std::make_shared(params.softNmsSigma.type, params.softNmsSigma.shape); - const auto nms = std::make_shared(boxes, + std::make_shared(params.softNmsSigma.type, params.softNmsSigma.shape); + const auto nms = std::make_shared(boxes, scores, max_output_boxes_per_class, iou_threshold, @@ -173,7 +173,7 @@ std::vector generateParams() { .iouThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.5f})) .scoreThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.0f})) .softNmsSigma(reference_tests::Tensor(ET_TH, {}, std::vector{0.0f})) - .boxEncoding(opset5::NonMaxSuppression::BoxEncodingType::CENTER) + .boxEncoding(op::v5::NonMaxSuppression::BoxEncodingType::CENTER) .expectedSelectedIndices( reference_tests::Tensor(ET_IND, {3, 3}, std::vector{0, 0, 3, 0, 0, 0, 0, 0, 5})) .expectedSelectedScores( @@ -190,7 +190,7 @@ std::vector generateParams() { .iouThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.5f})) .scoreThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.0f})) .softNmsSigma(reference_tests::Tensor(ET_TH, {}, std::vector{0.0f})) - .boxEncoding(opset5::NonMaxSuppression::BoxEncodingType::CORNER) + .boxEncoding(op::v5::NonMaxSuppression::BoxEncodingType::CORNER) .expectedSelectedIndices( reference_tests::Tensor(ET_IND, {3, 3}, std::vector{0, 0, 3, 0, 0, 0, 0, 0, 5})) .expectedSelectedScores( @@ -211,7 +211,7 @@ std::vector generateParams() { .iouThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.5f})) .scoreThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.0f})) .softNmsSigma(reference_tests::Tensor(ET_TH, {}, std::vector{0.0f})) - .boxEncoding(opset5::NonMaxSuppression::BoxEncodingType::CORNER) + .boxEncoding(op::v5::NonMaxSuppression::BoxEncodingType::CORNER) .expectedSelectedIndices(reference_tests::Tensor(ET_IND, {1, 3}, std::vector{0, 0, 0})) .expectedSelectedScores(reference_tests::Tensor(ET_TH, {1, 3}, std::vector{0.0, 0.0, 0.9})) .expectedValidOutputs(reference_tests::Tensor(ET_IND, {1}, std::vector{1})) @@ -226,7 +226,7 @@ std::vector generateParams() { .iouThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.5f})) .scoreThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.0f})) .softNmsSigma(reference_tests::Tensor(ET_TH, {}, std::vector{0.0f})) - .boxEncoding(opset5::NonMaxSuppression::BoxEncodingType::CORNER) + .boxEncoding(op::v5::NonMaxSuppression::BoxEncodingType::CORNER) .expectedSelectedIndices(reference_tests::Tensor(ET_IND, {2, 3}, std::vector{0, 0, 3, 0, 0, 0})) .expectedSelectedScores( reference_tests::Tensor(ET_TH, {2, 3}, std::vector{0.0, 0.0, 0.95, 0.0, 0.0, 0.9})) @@ -240,7 +240,7 @@ std::vector generateParams() { .iouThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.5f})) .scoreThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.0f})) .softNmsSigma(reference_tests::Tensor(ET_TH, {}, std::vector{0.0f})) - .boxEncoding(opset5::NonMaxSuppression::BoxEncodingType::CORNER) + .boxEncoding(op::v5::NonMaxSuppression::BoxEncodingType::CORNER) .expectedSelectedIndices(reference_tests::Tensor(ET_IND, {1, 3}, std::vector{0, 0, 0})) .expectedSelectedScores(reference_tests::Tensor(ET_TH, {1, 3}, std::vector{0.0, 0.0, 0.9})) .expectedValidOutputs(reference_tests::Tensor(ET_IND, {1}, std::vector{1})) @@ -255,7 +255,7 @@ std::vector generateParams() { .iouThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.5f})) .scoreThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.0f})) .softNmsSigma(reference_tests::Tensor(ET_TH, {}, std::vector{0.0f})) - 
.boxEncoding(opset5::NonMaxSuppression::BoxEncodingType::CORNER) + .boxEncoding(op::v5::NonMaxSuppression::BoxEncodingType::CORNER) .expectedSelectedIndices( reference_tests::Tensor(ET_IND, {3, 3}, std::vector{0, 0, 3, 0, 0, 0, 0, 0, 5})) .expectedSelectedScores( @@ -272,7 +272,7 @@ std::vector generateParams() { .iouThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.5f})) .scoreThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.4f})) .softNmsSigma(reference_tests::Tensor(ET_TH, {}, std::vector{0.0f})) - .boxEncoding(opset5::NonMaxSuppression::BoxEncodingType::CORNER) + .boxEncoding(op::v5::NonMaxSuppression::BoxEncodingType::CORNER) .expectedSelectedIndices(reference_tests::Tensor(ET_IND, {2, 3}, std::vector{0, 0, 3, 0, 0, 0})) .expectedSelectedScores( reference_tests::Tensor(ET_TH, {2, 3}, std::vector{0.0, 0.0, 0.95, 0.0, 0.0, 0.9})) @@ -294,7 +294,7 @@ std::vector generateParams() { .iouThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.5f})) .scoreThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.0f})) .softNmsSigma(reference_tests::Tensor(ET_TH, {}, std::vector{0.0f})) - .boxEncoding(opset5::NonMaxSuppression::BoxEncodingType::CORNER) + .boxEncoding(op::v5::NonMaxSuppression::BoxEncodingType::CORNER) .expectedSelectedIndices( reference_tests::Tensor(ET_IND, {4, 3}, std::vector{0, 0, 3, 0, 0, 0, 1, 0, 3, 1, 0, 0})) .expectedSelectedScores(reference_tests::Tensor( @@ -316,7 +316,7 @@ std::vector generateParams() { .iouThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.5f})) .scoreThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.0f})) .softNmsSigma(reference_tests::Tensor(ET_TH, {}, std::vector{0.0f})) - .boxEncoding(opset5::NonMaxSuppression::BoxEncodingType::CORNER) + .boxEncoding(op::v5::NonMaxSuppression::BoxEncodingType::CORNER) .expectedSelectedIndices( reference_tests::Tensor(ET_IND, {4, 3}, std::vector{0, 0, 3, 0, 0, 0, 0, 1, 3, 0, 1, 0})) .expectedSelectedScores(reference_tests::Tensor( @@ -363,7 +363,7 @@ std::vector generateParamsWithoutConstants() { .iouThreshold(reference_tests::Tensor(ET_TH, {1}, std::vector{0.4f})) .scoreThreshold(reference_tests::Tensor(ET_TH, {1}, std::vector{0.2f})) .softNmsSigma(reference_tests::Tensor(ET_TH, {1}, std::vector{0.0f})) - .boxEncoding(opset5::NonMaxSuppression::BoxEncodingType::CORNER) + .boxEncoding(op::v5::NonMaxSuppression::BoxEncodingType::CORNER) .expectedSelectedIndices(reference_tests::Tensor(ET_IND, {1, 3}, std::vector{0, 0, 3})) .expectedSelectedScores(reference_tests::Tensor(ET_TH, {1, 3}, std::vector{0.0f, 0.0f, 0.95f})) .expectedValidOutputs(reference_tests::Tensor(ET_IND, {1}, std::vector{1})) @@ -422,7 +422,7 @@ struct NonMaxSuppression4Params { reference_tests::Tensor maxOutputBoxesPerClass; reference_tests::Tensor iouThreshold; reference_tests::Tensor scoreThreshold; - opset4::NonMaxSuppression::BoxEncodingType boxEncoding; + op::v4::NonMaxSuppression::BoxEncodingType boxEncoding; reference_tests::Tensor expectedSelectedIndices; std::string testcaseName; }; @@ -465,19 +465,19 @@ class ReferenceNonMaxSuppression4Test : public testing::TestWithParam CreateFunction(const NonMaxSuppression4Params& params) { - const auto boxes = std::make_shared(params.boxes.type, params.boxes.shape); - const auto scores = std::make_shared(params.scores.type, params.scores.shape); + const auto boxes = std::make_shared(params.boxes.type, params.boxes.shape); + const auto scores = std::make_shared(params.scores.type, params.scores.shape); const auto max_output_boxes_per_class = - 
std::make_shared(params.maxOutputBoxesPerClass.type, + std::make_shared(params.maxOutputBoxesPerClass.type, params.maxOutputBoxesPerClass.shape, params.maxOutputBoxesPerClass.data.data()); - const auto iou_threshold = std::make_shared(params.iouThreshold.type, + const auto iou_threshold = std::make_shared(params.iouThreshold.type, params.iouThreshold.shape, params.iouThreshold.data.data()); - const auto score_threshold = std::make_shared(params.scoreThreshold.type, + const auto score_threshold = std::make_shared(params.scoreThreshold.type, params.scoreThreshold.shape, params.scoreThreshold.data.data()); - const auto nms = std::make_shared(boxes, + const auto nms = std::make_shared(boxes, scores, max_output_boxes_per_class, iou_threshold, @@ -505,16 +505,16 @@ class ReferenceNonMaxSuppression4TestWithoutConstants : public ReferenceNonMaxSu private: static std::shared_ptr CreateFunction(const NonMaxSuppression4Params& params) { - const auto boxes = std::make_shared(params.boxes.type, params.boxes.shape); - const auto scores = std::make_shared(params.scores.type, params.scores.shape); + const auto boxes = std::make_shared(params.boxes.type, params.boxes.shape); + const auto scores = std::make_shared(params.scores.type, params.scores.shape); const auto max_output_boxes_per_class = - std::make_shared(params.maxOutputBoxesPerClass.type, + std::make_shared(params.maxOutputBoxesPerClass.type, params.maxOutputBoxesPerClass.shape); const auto iou_threshold = - std::make_shared(params.iouThreshold.type, params.iouThreshold.shape); + std::make_shared(params.iouThreshold.type, params.iouThreshold.shape); const auto score_threshold = - std::make_shared(params.scoreThreshold.type, params.scoreThreshold.shape); - const auto nms = std::make_shared(boxes, + std::make_shared(params.scoreThreshold.type, params.scoreThreshold.shape); + const auto nms = std::make_shared(boxes, scores, max_output_boxes_per_class, iou_threshold, @@ -552,7 +552,7 @@ std::vector generateParams4() { .maxOutputBoxesPerClass(reference_tests::Tensor(ET_BOX, {}, std::vector{3})) .iouThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.5f})) .scoreThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.0f})) - .boxEncoding(opset4::NonMaxSuppression::BoxEncodingType::CENTER) + .boxEncoding(op::v4::NonMaxSuppression::BoxEncodingType::CENTER) .expectedSelectedIndices( reference_tests::Tensor(ET_IND, {3, 3}, std::vector{0, 0, 3, 0, 0, 0, 0, 0, 5})) .testcaseName("nonmaxsuppression_center_point_box_format"), @@ -565,7 +565,7 @@ std::vector generateParams4() { .maxOutputBoxesPerClass(reference_tests::Tensor(ET_BOX, {}, std::vector{3})) .iouThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.5f})) .scoreThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.0f})) - .boxEncoding(opset4::NonMaxSuppression::BoxEncodingType::CORNER) + .boxEncoding(op::v4::NonMaxSuppression::BoxEncodingType::CORNER) .expectedSelectedIndices( reference_tests::Tensor(ET_IND, {3, 3}, std::vector{0, 0, 3, 0, 0, 0, 0, 0, 5})) .testcaseName("nonmaxsuppression_flipped_coordinates"), @@ -582,7 +582,7 @@ std::vector generateParams4() { .maxOutputBoxesPerClass(reference_tests::Tensor(ET_BOX, {}, std::vector{1})) .iouThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.5f})) .scoreThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.0f})) - .boxEncoding(opset4::NonMaxSuppression::BoxEncodingType::CORNER) + .boxEncoding(op::v4::NonMaxSuppression::BoxEncodingType::CORNER) .expectedSelectedIndices(reference_tests::Tensor(ET_IND, {1, 3}, 
std::vector{0, 0, 0})) .testcaseName("nonmaxsuppression_identical_boxes"), @@ -594,7 +594,7 @@ std::vector generateParams4() { .maxOutputBoxesPerClass(reference_tests::Tensor(ET_BOX, {}, std::vector{2})) .iouThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.5f})) .scoreThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.0f})) - .boxEncoding(opset4::NonMaxSuppression::BoxEncodingType::CORNER) + .boxEncoding(op::v4::NonMaxSuppression::BoxEncodingType::CORNER) .expectedSelectedIndices(reference_tests::Tensor(ET_IND, {2, 3}, std::vector{0, 0, 3, 0, 0, 0})) .testcaseName("nonmaxsuppression_limit_output_size"), @@ -604,7 +604,7 @@ std::vector generateParams4() { .maxOutputBoxesPerClass(reference_tests::Tensor(ET_BOX, {}, std::vector{3})) .iouThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.5f})) .scoreThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.0f})) - .boxEncoding(opset4::NonMaxSuppression::BoxEncodingType::CORNER) + .boxEncoding(op::v4::NonMaxSuppression::BoxEncodingType::CORNER) .expectedSelectedIndices(reference_tests::Tensor(ET_IND, {1, 3}, std::vector{0, 0, 0})) .testcaseName("nonmaxsuppression_single_box"), @@ -616,7 +616,7 @@ std::vector generateParams4() { .maxOutputBoxesPerClass(reference_tests::Tensor(ET_BOX, {}, std::vector{3})) .iouThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.5f})) .scoreThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.0f})) - .boxEncoding(opset4::NonMaxSuppression::BoxEncodingType::CORNER) + .boxEncoding(op::v4::NonMaxSuppression::BoxEncodingType::CORNER) .expectedSelectedIndices( reference_tests::Tensor(ET_IND, {3, 3}, std::vector{0, 0, 3, 0, 0, 0, 0, 0, 5})) .testcaseName("nonmaxsuppression_suppress_by_IOU"), @@ -629,7 +629,7 @@ std::vector generateParams4() { .maxOutputBoxesPerClass(reference_tests::Tensor(ET_BOX, {}, std::vector{2})) .iouThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.5f})) .scoreThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.4f})) - .boxEncoding(opset4::NonMaxSuppression::BoxEncodingType::CORNER) + .boxEncoding(op::v4::NonMaxSuppression::BoxEncodingType::CORNER) .expectedSelectedIndices(reference_tests::Tensor(ET_IND, {2, 3}, std::vector{0, 0, 3, 0, 0, 0})) .testcaseName("nonmaxsuppression_suppress_by_IOU_and_scores"), @@ -647,7 +647,7 @@ std::vector generateParams4() { .maxOutputBoxesPerClass(reference_tests::Tensor(ET_BOX, {}, std::vector{2})) .iouThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.5f})) .scoreThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.0f})) - .boxEncoding(opset4::NonMaxSuppression::BoxEncodingType::CORNER) + .boxEncoding(op::v4::NonMaxSuppression::BoxEncodingType::CORNER) .expectedSelectedIndices( reference_tests::Tensor(ET_IND, {4, 3}, std::vector{0, 0, 3, 0, 0, 0, 1, 0, 3, 1, 0, 0})) .testcaseName("nonmaxsuppression_two_batches"), @@ -663,7 +663,7 @@ std::vector generateParams4() { .maxOutputBoxesPerClass(reference_tests::Tensor(ET_BOX, {}, std::vector{2})) .iouThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.5f})) .scoreThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.0f})) - .boxEncoding(opset4::NonMaxSuppression::BoxEncodingType::CORNER) + .boxEncoding(op::v4::NonMaxSuppression::BoxEncodingType::CORNER) .expectedSelectedIndices( reference_tests::Tensor(ET_IND, {4, 3}, std::vector{0, 0, 3, 0, 0, 0, 0, 1, 3, 0, 1, 0})) .testcaseName("nonmaxsuppression_two_classes"), @@ -704,7 +704,7 @@ std::vector generateParams4WithoutConstants() { .maxOutputBoxesPerClass(reference_tests::Tensor(ET_BOX, {}, 
std::vector{1})) .iouThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.4f})) .scoreThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.2f})) - .boxEncoding(opset4::NonMaxSuppression::BoxEncodingType::CORNER) + .boxEncoding(op::v4::NonMaxSuppression::BoxEncodingType::CORNER) .expectedSelectedIndices(reference_tests::Tensor(ET_IND, {1, 3}, std::vector{0, 0, 3})) .testcaseName("nonmaxsuppression_suppress_by_IOU_and_scores_without_constants"), }; @@ -761,7 +761,7 @@ struct NonMaxSuppression3Params { reference_tests::Tensor maxOutputBoxesPerClass; reference_tests::Tensor iouThreshold; reference_tests::Tensor scoreThreshold; - opset3::NonMaxSuppression::BoxEncodingType boxEncoding; + op::v3::NonMaxSuppression::BoxEncodingType boxEncoding; reference_tests::Tensor expectedSelectedIndices; std::string testcaseName; }; @@ -804,19 +804,19 @@ class ReferenceNonMaxSuppression3Test : public testing::TestWithParam CreateFunction(const NonMaxSuppression3Params& params) { - const auto boxes = std::make_shared(params.boxes.type, params.boxes.shape); - const auto scores = std::make_shared(params.scores.type, params.scores.shape); + const auto boxes = std::make_shared(params.boxes.type, params.boxes.shape); + const auto scores = std::make_shared(params.scores.type, params.scores.shape); const auto max_output_boxes_per_class = - std::make_shared(params.maxOutputBoxesPerClass.type, + std::make_shared(params.maxOutputBoxesPerClass.type, params.maxOutputBoxesPerClass.shape, params.maxOutputBoxesPerClass.data.data()); - const auto iou_threshold = std::make_shared(params.iouThreshold.type, + const auto iou_threshold = std::make_shared(params.iouThreshold.type, params.iouThreshold.shape, params.iouThreshold.data.data()); - const auto score_threshold = std::make_shared(params.scoreThreshold.type, + const auto score_threshold = std::make_shared(params.scoreThreshold.type, params.scoreThreshold.shape, params.scoreThreshold.data.data()); - const auto nms = std::make_shared(boxes, + const auto nms = std::make_shared(boxes, scores, max_output_boxes_per_class, iou_threshold, @@ -844,16 +844,16 @@ class ReferenceNonMaxSuppression3TestWithoutConstants : public ReferenceNonMaxSu private: static std::shared_ptr CreateFunction(const NonMaxSuppression3Params& params) { - const auto boxes = std::make_shared(params.boxes.type, params.boxes.shape); - const auto scores = std::make_shared(params.scores.type, params.scores.shape); + const auto boxes = std::make_shared(params.boxes.type, params.boxes.shape); + const auto scores = std::make_shared(params.scores.type, params.scores.shape); const auto max_output_boxes_per_class = - std::make_shared(params.maxOutputBoxesPerClass.type, + std::make_shared(params.maxOutputBoxesPerClass.type, params.maxOutputBoxesPerClass.shape); const auto iou_threshold = - std::make_shared(params.iouThreshold.type, params.iouThreshold.shape); + std::make_shared(params.iouThreshold.type, params.iouThreshold.shape); const auto score_threshold = - std::make_shared(params.scoreThreshold.type, params.scoreThreshold.shape); - const auto nms = std::make_shared(boxes, + std::make_shared(params.scoreThreshold.type, params.scoreThreshold.shape); + const auto nms = std::make_shared(boxes, scores, max_output_boxes_per_class, iou_threshold, @@ -891,7 +891,7 @@ std::vector generateParams3() { .maxOutputBoxesPerClass(reference_tests::Tensor(ET_BOX, {}, std::vector{3})) .iouThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.5f})) .scoreThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.0f})) 
- .boxEncoding(opset3::NonMaxSuppression::BoxEncodingType::CENTER) + .boxEncoding(op::v3::NonMaxSuppression::BoxEncodingType::CENTER) .expectedSelectedIndices( reference_tests::Tensor(ET_IND, {3, 3}, std::vector{0, 0, 3, 0, 0, 0, 0, 0, 5})) .testcaseName("nonmaxsuppression_center_point_box_format"), @@ -904,7 +904,7 @@ std::vector generateParams3() { .maxOutputBoxesPerClass(reference_tests::Tensor(ET_BOX, {}, std::vector{3})) .iouThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.5f})) .scoreThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.0f})) - .boxEncoding(opset3::NonMaxSuppression::BoxEncodingType::CORNER) + .boxEncoding(op::v3::NonMaxSuppression::BoxEncodingType::CORNER) .expectedSelectedIndices( reference_tests::Tensor(ET_IND, {3, 3}, std::vector{0, 0, 3, 0, 0, 0, 0, 0, 5})) .testcaseName("nonmaxsuppression_flipped_coordinates"), @@ -921,7 +921,7 @@ std::vector generateParams3() { .maxOutputBoxesPerClass(reference_tests::Tensor(ET_BOX, {}, std::vector{1})) .iouThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.5f})) .scoreThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.0f})) - .boxEncoding(opset3::NonMaxSuppression::BoxEncodingType::CORNER) + .boxEncoding(op::v3::NonMaxSuppression::BoxEncodingType::CORNER) .expectedSelectedIndices(reference_tests::Tensor(ET_IND, {1, 3}, std::vector{0, 0, 0})) .testcaseName("nonmaxsuppression_identical_boxes"), @@ -933,7 +933,7 @@ std::vector generateParams3() { .maxOutputBoxesPerClass(reference_tests::Tensor(ET_BOX, {}, std::vector{2})) .iouThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.5f})) .scoreThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.0f})) - .boxEncoding(opset3::NonMaxSuppression::BoxEncodingType::CORNER) + .boxEncoding(op::v3::NonMaxSuppression::BoxEncodingType::CORNER) .expectedSelectedIndices(reference_tests::Tensor(ET_IND, {2, 3}, std::vector{0, 0, 3, 0, 0, 0})) .testcaseName("nonmaxsuppression_limit_output_size"), @@ -943,7 +943,7 @@ std::vector generateParams3() { .maxOutputBoxesPerClass(reference_tests::Tensor(ET_BOX, {}, std::vector{3})) .iouThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.5f})) .scoreThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.0f})) - .boxEncoding(opset3::NonMaxSuppression::BoxEncodingType::CORNER) + .boxEncoding(op::v3::NonMaxSuppression::BoxEncodingType::CORNER) .expectedSelectedIndices(reference_tests::Tensor(ET_IND, {1, 3}, std::vector{0, 0, 0})) .testcaseName("nonmaxsuppression_single_box"), @@ -955,7 +955,7 @@ std::vector generateParams3() { .maxOutputBoxesPerClass(reference_tests::Tensor(ET_BOX, {}, std::vector{3})) .iouThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.5f})) .scoreThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.0f})) - .boxEncoding(opset3::NonMaxSuppression::BoxEncodingType::CORNER) + .boxEncoding(op::v3::NonMaxSuppression::BoxEncodingType::CORNER) .expectedSelectedIndices( reference_tests::Tensor(ET_IND, {3, 3}, std::vector{0, 0, 3, 0, 0, 0, 0, 0, 5})) .testcaseName("nonmaxsuppression_suppress_by_IOU"), @@ -968,7 +968,7 @@ std::vector generateParams3() { .maxOutputBoxesPerClass(reference_tests::Tensor(ET_BOX, {}, std::vector{2})) .iouThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.5f})) .scoreThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.4f})) - .boxEncoding(opset3::NonMaxSuppression::BoxEncodingType::CORNER) + .boxEncoding(op::v3::NonMaxSuppression::BoxEncodingType::CORNER) .expectedSelectedIndices(reference_tests::Tensor(ET_IND, {2, 3}, std::vector{0, 0, 3, 
0, 0, 0})) .testcaseName("nonmaxsuppression_suppress_by_IOU_and_scores"), @@ -983,7 +983,7 @@ std::vector generateParams3() { .maxOutputBoxesPerClass(reference_tests::Tensor(ET_BOX, {}, std::vector{2})) .iouThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.5f})) .scoreThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.0f})) - .boxEncoding(opset3::NonMaxSuppression::BoxEncodingType::CORNER) + .boxEncoding(op::v3::NonMaxSuppression::BoxEncodingType::CORNER) .expectedSelectedIndices( reference_tests::Tensor(ET_IND, {4, 3}, std::vector{0, 0, 3, 0, 0, 0, 0, 1, 3, 0, 1, 0})) .testcaseName("nonmaxsuppression_two_classes"), @@ -1024,7 +1024,7 @@ std::vector generateParams3WithoutConstants() { .maxOutputBoxesPerClass(reference_tests::Tensor(ET_BOX, {}, std::vector{1})) .iouThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.4f})) .scoreThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.2f})) - .boxEncoding(opset3::NonMaxSuppression::BoxEncodingType::CORNER) + .boxEncoding(op::v3::NonMaxSuppression::BoxEncodingType::CORNER) .expectedSelectedIndices(reference_tests::Tensor(ET_IND, {1, 3}, std::vector{0, 0, 3})) .testcaseName("nonmaxsuppression_suppress_by_IOU_and_scores_without_constants"), }; @@ -1081,7 +1081,7 @@ struct NonMaxSuppression1Params { reference_tests::Tensor maxOutputBoxesPerClass; reference_tests::Tensor iouThreshold; reference_tests::Tensor scoreThreshold; - opset1::NonMaxSuppression::BoxEncodingType boxEncoding; + op::v1::NonMaxSuppression::BoxEncodingType boxEncoding; reference_tests::Tensor expectedSelectedIndices; std::string testcaseName; }; @@ -1124,19 +1124,19 @@ class ReferenceNonMaxSuppression1Test : public testing::TestWithParam CreateFunction(const NonMaxSuppression1Params& params) { - const auto boxes = std::make_shared(params.boxes.type, params.boxes.shape); - const auto scores = std::make_shared(params.scores.type, params.scores.shape); + const auto boxes = std::make_shared(params.boxes.type, params.boxes.shape); + const auto scores = std::make_shared(params.scores.type, params.scores.shape); const auto max_output_boxes_per_class = - std::make_shared(params.maxOutputBoxesPerClass.type, + std::make_shared(params.maxOutputBoxesPerClass.type, params.maxOutputBoxesPerClass.shape, params.maxOutputBoxesPerClass.data.data()); - const auto iou_threshold = std::make_shared(params.iouThreshold.type, + const auto iou_threshold = std::make_shared(params.iouThreshold.type, params.iouThreshold.shape, params.iouThreshold.data.data()); - const auto score_threshold = std::make_shared(params.scoreThreshold.type, + const auto score_threshold = std::make_shared(params.scoreThreshold.type, params.scoreThreshold.shape, params.scoreThreshold.data.data()); - const auto nms = std::make_shared(boxes, + const auto nms = std::make_shared(boxes, scores, max_output_boxes_per_class, iou_threshold, @@ -1163,16 +1163,16 @@ class ReferenceNonMaxSuppression1TestWithoutConstants : public ReferenceNonMaxSu private: static std::shared_ptr CreateFunction(const NonMaxSuppression1Params& params) { - const auto boxes = std::make_shared(params.boxes.type, params.boxes.shape); - const auto scores = std::make_shared(params.scores.type, params.scores.shape); + const auto boxes = std::make_shared(params.boxes.type, params.boxes.shape); + const auto scores = std::make_shared(params.scores.type, params.scores.shape); const auto max_output_boxes_per_class = - std::make_shared(params.maxOutputBoxesPerClass.type, + std::make_shared(params.maxOutputBoxesPerClass.type, 
params.maxOutputBoxesPerClass.shape); const auto iou_threshold = - std::make_shared(params.iouThreshold.type, params.iouThreshold.shape); + std::make_shared(params.iouThreshold.type, params.iouThreshold.shape); const auto score_threshold = - std::make_shared(params.scoreThreshold.type, params.scoreThreshold.shape); - const auto nms = std::make_shared(boxes, + std::make_shared(params.scoreThreshold.type, params.scoreThreshold.shape); + const auto nms = std::make_shared(boxes, scores, max_output_boxes_per_class, iou_threshold, @@ -1209,7 +1209,7 @@ std::vector generateParams1() { .maxOutputBoxesPerClass(reference_tests::Tensor(ET_BOX, {}, std::vector{3})) .iouThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.5f})) .scoreThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.0f})) - .boxEncoding(opset1::NonMaxSuppression::BoxEncodingType::CENTER) + .boxEncoding(op::v1::NonMaxSuppression::BoxEncodingType::CENTER) .expectedSelectedIndices( reference_tests::Tensor(ET_IND, {3, 3}, std::vector{0, 0, 3, 0, 0, 0, 0, 0, 5})) .testcaseName("nonmaxsuppression_center_point_box_format"), @@ -1222,7 +1222,7 @@ std::vector generateParams1() { .maxOutputBoxesPerClass(reference_tests::Tensor(ET_BOX, {}, std::vector{3})) .iouThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.5f})) .scoreThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.0f})) - .boxEncoding(opset1::NonMaxSuppression::BoxEncodingType::CORNER) + .boxEncoding(op::v1::NonMaxSuppression::BoxEncodingType::CORNER) .expectedSelectedIndices( reference_tests::Tensor(ET_IND, {3, 3}, std::vector{0, 0, 3, 0, 0, 0, 0, 0, 5})) .testcaseName("nonmaxsuppression_flipped_coordinates"), @@ -1239,7 +1239,7 @@ std::vector generateParams1() { .maxOutputBoxesPerClass(reference_tests::Tensor(ET_BOX, {}, std::vector{1})) .iouThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.5f})) .scoreThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.0f})) - .boxEncoding(opset1::NonMaxSuppression::BoxEncodingType::CORNER) + .boxEncoding(op::v1::NonMaxSuppression::BoxEncodingType::CORNER) .expectedSelectedIndices(reference_tests::Tensor(ET_IND, {1, 3}, std::vector{0, 0, 0})) .testcaseName("nonmaxsuppression_identical_boxes"), @@ -1251,7 +1251,7 @@ std::vector generateParams1() { .maxOutputBoxesPerClass(reference_tests::Tensor(ET_BOX, {}, std::vector{2})) .iouThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.5f})) .scoreThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.0f})) - .boxEncoding(opset1::NonMaxSuppression::BoxEncodingType::CORNER) + .boxEncoding(op::v1::NonMaxSuppression::BoxEncodingType::CORNER) .expectedSelectedIndices(reference_tests::Tensor(ET_IND, {2, 3}, std::vector{0, 0, 3, 0, 0, 0})) .testcaseName("nonmaxsuppression_limit_output_size"), @@ -1261,7 +1261,7 @@ std::vector generateParams1() { .maxOutputBoxesPerClass(reference_tests::Tensor(ET_BOX, {}, std::vector{3})) .iouThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.5f})) .scoreThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.0f})) - .boxEncoding(opset1::NonMaxSuppression::BoxEncodingType::CORNER) + .boxEncoding(op::v1::NonMaxSuppression::BoxEncodingType::CORNER) .expectedSelectedIndices(reference_tests::Tensor(ET_IND, {1, 3}, std::vector{0, 0, 0})) .testcaseName("nonmaxsuppression_single_box"), @@ -1273,7 +1273,7 @@ std::vector generateParams1() { .maxOutputBoxesPerClass(reference_tests::Tensor(ET_BOX, {}, std::vector{3})) .iouThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.5f})) 
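The NonMaxSuppression hunks above (the v5, v4, v3 and v1 test variants alike) only rename the types from the opsetN aliases to the versioned op::vN classes and switch to per-op headers; a minimal standalone sketch of the resulting construction, with placeholder shapes and threshold values that are not taken from the patch:

#include <memory>

#include "openvino/core/model.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/non_max_suppression.hpp"
#include "openvino/op/parameter.hpp"

std::shared_ptr<ov::Model> make_nms_v5_model() {
    using namespace ov;
    const auto boxes = std::make_shared<op::v0::Parameter>(element::f32, Shape{1, 6, 4});
    const auto scores = std::make_shared<op::v0::Parameter>(element::f32, Shape{1, 1, 6});
    const auto max_out = op::v0::Constant::create(element::i64, Shape{}, {3});
    const auto iou_th = op::v0::Constant::create(element::f32, Shape{}, {0.5f});
    const auto score_th = op::v0::Constant::create(element::f32, Shape{}, {0.0f});
    const auto sigma = op::v0::Constant::create(element::f32, Shape{}, {0.0f});
    // Box encoding is now spelled op::v5::NonMaxSuppression::BoxEncodingType instead of opset5::...
    const auto nms = std::make_shared<op::v5::NonMaxSuppression>(
        boxes, scores, max_out, iou_th, score_th, sigma,
        op::v5::NonMaxSuppression::BoxEncodingType::CORNER,
        /*sort_result_descending=*/false,
        element::i64);
    return std::make_shared<Model>(nms->outputs(), ParameterVector{boxes, scores});
}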
.scoreThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.0f})) - .boxEncoding(opset1::NonMaxSuppression::BoxEncodingType::CORNER) + .boxEncoding(op::v1::NonMaxSuppression::BoxEncodingType::CORNER) .expectedSelectedIndices( reference_tests::Tensor(ET_IND, {3, 3}, std::vector{0, 0, 3, 0, 0, 0, 0, 0, 5})) .testcaseName("nonmaxsuppression_suppress_by_IOU"), @@ -1286,7 +1286,7 @@ std::vector generateParams1() { .maxOutputBoxesPerClass(reference_tests::Tensor(ET_BOX, {}, std::vector{2})) .iouThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.5f})) .scoreThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.4f})) - .boxEncoding(opset1::NonMaxSuppression::BoxEncodingType::CORNER) + .boxEncoding(op::v1::NonMaxSuppression::BoxEncodingType::CORNER) .expectedSelectedIndices(reference_tests::Tensor(ET_IND, {2, 3}, std::vector{0, 0, 3, 0, 0, 0})) .testcaseName("nonmaxsuppression_suppress_by_IOU_and_scores"), @@ -1301,7 +1301,7 @@ std::vector generateParams1() { .maxOutputBoxesPerClass(reference_tests::Tensor(ET_BOX, {}, std::vector{2})) .iouThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.5f})) .scoreThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.0f})) - .boxEncoding(opset1::NonMaxSuppression::BoxEncodingType::CORNER) + .boxEncoding(op::v1::NonMaxSuppression::BoxEncodingType::CORNER) .expectedSelectedIndices( reference_tests::Tensor(ET_IND, {4, 3}, std::vector{0, 0, 3, 0, 0, 0, 0, 1, 3, 0, 1, 0})) .testcaseName("nonmaxsuppression_two_classes"), @@ -1339,7 +1339,7 @@ std::vector generateParams1WithoutConstants() { .maxOutputBoxesPerClass(reference_tests::Tensor(ET_BOX, {}, std::vector{1})) .iouThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.4f})) .scoreThreshold(reference_tests::Tensor(ET_TH, {}, std::vector{0.2f})) - .boxEncoding(opset1::NonMaxSuppression::BoxEncodingType::CORNER) + .boxEncoding(op::v1::NonMaxSuppression::BoxEncodingType::CORNER) .expectedSelectedIndices(reference_tests::Tensor(ET_IND, {1, 3}, std::vector{0, 0, 3})) .testcaseName("nonmaxsuppression_suppress_by_IOU_and_scores_without_constants"), }; diff --git a/src/plugins/template/tests/functional/op_reference/pad.cpp b/src/plugins/template/tests/functional/op_reference/pad.cpp index eca5f7dbd074a7..4396aadf415661 100644 --- a/src/plugins/template/tests/functional/op_reference/pad.cpp +++ b/src/plugins/template/tests/functional/op_reference/pad.cpp @@ -6,9 +6,8 @@ #include -#include - #include "base_reference_test.hpp" +#include "functional_test_utils/skip_tests_config.hpp" #include "openvino/op/constant.hpp" using namespace reference_tests; diff --git a/src/plugins/template/tests/functional/op_reference/prior_box_clustered.cpp b/src/plugins/template/tests/functional/op_reference/prior_box_clustered.cpp index 9097c3a2019f8b..358477ee9ed40a 100644 --- a/src/plugins/template/tests/functional/op_reference/prior_box_clustered.cpp +++ b/src/plugins/template/tests/functional/op_reference/prior_box_clustered.cpp @@ -2,11 +2,13 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/prior_box_clustered.hpp" + #include #include "base_reference_test.hpp" +#include "openvino/op/constant.hpp" #include "openvino/op/prior_box.hpp" -#include "openvino/opsets/opset1.hpp" using namespace reference_tests; using namespace ov; @@ -76,9 +78,9 @@ class ReferencePriorBoxClusteredLayerTest : public testing::TestWithParam CreateFunction(const PriorBoxClusteredParams& params) { auto LS = - std::make_shared(params.inType, params.layerShapeShape, params.layerShapeData.data()); + 
std::make_shared(params.inType, params.layerShapeShape, params.layerShapeData.data()); auto IS = - std::make_shared(params.inType, params.imageShapeShape, params.imageShapeData.data()); + std::make_shared(params.inType, params.imageShapeShape, params.imageShapeData.data()); const auto PriorBoxClustered = std::make_shared(LS, IS, params.attrs); return std::make_shared(NodeVector{PriorBoxClustered}, ParameterVector{}); } diff --git a/src/plugins/template/tests/functional/op_reference/random_uniform.cpp b/src/plugins/template/tests/functional/op_reference/random_uniform.cpp index 9da97ef7c0b426..706a7217c1dbae 100644 --- a/src/plugins/template/tests/functional/op_reference/random_uniform.cpp +++ b/src/plugins/template/tests/functional/op_reference/random_uniform.cpp @@ -2,10 +2,13 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/random_uniform.hpp" + #include #include "base_reference_test.hpp" -#include "openvino/opsets/opset8.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/parameter.hpp" using namespace ov; @@ -64,11 +67,11 @@ class ReferenceRandomUniformLayerTest : public testing::TestWithParam(min_val.type, min_val.shape); - const auto max_val_param = std::make_shared(max_val.type, max_val.shape); - auto out_shape_ = std::make_shared(element::i64, Shape{out_shape.size()}, out_shape); + const auto min_val_param = std::make_shared(min_val.type, min_val.shape); + const auto max_val_param = std::make_shared(max_val.type, max_val.shape); + auto out_shape_ = std::make_shared(element::i64, Shape{out_shape.size()}, out_shape); - return std::make_shared(NodeVector{std::make_shared(out_shape_, + return std::make_shared(NodeVector{std::make_shared(out_shape_, min_val_param, max_val_param, out_type, diff --git a/src/plugins/template/tests/functional/op_reference/roi_align.cpp b/src/plugins/template/tests/functional/op_reference/roi_align.cpp index ede8f75ace3c04..3bad155201bc2e 100644 --- a/src/plugins/template/tests/functional/op_reference/roi_align.cpp +++ b/src/plugins/template/tests/functional/op_reference/roi_align.cpp @@ -2,14 +2,13 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/roi_align.hpp" + #include #include "base_reference_test.hpp" -#include "openvino/opsets/opset1.hpp" -#include "openvino/opsets/opset3.hpp" -#include "openvino/opsets/opset4.hpp" -#include "openvino/opsets/opset5.hpp" -#include "openvino/opsets/opset9.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/parameter.hpp" using namespace reference_tests; using namespace ov; @@ -133,12 +132,12 @@ class ReferenceROIAlignTest : public testing::TestWithParam, pub private: static std::shared_ptr CreateFunction(const ROIAlignParams& params) { - const auto featureMap = std::make_shared(params.iType, params.pShape); + const auto featureMap = std::make_shared(params.iType, params.pShape); const auto coords = - std::make_shared(params.coords.type, params.coords.shape, params.coords.data.data()); + std::make_shared(params.coords.type, params.coords.shape, params.coords.data.data()); const auto roisIdx = - std::make_shared(params.roiIdx.type, params.roiIdx.shape, params.roiIdx.data.data()); - const auto roi_align = std::make_shared(featureMap, + std::make_shared(params.roiIdx.type, params.roiIdx.shape, params.roiIdx.data.data()); + const auto roi_align = std::make_shared(featureMap, coords, roisIdx, params.pooledH, @@ -185,14 +184,14 @@ class ReferenceROIAlignV9Test : public testing::TestWithParam, private: static std::shared_ptr CreateFunction(const ROIAlignV9Params& 
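The random_uniform.cpp hunk above follows the same pattern; a small illustrative sketch of an op::v8::RandomUniform graph built with the new per-op headers (the output shape and seed values here are placeholders, not values from the patch):

#include <memory>

#include "openvino/core/model.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/random_uniform.hpp"

std::shared_ptr<ov::Model> make_random_uniform_model() {
    using namespace ov;
    const auto min_val = std::make_shared<op::v0::Parameter>(element::f32, Shape{});
    const auto max_val = std::make_shared<op::v0::Parameter>(element::f32, Shape{});
    // Example output shape passed as an i64 constant, as in the test above.
    const auto out_shape = op::v0::Constant::create(element::i64, Shape{3}, {3, 2, 4});
    const auto random = std::make_shared<op::v8::RandomUniform>(out_shape, min_val, max_val,
                                                                element::f32,
                                                                /*global_seed=*/150,
                                                                /*op_seed=*/10);
    return std::make_shared<Model>(NodeVector{random}, ParameterVector{min_val, max_val});
}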
params) { - const auto featureMap = std::make_shared(params.iType, params.pShape); + const auto featureMap = std::make_shared(params.iType, params.pShape); const auto coords = - std::make_shared(params.coords.type, params.coords.shape, params.coords.data.data()); + std::make_shared(params.coords.type, params.coords.shape, params.coords.data.data()); const auto roisIdx = - std::make_shared(params.roiIdx.type, params.roiIdx.shape, params.roiIdx.data.data()); - const auto pooling_mode = EnumNames::as_enum(params.poolingMode); - const auto aligned_mode = EnumNames::as_enum(params.alignedMode); - const auto roi_align = std::make_shared(featureMap, + std::make_shared(params.roiIdx.type, params.roiIdx.shape, params.roiIdx.data.data()); + const auto pooling_mode = EnumNames::as_enum(params.poolingMode); + const auto aligned_mode = EnumNames::as_enum(params.alignedMode); + const auto roi_align = std::make_shared(featureMap, coords, roisIdx, params.pooledH, diff --git a/src/plugins/template/tests/functional/op_reference/roll.cpp b/src/plugins/template/tests/functional/op_reference/roll.cpp index b9d2b54f5debb0..63293f1c323347 100644 --- a/src/plugins/template/tests/functional/op_reference/roll.cpp +++ b/src/plugins/template/tests/functional/op_reference/roll.cpp @@ -2,11 +2,13 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/roll.hpp" + #include #include "base_reference_test.hpp" -#include "openvino/opsets/opset1.hpp" -#include "openvino/opsets/opset7.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/parameter.hpp" using namespace reference_tests; using namespace ov; @@ -61,14 +63,14 @@ class ReferenceRollLayerTest : public testing::TestWithParam, public private: static std::shared_ptr CreateFunction(const RollParams& params) { - const auto data = std::make_shared(params.dataTensor.type, params.dataTensor.shape); - const auto shift = std::make_shared(params.shiftTensor.type, + const auto data = std::make_shared(params.dataTensor.type, params.dataTensor.shape); + const auto shift = std::make_shared(params.shiftTensor.type, params.shiftTensor.shape, params.shiftTensor.data.data()); - const auto axes = std::make_shared(params.axesTensor.type, + const auto axes = std::make_shared(params.axesTensor.type, params.axesTensor.shape, params.axesTensor.data.data()); - const auto roll = std::make_shared(data, shift, axes); + const auto roll = std::make_shared(data, shift, axes); return std::make_shared(NodeVector{roll}, ParameterVector{data}); } }; diff --git a/src/plugins/template/tests/functional/op_reference/shuffle_channels.cpp b/src/plugins/template/tests/functional/op_reference/shuffle_channels.cpp index 7b605e62680c73..f3cd395bf4aadc 100644 --- a/src/plugins/template/tests/functional/op_reference/shuffle_channels.cpp +++ b/src/plugins/template/tests/functional/op_reference/shuffle_channels.cpp @@ -2,10 +2,12 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/shuffle_channels.hpp" + #include #include "base_reference_test.hpp" -#include "openvino/opsets/opset1.hpp" +#include "openvino/op/parameter.hpp" using namespace reference_tests; using namespace ov; @@ -59,8 +61,8 @@ class ReferenceShuffleChannelsLayerTest : public testing::TestWithParam CreateFunction(const ShuffleChannelsParams& params) { - const auto data = std::make_shared(params.dataTensor.type, params.dataTensor.shape); - const auto function = std::make_shared(data, params.axis, params.group); + const auto data = std::make_shared(params.dataTensor.type, params.dataTensor.shape); + const auto 
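The roll.cpp hunk above likewise replaces opset7::Roll with op::v7::Roll; a short sketch of that construction with placeholder shapes:

#include <memory>

#include "openvino/core/model.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/roll.hpp"

std::shared_ptr<ov::Model> make_roll_model() {
    using namespace ov;
    const auto data = std::make_shared<op::v0::Parameter>(element::f32, Shape{4, 3});
    const auto shift = op::v0::Constant::create(element::i64, Shape{2}, {2, 1});
    const auto axes = op::v0::Constant::create(element::i64, Shape{2}, {0, 1});
    const auto roll = std::make_shared<op::v7::Roll>(data, shift, axes);
    return std::make_shared<Model>(NodeVector{roll}, ParameterVector{data});
}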
function = std::make_shared(data, params.axis, params.group); return std::make_shared(NodeVector{function}, ParameterVector{data}); } }; diff --git a/src/plugins/template/tests/functional/op_reference/slice.cpp b/src/plugins/template/tests/functional/op_reference/slice.cpp index 2fa64b2dd458d6..21455df6786460 100644 --- a/src/plugins/template/tests/functional/op_reference/slice.cpp +++ b/src/plugins/template/tests/functional/op_reference/slice.cpp @@ -2,12 +2,14 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/slice.hpp" + #include #include #include "base_reference_test.hpp" -#include "openvino/opsets/opset8.hpp" +#include "openvino/op/parameter.hpp" using namespace ov; @@ -102,13 +104,13 @@ class ReferenceSliceLayerTest : public testing::TestWithParam, publ const reference_tests::Tensor& stop, const reference_tests::Tensor& step, const reference_tests::Tensor& axes) { - const auto data_param = std::make_shared(data.type, data.shape); - const auto start_param = std::make_shared(start.type, start.shape); - const auto stop_param = std::make_shared(stop.type, stop.shape); - const auto step_param = std::make_shared(step.type, step.shape); - const auto axes_param = std::make_shared(axes.type, axes.shape); + const auto data_param = std::make_shared(data.type, data.shape); + const auto start_param = std::make_shared(start.type, start.shape); + const auto stop_param = std::make_shared(stop.type, stop.shape); + const auto step_param = std::make_shared(step.type, step.shape); + const auto axes_param = std::make_shared(axes.type, axes.shape); - const auto slice = std::make_shared(data_param, start_param, stop_param, step_param, axes_param); + const auto slice = std::make_shared(data_param, start_param, stop_param, step_param, axes_param); return std::make_shared(NodeVector{slice}, ParameterVector{data_param, start_param, stop_param, step_param, axes_param}); } @@ -118,12 +120,12 @@ class ReferenceSliceLayerTest : public testing::TestWithParam, publ const reference_tests::Tensor& start, const reference_tests::Tensor& stop, const reference_tests::Tensor& step) { - const auto data_param = std::make_shared(data.type, data.shape); - const auto start_param = std::make_shared(start.type, start.shape); - const auto stop_param = std::make_shared(stop.type, stop.shape); - const auto step_param = std::make_shared(step.type, step.shape); + const auto data_param = std::make_shared(data.type, data.shape); + const auto start_param = std::make_shared(start.type, start.shape); + const auto stop_param = std::make_shared(stop.type, stop.shape); + const auto step_param = std::make_shared(step.type, step.shape); - const auto slice = std::make_shared(data_param, start_param, stop_param, step_param); + const auto slice = std::make_shared(data_param, start_param, stop_param, step_param); return std::make_shared(NodeVector{slice}, ParameterVector{data_param, start_param, stop_param, step_param}); } diff --git a/src/plugins/template/tests/functional/op_reference/space_to_batch.cpp b/src/plugins/template/tests/functional/op_reference/space_to_batch.cpp index 39cadfe89560df..32bcf21cab7f3b 100644 --- a/src/plugins/template/tests/functional/op_reference/space_to_batch.cpp +++ b/src/plugins/template/tests/functional/op_reference/space_to_batch.cpp @@ -2,12 +2,13 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/space_to_batch.hpp" + #include #include "base_reference_test.hpp" #include "openvino/op/constant.hpp" -#include "openvino/opsets/opset1.hpp" -#include "openvino/opsets/opset2.hpp" 
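The slice.cpp hunks above switch to op::v8::Slice, which has both a 5-input (with axes) and a 4-input form; a minimal sketch of the 5-input construction, with placeholder shapes that are not from the patch:

#include <memory>

#include "openvino/core/model.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/slice.hpp"

std::shared_ptr<ov::Model> make_slice_model() {
    using namespace ov;
    const auto data  = std::make_shared<op::v0::Parameter>(element::f32, Shape{4, 8});
    const auto start = std::make_shared<op::v0::Parameter>(element::i64, Shape{2});
    const auto stop  = std::make_shared<op::v0::Parameter>(element::i64, Shape{2});
    const auto step  = std::make_shared<op::v0::Parameter>(element::i64, Shape{2});
    const auto axes  = std::make_shared<op::v0::Parameter>(element::i64, Shape{2});
    // Omitting the axes input selects the 4-input overload used by the second test helper above.
    const auto slice = std::make_shared<op::v8::Slice>(data, start, stop, step, axes);
    return std::make_shared<Model>(NodeVector{slice},
                                   ParameterVector{data, start, stop, step, axes});
}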
+#include "openvino/op/parameter.hpp" using namespace reference_tests; using namespace ov; @@ -70,11 +71,11 @@ class ReferenceSpaceToBatchLayerTest : public testing::TestWithParam CreateFunction(const SpaceToBatchParams& params) { - const auto data = std::make_shared(params.dataTensor.type, params.dataTensor.shape); - const auto blockShape = std::make_shared(element::i64, params.blockShapeTensor.shape); - const auto padsBegin = std::make_shared(element::i64, params.padsBeginTensor.shape); - const auto padsEnd = std::make_shared(element::i64, params.padsEndTensor.shape); - const auto batchToSpace = std::make_shared(data, blockShape, padsBegin, padsEnd); + const auto data = std::make_shared(params.dataTensor.type, params.dataTensor.shape); + const auto blockShape = std::make_shared(element::i64, params.blockShapeTensor.shape); + const auto padsBegin = std::make_shared(element::i64, params.padsBeginTensor.shape); + const auto padsEnd = std::make_shared(element::i64, params.padsEndTensor.shape); + const auto batchToSpace = std::make_shared(data, blockShape, padsBegin, padsEnd); return std::make_shared(NodeVector{batchToSpace}, ParameterVector{data, blockShape, padsBegin, padsEnd}); } diff --git a/src/plugins/template/tests/functional/op_reference/space_to_depth.cpp b/src/plugins/template/tests/functional/op_reference/space_to_depth.cpp index 7420376f802af7..19a6b71c03463c 100644 --- a/src/plugins/template/tests/functional/op_reference/space_to_depth.cpp +++ b/src/plugins/template/tests/functional/op_reference/space_to_depth.cpp @@ -2,10 +2,12 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/space_to_depth.hpp" + #include #include "base_reference_test.hpp" -#include "openvino/opsets/opset1.hpp" +#include "openvino/op/parameter.hpp" using namespace reference_tests; using namespace ov; @@ -56,11 +58,10 @@ class ReferenceSpaceToDepthLayerTest : public testing::TestWithParam CreateFunction(const SpaceToDepthParams& params) { - opset1::SpaceToDepth::SpaceToDepthMode mode = params.mode == "DEPTH_FIRST" - ? opset1::SpaceToDepth::SpaceToDepthMode::DEPTH_FIRST - : opset1::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST; - const auto data = std::make_shared(params.dataTensor.type, params.dataTensor.shape); - const auto SpaceToDepth = std::make_shared(data, mode, params.blockSize); + const auto mode = params.mode == "DEPTH_FIRST" ? 
op::v0::SpaceToDepth::SpaceToDepthMode::DEPTH_FIRST + : op::v0::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST; + const auto data = std::make_shared(params.dataTensor.type, params.dataTensor.shape); + const auto SpaceToDepth = std::make_shared(data, mode, params.blockSize); return std::make_shared(NodeVector{SpaceToDepth}, ParameterVector{data}); } }; diff --git a/src/plugins/template/tests/functional/op_reference/tensor_iterator.cpp b/src/plugins/template/tests/functional/op_reference/tensor_iterator.cpp index 5c53422b0dad64..477585ce5a7319 100644 --- a/src/plugins/template/tests/functional/op_reference/tensor_iterator.cpp +++ b/src/plugins/template/tests/functional/op_reference/tensor_iterator.cpp @@ -2,13 +2,23 @@ // SPDX-License-Identifier: Apache-2.0 // -#include +#include "openvino/op/tensor_iterator.hpp" -#include -#include +#include #include "base_reference_test.hpp" #include "functional_test_utils/skip_tests_config.hpp" +#include "openvino/core/model.hpp" +#include "openvino/op/add.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/gru_cell.hpp" +#include "openvino/op/lstm_cell.hpp" +#include "openvino/op/multiply.hpp" +#include "openvino/op/parameter.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/rnn_cell.hpp" +#include "openvino/op/squeeze.hpp" +#include "openvino/op/unsqueeze.hpp" namespace { struct TIFunctionalBase { @@ -23,26 +33,26 @@ struct TIFunctionalBase { struct TIDynamicInputs : public TIFunctionalBase { std::shared_ptr create_function(const std::vector& ti_inputs, const std::vector& results) override { - auto X = std::make_shared(ov::element::f32, ov::PartialShape::dynamic()); - auto Y = std::make_shared(ov::element::f32, ov::PartialShape::dynamic()); - auto M = std::make_shared(ov::element::f32, ov::PartialShape::dynamic()); + auto X = std::make_shared(ov::element::f32, ov::PartialShape::dynamic()); + auto Y = std::make_shared(ov::element::f32, ov::PartialShape::dynamic()); + auto M = std::make_shared(ov::element::f32, ov::PartialShape::dynamic()); // Set up the cell body, a function from (Xi, Yi) -> (Zo) // Body parameters - auto Xi = std::make_shared(ov::element::f32, ov::PartialShape::dynamic()); - auto Yi = std::make_shared(ov::element::f32, ov::PartialShape::dynamic()); - auto M_body = std::make_shared(ov::element::f32, ov::PartialShape::dynamic()); - auto body_condition = std::make_shared(ov::element::boolean, ov::Shape{1}, true); + auto Xi = std::make_shared(ov::element::f32, ov::PartialShape::dynamic()); + auto Yi = std::make_shared(ov::element::f32, ov::PartialShape::dynamic()); + auto M_body = std::make_shared(ov::element::f32, ov::PartialShape::dynamic()); + auto body_condition = std::make_shared(ov::element::boolean, ov::Shape{1}, true); - auto trip_count = std::make_shared(ov::element::i64, ov::Shape{1}, 3); - auto exec_condition = std::make_shared(ov::element::boolean, ov::Shape{1}, true); + auto trip_count = std::make_shared(ov::element::i64, ov::Shape{1}, 3); + auto exec_condition = std::make_shared(ov::element::boolean, ov::Shape{1}, true); // Body - auto sum = std::make_shared(Xi, Yi); - auto Zo = std::make_shared(sum, M_body); + auto sum = std::make_shared(Xi, Yi); + auto Zo = std::make_shared(sum, M_body); auto body = std::make_shared(ov::OutputVector{body_condition, Zo}, ov::ParameterVector{Xi, Yi, M_body}); - auto tensor_iterator = std::make_shared(); + auto tensor_iterator = std::make_shared(); tensor_iterator->set_function(body); tensor_iterator->set_sliced_input(Xi, X, 0, 1, 1, -1, 1); @@ -257,14 +267,14 @@ struct 
TIStaticInputs : public TIStaticFunctionalBase { std::shared_ptr create_function(const TensorIteratorStaticParams& params) override { std::vector inputShapes; std::shared_ptr function; - auto tensor_iterator = std::make_shared(); + auto tensor_iterator = std::make_shared(); // Each case consist of 3 steps: // 1. Create TensorIterator body. // 2. Set PortMap // 3. Create outer function auto axis = - std::make_shared(ov::element::i64, + std::make_shared(ov::element::i64, ov::Shape{1}, std::vector{static_cast(params.sequenceAxis)}); switch (params.body_type) { @@ -290,13 +300,13 @@ struct TIStaticInputs : public TIStaticFunctionalBase { ov::ParameterVector body_params{std::make_shared(params.iType, inputShapes[0]), std::make_shared(params.iType, inputShapes[1]), std::make_shared(params.iType, inputShapes[2])}; - auto squeeze = std::make_shared(body_params[0], axis); + auto squeeze = std::make_shared(body_params[0], axis); ov::OutputVector out_vector = {squeeze, body_params[1], body_params[2]}; - auto W = std::make_shared(params.W.type, params.W.shape, params.W.data.data()); - auto R = std::make_shared(params.R.type, params.R.shape, params.R.data.data()); - auto B = std::make_shared(params.B.type, params.B.shape, params.B.data.data()); - auto lstm_cell = std::make_shared(out_vector[0], + auto W = std::make_shared(params.W.type, params.W.shape, params.W.data.data()); + auto R = std::make_shared(params.R.type, params.R.shape, params.R.data.data()); + auto B = std::make_shared(params.B.type, params.B.shape, params.B.data.data()); + auto lstm_cell = std::make_shared(out_vector[0], out_vector[1], out_vector[2], W, @@ -308,10 +318,10 @@ struct TIStaticInputs : public TIStaticFunctionalBase { std::vector{}, params.clip); - auto unsqueeze = std::make_shared(lstm_cell->output(0), axis); - ov::ResultVector results{std::make_shared(unsqueeze), - std::make_shared(lstm_cell->output(0)), - std::make_shared(lstm_cell->output(1))}; + auto unsqueeze = std::make_shared(lstm_cell->output(0), axis); + ov::ResultVector results{std::make_shared(unsqueeze), + std::make_shared(lstm_cell->output(0)), + std::make_shared(lstm_cell->output(1))}; auto body = std::make_shared(results, body_params, "lstm_cell"); tensor_iterator->set_function(body); @@ -357,13 +367,13 @@ struct TIStaticInputs : public TIStaticFunctionalBase { ov::ParameterVector body_params{std::make_shared(params.iType, inputShapes[0]), std::make_shared(params.iType, inputShapes[1])}; - auto squeeze = std::make_shared(body_params[0], axis); + auto squeeze = std::make_shared(body_params[0], axis); ov::OutputVector out_vector = {squeeze, body_params[1]}; - auto W = std::make_shared(params.W.type, params.W.shape, params.W.data.data()); - auto R = std::make_shared(params.R.type, params.R.shape, params.R.data.data()); - auto B = std::make_shared(params.B.type, params.B.shape, params.B.data.data()); - auto gru_cell = std::make_shared(out_vector[0], + auto W = std::make_shared(params.W.type, params.W.shape, params.W.data.data()); + auto R = std::make_shared(params.R.type, params.R.shape, params.R.data.data()); + auto B = std::make_shared(params.B.type, params.B.shape, params.B.data.data()); + auto gru_cell = std::make_shared(out_vector[0], out_vector[1], W, R, @@ -375,9 +385,9 @@ struct TIStaticInputs : public TIStaticFunctionalBase { params.clip, false); - auto unsqueeze = std::make_shared(gru_cell->output(0), axis); - ov::ResultVector results{std::make_shared(gru_cell->output(0)), - std::make_shared(unsqueeze)}; + auto unsqueeze = 
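The tensor_iterator.cpp hunks above keep the same op::v0::TensorIterator wiring and only rename the body ops; a compact sketch of that wiring with a trivial Add body (shapes and the body itself are placeholders, not the LSTM/GRU/RNN bodies from the tests):

#include <memory>

#include "openvino/core/model.hpp"
#include "openvino/op/add.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/tensor_iterator.hpp"

std::shared_ptr<ov::Model> make_tensor_iterator_model() {
    using namespace ov;
    // Outer inputs: X is sliced along axis 0, M is carried between iterations.
    const auto X = std::make_shared<op::v0::Parameter>(element::f32, Shape{3, 2});
    const auto M = std::make_shared<op::v0::Parameter>(element::f32, Shape{1, 2});

    // Body: Zo = Xi + Mi
    const auto Xi = std::make_shared<op::v0::Parameter>(element::f32, Shape{1, 2});
    const auto Mi = std::make_shared<op::v0::Parameter>(element::f32, Shape{1, 2});
    const auto Zo = std::make_shared<op::v1::Add>(Xi, Mi);
    const auto body = std::make_shared<Model>(OutputVector{Zo}, ParameterVector{Xi, Mi});

    auto ti = std::make_shared<op::v0::TensorIterator>();
    ti->set_function(body);
    ti->set_sliced_input(Xi, X, 0, 1, 1, -1, 0);  // iterate over axis 0 of X
    ti->set_merged_input(Mi, M, Zo);              // feed Zo back into Mi on each iteration
    const auto out = ti->get_iter_value(Zo, -1);  // value produced by the last iteration

    return std::make_shared<Model>(OutputVector{out}, ParameterVector{X, M});
}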
std::make_shared(gru_cell->output(0), axis); + ov::ResultVector results{std::make_shared(gru_cell->output(0)), + std::make_shared(unsqueeze)}; auto body = std::make_shared(results, body_params, "gru_cell"); tensor_iterator->set_function(body); @@ -420,13 +430,13 @@ struct TIStaticInputs : public TIStaticFunctionalBase { inputShapes[0][params.sequenceAxis] = 1; // sliced dimension ov::ParameterVector body_params{std::make_shared(params.iType, inputShapes[0]), std::make_shared(params.iType, inputShapes[1])}; - auto squeeze = std::make_shared(body_params[0], axis); + auto squeeze = std::make_shared(body_params[0], axis); ov::OutputVector out_vector = {squeeze, body_params[1]}; - auto W = std::make_shared(params.W.type, params.W.shape, params.W.data.data()); - auto R = std::make_shared(params.R.type, params.R.shape, params.R.data.data()); - auto B = std::make_shared(params.B.type, params.B.shape, params.B.data.data()); - auto rnn_cell = std::make_shared(out_vector[0], + auto W = std::make_shared(params.W.type, params.W.shape, params.W.data.data()); + auto R = std::make_shared(params.R.type, params.R.shape, params.R.data.data()); + auto B = std::make_shared(params.B.type, params.B.shape, params.B.data.data()); + auto rnn_cell = std::make_shared(out_vector[0], out_vector[1], W, R, @@ -437,9 +447,9 @@ struct TIStaticInputs : public TIStaticFunctionalBase { std::vector{}, params.clip); - auto unsqueeze = std::make_shared(rnn_cell->output(0), axis); - ov::ResultVector results{std::make_shared(rnn_cell), - std::make_shared(unsqueeze)}; + auto unsqueeze = std::make_shared(rnn_cell->output(0), axis); + ov::ResultVector results{std::make_shared(rnn_cell), + std::make_shared(unsqueeze)}; auto body = std::make_shared(results, body_params, "rnn_cell"); tensor_iterator->set_function(body); diff --git a/src/plugins/template/tests/functional/op_reference/tile.cpp b/src/plugins/template/tests/functional/op_reference/tile.cpp index 2a99ee45b11cb4..6caa3d64154e95 100644 --- a/src/plugins/template/tests/functional/op_reference/tile.cpp +++ b/src/plugins/template/tests/functional/op_reference/tile.cpp @@ -2,10 +2,13 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/tile.hpp" + #include #include "base_reference_test.hpp" -#include "openvino/opsets/opset1.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/parameter.hpp" using namespace reference_tests; using namespace ov; @@ -55,10 +58,10 @@ class ReferenceTileTest : public testing::TestWithParam, public Comm private: static std::shared_ptr CreateFunction(const TileParams& params) { - const auto A = std::make_shared(params.A.type, params.A.shape); + const auto A = std::make_shared(params.A.type, params.A.shape); const auto repeats = - std::make_shared(params.repeats.type, params.repeats.shape, params.repeats.data.data()); - const auto tile = std::make_shared(A, repeats); + std::make_shared(params.repeats.type, params.repeats.shape, params.repeats.data.data()); + const auto tile = std::make_shared(A, repeats); const auto f = std::make_shared(NodeVector{tile}, ParameterVector{A}); return f; } diff --git a/src/plugins/template/tests/functional/op_reference/topk.cpp b/src/plugins/template/tests/functional/op_reference/topk.cpp index f3fac57bd2acf1..b702a97519d1db 100644 --- a/src/plugins/template/tests/functional/op_reference/topk.cpp +++ b/src/plugins/template/tests/functional/op_reference/topk.cpp @@ -2,12 +2,13 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/topk.hpp" + #include #include "base_reference_test.hpp" 
-#include "openvino/opsets/opset1.hpp" -#include "openvino/opsets/opset11.hpp" -#include "openvino/opsets/opset3.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/parameter.hpp" using namespace reference_tests; using namespace ov; @@ -17,8 +18,8 @@ struct TopKParams { TopKParams(const reference_tests::Tensor& A, const reference_tests::Tensor& k, const int64_t axis, - const opset1::TopK::Mode mode, - const opset1::TopK::SortType sort, + const op::v1::TopK::Mode mode, + const op::v1::TopK::SortType sort, const reference_tests::Tensor& result0, const reference_tests::Tensor& result1, const size_t outIdx, @@ -36,8 +37,8 @@ struct TopKParams { reference_tests::Tensor A; reference_tests::Tensor k; int64_t axis; - opset1::TopK::Mode mode; - opset1::TopK::SortType sort; + op::v1::TopK::Mode mode; + op::v1::TopK::SortType sort; reference_tests::Tensor result0; reference_tests::Tensor result1; size_t outIdx; @@ -116,17 +117,17 @@ class ReferenceTopKTestResnet50 : public testing::TestWithParam CreateFunction(const TopKParamsResnet50& params) { - const auto A = std::make_shared(params.A.type, params.A.shape); - const auto B = std::make_shared(A, - opset1::Constant::create(element::i64, {}, {5}), + const auto A = std::make_shared(params.A.type, params.A.shape); + const auto B = std::make_shared(A, + op::v0::Constant::create(element::i64, {}, {5}), 1, - opset1::TopK::Mode::MAX, - opset1::TopK::SortType::SORT_VALUES); - const auto C = std::make_shared(A, - opset1::Constant::create(element::i64, {}, {1}), + op::v1::TopK::Mode::MAX, + op::v1::TopK::SortType::SORT_VALUES); + const auto C = std::make_shared(A, + op::v0::Constant::create(element::i64, {}, {1}), 1, - opset1::TopK::Mode::MAX, - opset1::TopK::SortType::SORT_VALUES); + op::v1::TopK::Mode::MAX, + op::v1::TopK::SortType::SORT_VALUES); const auto out5_value = B->output(0); const auto out5_index = B->output(1); @@ -247,9 +248,9 @@ class ReferenceTopKTestMaxMinSort : public ReferenceTopKTest { private: static std::shared_ptr CreateFunction(const TopKParams& params) { - const auto A = std::make_shared(params.A.type, params.A.shape); - const auto k = opset1::Constant::create(params.k.type, params.k.shape, params.k.data.data()); - const auto B = std::make_shared(A, k, params.axis, params.mode, params.sort); + const auto A = std::make_shared(params.A.type, params.A.shape); + const auto k = op::v0::Constant::create(params.k.type, params.k.shape, params.k.data.data()); + const auto B = std::make_shared(A, k, params.axis, params.mode, params.sort); const auto f = std::make_shared(B->outputs(), ParameterVector{A}); return f; } @@ -279,8 +280,8 @@ std::vector generateParamsMaxMinSort() { }({128, 1000})), reference_tests::Tensor(ET2, {}, std::vector{5}), 1, - opset1::TopK::Mode::MAX, - opset1::TopK::SortType::NONE, + op::v1::TopK::Mode::MAX, + op::v1::TopK::SortType::NONE, reference_tests::Tensor(ET, {128, 5}, [](std::vector rshape, std::vector shape) -> std::vector { @@ -323,8 +324,8 @@ std::vector generateParamsMaxMinSort() { }({128, 1000})), reference_tests::Tensor(ET2, {}, std::vector{5}), 1, - opset1::TopK::Mode::MIN, - opset1::TopK::SortType::NONE, + op::v1::TopK::Mode::MIN, + op::v1::TopK::SortType::NONE, reference_tests::Tensor(ET, {128, 5}, [](std::vector rshape) -> std::vector { @@ -368,8 +369,8 @@ std::vector generateParamsMaxMinSort() { }({128, 1000})), reference_tests::Tensor(ET2, {}, std::vector{5}), 1, - opset1::TopK::Mode::MAX, - opset1::TopK::SortType::SORT_VALUES, + op::v1::TopK::Mode::MAX, + op::v1::TopK::SortType::SORT_VALUES, 
reference_tests::Tensor(ET, {128, 5}, [](std::vector rshape, std::vector shape) -> std::vector { @@ -408,8 +409,8 @@ std::vector generateParamsMaxMinSort() { }({128, 1000})), reference_tests::Tensor(ET2, {}, std::vector{5}), 1, - opset1::TopK::Mode::MIN, - opset1::TopK::SortType::SORT_VALUES, + op::v1::TopK::Mode::MIN, + op::v1::TopK::SortType::SORT_VALUES, reference_tests::Tensor(ET, {128, 5}, [](std::vector rshape) -> std::vector { @@ -449,8 +450,8 @@ std::vector generateParamsMaxMinSort() { }({128, 1000})), reference_tests::Tensor(ET2, {}, std::vector{5}), 1, - opset1::TopK::Mode::MAX, - opset1::TopK::SortType::SORT_INDICES, + op::v1::TopK::Mode::MAX, + op::v1::TopK::SortType::SORT_INDICES, reference_tests::Tensor(ET, {128, 5}, [](std::vector rshape, std::vector shape) -> std::vector { @@ -493,8 +494,8 @@ std::vector generateParamsMaxMinSort() { }({128, 1000})), reference_tests::Tensor(ET2, {}, std::vector{5}), 1, - opset1::TopK::Mode::MIN, - opset1::TopK::SortType::SORT_INDICES, + op::v1::TopK::Mode::MIN, + op::v1::TopK::SortType::SORT_INDICES, reference_tests::Tensor(ET, {128, 5}, [](std::vector rshape) -> std::vector { @@ -523,8 +524,8 @@ std::vector generateParamsMaxMinSort() { TopKParams(reference_tests::Tensor(ET, {5}, std::vector{3, 1, 2, 5, 4}), reference_tests::Tensor(ET2, {}, std::vector{3}), 0, - opset1::TopK::Mode::MAX, - opset1::TopK::SortType::SORT_VALUES, + op::v1::TopK::Mode::MAX, + op::v1::TopK::SortType::SORT_VALUES, reference_tests::Tensor(ET, {3}, std::vector{5, 4, 3}), reference_tests::Tensor(ET_OUT, {3}, std::vector{3, 4, 0}), 0, @@ -533,8 +534,8 @@ std::vector generateParamsMaxMinSort() { TopKParams(reference_tests::Tensor(ET, {5}, std::vector{3, 1, 2, 5, 4}), reference_tests::Tensor(ET2, {}, std::vector{3}), 0, - opset1::TopK::Mode::MAX, - opset1::TopK::SortType::SORT_INDICES, + op::v1::TopK::Mode::MAX, + op::v1::TopK::SortType::SORT_INDICES, reference_tests::Tensor(ET, {3}, std::vector{3, 5, 4}), reference_tests::Tensor(ET_OUT, {3}, std::vector{0, 3, 4}), 0, @@ -543,8 +544,8 @@ std::vector generateParamsMaxMinSort() { TopKParams(reference_tests::Tensor(ET, {5}, std::vector{3, 1, 2, 5, 4}), reference_tests::Tensor(ET2, {}, std::vector{3}), 0, - opset1::TopK::Mode::MIN, - opset1::TopK::SortType::SORT_VALUES, + op::v1::TopK::Mode::MIN, + op::v1::TopK::SortType::SORT_VALUES, reference_tests::Tensor(ET, {3}, std::vector{1, 2, 3}), reference_tests::Tensor(ET_OUT, {3}, std::vector{1, 2, 0}), 0, @@ -553,8 +554,8 @@ std::vector generateParamsMaxMinSort() { TopKParams(reference_tests::Tensor(ET, {5}, std::vector{3, 1, 2, 5, 4}), reference_tests::Tensor(ET2, {}, std::vector{3}), 0, - opset1::TopK::Mode::MIN, - opset1::TopK::SortType::SORT_INDICES, + op::v1::TopK::Mode::MIN, + op::v1::TopK::SortType::SORT_INDICES, reference_tests::Tensor(ET, {3}, std::vector{3, 1, 2}), reference_tests::Tensor(ET_OUT, {3}, std::vector{0, 1, 2}), 0, @@ -602,9 +603,9 @@ class ReferenceTopKTestBackend : public ReferenceTopKTest { private: static std::shared_ptr CreateFunction(const TopKParams& params) { - const auto A = std::make_shared(params.A.type, params.A.shape); - const auto k = opset1::Constant::create(params.k.type, params.k.shape, params.k.data.data()); - const auto B = std::make_shared(A, k, params.axis, params.mode, params.sort); + const auto A = std::make_shared(params.A.type, params.A.shape); + const auto k = op::v0::Constant::create(params.k.type, params.k.shape, params.k.data.data()); + const auto B = std::make_shared(A, k, params.axis, params.mode, params.sort); const auto f = 
std::make_shared(B->outputs(), ParameterVector{A}); return f; } @@ -674,9 +675,9 @@ class ReferenceTopKTest1dMaxMin : public ReferenceTopKTest { private: static std::shared_ptr CreateFunction(const TopKParams& params, size_t out_idx) { - const auto A = std::make_shared(params.A.type, params.A.shape); - const auto k = opset1::Constant::create(params.k.type, params.k.shape, params.k.data.data()); - const auto B = std::make_shared(A, k, params.axis, params.mode, params.sort); + const auto A = std::make_shared(params.A.type, params.A.shape); + const auto k = op::v0::Constant::create(params.k.type, params.k.shape, params.k.data.data()); + const auto B = std::make_shared(A, k, params.axis, params.mode, params.sort); const auto f = std::make_shared(OutputVector{B->output(out_idx)}, ParameterVector{A}); return f; } @@ -695,8 +696,8 @@ std::vector generateParams1dMaxMin() { TopKParams(reference_tests::Tensor(ET, {6}, std::vector{1, 2, 3, 4, 5, 6}), reference_tests::Tensor(ET2, {}, std::vector{6}), 0, - opset1::TopK::Mode::MAX, - opset1::TopK::SortType::SORT_VALUES, + op::v1::TopK::Mode::MAX, + op::v1::TopK::SortType::SORT_VALUES, reference_tests::Tensor(ET, {6}, std::vector{6, 5, 4, 3, 2, 1}), reference_tests::Tensor(ET_OUT, {6}, std::vector{5, 4, 3, 2, 1, 0}), 0, @@ -705,8 +706,8 @@ std::vector generateParams1dMaxMin() { TopKParams(reference_tests::Tensor(ET, {6}, std::vector{1, 2, 3, 4, 5, 6}), reference_tests::Tensor(ET2, {}, std::vector{6}), 0, - opset1::TopK::Mode::MAX, - opset1::TopK::SortType::SORT_VALUES, + op::v1::TopK::Mode::MAX, + op::v1::TopK::SortType::SORT_VALUES, reference_tests::Tensor(ET, {6}, std::vector{6, 5, 4, 3, 2, 1}), reference_tests::Tensor(ET_OUT, {6}, std::vector{5, 4, 3, 2, 1, 0}), 1, @@ -715,8 +716,8 @@ std::vector generateParams1dMaxMin() { TopKParams(reference_tests::Tensor(ET, {6}, std::vector{1, 2, 3, 4, 5, 6}), reference_tests::Tensor(ET2, {}, std::vector{3}), 0, - opset1::TopK::Mode::MAX, - opset1::TopK::SortType::SORT_VALUES, + op::v1::TopK::Mode::MAX, + op::v1::TopK::SortType::SORT_VALUES, reference_tests::Tensor(ET, {3}, std::vector{6, 5, 4}), reference_tests::Tensor(ET_OUT, {3}, std::vector{5, 4, 3}), 0, @@ -725,8 +726,8 @@ std::vector generateParams1dMaxMin() { TopKParams(reference_tests::Tensor(ET, {6}, std::vector{1, 2, 3, 4, 5, 6}), reference_tests::Tensor(ET2, {}, std::vector{3}), 0, - opset1::TopK::Mode::MAX, - opset1::TopK::SortType::SORT_VALUES, + op::v1::TopK::Mode::MAX, + op::v1::TopK::SortType::SORT_VALUES, reference_tests::Tensor(ET, {3}, std::vector{6, 5, 4}), reference_tests::Tensor(ET_OUT, {3}, std::vector{5, 4, 3}), 1, @@ -735,8 +736,8 @@ std::vector generateParams1dMaxMin() { TopKParams(reference_tests::Tensor(ET, {6}, std::vector{1, 2, 3, 4, 5, 6}), reference_tests::Tensor(ET2, {}, std::vector{1}), 0, - opset1::TopK::Mode::MAX, - opset1::TopK::SortType::SORT_VALUES, + op::v1::TopK::Mode::MAX, + op::v1::TopK::SortType::SORT_VALUES, reference_tests::Tensor(ET, {1}, std::vector{6}), reference_tests::Tensor(ET_OUT, {1}, std::vector{5}), 0, @@ -745,8 +746,8 @@ std::vector generateParams1dMaxMin() { TopKParams(reference_tests::Tensor(ET, {6}, std::vector{1, 2, 3, 4, 5, 6}), reference_tests::Tensor(ET2, {}, std::vector{1}), 0, - opset1::TopK::Mode::MAX, - opset1::TopK::SortType::SORT_VALUES, + op::v1::TopK::Mode::MAX, + op::v1::TopK::SortType::SORT_VALUES, reference_tests::Tensor(ET, {1}, std::vector{6}), reference_tests::Tensor(ET_OUT, {1}, std::vector{5}), 1, @@ -755,8 +756,8 @@ std::vector generateParams1dMaxMin() { 
TopKParams(reference_tests::Tensor(ET, {6}, std::vector{6, 5, 4, 3, 2, 1}), reference_tests::Tensor(ET2, {}, std::vector{6}), 0, - opset1::TopK::Mode::MIN, - opset1::TopK::SortType::SORT_VALUES, + op::v1::TopK::Mode::MIN, + op::v1::TopK::SortType::SORT_VALUES, reference_tests::Tensor(ET, {6}, std::vector{1, 2, 3, 4, 5, 6}), reference_tests::Tensor(ET_OUT, {6}, std::vector{5, 4, 3, 2, 1, 0}), 0, @@ -765,8 +766,8 @@ std::vector generateParams1dMaxMin() { TopKParams(reference_tests::Tensor(ET, {6}, std::vector{6, 5, 4, 3, 2, 1}), reference_tests::Tensor(ET2, {}, std::vector{6}), 0, - opset1::TopK::Mode::MIN, - opset1::TopK::SortType::SORT_VALUES, + op::v1::TopK::Mode::MIN, + op::v1::TopK::SortType::SORT_VALUES, reference_tests::Tensor(ET, {6}, std::vector{1, 2, 3, 4, 5, 6}), reference_tests::Tensor(ET_OUT, {6}, std::vector{5, 4, 3, 2, 1, 0}), 1, @@ -775,8 +776,8 @@ std::vector generateParams1dMaxMin() { TopKParams(reference_tests::Tensor(ET, {6}, std::vector{6, 5, 4, 3, 2, 1}), reference_tests::Tensor(ET2, {}, std::vector{3}), 0, - opset1::TopK::Mode::MIN, - opset1::TopK::SortType::SORT_VALUES, + op::v1::TopK::Mode::MIN, + op::v1::TopK::SortType::SORT_VALUES, reference_tests::Tensor(ET, {3}, std::vector{1, 2, 3}), reference_tests::Tensor(ET_OUT, {3}, std::vector{5, 4, 3}), 0, @@ -785,8 +786,8 @@ std::vector generateParams1dMaxMin() { TopKParams(reference_tests::Tensor(ET, {6}, std::vector{6, 5, 4, 3, 2, 1}), reference_tests::Tensor(ET2, {}, std::vector{3}), 0, - opset1::TopK::Mode::MIN, - opset1::TopK::SortType::SORT_VALUES, + op::v1::TopK::Mode::MIN, + op::v1::TopK::SortType::SORT_VALUES, reference_tests::Tensor(ET, {3}, std::vector{1, 2, 3}), reference_tests::Tensor(ET_OUT, {3}, std::vector{5, 4, 3}), 1, @@ -795,8 +796,8 @@ std::vector generateParams1dMaxMin() { TopKParams(reference_tests::Tensor(ET, {6}, std::vector{6, 5, 4, 3, 2, 1}), reference_tests::Tensor(ET2, {}, std::vector{1}), 0, - opset1::TopK::Mode::MIN, - opset1::TopK::SortType::SORT_VALUES, + op::v1::TopK::Mode::MIN, + op::v1::TopK::SortType::SORT_VALUES, reference_tests::Tensor(ET, {1}, std::vector{1}), reference_tests::Tensor(ET_OUT, {1}, std::vector{5}), 0, @@ -805,8 +806,8 @@ std::vector generateParams1dMaxMin() { TopKParams(reference_tests::Tensor(ET, {6}, std::vector{6, 5, 4, 3, 2, 1}), reference_tests::Tensor(ET2, {}, std::vector{1}), 0, - opset1::TopK::Mode::MIN, - opset1::TopK::SortType::SORT_VALUES, + op::v1::TopK::Mode::MIN, + op::v1::TopK::SortType::SORT_VALUES, reference_tests::Tensor(ET, {1}, std::vector{1}), reference_tests::Tensor(ET_OUT, {1}, std::vector{5}), 1, @@ -815,8 +816,8 @@ std::vector generateParams1dMaxMin() { TopKParams(reference_tests::Tensor(ET, {2, 3, 2}, std::vector{9, 2, 10, 12, 8, 4, 6, 1, 5, 3, 11, 7}), reference_tests::Tensor(ET2, {}, std::vector{3}), 1, - opset1::TopK::Mode::MAX, - opset1::TopK::SortType::SORT_VALUES, + op::v1::TopK::Mode::MAX, + op::v1::TopK::SortType::SORT_VALUES, reference_tests::Tensor(ET, {2, 3, 2}, std::vector{10, 12, 9, 4, 8, 2, 11, 7, 6, 3, 5, 1}), reference_tests::Tensor(ET_OUT, {2, 3, 2}, std::vector{1, 1, 0, 2, 2, 0, 2, 2, 0, 1, 1, 0}), 0, @@ -825,8 +826,8 @@ std::vector generateParams1dMaxMin() { TopKParams(reference_tests::Tensor(ET, {2, 3, 2}, std::vector{9, 2, 10, 12, 8, 4, 6, 1, 5, 3, 11, 7}), reference_tests::Tensor(ET2, {}, std::vector{3}), 1, - opset1::TopK::Mode::MAX, - opset1::TopK::SortType::SORT_VALUES, + op::v1::TopK::Mode::MAX, + op::v1::TopK::SortType::SORT_VALUES, reference_tests::Tensor(ET, {2, 3, 2}, std::vector{10, 12, 9, 4, 8, 2, 11, 7, 6, 3, 
5, 1}), reference_tests::Tensor(ET_OUT, {2, 3, 2}, std::vector{1, 1, 0, 2, 2, 0, 2, 2, 0, 1, 1, 0}), 1, @@ -854,8 +855,8 @@ std::vector generateParams1dMaxMin() { 215, 287, 200, 272, 208, 280, 216, 288}), reference_tests::Tensor(ET2, {}, std::vector{2}), 1, - opset1::TopK::Mode::MAX, - opset1::TopK::SortType::SORT_VALUES, + op::v1::TopK::Mode::MAX, + op::v1::TopK::SortType::SORT_VALUES, reference_tests::Tensor( ET, {2, 2, 3, 2, 4}, @@ -897,8 +898,8 @@ std::vector generateParams1dMaxMin() { 215, 287, 200, 272, 208, 280, 216, 288}), reference_tests::Tensor(ET2, {}, std::vector{2}), 1, - opset1::TopK::Mode::MAX, - opset1::TopK::SortType::SORT_VALUES, + op::v1::TopK::Mode::MAX, + op::v1::TopK::SortType::SORT_VALUES, reference_tests::Tensor( ET, {2, 2, 3, 2, 4}, @@ -921,8 +922,8 @@ std::vector generateParams1dMaxMin() { TopKParams(reference_tests::Tensor(ET, {2, 3, 2}, std::vector{9, 2, 10, 12, 8, 4, 6, 1, 5, 3, 11, 7}), reference_tests::Tensor(ET2, {}, std::vector{2}), 1, - opset1::TopK::Mode::MAX, - opset1::TopK::SortType::SORT_VALUES, + op::v1::TopK::Mode::MAX, + op::v1::TopK::SortType::SORT_VALUES, reference_tests::Tensor(ET, {2, 2, 2}, std::vector{10, 12, 9, 4, 11, 7, 6, 3}), reference_tests::Tensor(ET_OUT, {2, 2, 2}, std::vector{1, 1, 0, 2, 2, 2, 0, 1}), 0, @@ -931,8 +932,8 @@ std::vector generateParams1dMaxMin() { TopKParams(reference_tests::Tensor(ET, {2, 3, 2}, std::vector{9, 2, 10, 12, 8, 4, 6, 1, 5, 3, 11, 7}), reference_tests::Tensor(ET2, {}, std::vector{2}), 1, - opset1::TopK::Mode::MAX, - opset1::TopK::SortType::SORT_VALUES, + op::v1::TopK::Mode::MAX, + op::v1::TopK::SortType::SORT_VALUES, reference_tests::Tensor(ET, {2, 2, 2}, std::vector{10, 12, 9, 4, 11, 7, 6, 3}), reference_tests::Tensor(ET_OUT, {2, 2, 2}, std::vector{1, 1, 0, 2, 2, 2, 0, 1}), 1, @@ -941,8 +942,8 @@ std::vector generateParams1dMaxMin() { TopKParams(reference_tests::Tensor(ET, {2, 3, 2}, std::vector{9, 2, 10, 12, 8, 4, 6, 1, 5, 3, 11, 7}), reference_tests::Tensor(ET2, {}, std::vector{1}), 1, - opset1::TopK::Mode::MAX, - opset1::TopK::SortType::SORT_VALUES, + op::v1::TopK::Mode::MAX, + op::v1::TopK::SortType::SORT_VALUES, reference_tests::Tensor(ET, {2, 1, 2}, std::vector{10, 12, 11, 7}), reference_tests::Tensor(ET_OUT, {2, 1, 2}, std::vector{1, 1, 2, 2}), 0, @@ -951,8 +952,8 @@ std::vector generateParams1dMaxMin() { TopKParams(reference_tests::Tensor(ET, {2, 3, 2}, std::vector{9, 2, 10, 12, 8, 4, 6, 1, 5, 3, 11, 7}), reference_tests::Tensor(ET2, {}, std::vector{1}), 1, - opset1::TopK::Mode::MAX, - opset1::TopK::SortType::SORT_VALUES, + op::v1::TopK::Mode::MAX, + op::v1::TopK::SortType::SORT_VALUES, reference_tests::Tensor(ET, {2, 1, 2}, std::vector{10, 12, 11, 7}), reference_tests::Tensor(ET_OUT, {2, 1, 2}, std::vector{1, 1, 2, 2}), 1, @@ -961,8 +962,8 @@ std::vector generateParams1dMaxMin() { TopKParams(reference_tests::Tensor(ET, {2, 3, 2}, std::vector{12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}), reference_tests::Tensor(ET2, {}, std::vector{3}), 1, - opset1::TopK::Mode::MIN, - opset1::TopK::SortType::SORT_VALUES, + op::v1::TopK::Mode::MIN, + op::v1::TopK::SortType::SORT_VALUES, reference_tests::Tensor(ET, {2, 3, 2}, std::vector{8, 2, 10, 4, 12, 9, 5, 1, 6, 3, 11, 7}), reference_tests::Tensor(ET_OUT, {2, 3, 2}, std::vector{2, 0, 1, 2, 0, 1, 1, 0, 0, 1, 2, 2}), 0, @@ -971,8 +972,8 @@ std::vector generateParams1dMaxMin() { TopKParams(reference_tests::Tensor(ET, {2, 3, 2}, std::vector{12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}), reference_tests::Tensor(ET2, {}, std::vector{3}), 1, - opset1::TopK::Mode::MIN, - 
opset1::TopK::SortType::SORT_VALUES, + op::v1::TopK::Mode::MIN, + op::v1::TopK::SortType::SORT_VALUES, reference_tests::Tensor(ET, {2, 3, 2}, std::vector{8, 2, 10, 4, 12, 9, 5, 1, 6, 3, 11, 7}), reference_tests::Tensor(ET_OUT, {2, 3, 2}, std::vector{2, 0, 1, 2, 0, 1, 1, 0, 0, 1, 2, 2}), 1, @@ -981,8 +982,8 @@ std::vector generateParams1dMaxMin() { TopKParams(reference_tests::Tensor(ET, {2, 3, 2}, std::vector{12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}), reference_tests::Tensor(ET2, {}, std::vector{2}), 1, - opset1::TopK::Mode::MIN, - opset1::TopK::SortType::SORT_VALUES, + op::v1::TopK::Mode::MIN, + op::v1::TopK::SortType::SORT_VALUES, reference_tests::Tensor(ET, {2, 2, 2}, std::vector{8, 2, 10, 4, 5, 1, 6, 3}), reference_tests::Tensor(ET_OUT, {2, 2, 2}, std::vector{2, 0, 1, 2, 1, 0, 0, 1}), 0, @@ -991,8 +992,8 @@ std::vector generateParams1dMaxMin() { TopKParams(reference_tests::Tensor(ET, {2, 3, 2}, std::vector{12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}), reference_tests::Tensor(ET2, {}, std::vector{2}), 1, - opset1::TopK::Mode::MIN, - opset1::TopK::SortType::SORT_VALUES, + op::v1::TopK::Mode::MIN, + op::v1::TopK::SortType::SORT_VALUES, reference_tests::Tensor(ET, {2, 2, 2}, std::vector{8, 2, 10, 4, 5, 1, 6, 3}), reference_tests::Tensor(ET_OUT, {2, 2, 2}, std::vector{2, 0, 1, 2, 1, 0, 0, 1}), 1, @@ -1001,8 +1002,8 @@ std::vector generateParams1dMaxMin() { TopKParams(reference_tests::Tensor(ET, {2, 3, 2}, std::vector{12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}), reference_tests::Tensor(ET2, {}, std::vector{1}), 1, - opset1::TopK::Mode::MIN, - opset1::TopK::SortType::SORT_VALUES, + op::v1::TopK::Mode::MIN, + op::v1::TopK::SortType::SORT_VALUES, reference_tests::Tensor(ET, {2, 1, 2}, std::vector{8, 2, 5, 1}), reference_tests::Tensor(ET_OUT, {2, 1, 2}, std::vector{2, 0, 1, 0}), 0, @@ -1011,8 +1012,8 @@ std::vector generateParams1dMaxMin() { TopKParams(reference_tests::Tensor(ET, {2, 3, 2}, std::vector{12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}), reference_tests::Tensor(ET2, {}, std::vector{1}), 1, - opset1::TopK::Mode::MIN, - opset1::TopK::SortType::SORT_VALUES, + op::v1::TopK::Mode::MIN, + op::v1::TopK::SortType::SORT_VALUES, reference_tests::Tensor(ET, {2, 1, 2}, std::vector{8, 2, 5, 1}), reference_tests::Tensor(ET_OUT, {2, 1, 2}, std::vector{2, 0, 1, 0}), 1, @@ -1021,8 +1022,8 @@ std::vector generateParams1dMaxMin() { TopKParams(reference_tests::Tensor(ET, {4, 3}, std::vector{9, 2, 10, 12, 8, 4, 6, 1, 5, 3, 11, 7}), reference_tests::Tensor(ET2, {}, std::vector{4}), 0, - opset1::TopK::Mode::MAX, - opset1::TopK::SortType::SORT_VALUES, + op::v1::TopK::Mode::MAX, + op::v1::TopK::SortType::SORT_VALUES, reference_tests::Tensor(ET, {4, 3}, std::vector{12, 11, 10, 9, 8, 7, 6, 2, 5, 3, 1, 4}), reference_tests::Tensor(ET_OUT, {4, 3}, std::vector{1, 3, 0, 0, 1, 3, 2, 0, 2, 3, 2, 1}), 0, @@ -1031,8 +1032,8 @@ std::vector generateParams1dMaxMin() { TopKParams(reference_tests::Tensor(ET, {4, 3}, std::vector{9, 2, 10, 12, 8, 4, 6, 1, 5, 3, 11, 7}), reference_tests::Tensor(ET2, {}, std::vector{4}), 0, - opset1::TopK::Mode::MAX, - opset1::TopK::SortType::SORT_VALUES, + op::v1::TopK::Mode::MAX, + op::v1::TopK::SortType::SORT_VALUES, reference_tests::Tensor(ET, {4, 3}, std::vector{12, 11, 10, 9, 8, 7, 6, 2, 5, 3, 1, 4}), reference_tests::Tensor(ET_OUT, {4, 3}, std::vector{1, 3, 0, 0, 1, 3, 2, 0, 2, 3, 2, 1}), 1, @@ -1041,8 +1042,8 @@ std::vector generateParams1dMaxMin() { TopKParams(reference_tests::Tensor(ET, {4, 3}, std::vector{9, 2, 10, 12, 8, 4, 6, 1, 5, 3, 11, 7}), reference_tests::Tensor(ET2, {}, std::vector{2}), 0, - 
opset1::TopK::Mode::MAX, - opset1::TopK::SortType::SORT_VALUES, + op::v1::TopK::Mode::MAX, + op::v1::TopK::SortType::SORT_VALUES, reference_tests::Tensor(ET, {2, 3}, std::vector{12, 11, 10, 9, 8, 7}), reference_tests::Tensor(ET_OUT, {2, 3}, std::vector{1, 3, 0, 0, 1, 3}), 0, @@ -1051,8 +1052,8 @@ std::vector generateParams1dMaxMin() { TopKParams(reference_tests::Tensor(ET, {4, 3}, std::vector{9, 2, 10, 12, 8, 4, 6, 1, 5, 3, 11, 7}), reference_tests::Tensor(ET2, {}, std::vector{2}), 0, - opset1::TopK::Mode::MAX, - opset1::TopK::SortType::SORT_VALUES, + op::v1::TopK::Mode::MAX, + op::v1::TopK::SortType::SORT_VALUES, reference_tests::Tensor(ET, {2, 3}, std::vector{12, 11, 10, 9, 8, 7}), reference_tests::Tensor(ET_OUT, {2, 3}, std::vector{1, 3, 0, 0, 1, 3}), 1, @@ -1061,8 +1062,8 @@ std::vector generateParams1dMaxMin() { TopKParams(reference_tests::Tensor(ET, {4, 3}, std::vector{9, 2, 10, 12, 8, 4, 6, 1, 5, 3, 11, 7}), reference_tests::Tensor(ET2, {}, std::vector{1}), 0, - opset1::TopK::Mode::MAX, - opset1::TopK::SortType::SORT_VALUES, + op::v1::TopK::Mode::MAX, + op::v1::TopK::SortType::SORT_VALUES, reference_tests::Tensor(ET, {1, 3}, std::vector{12, 11, 10}), reference_tests::Tensor(ET_OUT, {1, 3}, std::vector{1, 3, 0}), 0, @@ -1071,8 +1072,8 @@ std::vector generateParams1dMaxMin() { TopKParams(reference_tests::Tensor(ET, {4, 3}, std::vector{9, 2, 10, 12, 8, 4, 6, 1, 5, 3, 11, 7}), reference_tests::Tensor(ET2, {}, std::vector{1}), 0, - opset1::TopK::Mode::MAX, - opset1::TopK::SortType::SORT_VALUES, + op::v1::TopK::Mode::MAX, + op::v1::TopK::SortType::SORT_VALUES, reference_tests::Tensor(ET, {1, 3}, std::vector{12, 11, 10}), reference_tests::Tensor(ET_OUT, {1, 3}, std::vector{1, 3, 0}), 1, @@ -1081,8 +1082,8 @@ std::vector generateParams1dMaxMin() { TopKParams(reference_tests::Tensor(ET, {2, 4}, std::vector{1, 3, 2, 4, 1, 3, 3, 2}), reference_tests::Tensor(ET2, {}, std::vector{1}), 1, - opset1::TopK::Mode::MAX, - opset1::TopK::SortType::SORT_VALUES, + op::v1::TopK::Mode::MAX, + op::v1::TopK::SortType::SORT_VALUES, reference_tests::Tensor(ET, {2, 1}, std::vector{4, 3}), reference_tests::Tensor(ET_OUT, {2, 1}, std::vector{3, 1}), 0, @@ -1091,8 +1092,8 @@ std::vector generateParams1dMaxMin() { TopKParams(reference_tests::Tensor(ET, {2, 4}, std::vector{1, 3, 2, 4, 1, 3, 3, 2}), reference_tests::Tensor(ET2, {}, std::vector{1}), 1, - opset1::TopK::Mode::MAX, - opset1::TopK::SortType::SORT_VALUES, + op::v1::TopK::Mode::MAX, + op::v1::TopK::SortType::SORT_VALUES, reference_tests::Tensor(ET, {2, 1}, std::vector{4, 3}), reference_tests::Tensor(ET_OUT, {2, 1}, std::vector{3, 1}), 1, @@ -1101,8 +1102,8 @@ std::vector generateParams1dMaxMin() { TopKParams(reference_tests::Tensor(ET, {4, 3}, std::vector{12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}), reference_tests::Tensor(ET2, {}, std::vector{4}), 0, - opset1::TopK::Mode::MIN, - opset1::TopK::SortType::SORT_VALUES, + op::v1::TopK::Mode::MIN, + op::v1::TopK::SortType::SORT_VALUES, reference_tests::Tensor(ET, {4, 3}, std::vector{3, 1, 4, 6, 2, 5, 9, 8, 7, 12, 11, 10}), reference_tests::Tensor(ET_OUT, {4, 3}, std::vector{3, 2, 1, 2, 0, 2, 1, 1, 3, 0, 3, 0}), 0, @@ -1111,8 +1112,8 @@ std::vector generateParams1dMaxMin() { TopKParams(reference_tests::Tensor(ET, {4, 3}, std::vector{12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}), reference_tests::Tensor(ET2, {}, std::vector{4}), 0, - opset1::TopK::Mode::MIN, - opset1::TopK::SortType::SORT_VALUES, + op::v1::TopK::Mode::MIN, + op::v1::TopK::SortType::SORT_VALUES, reference_tests::Tensor(ET, {4, 3}, std::vector{3, 1, 4, 6, 2, 
5, 9, 8, 7, 12, 11, 10}), reference_tests::Tensor(ET_OUT, {4, 3}, std::vector{3, 2, 1, 2, 0, 2, 1, 1, 3, 0, 3, 0}), 1, @@ -1121,8 +1122,8 @@ std::vector generateParams1dMaxMin() { TopKParams(reference_tests::Tensor(ET, {4, 3}, std::vector{12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}), reference_tests::Tensor(ET2, {}, std::vector{2}), 0, - opset1::TopK::Mode::MIN, - opset1::TopK::SortType::SORT_VALUES, + op::v1::TopK::Mode::MIN, + op::v1::TopK::SortType::SORT_VALUES, reference_tests::Tensor(ET, {2, 3}, std::vector{3, 1, 4, 6, 2, 5}), reference_tests::Tensor(ET_OUT, {2, 3}, std::vector{3, 2, 1, 2, 0, 2}), 0, @@ -1131,8 +1132,8 @@ std::vector generateParams1dMaxMin() { TopKParams(reference_tests::Tensor(ET, {4, 3}, std::vector{12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}), reference_tests::Tensor(ET2, {}, std::vector{2}), 0, - opset1::TopK::Mode::MIN, - opset1::TopK::SortType::SORT_VALUES, + op::v1::TopK::Mode::MIN, + op::v1::TopK::SortType::SORT_VALUES, reference_tests::Tensor(ET, {2, 3}, std::vector{3, 1, 4, 6, 2, 5}), reference_tests::Tensor(ET_OUT, {2, 3}, std::vector{3, 2, 1, 2, 0, 2}), 1, @@ -1141,8 +1142,8 @@ std::vector generateParams1dMaxMin() { TopKParams(reference_tests::Tensor(ET, {4, 3}, std::vector{12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}), reference_tests::Tensor(ET2, {}, std::vector{1}), 0, - opset1::TopK::Mode::MIN, - opset1::TopK::SortType::NONE, + op::v1::TopK::Mode::MIN, + op::v1::TopK::SortType::NONE, reference_tests::Tensor(ET, {1, 3}, std::vector{3, 1, 4}), reference_tests::Tensor(ET_OUT, {1, 3}, std::vector{3, 2, 1}), 0, @@ -1151,8 +1152,8 @@ std::vector generateParams1dMaxMin() { TopKParams(reference_tests::Tensor(ET, {4, 3}, std::vector{12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}), reference_tests::Tensor(ET2, {}, std::vector{1}), 0, - opset1::TopK::Mode::MIN, - opset1::TopK::SortType::NONE, + op::v1::TopK::Mode::MIN, + op::v1::TopK::SortType::NONE, reference_tests::Tensor(ET, {1, 3}, std::vector{3, 1, 4}), reference_tests::Tensor(ET_OUT, {1, 3}, std::vector{3, 2, 1}), 1, @@ -1190,9 +1191,9 @@ INSTANTIATE_TEST_SUITE_P(smoke_TopK_With_Hardcoded_Refs, class ReferenceTopKTestInt64 : public ReferenceTopKTest1dMaxMin { private: static std::shared_ptr CreateFunction(const TopKParams& params, size_t out_idx) { - const auto A = std::make_shared(params.A.type, params.A.shape); - const auto k = opset1::Constant::create(params.k.type, params.k.shape, params.k.data.data()); - const auto B = std::make_shared(A, k, params.axis, params.mode, params.sort, element::i64); + const auto A = std::make_shared(params.A.type, params.A.shape); + const auto k = op::v0::Constant::create(params.k.type, params.k.shape, params.k.data.data()); + const auto B = std::make_shared(A, k, params.axis, params.mode, params.sort, element::i64); const auto f = std::make_shared(OutputVector{B->output(out_idx)}, ParameterVector{A}); return f; } @@ -1211,8 +1212,8 @@ std::vector generateParamsInt64() { TopKParams(reference_tests::Tensor(ET, {2, 3, 2}, std::vector{9, 2, 10, 12, 8, 4, 6, 1, 5, 3, 11, 7}), reference_tests::Tensor(ET2, {}, std::vector{3}), 1, - opset1::TopK::Mode::MAX, - opset1::TopK::SortType::SORT_VALUES, + op::v1::TopK::Mode::MAX, + op::v1::TopK::SortType::SORT_VALUES, reference_tests::Tensor(ET, {2, 3, 2}, std::vector{10, 12, 9, 4, 8, 2, 11, 7, 6, 3, 5, 1}), reference_tests::Tensor(ET_OUT, {2, 3, 2}, std::vector{1, 1, 0, 2, 2, 0, 2, 2, 0, 1, 1, 0}), 0, @@ -1220,8 +1221,8 @@ std::vector generateParamsInt64() { TopKParams(reference_tests::Tensor(ET, {2, 3, 2}, std::vector{9, 2, 10, 12, 8, 4, 6, 1, 5, 3, 11, 7}), 
reference_tests::Tensor(ET2, {}, std::vector{3}), 1, - opset1::TopK::Mode::MAX, - opset1::TopK::SortType::SORT_VALUES, + op::v1::TopK::Mode::MAX, + op::v1::TopK::SortType::SORT_VALUES, reference_tests::Tensor(ET, {2, 3, 2}, std::vector{10, 12, 9, 4, 8, 2, 11, 7, 6, 3, 5, 1}), reference_tests::Tensor(ET_OUT, {2, 3, 2}, std::vector{1, 1, 0, 2, 2, 0, 2, 2, 0, 1, 1, 0}), 1, @@ -1258,9 +1259,9 @@ class ReferenceTopKTestSingleOutput : public ReferenceTopKTest { private: static std::shared_ptr CreateFunction(const TopKParams& params) { - const auto A = std::make_shared(params.A.type, params.A.shape); - const auto k = opset1::Constant::create(params.k.type, params.k.shape, params.k.data.data()); - const auto B = std::make_shared(A, k, params.axis, params.mode, params.sort); + const auto A = std::make_shared(params.A.type, params.A.shape); + const auto k = op::v0::Constant::create(params.k.type, params.k.shape, params.k.data.data()); + const auto B = std::make_shared(A, k, params.axis, params.mode, params.sort); const auto f = std::make_shared(OutputVector{B->output(1)}, ParameterVector{A}); return f; } @@ -1279,8 +1280,8 @@ std::vector generateParamsSingleOutput() { TopKParams(reference_tests::Tensor(ET, {2, 3, 2}, std::vector{12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}), reference_tests::Tensor(ET2, {}, std::vector{2}), 1, - opset1::TopK::Mode::MIN, - opset1::TopK::SortType::SORT_VALUES, + op::v1::TopK::Mode::MIN, + op::v1::TopK::SortType::SORT_VALUES, reference_tests::Tensor(ET, {2, 2, 2}, std::vector{}), reference_tests::Tensor(ET_OUT, {2, 2, 2}, std::vector{2, 0, 1, 2, 1, 0, 0, 1}), 0, @@ -1318,36 +1319,36 @@ INSTANTIATE_TEST_SUITE_P(smoke_TopK_With_Hardcoded_Refs, ReferenceTopKTest::getTestCaseName); TEST(ReferenceTopKTestInvalid, topk_v1_invalid_strings) { - const auto data = std::make_shared(element::f32, Shape{1, 2, 3}); - const auto k = opset1::Constant::create(element::i64, Shape{}, {1}); - EXPECT_THROW(opset1::TopK(data, k, 0, "max", "invalid_mode"), ov::AssertFailure); - EXPECT_THROW(opset1::TopK(data, k, 0, "invalid_sort", "index"), ov::AssertFailure); + const auto data = std::make_shared(element::f32, Shape{1, 2, 3}); + const auto k = op::v0::Constant::create(element::i64, Shape{}, {1}); + EXPECT_THROW(op::v1::TopK(data, k, 0, "max", "invalid_mode"), ov::AssertFailure); + EXPECT_THROW(op::v1::TopK(data, k, 0, "invalid_sort", "index"), ov::AssertFailure); } TEST(ReferenceTopKTestInvalid, topk_v1_invalid_k) { - const auto data = std::make_shared(element::f32, Shape{1, 2, 3}); - const auto k_non_scalar = opset1::Constant::create(element::i64, Shape{2}, {1, 2}); - EXPECT_THROW(opset1::TopK(data, k_non_scalar, 0, "max", "index"), ov::NodeValidationFailure); - const auto k_float = opset1::Constant::create(element::f32, Shape{}, {1.0f}); - EXPECT_THROW(opset1::TopK(data, k_float, 0, "max", "index"), ov::NodeValidationFailure); - const auto k_negative = opset1::Constant::create(element::i8, Shape{}, {-1}); - EXPECT_THROW(opset1::TopK(data, k_negative, 0, "max", "index"), ov::NodeValidationFailure); + const auto data = std::make_shared(element::f32, Shape{1, 2, 3}); + const auto k_non_scalar = op::v0::Constant::create(element::i64, Shape{2}, {1, 2}); + EXPECT_THROW(op::v1::TopK(data, k_non_scalar, 0, "max", "index"), ov::NodeValidationFailure); + const auto k_float = op::v0::Constant::create(element::f32, Shape{}, {1.0f}); + EXPECT_THROW(op::v1::TopK(data, k_float, 0, "max", "index"), ov::NodeValidationFailure); + const auto k_negative = op::v0::Constant::create(element::i8, Shape{}, {-1}); + 
EXPECT_THROW(op::v1::TopK(data, k_negative, 0, "max", "index"), ov::NodeValidationFailure); } class ReferenceTopKTestResnet50V3 : public ReferenceTopKTestResnet50 { private: static std::shared_ptr CreateFunction(const TopKParamsResnet50& params) { - const auto A = std::make_shared(params.A.type, params.A.shape); - const auto B = std::make_shared(A, - opset1::Constant::create(element::i64, {}, {5}), + const auto A = std::make_shared(params.A.type, params.A.shape); + const auto B = std::make_shared(A, + op::v0::Constant::create(element::i64, {}, {5}), 1, - opset1::TopK::Mode::MAX, - opset1::TopK::SortType::SORT_VALUES); - const auto C = std::make_shared(A, - opset1::Constant::create(element::i64, {}, {1}), + op::v1::TopK::Mode::MAX, + op::v1::TopK::SortType::SORT_VALUES); + const auto C = std::make_shared(A, + op::v0::Constant::create(element::i64, {}, {1}), 1, - opset1::TopK::Mode::MAX, - opset1::TopK::SortType::SORT_VALUES); + op::v1::TopK::Mode::MAX, + op::v1::TopK::SortType::SORT_VALUES); const auto out5_value = B->output(0); const auto out5_index = B->output(1); @@ -1371,9 +1372,9 @@ INSTANTIATE_TEST_SUITE_P(smoke_TopK_With_Hardcoded_Refs, class ReferenceTopKTestMaxMinSortV3 : public ReferenceTopKTestMaxMinSort { private: static std::shared_ptr CreateFunction(const TopKParams& params) { - const auto A = std::make_shared(params.A.type, params.A.shape); - const auto k = opset1::Constant::create(params.k.type, params.k.shape, params.k.data.data()); - const auto B = std::make_shared(A, k, params.axis, params.mode, params.sort); + const auto A = std::make_shared(params.A.type, params.A.shape); + const auto k = op::v0::Constant::create(params.k.type, params.k.shape, params.k.data.data()); + const auto B = std::make_shared(A, k, params.axis, params.mode, params.sort); const auto f = std::make_shared(B->outputs(), ParameterVector{A}); return f; } @@ -1391,9 +1392,9 @@ INSTANTIATE_TEST_SUITE_P(smoke_TopK_With_Hardcoded_Refs, class ReferenceTopKTestBackendV3 : public ReferenceTopKTestBackend { private: static std::shared_ptr CreateFunction(const TopKParams& params) { - const auto A = std::make_shared(params.A.type, params.A.shape); - const auto k = opset1::Constant::create(params.k.type, params.k.shape, params.k.data.data()); - const auto B = std::make_shared(A, k, params.axis, params.mode, params.sort); + const auto A = std::make_shared(params.A.type, params.A.shape); + const auto k = op::v0::Constant::create(params.k.type, params.k.shape, params.k.data.data()); + const auto B = std::make_shared(A, k, params.axis, params.mode, params.sort); const auto f = std::make_shared(B->outputs(), ParameterVector{A}); return f; } @@ -1411,9 +1412,9 @@ INSTANTIATE_TEST_SUITE_P(smoke_TopK_With_Hardcoded_Refs, class ReferenceTopKTest1dMaxMinV3 : public ReferenceTopKTest1dMaxMin { private: static std::shared_ptr CreateFunction(const TopKParams& params, size_t out_idx) { - const auto A = std::make_shared(params.A.type, params.A.shape); - const auto k = opset1::Constant::create(params.k.type, params.k.shape, params.k.data.data()); - const auto B = std::make_shared(A, k, params.axis, params.mode, params.sort); + const auto A = std::make_shared(params.A.type, params.A.shape); + const auto k = op::v0::Constant::create(params.k.type, params.k.shape, params.k.data.data()); + const auto B = std::make_shared(A, k, params.axis, params.mode, params.sort); const auto f = std::make_shared(OutputVector{B->output(out_idx)}, ParameterVector{A}); return f; } @@ -1431,9 +1432,9 @@ 
INSTANTIATE_TEST_SUITE_P(smoke_TopK_With_Hardcoded_Refs, class ReferenceTopKTestInt64V3 : public ReferenceTopKTestInt64 { private: static std::shared_ptr CreateFunction(const TopKParams& params, size_t out_idx) { - const auto A = std::make_shared(params.A.type, params.A.shape); - const auto k = opset1::Constant::create(params.k.type, params.k.shape, params.k.data.data()); - const auto B = std::make_shared(A, k, params.axis, params.mode, params.sort, element::i64); + const auto A = std::make_shared(params.A.type, params.A.shape); + const auto k = op::v0::Constant::create(params.k.type, params.k.shape, params.k.data.data()); + const auto B = std::make_shared(A, k, params.axis, params.mode, params.sort, element::i64); const auto f = std::make_shared(OutputVector{B->output(out_idx)}, ParameterVector{A}); return f; } @@ -1451,9 +1452,9 @@ INSTANTIATE_TEST_SUITE_P(smoke_TopK_With_Hardcoded_Refs, class ReferenceTopKTestSingleOutputV3 : public ReferenceTopKTestSingleOutput { private: static std::shared_ptr CreateFunction(const TopKParams& params) { - const auto A = std::make_shared(params.A.type, params.A.shape); - const auto k = opset1::Constant::create(params.k.type, params.k.shape, params.k.data.data()); - const auto B = std::make_shared(A, k, params.axis, params.mode, params.sort); + const auto A = std::make_shared(params.A.type, params.A.shape); + const auto k = op::v0::Constant::create(params.k.type, params.k.shape, params.k.data.data()); + const auto B = std::make_shared(A, k, params.axis, params.mode, params.sort); const auto f = std::make_shared(OutputVector{B->output(1)}, ParameterVector{A}); return f; } @@ -1469,20 +1470,20 @@ INSTANTIATE_TEST_SUITE_P(smoke_TopK_With_Hardcoded_Refs, ReferenceTopKTestSingleOutputV3::getTestCaseName); TEST(ReferenceTopKTestInvalidV3, topk_v3_invalid_strings) { - const auto data = std::make_shared(element::f32, Shape{1, 2, 3}); - const auto k = opset1::Constant::create(element::i64, Shape{}, {1}); - EXPECT_THROW(opset3::TopK(data, k, 0, "max", "invalid_mode"), ov::AssertFailure); - EXPECT_THROW(opset3::TopK(data, k, 0, "invalid_sort", "index"), ov::AssertFailure); + const auto data = std::make_shared(element::f32, Shape{1, 2, 3}); + const auto k = op::v0::Constant::create(element::i64, Shape{}, {1}); + EXPECT_THROW(op::v3::TopK(data, k, 0, "max", "invalid_mode"), ov::AssertFailure); + EXPECT_THROW(op::v3::TopK(data, k, 0, "invalid_sort", "index"), ov::AssertFailure); } TEST(ReferenceTopKTestInvalidV3, topk_v3_invalid_k) { - const auto data = std::make_shared(element::f32, Shape{1, 2, 3}); - const auto k_non_scalar = opset1::Constant::create(element::i64, Shape{2}, {1, 2}); - EXPECT_THROW(opset3::TopK(data, k_non_scalar, 0, "max", "index"), ov::NodeValidationFailure); - const auto k_float = opset1::Constant::create(element::f32, Shape{}, {1.0f}); - EXPECT_THROW(opset3::TopK(data, k_float, 0, "max", "index"), ov::NodeValidationFailure); - const auto k_negative = opset1::Constant::create(element::i8, Shape{}, {-1}); - EXPECT_THROW(opset3::TopK(data, k_negative, 0, "max", "index"), ov::NodeValidationFailure); + const auto data = std::make_shared(element::f32, Shape{1, 2, 3}); + const auto k_non_scalar = op::v0::Constant::create(element::i64, Shape{2}, {1, 2}); + EXPECT_THROW(op::v3::TopK(data, k_non_scalar, 0, "max", "index"), ov::NodeValidationFailure); + const auto k_float = op::v0::Constant::create(element::f32, Shape{}, {1.0f}); + EXPECT_THROW(op::v3::TopK(data, k_float, 0, "max", "index"), ov::NodeValidationFailure); + const auto k_negative = 
op::v0::Constant::create(element::i8, Shape{}, {-1}); + EXPECT_THROW(op::v3::TopK(data, k_negative, 0, "max", "index"), ov::NodeValidationFailure); } class ReferenceTopKv11StableTest : public ReferenceTopKTest { @@ -1502,12 +1503,12 @@ class ReferenceTopKv11StableTest : public ReferenceTopKTest { private: static std::shared_ptr CreateFunction(const TopKParams& params) { - const auto A = std::make_shared(params.A.type, params.A.shape); - const auto k = opset11::Constant::create(params.k.type, params.k.shape, params.k.data.data()); + const auto A = std::make_shared(params.A.type, params.A.shape); + const auto k = op::v0::Constant::create(params.k.type, params.k.shape, params.k.data.data()); const auto topk_stable = - std::make_shared(A, k, params.axis, params.mode, params.sort, params.result1.type, true); + std::make_shared(A, k, params.axis, params.mode, params.sort, params.result1.type, true); const auto topk_unstable = - std::make_shared(A, k, params.axis, params.mode, params.sort, params.result1.type, false); + std::make_shared(A, k, params.axis, params.mode, params.sort, params.result1.type, false); return std::make_shared( OutputVector{topk_stable->output(0), topk_stable->output(1), topk_unstable->output(0)}, @@ -1528,8 +1529,8 @@ std::vector generateParamsForStableTest() { TopKParams(reference_tests::Tensor(ET, {2, 7}, std::vector{5, 4, 3, 1, 7, 1, 3, 2, 1, 2, 5, 1, 7, 3}), reference_tests::Tensor(ET2, {}, std::vector{3}), 1, - opset1::TopK::Mode::MIN, - opset1::TopK::SortType::SORT_VALUES, + op::v1::TopK::Mode::MIN, + op::v1::TopK::SortType::SORT_VALUES, reference_tests::Tensor(ET, {2, 3}, std::vector{1, 1, 3, 1, 1, 2}), reference_tests::Tensor(ET_OUT, {2, 3}, std::vector{3, 5, 2, 1, 4, 0}), 0, @@ -1541,8 +1542,8 @@ std::vector generateParamsForStableTest() { }), reference_tests::Tensor(ET2, {}, std::vector{4}), 0, - opset1::TopK::Mode::MAX, - opset1::TopK::SortType::SORT_VALUES, + op::v1::TopK::Mode::MAX, + op::v1::TopK::SortType::SORT_VALUES, reference_tests::Tensor(ET, {4, 3}, std::vector{8, 9, 5, 7, 8, 4, 7, 7, 3, 5, 7, 2}), reference_tests::Tensor(ET_OUT, {4, 3}, std::vector{5, 1, 4, 1, 3, 5, 4, 0, 6, 0, 2, 2}), 0, @@ -1552,8 +1553,8 @@ std::vector generateParamsForStableTest() { std::vector{1, 3, 3, 1, 2, 4, 2, 2, 3, 7, 7, 1, 7, 9, 7, 5, 7, 7}), reference_tests::Tensor(ET2, {}, std::vector{2}), 1, - opset1::TopK::Mode::MIN, - opset1::TopK::SortType::SORT_VALUES, + op::v1::TopK::Mode::MIN, + op::v1::TopK::SortType::SORT_VALUES, reference_tests::Tensor(ET, {2, 2, 3}, std::vector{1, 2, 3, 1, 2, 3, 5, 7, 1, 7, 7, 7}), reference_tests::Tensor(ET_OUT, {2, 2, 3}, std::vector{0, 1, 0, 1, 2, 2, 2, 0, 0, 0, 2, 1}), 0, diff --git a/src/plugins/template/tests/functional/shared_tests_instances/behavior/ov_executable_network/ov_exec_net_import_export.cpp b/src/plugins/template/tests/functional/shared_tests_instances/behavior/ov_executable_network/ov_exec_net_import_export.cpp index 391c2919c26e74..388150d628600b 100644 --- a/src/plugins/template/tests/functional/shared_tests_instances/behavior/ov_executable_network/ov_exec_net_import_export.cpp +++ b/src/plugins/template/tests/functional/shared_tests_instances/behavior/ov_executable_network/ov_exec_net_import_export.cpp @@ -1,9 +1,8 @@ // Copyright (C) 2018-2023 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // -#include - #include "behavior/compiled_model/import_export.hpp" +#include "common_test_utils/test_constants.hpp" #include "ie_plugin_config.hpp" using namespace ov::test::behavior; diff --git 
a/src/plugins/template/tests/functional/subgraph_reference/preprocess_opencv.cpp b/src/plugins/template/tests/functional/subgraph_reference/preprocess_opencv.cpp index 32b4097eb58550..a19631713ad3c1 100644 --- a/src/plugins/template/tests/functional/subgraph_reference/preprocess_opencv.cpp +++ b/src/plugins/template/tests/functional/subgraph_reference/preprocess_opencv.cpp @@ -8,13 +8,13 @@ # include # include -# include -# include -# include -# include # include "base_reference_test.hpp" +# include "openvino/core/preprocess/pre_post_process.hpp" # include "ov_models/builders.hpp" +# include "shared_test_classes/base/layer_test_utils.hpp" +# include "shared_test_classes/single_layer/convert_color_i420.hpp" +# include "shared_test_classes/single_layer/convert_color_nv12.hpp" using namespace ov; using namespace ov::preprocess; diff --git a/src/plugins/template/tests/functional/transformations/disable_transformations_test.cpp b/src/plugins/template/tests/functional/transformations/disable_transformations_test.cpp index 6ad9c5c024f18a..7d5f878869f4ec 100644 --- a/src/plugins/template/tests/functional/transformations/disable_transformations_test.cpp +++ b/src/plugins/template/tests/functional/transformations/disable_transformations_test.cpp @@ -11,21 +11,24 @@ #include "common_test_utils/graph_comparator.hpp" #include "common_test_utils/ov_test_utils.hpp" #include "functional_test_utils/ov_plugin_cache.hpp" -#include "openvino/opsets/opset11.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/convert.hpp" +#include "openvino/op/convert_like.hpp" +#include "openvino/op/parameter.hpp" #include "template/properties.hpp" TEST(DisableTransformationsTests, TestTemplatePluginProperty) { std::shared_ptr m(nullptr), m_ref(nullptr); { - auto data = std::make_shared(ov::element::f32, ov::Shape{3, 1, 2}); - auto like = ov::opset11::Constant::create(ov::element::i32, ov::Shape{1}, {1}); - auto cvtlike = std::make_shared(data, like); + auto data = std::make_shared(ov::element::f32, ov::Shape{3, 1, 2}); + auto like = ov::op::v0::Constant::create(ov::element::i32, ov::Shape{1}, {1}); + auto cvtlike = std::make_shared(data, like); m = std::make_shared(ov::NodeVector{cvtlike}, ov::ParameterVector{data}); } { - auto data = std::make_shared(ov::element::f32, ov::Shape{3, 1, 2}); - auto cvt = std::make_shared(data, ov::element::i32); + auto data = std::make_shared(ov::element::f32, ov::Shape{3, 1, 2}); + auto cvt = std::make_shared(data, ov::element::i32); m_ref = std::make_shared(ov::NodeVector{cvt}, ov::ParameterVector{data}); } From d41e7fcc30dbfb56650903f74987f99d41ce0675 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Thu, 12 Oct 2023 03:00:14 +0400 Subject: [PATCH 159/257] Revert "[CI] [GHA] Skip `test_div_uint8_cpu` on macOS only; unskip `test_onnx/test_backend.py` in GHA workflows (#20367)" (#20402) This reverts commit a3d6d0bca952d206bb1c89eaf8d9114e2bc1a28a. 
--- .github/workflows/linux.yml | 3 ++- .github/workflows/mac.yml | 5 +++-- .github/workflows/windows.yml | 2 +- .../python/tests_compatibility/test_onnx/test_backend.py | 9 --------- src/frontends/onnx/tests/tests_python/test_backend.py | 9 --------- 5 files changed, 6 insertions(+), 22 deletions(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 6bd6ef2342afbf..a3c7e9a4e1c250 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -896,7 +896,8 @@ jobs: run: | python3 -m pytest -s ${INSTALL_TEST_DIR}/pyngraph \ --junitxml=${INSTALL_TEST_DIR}/TEST-Pyngraph.xml \ - --ignore=${INSTALL_TEST_DIR}/pyngraph/tests_compatibility/test_onnx/test_zoo_models.py + --ignore=${INSTALL_TEST_DIR}/pyngraph/tests_compatibility/test_onnx/test_zoo_models.py \ + --ignore=${INSTALL_TEST_DIR}/pyngraph/tests_compatibility/test_onnx/test_backend.py - name: Python API 2.0 Tests run: | diff --git a/.github/workflows/mac.yml b/.github/workflows/mac.yml index 5f9658fd303f52..5097a6bb006b87 100644 --- a/.github/workflows/mac.yml +++ b/.github/workflows/mac.yml @@ -485,11 +485,12 @@ jobs: python3 -m pip install $ov_dev_wheel_name[mxnet,caffe,kaldi,onnx,tensorflow2] popd - - name: Python API 1.0 Tests + - name: nGraph and IE Python Bindings Tests run: | python3 -m pytest -s ${{ env.INSTALL_TEST_DIR }}/pyngraph \ --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-Pyngraph.xml \ - --ignore=${{ env.INSTALL_TEST_DIR }}/pyngraph/tests/test_onnx/test_zoo_models.py + --ignore=${{ env.INSTALL_TEST_DIR }}/pyngraph/tests/test_onnx/test_zoo_models.py \ + --ignore=${{ env.INSTALL_TEST_DIR }}/pyngraph/tests/test_onnx/test_backend.py - name: Python API 2.0 Tests run: | diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index a32810a514305c..92e3f491aca928 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -309,7 +309,7 @@ jobs: shell: cmd run: | set PYTHONPATH=${{ env.OPENVINO_REPO }}\tools\mo;${{ env.LAYER_TESTS_INSTALL_DIR }};%PYTHONPATH% - call "${{ env.INSTALL_DIR }}\\setupvars.bat" && python3 -m pytest -s ${{ env.INSTALL_TEST_DIR }}/pyngraph ${{ env.PYTHON_STATIC_ARGS }} --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-Pyngraph.xml --ignore=${{ env.INSTALL_TEST_DIR }}/pyngraph/tests_compatibility/test_onnx/test_zoo_models.py + call "${{ env.INSTALL_DIR }}\\setupvars.bat" && python3 -m pytest -s ${{ env.INSTALL_TEST_DIR }}/pyngraph ${{ env.PYTHON_STATIC_ARGS }} --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-Pyngraph.xml --ignore=${{ env.INSTALL_TEST_DIR }}/pyngraph/tests_compatibility/test_onnx/test_zoo_models.py --ignore=${{ env.INSTALL_TEST_DIR }}/pyngraph/tests_compatibility/test_onnx/test_backend.py - name: Python API 2.0 Tests shell: cmd diff --git a/src/bindings/python/tests_compatibility/test_onnx/test_backend.py b/src/bindings/python/tests_compatibility/test_onnx/test_backend.py index 396cddb80a598f..87f53223c2d672 100644 --- a/src/bindings/python/tests_compatibility/test_onnx/test_backend.py +++ b/src/bindings/python/tests_compatibility/test_onnx/test_backend.py @@ -3,8 +3,6 @@ import logging -from sys import platform - import onnx.backend.test from tests_compatibility import ( BACKEND_NAME, @@ -34,7 +32,6 @@ xfail_issue_48052, xfail_issue_52463, xfail_issue_58033, - xfail_issue_58676, xfail_issue_63033, xfail_issue_63036, xfail_issue_63043, @@ -812,12 +809,6 @@ def expect_fail(test_case_path, xfail): # type: (str) -> None ), ] -if platform == 'darwin': - tests_expected_to_fail.append(( - xfail_issue_58676, - 
"OnnxBackendNodeModelTest.test_div_uint8_cpu" - )) - for test_group in tests_expected_to_fail: for test_case in test_group[1:]: expect_fail("{}".format(test_case), test_group[0]) diff --git a/src/frontends/onnx/tests/tests_python/test_backend.py b/src/frontends/onnx/tests/tests_python/test_backend.py index d75cfcf77aeefd..d1ef686bdd4124 100644 --- a/src/frontends/onnx/tests/tests_python/test_backend.py +++ b/src/frontends/onnx/tests/tests_python/test_backend.py @@ -4,8 +4,6 @@ import logging -from sys import platform - import onnx.backend.test from tests import ( BACKEND_NAME, @@ -34,7 +32,6 @@ xfail_issue_48052, xfail_issue_52463, xfail_issue_58033, - xfail_issue_58676, xfail_issue_63033, xfail_issue_63036, xfail_issue_63043, @@ -686,12 +683,6 @@ def expect_fail(test_case_path, xfail): # type: (str) -> None ), ] -if platform == 'darwin': - tests_expected_to_fail.append(( - xfail_issue_58676, - "OnnxBackendNodeModelTest.test_div_uint8_cpu" - )) - for test_group in tests_expected_to_fail: for test_case in test_group[1:]: expect_fail(f"{test_case}", test_group[0]) From 57279938c0cbe4cd47bf4f757d167141425bfc25 Mon Sep 17 00:00:00 2001 From: Andrei Gorbachev Date: Thu, 12 Oct 2023 05:25:53 +0100 Subject: [PATCH 160/257] [GPU] Refactor AdaptivePooling, BatchNorm, BatchToSpace (#20357) * adaptive_pooling * batch_norm * batch_to_space --- .../single_layer_tests/adaptive_pooling.cpp | 43 +++-- .../single_layer_tests/batch_norm.cpp | 33 ++-- .../single_layer_tests/batch_to_space.cpp | 151 ++++++------------ 3 files changed, 86 insertions(+), 141 deletions(-) diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/adaptive_pooling.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/adaptive_pooling.cpp index 86e13507e63e14..e6026e85469871 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/adaptive_pooling.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/adaptive_pooling.cpp @@ -5,24 +5,21 @@ #include #include -#include "single_layer_tests/adaptive_pooling.hpp" +#include "single_op_tests/adaptive_pooling.hpp" #include "common_test_utils/test_constants.hpp" -using namespace ngraph::helpers; -using namespace LayerTestsDefinitions; -using namespace ngraph::element; - namespace { +using ov::test::AdaPoolLayerTest; const std::vector poolingModes = {"max", "avg"}; -const std::vector netPrecisions = { - InferenceEngine::Precision::FP32, - InferenceEngine::Precision::FP16, +const std::vector types = { + ov::element::f16, + ov::element::f32 }; -const std::vector> inputShapes1D = { - {1, 3, 5}, - {1, 1, 17}, +const std::vector> inputShapes1D = { + {{1, 3, 5}}, + {{1, 1, 17}}, }; const std::vector> outputShapes1D = { {2}, @@ -31,16 +28,16 @@ const std::vector> outputShapes1D = { INSTANTIATE_TEST_SUITE_P(smoke_AdaptivePooling1D, AdaPoolLayerTest, ::testing::Combine( - ::testing::ValuesIn(inputShapes1D), + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(inputShapes1D)), ::testing::ValuesIn(outputShapes1D), ::testing::ValuesIn(poolingModes), - ::testing::ValuesIn(netPrecisions), + ::testing::ValuesIn(types), ::testing::Values(ov::test::utils::DEVICE_GPU)), AdaPoolLayerTest::getTestCaseName); -const std::vector> inputShapes2D = { - {1, 3, 4, 6}, - {1, 1, 17, 5}, +const std::vector> inputShapes2D = { + {{1, 3, 4, 6}}, + {{1, 1, 17, 5}}, }; const std::vector> outputShapes2D = { {2, 4}, @@ -49,16 +46,16 @@ const std::vector> outputShapes2D = { 
INSTANTIATE_TEST_SUITE_P(smoke_AdaptivePooling2D, AdaPoolLayerTest, ::testing::Combine( - ::testing::ValuesIn(inputShapes2D), + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(inputShapes2D)), ::testing::ValuesIn(outputShapes2D), ::testing::ValuesIn(poolingModes), - ::testing::ValuesIn(netPrecisions), + ::testing::ValuesIn(types), ::testing::Values(ov::test::utils::DEVICE_GPU)), AdaPoolLayerTest::getTestCaseName); -const std::vector> inputShapes3D = { - {1, 1, 3, 3, 3}, - {1, 3, 5, 7, 11}, +const std::vector> inputShapes3D = { + {{1, 1, 3, 3, 3}}, + {{1, 3, 5, 7, 11}}, }; const std::vector> outputShapes3D = { {2, 2, 2}, @@ -67,10 +64,10 @@ const std::vector> outputShapes3D = { INSTANTIATE_TEST_SUITE_P(smoke_AdaptivePooling3D, AdaPoolLayerTest, ::testing::Combine( - ::testing::ValuesIn(inputShapes3D), + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(inputShapes3D)), ::testing::ValuesIn(outputShapes3D), ::testing::ValuesIn(poolingModes), - ::testing::ValuesIn(netPrecisions), + ::testing::ValuesIn(types), ::testing::Values(ov::test::utils::DEVICE_GPU)), AdaPoolLayerTest::getTestCaseName); diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/batch_norm.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/batch_norm.cpp index 0ef10601c3f1b8..da3cbe708b0628 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/batch_norm.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/batch_norm.cpp @@ -4,14 +4,13 @@ #include -#include "single_layer_tests/batch_norm.hpp" - -using namespace LayerTestsDefinitions; +#include "single_op_tests/batch_norm.hpp" namespace { -const std::vector netPrecisions = { - InferenceEngine::Precision::FP32, - InferenceEngine::Precision::FP16 +using ov::test::BatchNormLayerTest; +const std::vector types = { + ov::element::f16, + ov::element::f32 }; const std::vector epsilon = { @@ -19,23 +18,19 @@ const std::vector epsilon = { 1e-5, 1e-4 }; -const std::vector> inputShapes = { - {1, 3}, - {2, 5}, - {1, 3, 10}, - {1, 3, 1, 1}, - {2, 5, 4, 4}, -}; +const std::vector> inputShapes = { + {{1, 3}}, + {{2, 5}}, + {{1, 3, 10}}, + {{1, 3, 1, 1}}, + {{2, 5, 4, 4}}, +}; const auto batchNormParams = testing::Combine( testing::ValuesIn(epsilon), - testing::ValuesIn(netPrecisions), - testing::Values(InferenceEngine::Precision::UNSPECIFIED), - testing::Values(InferenceEngine::Precision::UNSPECIFIED), - testing::Values(InferenceEngine::Layout::ANY), - testing::Values(InferenceEngine::Layout::ANY), - testing::ValuesIn(inputShapes), + testing::ValuesIn(types), + testing::ValuesIn(ov::test::static_shapes_to_test_representation(inputShapes)), testing::Values(ov::test::utils::DEVICE_GPU) ); diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/batch_to_space.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/batch_to_space.cpp index b35117443cf47f..e79d3b55551797 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/batch_to_space.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/batch_to_space.cpp @@ -4,172 +4,125 @@ #include -#include "single_layer_tests/batch_to_space.hpp" +#include "single_op_tests/batch_to_space.hpp" #include "common_test_utils/test_constants.hpp" -using namespace LayerTestsDefinitions; - namespace { +using 
ov::test::BatchToSpaceLayerTest; +using ov::test::batchToSpaceParamsTuple; + auto bts_only_test_cases = []() { return std::vector{batchToSpaceParamsTuple({1, 2, 2}, {0, 0, 0}, {0, 0, 0}, - {4, 1, 1}, - InferenceEngine::Precision::FP32, - InferenceEngine::Precision::UNSPECIFIED, - InferenceEngine::Precision::UNSPECIFIED, - InferenceEngine::Layout::ANY, - InferenceEngine::Layout::ANY, + ov::test::static_shapes_to_test_representation(std::vector({ + {4, 1, 1}})), + ov::element::f32, ov::test::utils::DEVICE_GPU), batchToSpaceParamsTuple({1, 1, 2, 2}, {0, 0, 0, 0}, {0, 0, 0, 0}, - {4, 1, 1, 1}, - InferenceEngine::Precision::FP32, - InferenceEngine::Precision::UNSPECIFIED, - InferenceEngine::Precision::UNSPECIFIED, - InferenceEngine::Layout::ANY, - InferenceEngine::Layout::ANY, + ov::test::static_shapes_to_test_representation(std::vector({ + {4, 1, 1, 1}})), + ov::element::f32, ov::test::utils::DEVICE_GPU), batchToSpaceParamsTuple({1, 1, 2, 2}, {0, 0, 0, 0}, {0, 0, 0, 0}, - {4, 3, 1, 1}, - InferenceEngine::Precision::FP32, - InferenceEngine::Precision::UNSPECIFIED, - InferenceEngine::Precision::UNSPECIFIED, - InferenceEngine::Layout::ANY, - InferenceEngine::Layout::ANY, + ov::test::static_shapes_to_test_representation(std::vector({ + {4, 3, 1, 1}})), + ov::element::f32, ov::test::utils::DEVICE_GPU), batchToSpaceParamsTuple({1, 1, 2, 2}, {0, 0, 0, 0}, {0, 0, 0, 0}, - {4, 1, 2, 2}, - InferenceEngine::Precision::FP32, - InferenceEngine::Precision::UNSPECIFIED, - InferenceEngine::Precision::UNSPECIFIED, - InferenceEngine::Layout::ANY, - InferenceEngine::Layout::ANY, + ov::test::static_shapes_to_test_representation(std::vector({ + {4, 1, 2, 2}})), + ov::element::f32, ov::test::utils::DEVICE_GPU), batchToSpaceParamsTuple({1, 1, 2, 2}, {0, 0, 0, 0}, {0, 0, 0, 0}, - {8, 1, 1, 2}, - InferenceEngine::Precision::FP32, - InferenceEngine::Precision::UNSPECIFIED, - InferenceEngine::Precision::UNSPECIFIED, - InferenceEngine::Layout::ANY, - InferenceEngine::Layout::ANY, + ov::test::static_shapes_to_test_representation(std::vector({ + {8, 1, 1, 2}})), + ov::element::f32, ov::test::utils::DEVICE_GPU), batchToSpaceParamsTuple({1, 1, 3, 2, 2}, {0, 0, 1, 0, 3}, {0, 0, 2, 0, 0}, - {24, 1, 2, 1, 2}, - InferenceEngine::Precision::FP32, - InferenceEngine::Precision::UNSPECIFIED, - InferenceEngine::Precision::UNSPECIFIED, - InferenceEngine::Layout::ANY, - InferenceEngine::Layout::ANY, + ov::test::static_shapes_to_test_representation(std::vector({ + {24, 1, 2, 1, 2}})), + ov::element::f32, ov::test::utils::DEVICE_GPU), batchToSpaceParamsTuple({1, 1, 2, 2}, {0, 0, 0, 0}, {0, 0, 0, 0}, - {4, 1, 1, 1}, - InferenceEngine::Precision::I8, - InferenceEngine::Precision::UNSPECIFIED, - InferenceEngine::Precision::UNSPECIFIED, - InferenceEngine::Layout::ANY, - InferenceEngine::Layout::ANY, + ov::test::static_shapes_to_test_representation(std::vector({ + {4, 1, 1, 1}})), + ov::element::i8, ov::test::utils::DEVICE_GPU), batchToSpaceParamsTuple({1, 1, 2, 2}, {0, 0, 0, 0}, {0, 0, 0, 0}, - {4, 3, 1, 1}, - InferenceEngine::Precision::I8, - InferenceEngine::Precision::UNSPECIFIED, - InferenceEngine::Precision::UNSPECIFIED, - InferenceEngine::Layout::ANY, - InferenceEngine::Layout::ANY, + ov::test::static_shapes_to_test_representation(std::vector({ + {4, 3, 1, 1}})), + ov::element::i8, ov::test::utils::DEVICE_GPU), batchToSpaceParamsTuple({1, 1, 2, 2}, {0, 0, 0, 0}, {0, 0, 0, 0}, - {4, 1, 2, 2}, - InferenceEngine::Precision::I8, - InferenceEngine::Precision::UNSPECIFIED, - InferenceEngine::Precision::UNSPECIFIED, - 
InferenceEngine::Layout::ANY, - InferenceEngine::Layout::ANY, + ov::test::static_shapes_to_test_representation(std::vector({ + {4, 1, 2, 2}})), + ov::element::i8, ov::test::utils::DEVICE_GPU), batchToSpaceParamsTuple({1, 1, 2, 2}, {0, 0, 0, 0}, {0, 0, 0, 0}, - {8, 1, 1, 2}, - InferenceEngine::Precision::I8, - InferenceEngine::Precision::UNSPECIFIED, - InferenceEngine::Precision::UNSPECIFIED, - InferenceEngine::Layout::ANY, - InferenceEngine::Layout::ANY, + ov::test::static_shapes_to_test_representation(std::vector({ + {8, 1, 1, 2}})), + ov::element::i8, ov::test::utils::DEVICE_GPU), batchToSpaceParamsTuple({1, 1, 3, 2, 2}, {0, 0, 1, 0, 3}, {0, 0, 2, 0, 0}, - {24, 1, 2, 1, 2}, - InferenceEngine::Precision::I8, - InferenceEngine::Precision::UNSPECIFIED, - InferenceEngine::Precision::UNSPECIFIED, - InferenceEngine::Layout::ANY, - InferenceEngine::Layout::ANY, + ov::test::static_shapes_to_test_representation(std::vector({ + {24, 1, 2, 1, 2}})), + ov::element::i8, ov::test::utils::DEVICE_GPU), batchToSpaceParamsTuple({1, 1, 2, 2}, {0, 0, 0, 0}, {0, 0, 0, 0}, - {4, 1, 1, 1}, - InferenceEngine::Precision::U8, - InferenceEngine::Precision::UNSPECIFIED, - InferenceEngine::Precision::UNSPECIFIED, - InferenceEngine::Layout::ANY, - InferenceEngine::Layout::ANY, + ov::test::static_shapes_to_test_representation(std::vector({ + {4, 1, 1, 1}})), + ov::element::u8, ov::test::utils::DEVICE_GPU), batchToSpaceParamsTuple({1, 1, 2, 2}, {0, 0, 0, 0}, {0, 0, 0, 0}, - {4, 3, 1, 1}, - InferenceEngine::Precision::U8, - InferenceEngine::Precision::UNSPECIFIED, - InferenceEngine::Precision::UNSPECIFIED, - InferenceEngine::Layout::ANY, - InferenceEngine::Layout::ANY, + ov::test::static_shapes_to_test_representation(std::vector({ + {4, 3, 1, 1}})), + ov::element::u8, ov::test::utils::DEVICE_GPU), batchToSpaceParamsTuple({1, 1, 2, 2}, {0, 0, 0, 0}, {0, 0, 0, 0}, - {4, 1, 2, 2}, - InferenceEngine::Precision::U8, - InferenceEngine::Precision::UNSPECIFIED, - InferenceEngine::Precision::UNSPECIFIED, - InferenceEngine::Layout::ANY, - InferenceEngine::Layout::ANY, + ov::test::static_shapes_to_test_representation(std::vector({ + {4, 1, 2, 2}})), + ov::element::u8, ov::test::utils::DEVICE_GPU), batchToSpaceParamsTuple({1, 1, 2, 2}, {0, 0, 0, 0}, {0, 0, 0, 0}, - {8, 1, 1, 2}, - InferenceEngine::Precision::U8, - InferenceEngine::Precision::UNSPECIFIED, - InferenceEngine::Precision::UNSPECIFIED, - InferenceEngine::Layout::ANY, - InferenceEngine::Layout::ANY, + ov::test::static_shapes_to_test_representation(std::vector({ + {8, 1, 1, 2}})), + ov::element::u8, ov::test::utils::DEVICE_GPU), batchToSpaceParamsTuple({1, 1, 3, 2, 2}, {0, 0, 1, 0, 3}, {0, 0, 2, 0, 0}, - {24, 1, 2, 1, 2}, - InferenceEngine::Precision::U8, - InferenceEngine::Precision::UNSPECIFIED, - InferenceEngine::Precision::UNSPECIFIED, - InferenceEngine::Layout::ANY, - InferenceEngine::Layout::ANY, + ov::test::static_shapes_to_test_representation(std::vector({ + {24, 1, 2, 1, 2}})), + ov::element::u8, ov::test::utils::DEVICE_GPU)}; }; From 79703ff714c48d9049d2dfefea1bc8447f5f37dc Mon Sep 17 00:00:00 2001 From: Alexander Kozlov Date: Thu, 12 Oct 2023 10:02:44 +0400 Subject: [PATCH 161/257] Added a draft of Gen AI landing page (#20253) * Added a draft of Gen AI landing page * Update docs/articles_en/openvino_workflow/gen_ai.md Co-authored-by: Helena Kloosterman * Applied comments * Update
docs/articles_en/openvino_workflow/gen_ai.md Co-authored-by: Tatiana Savina * Update docs/articles_en/openvino_workflow/gen_ai.md Co-authored-by: Tatiana Savina * Update docs/articles_en/openvino_workflow/gen_ai.md Co-authored-by: Tatiana Savina * Update docs/articles_en/openvino_workflow/gen_ai.md Co-authored-by: Tatiana Savina * Update docs/articles_en/openvino_workflow/gen_ai.md Co-authored-by: Tatiana Savina * Update docs/articles_en/openvino_workflow/gen_ai.md Co-authored-by: Tatiana Savina * Update docs/articles_en/openvino_workflow/gen_ai.md Co-authored-by: Tatiana Savina * Update docs/articles_en/openvino_workflow/gen_ai.md Co-authored-by: Tatiana Savina * Changed the placement of Gen AI doc * Update docs/articles_en/openvino_workflow/gen_ai.md Co-authored-by: Nico Galoppo * Update docs/articles_en/openvino_workflow/gen_ai.md Co-authored-by: Nico Galoppo * Update docs/articles_en/openvino_workflow/gen_ai.md Co-authored-by: Nico Galoppo * Update docs/articles_en/openvino_workflow/gen_ai.md Co-authored-by: Nico Galoppo * Update docs/articles_en/openvino_workflow/gen_ai.md Co-authored-by: Nico Galoppo * Update docs/articles_en/openvino_workflow/gen_ai.md Co-authored-by: Nico Galoppo * Update docs/articles_en/openvino_workflow/gen_ai.md Co-authored-by: Nico Galoppo * Added examples with Optimum-Intel * Update docs/articles_en/openvino_workflow/gen_ai.md Co-authored-by: Tatiana Savina * Update docs/articles_en/openvino_workflow/gen_ai.md Co-authored-by: Tatiana Savina * Update docs/articles_en/openvino_workflow/gen_ai.md Co-authored-by: Tatiana Savina * Update docs/articles_en/openvino_workflow/gen_ai.md Co-authored-by: Tatiana Savina * Update docs/articles_en/openvino_workflow/gen_ai.md Co-authored-by: Tatiana Savina * Update docs/articles_en/openvino_workflow/gen_ai.md Co-authored-by: Tatiana Savina * Update docs/articles_en/openvino_workflow/gen_ai.md Co-authored-by: Tatiana Savina * Update docs/articles_en/openvino_workflow/gen_ai.md Co-authored-by: Tatiana Savina * Update docs/articles_en/openvino_workflow/gen_ai.md Co-authored-by: Tatiana Savina * Update docs/articles_en/openvino_workflow/gen_ai.md Co-authored-by: Tatiana Savina * Update docs/articles_en/openvino_workflow/gen_ai.md Co-authored-by: Tatiana Savina * Update docs/articles_en/openvino_workflow/gen_ai.md Co-authored-by: Tatiana Savina * Update docs/articles_en/openvino_workflow/gen_ai.md Co-authored-by: Tatiana Savina * Update docs/articles_en/openvino_workflow/gen_ai.md Co-authored-by: Tatiana Savina --------- Co-authored-by: Helena Kloosterman Co-authored-by: Tatiana Savina Co-authored-by: Nico Galoppo --- docs/articles_en/learn_openvino.md | 1 + docs/articles_en/openvino_workflow/gen_ai.md | 171 +++++++++++++++++++ docs/home.rst | 7 + 3 files changed, 179 insertions(+) create mode 100644 docs/articles_en/openvino_workflow/gen_ai.md diff --git a/docs/articles_en/learn_openvino.md b/docs/articles_en/learn_openvino.md index 2eb6700a5a1975..cb3a6c75b10a3b 100644 --- a/docs/articles_en/learn_openvino.md +++ b/docs/articles_en/learn_openvino.md @@ -14,6 +14,7 @@ Interactive Tutorials (Python) Sample Applications (Python & C++) + Generative AI Optimization and Deployment This section will help you get a hands-on experience with OpenVINO even if you are just starting diff --git a/docs/articles_en/openvino_workflow/gen_ai.md b/docs/articles_en/openvino_workflow/gen_ai.md new file mode 100644 index 00000000000000..4ecb55fcc2427c --- /dev/null +++ b/docs/articles_en/openvino_workflow/gen_ai.md @@ -0,0 +1,171 @@ +# Optimize and 
Deploy Generative AI Models {#gen_ai_guide} + +@sphinxdirective + +Generative AI is an innovative technique that creates new data, such as text, images, video, or audio, using neural networks. OpenVINO accelerates Generative AI use cases as they mostly rely on model inference, allowing for faster development and better performance. When it comes to generative models, OpenVINO supports: + +* Conversion, optimization and inference for text, image and audio generative models, for example, Llama 2, MPT, OPT, Stable Diffusion, Stable Diffusion XL, etc. +* Int8 weight compression for text generation models. +* Storage format reduction (fp16 precision for non-compressed models and int8 for compressed models). +* Inference on CPU and GPU platforms, including integrated Intel® Processor Graphics, discrete Intel® Arc™ A-Series Graphics, and discrete Intel® Data Center GPU Flex Series. + + +OpenVINO offers two main paths for Generative AI use cases: + +* Using OpenVINO as a backend for Hugging Face frameworks (transformers, diffusers) through the `Optimum Intel `__ extension. +* Using OpenVINO native APIs (Python and C++) with custom pipeline code. + + +In both cases, OpenVINO runtime and tools are used, the difference is mostly in the preferred API and the final solution's footprint. Native APIs enable the use of generative models in C++ applications, ensure minimal runtime dependencies, and minimize application footprint. The Native APIs approach requires the implementation of glue code (generation loop, text tokenization, or scheduler functions), which is hidden within Hugging Face libraries for a better developer experience. + +It is recommended to start with Hugging Face frameworks. Experiment with different models and scenarios to find your fit, and then consider converting to OpenVINO native APIs based on your specific requirements. + +Optimum Intel provides interfaces that enable model optimization (weight compression) using `Neural Network Compression Framework (NNCF) `__, and export models to the OpenVINO model format for use in native API applications. + +The table below summarizes the differences between Hugging Face and Native APIs approaches. + +.. list-table:: + :widths: 20 25 55 + :header-rows: 1 + + * - + - Hugging Face through OpenVINO + - OpenVINO Native API + * - Model support + - Broad set of Models + - Broad set of Models + * - APIs + - Python (Hugging Face API) + - Python, C++ (OpenVINO API) + * - Model Format + - Source Framework / OpenVINO + - OpenVINO + * - Inference code + - Hugging Face based + - Custom inference pipelines + * - Additional dependencies + - Many Hugging Face dependencies + - Lightweight (e.g. numpy, etc.) + * - Application footprint + - Large + - Small + * - Pre/post-processing and glue code + - Available at Hugging Face out-of-the-box + - OpenVINO samples and notebooks + * - Performance + - Good + - Best + + +Running Generative AI Models using Hugging Face Optimum Intel +############################################################## + +Prerequisites ++++++++++++++++++++++++++++ + +* Create a Python environment. +* Install Optimum Intel: + +.. code-block:: console + + pip install optimum[openvino,nncf] + + +To start using OpenVINO as a backend for Hugging Face, change the original Hugging Face code in two places: + +.. 
code-block:: diff + + -from transformers import AutoModelForCausalLM + +from optimum.intel import OVModelForCausalLM + + model_id = "meta-llama/Llama-2-7b-chat-hf" + -model = AutoModelForCausalLM.from_pretrained(model_id) + +model = OVModelForCausalLM.from_pretrained(model_id, export=True) + + +After that, you can call ``save_pretrained()`` method to save model to the folder in the OpenVINO Intermediate Representation and use it further. + +.. code-block:: python + + model.save_pretrained(model_dir) + + +Alternatively, you can download and convert the model using CLI interface: ``optimum-cli export openvino --model meta-llama/Llama-2-7b-chat-hf llama_openvino``. +In this case, you can load the converted model in OpenVINO representation directly from the disk: + +.. code-block:: python + + model_id = "llama_openvino" + model = OVModelForCausalLM.from_pretrained(model_id) + + +By default, inference will run on CPU. To select a different inference device, for example, GPU, add ``device="GPU"`` to the ``from_pretrained()`` call. To switch to a different device after the model has been loaded, use the ``.to()`` method. The device naming convention is the same as in OpenVINO native API: + +.. code-block:: python + + model.to("GPU") + + +Optimum-Intel API also provides out-of-the-box model optimization through weight compression using NNCF which substantially reduces the model footprint and inference latency: + +.. code-block:: python + + model = OVModelForCausalLM.from_pretrained(model_id, export=True, load_in_8bit=True) + + +Weight compression is applied by default to models larger than one billion parameters and is also available for CLI interface as the ``--int8`` option. + +Below are some examples of using Optimum-Intel for model conversion and inference: + +* `Stable Diffusion v2.1 using Optimum-Intel OpenVINO `__ +* `Image generation with Stable Diffusion XL and OpenVINO `__ +* `Instruction following using Databricks Dolly 2.0 and OpenVINO `__ +* `Create an LLM-powered Chatbot using OpenVINO `__ + +Working with Models Tuned with LoRA +++++++++++++++++++++++++++++++++++++ + +Low-rank Adaptation (LoRA) is a popular method to tune Generative AI models to a downstream task or custom data. However, it requires some extra steps to be done for efficient deployment using the Hugging Face API. Namely, the trained adapters should be fused into the baseline model to avoid extra computation. This is how it can be done for Large Language Models (LLMs): + +.. code-block:: python + + model_id = "meta-llama/Llama-2-7b-chat-hf" + lora_adaptor = "./lora_adaptor" + + model = AutoModelForCausalLM.from_pretrained(model_id, use_cache=True) + model = PeftModelForCausalLM.from_pretrained(model, lora_adaptor) + model.merge_and_unload() + model.get_base_model().save_pretrained("fused_lora_model") + + +Now the model can be converted to OpenVINO using Optimum Intel Python API or CLI interfaces mentioned above. + +Running Generative AI Models using Native OpenVINO APIs +######################################################## + +To run Generative AI models using native OpenVINO APIs you need to follow regular **Сonvert -> Optimize -> Deploy** path with a few simplifications. + +To convert model from Hugging Face you can use Optimum-Intel export feature that allows to export model in OpenVINO format without invoking conversion API and tools directly, as it is shown above. In this case, the conversion process is a bit more simplified. 
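As an illustration of this export-then-deploy flow, the sketch below loads a model exported by Optimum Intel with the plain OpenVINO Python API. It is only a minimal sketch: the ``llama_openvino`` directory and the ``openvino_model.xml`` file name assumed for the export output are placeholders for the example, not files provided by this guide.

```python
# Minimal sketch (illustrative only): load an Optimum Intel export with the
# native OpenVINO Python API. The directory and file names are assumptions.
from openvino.runtime import Core

core = Core()
model = core.read_model("llama_openvino/openvino_model.xml")
compiled_model = core.compile_model(model, "CPU")  # any supported device string
print(compiled_model.inputs)
```

From this point on, tokenization, the generation loop, and de-tokenization have to be provided by the application, which is exactly the glue code discussed in the native-API part of this guide.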
You can still use a regular conversion path if model comes from outside of Hugging Face ecosystem, i.e., in source framework format (PyTorch, etc.) + +Model optimization can be performed within Hugging Face or directly using NNCF as described in the :doc:`weight compression guide `. + +Inference code that uses native API cannot benefit from Hugging Face pipelines. You need to write your custom code or take it from the available examples. Below are some examples of popular Generative AI scenarios: + +* In case of LLMs for text generation, you need to handle tokenization, inference and token selection loop, and de-tokenization. If token selection involves beam search, it also needs to be written. +* For image generation models, you need to make a pipeline that includes several model inferences: inference for source (e.g., text) encoder models, inference loop for diffusion process and inference for decoding part. Scheduler code is also required. + +To write such pipelines, you can follow the examples provided as part of OpenVINO: + +* `llama2.openvino `__ +* `LLM optimization by custom operation embedding for OpenVINO `__ +* `C++ Implementation of Stable Diffusion `__ + + +Additional Resources +############################ + +* `Optimum Intel documentation `_ +* :doc:`LLM Weight Compression ` +* `Neural Network Compression Framework `_ + +@endsphinxdirective diff --git a/docs/home.rst b/docs/home.rst index 4ed32d3aea261b..dc3f721856c5bf 100644 --- a/docs/home.rst +++ b/docs/home.rst @@ -96,6 +96,13 @@ OpenVINO 2023.0 Optimize generation of the graph model with PyTorch 2.0 torch.compile() backend + .. grid-item-card:: Generative AI optimization and deployment + :link: gen_ai_guide + :link-alt: gen ai + :link-type: doc + + Generative AI optimization and deployment + Feature Overview ############################## From 2a3d5b9d14f7ac5aa927d2b4a95419508f3c1801 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Thu, 12 Oct 2023 11:34:01 +0400 Subject: [PATCH 162/257] Disable OMP threading on macOS (#20405) --- cmake/features.cmake | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/cmake/features.cmake b/cmake/features.cmake index 48c50bc3f3ea52..2e3ef5d4aa82b1 100644 --- a/cmake/features.cmake +++ b/cmake/features.cmake @@ -80,14 +80,16 @@ else() set(THREADING_DEFAULT "TBB") endif() +set(THREADING_OPTIONS "TBB" "TBB_AUTO" "SEQ") +if(NOT APPLE) + list(APPEND THREADING_OPTIONS "OMP") +endif() + set(THREADING "${THREADING_DEFAULT}" CACHE STRING "Threading") -set_property(CACHE THREADING PROPERTY STRINGS "TBB" "TBB_AUTO" "OMP" "SEQ") +set_property(CACHE THREADING PROPERTY STRINGS ${THREADING_OPTIONS}) list (APPEND OV_OPTIONS THREADING) -if (NOT THREADING STREQUAL "TBB" AND - NOT THREADING STREQUAL "TBB_AUTO" AND - NOT THREADING STREQUAL "OMP" AND - NOT THREADING STREQUAL "SEQ") - message(FATAL_ERROR "THREADING should be set to TBB (default), TBB_AUTO, OMP or SEQ") +if(NOT THREADING IN_LIST THREADING_OPTIONS) + message(FATAL_ERROR "THREADING should be set to either ${THREADING_OPTIONS}") endif() if((THREADING STREQUAL "TBB" OR THREADING STREQUAL "TBB_AUTO") AND From ddaf8e80f9740bedc734b493f0c219dd25fd9423 Mon Sep 17 00:00:00 2001 From: Sebastian Golebiewski Date: Thu, 12 Oct 2023 10:54:28 +0200 Subject: [PATCH 163/257] Update 247-code-language-id-with-output.rst (#20412) --- docs/notebooks/247-code-language-id-with-output.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/notebooks/247-code-language-id-with-output.rst 
b/docs/notebooks/247-code-language-id-with-output.rst index 61eab5b121da3b..2d0c9d3019b418 100644 --- a/docs/notebooks/247-code-language-id-with-output.rst +++ b/docs/notebooks/247-code-language-id-with-output.rst @@ -69,7 +69,7 @@ will allow to automatically convert models to the OpenVINO™ IR format. Install prerequisites ~~~~~~~~~~~~~~~~~~~~~ -First, complete the :doc:`repository installation steps `. +First, complete the `repository installation steps <../notebooks_installation.html>`__. Then, the following cell will install: - HuggingFace Optimum with OpenVINO support - HuggingFace Evaluate to benchmark results From 4da61fc7e7eef6790b3a6890503bdae37c76c6ad Mon Sep 17 00:00:00 2001 From: Vladimir Paramuzov Date: Thu, 12 Oct 2023 13:15:59 +0400 Subject: [PATCH 164/257] [TESTS] Fix random generator for custom signed types (#20393) * [TESTS] Fix random generator for custom signed types * Increase threashold for bf16 CPU tests --- .../tests/functional/single_layer_tests/normalize.cpp | 4 ++++ .../include/common_test_utils/data_utils.hpp | 3 ++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/normalize.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/normalize.cpp index 6e7277ce2e343f..c0e1427e32f476 100755 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/normalize.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/normalize.cpp @@ -80,6 +80,10 @@ class NormalizeL2LayerCPUTest : public testing::WithParamInterface& targetInputStaticShapes) override { diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/data_utils.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/data_utils.hpp index eacad438e30dfb..55994c7f6b90ae 100644 --- a/src/tests/test_utils/common_test_utils/include/common_test_utils/data_utils.hpp +++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/data_utils.hpp @@ -5,6 +5,7 @@ #pragma once #include +#include #include #include @@ -223,7 +224,7 @@ void inline fill_data_random(T* pointer, const uint32_t k_range = k * range; // range with respect to k random.Generate(k_range); - if (start_from < 0 && !std::is_signed::value) { + if (start_from < 0 && !std::numeric_limits::is_signed) { start_from = 0; } for (std::size_t i = 0; i < size; i++) { From 2604c33d1be1020c3af6f83293c6fe1ed9f64dce Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Thu, 12 Oct 2023 13:23:18 +0400 Subject: [PATCH 165/257] Don't add mxnet extras for openvino-dev on macOS arm64 (#20403) --- .github/workflows/linux.yml | 2 +- .github/workflows/mac.yml | 2 +- .github/workflows/windows.yml | 2 +- tools/openvino_dev/CMakeLists.txt | 5 ++++- 4 files changed, 7 insertions(+), 4 deletions(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index a3c7e9a4e1c250..bf602387ae8c6d 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -885,7 +885,7 @@ jobs: # Find and install OV dev wheel pushd ${INSTALL_DIR}/tools ov_dev_wheel_name=$(find . 
-name 'openvino_dev*.whl') - python3 -m pip install $ov_dev_wheel_name[mxnet,caffe,kaldi,onnx,tensorflow2] + python3 -m pip install $ov_dev_wheel_name[mxnet,caffe,kaldi,onnx,tensorflow2,pytorch] popd # diff --git a/.github/workflows/mac.yml b/.github/workflows/mac.yml index 5097a6bb006b87..1c21bd87e0a7ae 100644 --- a/.github/workflows/mac.yml +++ b/.github/workflows/mac.yml @@ -482,7 +482,7 @@ jobs: # Find and install OV dev wheel pushd ${{ env.INSTALL_DIR }}/tools ov_dev_wheel_name=$(find . -name 'openvino_dev*.whl') - python3 -m pip install $ov_dev_wheel_name[mxnet,caffe,kaldi,onnx,tensorflow2] + python3 -m pip install $ov_dev_wheel_name[mxnet,caffe,kaldi,onnx,tensorflow2,pytorch] popd - name: nGraph and IE Python Bindings Tests diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 92e3f491aca928..493a1e47ba6e0a 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -303,7 +303,7 @@ jobs: # Find and install the dev OV wheel $ovDevWheelPath=Get-ChildItem -Path "${{ env.INSTALL_DIR }}\tools" -Filter openvino_dev*.whl | % { $_.FullName } - python3 -m pip install "$ovDevWheelPath[mxnet,caffe,kaldi,onnx,tensorflow2]" + python3 -m pip install "$ovDevWheelPath[mxnet,caffe,kaldi,onnx,tensorflow2,pytorch]" - name: Python API 1.0 Tests shell: cmd diff --git a/tools/openvino_dev/CMakeLists.txt b/tools/openvino_dev/CMakeLists.txt index 494ac86c725acf..00223262494ca2 100644 --- a/tools/openvino_dev/CMakeLists.txt +++ b/tools/openvino_dev/CMakeLists.txt @@ -36,7 +36,10 @@ set(WHEEL_BUILD "${OpenVINO_VERSION_BUILD}" CACHE STRING "Build number of this r ov_cpack_add_component(${OV_CPACK_COMP_OPENVINO_DEV_REQ_FILES} HIDDEN) set(REQUIREMENTS_IN "${CMAKE_CURRENT_SOURCE_DIR}/requirements_dev.txt.in") -set(EXTRAS_LIST _ caffe kaldi mxnet onnx pytorch tensorflow tensorflow2) +set(EXTRAS_LIST _ caffe kaldi onnx pytorch tensorflow tensorflow2) +if(NOT (APPLE AND AARCH64)) + list(APPEND EXTRAS_LIST mxnet) +endif() foreach(EXTRAS IN LISTS EXTRAS_LIST) if(EXTRAS STREQUAL "_") From 3455580780bdfcd74108d4f5e23889e924c8e584 Mon Sep 17 00:00:00 2001 From: Maxim Vafin Date: Thu, 12 Oct 2023 11:28:03 +0200 Subject: [PATCH 166/257] [PT FE] Fix pad default value (#20401) --- .../pytorch/src/transforms/prim_list_construct_pad.cpp | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/frontends/pytorch/src/transforms/prim_list_construct_pad.cpp b/src/frontends/pytorch/src/transforms/prim_list_construct_pad.cpp index 2147ddc17d9c84..74b4e8cc03bb4e 100644 --- a/src/frontends/pytorch/src/transforms/prim_list_construct_pad.cpp +++ b/src/frontends/pytorch/src/transforms/prim_list_construct_pad.cpp @@ -71,13 +71,19 @@ PrimListConstructPadReplacer::PrimListConstructPadReplacer() { input_node = pad_op->input_value(0); padding = pad_op->input_value(1); auto mode_node = pad_op->input_value(2).get_node_shared_ptr(); - pad_value = pad_op->input_value(3); if (const auto& fw_node_mode = cast_fw_node(mode_node, "prim::Constant")) { const auto& attrs = fw_node_mode->get_attrs(); if (attrs.find("string_value") != attrs.end()) { mode = attrs.at("string_value"); } } + pad_value = pad_op->input_value(3); + if (const auto& fw_node_mode = cast_fw_node(pad_value.get_node_shared_ptr(), "prim::Constant")) { + const auto& attrs = fw_node_mode->get_attrs(); + if (attrs.find("none_value") != attrs.end()) { + pad_value = v0::Constant::create(element::f32, Shape{}, {0}); + } + } } else if ((pad_op = cast_fw_node(m.get_match_root(), "aten::reflection_pad2d"))) { mode = "reflect"; 
input_node = pad_op->input_value(0); From 83e80a4ddf77d814a98fdaf246789deb361eef43 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Thu, 12 Oct 2023 14:00:31 +0400 Subject: [PATCH 167/257] Removed macOS post-commit pipeline (#20415) --- .ci/azure/mac.yml | 229 ---------------------------------------------- 1 file changed, 229 deletions(-) delete mode 100644 .ci/azure/mac.yml diff --git a/.ci/azure/mac.yml b/.ci/azure/mac.yml deleted file mode 100644 index f1a20fb49b3055..00000000000000 --- a/.ci/azure/mac.yml +++ /dev/null @@ -1,229 +0,0 @@ -trigger: - branches: - include: - - master - - 'releases/*' - paths: - exclude: - - '*/docs/*' - - 'docs/*' - - '*/*.md' - - '*.md' - - '*/layer_tests_summary/*' - - '*/conformance/*' - - 'tests/layer_tests/*' - -pr: - drafts: 'false' - branches: - include: - - 'master' - - 'releases/*' - paths: - exclude: - - '*/docs/*' - - 'docs/*' - - '*/*.md' - - '*.md' - - '*/layer_tests_summary/*' - - '*/conformance/*' - - 'tests/layer_tests/*' - -resources: - repositories: - - repository: openvino_contrib - type: github - endpoint: openvinotoolkit - name: openvinotoolkit/openvino_contrib - ref: master - -variables: - - group: github - -jobs: -- job: Mac - # About 250% of total time (perfomace of Mac hosts is unstable, 360 is max) - timeoutInMinutes: '360' - - pool: - vmImage: 'macOS-11' - - variables: - system.debug: true - VSTS_HTTP_RETRY: 5 - VSTS_HTTP_TIMEOUT: 200 - BUILD_TYPE: Release - REPO_DIR: $(Build.Repository.LocalPath) - OPENVINO_CONTRIB_REPO_DIR: $(REPO_DIR)/../openvino_contrib - WORK_DIR: $(Pipeline.Workspace)/_w - BUILD_DIR: $(WORK_DIR)/build - INSTALL_DIR: $(WORK_DIR)/install_pkg - INSTALL_TEST_DIR: $(INSTALL_DIR)/tests - SETUPVARS: . $(INSTALL_DIR)/setupvars.sh - TMP_DIR: /tmp - CCACHE_DIR: $(WORK_DIR)/ccache/mac - - steps: - - task: UsePythonVersion@0 - inputs: - versionSpec: '3.11.2' - addToPath: true - architecture: 'x64' - githubToken: $(auth_token) - displayName: Setup Python 3.11 - name: setupPython - - - script: | - whoami - uname -a - echo Python3 info ; which python3 ; python3 --version - echo Python info ; which python ; python --version - echo Java info ; which java ; java -version - echo gcc info ; which gcc ; gcc --version - echo cmake info ; which cmake ; cmake --version - xcrun --sdk macosx --show-sdk-version - env - sysctl -a - displayName: 'System info' - - - script: | - set -e - rm -rf $(WORK_DIR) ; mkdir $(WORK_DIR) - rm -rf $(BUILD_DIR) ; mkdir $(BUILD_DIR) - displayName: 'Make dir' - - - checkout: self - clean: 'true' - submodules: 'true' - path: openvino - - - checkout: openvino_contrib - clean: 'true' - submodules: 'true' - path: openvino_contrib - - - script: | - set -e - brew install cython automake - python3 -m pip install -r $(REPO_DIR)/src/frontends/onnx/tests/requirements.txt - # Speed up build - brew install ninja ccache - displayName: 'Install dependencies' - - - script: | - export PATH="/usr/local/opt/cython/bin:$PATH" - cmake \ - -G Ninja \ - -DENABLE_CPPLINT=OFF \ - -DCMAKE_VERBOSE_MAKEFILE=ON \ - -DCMAKE_BUILD_TYPE=$(BUILD_TYPE) \ - -DCMAKE_COMPILE_WARNING_AS_ERROR=ON \ - -DENABLE_PYTHON=ON \ - -DENABLE_STRICT_DEPENDENCIES=OFF \ - -DOPENVINO_EXTRA_MODULES=$(OPENVINO_CONTRIB_REPO_DIR)/modules \ - -DCMAKE_CXX_COMPILER_LAUNCHER=ccache \ - -DCMAKE_C_COMPILER_LAUNCHER=ccache \ - -DBUILD_nvidia_plugin=OFF \ - -S $(REPO_DIR) \ - -B $(BUILD_DIR) - displayName: 'CMake OpenVINO' - - - script: ls -alR $(REPO_DIR)/temp/ - displayName: 'List temp SDKs' - - - task: Cache@2 - inputs: - key: 'ccache | "$(Agent.OS)"' - path: 
$(CCACHE_DIR) - restoreKeys: | - ccache | "$(Agent.OS)" - displayName: Cache - enabled: 'false' - - - script: ccache --zero-stats --max-size=10G --show-config - displayName: 'Clean ccache stats' - - - script: cmake --build $(BUILD_DIR) --parallel --config $(BUILD_TYPE) - env: - CCACHE_DIR: $(CCACHE_DIR) - CCACHE_TEMPDIR: $(TMP_DIR)/ccache - CCACHE_BASEDIR: $(Pipeline.Workspace) - CCACHE_MAXSIZE: 10G - displayName: 'Build Mac' - - - script: ccache --show-stats - displayName: 'Show ccache stats' - - - script: ls -alR $(REPO_DIR)/bin/ - displayName: 'List bin files' - - - script: cmake -DCMAKE_INSTALL_PREFIX=$(INSTALL_DIR) -P $(BUILD_DIR)/cmake_install.cmake - displayName: 'Install' - - - script: ls -alR $(INSTALL_DIR) - displayName: 'List install files' - - - script: cmake -DCMAKE_INSTALL_PREFIX=$(INSTALL_DIR) -DCOMPONENT=tests -P $(BUILD_DIR)/cmake_install.cmake - displayName: 'Install tests' - - - script: ls -alR $(INSTALL_DIR) - displayName: 'List install files' - - - script: $(SETUPVARS) && $(INSTALL_TEST_DIR)/ov_core_unit_tests --gtest_print_time=1 --gtest_filter=-*IE_GPU* --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-OVCoreUT.xml - displayName: 'OV Core UT' - enabled: 'false' - - - script: $(SETUPVARS) && $(INSTALL_TEST_DIR)/ov_proxy_plugin_tests --gtest_print_time=1 --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-OVProxyTests.xml - displayName: 'OV Proxy Plugin Tests' - enabled: 'false' - - - script: $(SETUPVARS) && $(INSTALL_TEST_DIR)/ov_hetero_unit_tests --gtest_print_time=1 --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-OVHeteroUnitTests.xml - displayName: 'OV Hetero Unit Tests' - enabled: 'false' - - - script: $(SETUPVARS) && $(INSTALL_TEST_DIR)/ov_hetero_func_tests --gtest_print_time=1 --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-OVHeteroFuncTests.xml - displayName: 'OV Hetero Func Tests' - enabled: 'false' - - - script: $(SETUPVARS) && $(INSTALL_TEST_DIR)/ov_ir_frontend_tests --gtest_print_time=1 --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-IRFrontend.xml - displayName: 'IR Frontend Tests' - enabled: 'false' - - - script: $(SETUPVARS) && $(INSTALL_TEST_DIR)/ov_onnx_frontend_tests --gtest_print_time=1 --gtest_filter=-*IE_GPU*--gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-ONNXFrontend.xml - displayName: 'ONNX Frontend Tests' - enabled: 'false' - - - script: $(SETUPVARS) && $(INSTALL_TEST_DIR)/ov_cpu_unit_tests --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-ov_cpu_unit_tests.xml - displayName: 'Intel CPU Unit Tests' - enabled: 'false' - - - script: $(SETUPVARS) && $(INSTALL_TEST_DIR)/ov_auto_unit_tests --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-ov_auto_unit_tests.xml - displayName: 'AUTO UT' - enabled: 'false' - - - script: $(SETUPVARS) && $(INSTALL_TEST_DIR)/ov_cpu_func_tests --gtest_filter=*smoke* --gtest_print_time=1 --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-ov_cpu_func_tests.xml - displayName: 'CPU FuncTests' - enabled: 'false' - - - script: | - $(SETUPVARS) && $(INSTALL_TEST_DIR)/InferenceEngineCAPITests --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-InferenceEngineCAPITests.xml - displayName: 'IE CAPITests' - enabled: 'false' - - - script: | - $(SETUPVARS) && $(INSTALL_TEST_DIR)/ov_capi_test --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-ov_capi_test.xml - displayName: 'IE CAPITests' - enabled: 'false' - - - task: PublishTestResults@2 - condition: always() - inputs: - testResultsFormat: 'JUnit' # Options: JUnit, NUnit, VSTest, xUnit, cTest - testResultsFiles: '**/TEST-*.xml' - #searchFolder: '$(BUILD_DIR)' - mergeTestResults: false # Optional - #failTaskOnFailedTests: false # Optional - 
#testRunTitle: 'Pre/Post-Commit' # Optional - buildPlatform: 'x64' # Optional - buildConfiguration: 'Mac' # Optional - #publishRunAttachments: true # Optional From 23acd5a3512e94825c47d22368473bd8fa0d0ad7 Mon Sep 17 00:00:00 2001 From: Tatiana Savina Date: Thu, 12 Oct 2023 12:32:32 +0200 Subject: [PATCH 168/257] [DOCS] Pypi pages description change (#20352) * pypi pages change * add description * change command order * Update docs/install_guides/pypi-openvino-rt.md Co-authored-by: Ilya Lavrenov * change column name --------- Co-authored-by: Ilya Lavrenov --- .../installing-openvino-overview.md | 2 +- docs/install_guides/pypi-openvino-dev.md | 4 ++-- docs/install_guides/pypi-openvino-rt.md | 23 +++++++++++++------ 3 files changed, 19 insertions(+), 10 deletions(-) diff --git a/docs/articles_en/get started/installing-openvino-overview.md b/docs/articles_en/get started/installing-openvino-overview.md index c703ee386157f8..f0c6e61981635f 100644 --- a/docs/articles_en/get started/installing-openvino-overview.md +++ b/docs/articles_en/get started/installing-openvino-overview.md @@ -30,7 +30,7 @@ all necessary components. The OpenVINO Development Tools is still available for older versions of OpenVINO, - as well as the current one, from the GitHub repository. + as well as the current one, from the GitHub repository and PyPI. :doc:`Learn more `. .. tip:: diff --git a/docs/install_guides/pypi-openvino-dev.md b/docs/install_guides/pypi-openvino-dev.md index b7c4d4d397a242..8d53e6488f1602 100644 --- a/docs/install_guides/pypi-openvino-dev.md +++ b/docs/install_guides/pypi-openvino-dev.md @@ -3,6 +3,7 @@ > **NOTE**: This version is pre-release software and has not undergone full release validation or qualification. No support is offered on pre-release software and APIs/behavior are subject to change. It should NOT be incorporated into any production software/solution and instead should be used only for early testing and integration while awaiting a final release version of this software. + Intel® Distribution of OpenVINO™ toolkit is an open-source toolkit for optimizing and deploying AI inference. It can be used to develop applications and solutions based on deep learning tasks, such as: emulation of human vision, automatic speech recognition, natural language processing, recommendation systems, etc. It provides high-performance and rich deployment options, from edge to cloud. OpenVINO™ Development Tools enables you to download models from Open Model Zoo, convert your own models to OpenVINO IR, as well as optimize and tune pre-trained deep learning models. See [What's in the Package](#whats-in-the-package) for more information. @@ -119,8 +120,7 @@ For example, to install and configure the components for working with TensorFlow | Component | Console Script | Description | |------------------|---------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| [Legacy Model conversion API](https://docs.openvino.ai/nightly/openvino_docs_MO_DG_Deep_Learning_Model_Optimizer_DevGuide.html) | `mo` |**Model conversion API** imports, converts, and optimizes models that were trained in popular frameworks to a format usable by OpenVINO components.
Supported frameworks include Caffe\*, TensorFlow\*, MXNet\*, PaddlePaddle\*, and ONNX\*. | -| [Benchmark Tool](https://docs.openvino.ai/nightly/openvino_inference_engine_tools_benchmark_tool_README.html)| `benchmark_app` | **Benchmark Application** allows you to estimate deep learning inference performance on supported devices for synchronous and asynchronous modes. | +| [Legacy Model conversion API](https://docs.openvino.ai/nightly/openvino_docs_MO_DG_Deep_Learning_Model_Optimizer_DevGuide.html) | `mo` |**Model conversion API** imports, converts, and optimizes models that were trained in popular frameworks to a format usable by OpenVINO components.
Supported frameworks include Caffe\*, TensorFlow\*, MXNet\*, PaddlePaddle\*, and ONNX\*. | | | [Accuracy Checker](https://docs.openvino.ai/nightly/omz_tools_accuracy_checker.html) and
[Annotation Converter](https://docs.openvino.ai/nightly/omz_tools_accuracy_checker_annotation_converters.html) | `accuracy_check` <br> `convert_annotation` |**Accuracy Checker** is a deep learning accuracy validation tool that allows you to collect accuracy metrics against popular datasets. The main advantages of the tool are the flexibility of configuration and a set of supported datasets, preprocessing, postprocessing, and metrics.
**Annotation Converter** is a utility that prepares datasets for evaluation with Accuracy Checker. | | [Post-Training Optimization Tool](https://docs.openvino.ai/nightly/pot_introduction.html)| `pot` |**Post-Training Optimization Tool** allows you to optimize trained models with advanced capabilities, such as quantization and low-precision optimizations, without the need to retrain or fine-tune models. | | [Model Downloader and other Open Model Zoo tools](https://docs.openvino.ai/nightly/omz_tools_downloader.html)| `omz_downloader`
`omz_converter` <br> `omz_quantizer` <br> `omz_info_dumper`| **Model Downloader** is a tool for getting access to the collection of high-quality and extremely fast pre-trained deep learning [public](@ref omz_models_group_public) and [Intel](@ref omz_models_group_intel)-trained models. These free pre-trained models can be used to speed up the development and production deployment process without training your own models. The tool downloads model files from online sources and, if necessary, patches them to make them more usable with model conversion API. A number of additional tools are also provided to automate the process of working with downloaded models: <br> **Model Converter** is a tool for converting Open Model Zoo models that are stored in an original deep learning framework format into the OpenVINO Intermediate Representation (IR) using model conversion API. <br> **Model Quantizer** is a tool for automatic quantization of full-precision models in the IR format into low-precision versions using the Post-Training Optimization Tool.
**Model Information Dumper** is a helper utility for dumping information about the models to a stable, machine-readable format. | diff --git a/docs/install_guides/pypi-openvino-rt.md b/docs/install_guides/pypi-openvino-rt.md index 157f6959122d45..c5d8dc0de156b6 100644 --- a/docs/install_guides/pypi-openvino-rt.md +++ b/docs/install_guides/pypi-openvino-rt.md @@ -1,11 +1,11 @@ -# OpenVINO™ Runtime +# OpenVINO™ > **NOTE**: This version is pre-release software and has not undergone full release validation or qualification. No support is offered on pre-release software and APIs/behavior are subject to change. It should NOT be incorporated into any production software/solution and instead should be used only for early testing and integration while awaiting a final release version of this software. Intel® Distribution of OpenVINO™ toolkit is an open-source toolkit for optimizing and deploying AI inference. It can be used to develop applications and solutions based on deep learning tasks, such as: emulation of human vision, automatic speech recognition, natural language processing, recommendation systems, etc. It provides high-performance and rich deployment options, from edge to cloud. -If you have already finished developing your models and converting them to the OpenVINO model format, you can install OpenVINO Runtime to deploy your applications on various devices. The [OpenVINO™ Runtime](https://docs.openvino.ai/2023.1/openvino_docs_OV_UG_OV_Runtime_User_Guide.html) Python package includes a set of libraries for an easy inference integration with your products. +If you have already finished developing your models and converting them to the OpenVINO model format, you can install OpenVINO Runtime to deploy your applications on various devices. The [OpenVINO™](https://docs.openvino.ai/2023.1/openvino_docs_OV_UG_OV_Runtime_User_Guide.html) Python package includes a set of libraries for an easy inference integration with your products. ## System Requirements @@ -15,7 +15,7 @@ Before you start the installation, check the supported operating systems and req > **NOTE**: This package can be installed on other versions of Linux and Windows OSes, but only the specific versions above are fully validated. -## Install the OpenVINO™ Runtime Package +## Install OpenVINO™ ### Step 1. Set Up Python Virtual Environment @@ -37,15 +37,16 @@ python3 -m venv openvino_env ### Step 2. Activate Virtual Environment -On Linux and macOS: -```sh -source openvino_env/bin/activate -``` On Windows: ```sh openvino_env\Scripts\activate ``` +On Linux and macOS: +```sh +source openvino_env/bin/activate +``` + ### Step 3. Set Up and Update PIP to the Highest Version Run the command below: @@ -70,6 +71,14 @@ python -c "from openvino.runtime import Core; print(Core().available_devices)" If installation was successful, you will see the list of available devices. 
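Building on the verification command above, a slightly longer sketch is shown below: it lists the available devices and compiles a model. The `model.xml` path is a placeholder for an IR file you already have; it is not shipped with the package.

```python
# Minimal sketch, assuming "model.xml" is an existing OpenVINO IR file.
from openvino.runtime import Core

core = Core()
print(core.available_devices)            # e.g. ['CPU', 'GPU']

model = core.read_model("model.xml")     # placeholder path
compiled_model = core.compile_model(model, "CPU")
print(compiled_model)
```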
+## What's in the Package + +| Component | Content | Description | +|------------------|---------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| [OpenVINO Runtime](https://docs.openvino.ai/2023.1/openvino_docs_OV_UG_OV_Runtime_User_Guide.html) | `openvino package` |**OpenVINO Runtime** is a set of C++ libraries with C and Python bindings providing a common API to deliver inference solutions on the platform of your choice. Use the OpenVINO Runtime API to read PyTorch\*, TensorFlow\*, TensorFlow Lite\*, ONNX\*, and PaddlePaddle\* models and execute them on preferred devices. OpenVINO Runtime uses a plugin architecture and includes the following plugins: [CPU](https://docs.openvino.ai/2023.1/openvino_docs_OV_UG_supported_plugins_CPU.html), [GPU](https://docs.openvino.ai/2023.1/openvino_docs_OV_UG_supported_plugins_GPU.html), [Auto Batch](https://docs.openvino.ai/2023.1/openvino_docs_OV_UG_Automatic_Batching.html), [Auto](https://docs.openvino.ai/2023.1/openvino_docs_OV_UG_supported_plugins_AUTO.html), [Hetero](https://docs.openvino.ai/2023.1/openvino_docs_OV_UG_Hetero_execution.html). +| [OpenVINO Model Converter (OVC)](https://docs.openvino.ai/2023.1/openvino_docs_model_processing_introduction.html#convert-a-model-in-cli-ovc) | `ovc` |**OpenVINO Model Converter** converts models that were trained in popular frameworks to a format usable by OpenVINO components.
Supported frameworks include ONNX\*, TensorFlow\*, TensorFlow Lite\*, and PaddlePaddle\*. | +| [Benchmark Tool](https://docs.openvino.ai/2023.1/openvino_inference_engine_tools_benchmark_tool_README.html)| `benchmark_app` | **Benchmark Application** allows you to estimate deep learning inference performance on supported devices for synchronous and asynchronous modes. | + ## Troubleshooting For general troubleshooting steps and issues, see [Troubleshooting Guide for OpenVINO Installation](https://docs.openvino.ai/2023.1/openvino_docs_get_started_guide_troubleshooting.html). The following sections also provide explanations to several error messages. From 075333e94dba07d79fd6165fbc0114aefecb1b60 Mon Sep 17 00:00:00 2001 From: Pawel Raasz Date: Thu, 12 Oct 2023 12:51:04 +0200 Subject: [PATCH 169/257] [core]Migrate Tile operator to new API (#20255) * Migrate Tile to new API * Remove visit_attributes as is same as base class --- src/core/include/openvino/op/tile.hpp | 3 +- src/core/reference/src/op/tile.cpp | 87 ++++++++++++--------------- src/core/src/op/tile.cpp | 66 ++++++++++---------- 3 files changed, 74 insertions(+), 82 deletions(-) diff --git a/src/core/include/openvino/op/tile.hpp b/src/core/include/openvino/op/tile.hpp index d7459477dac75a..9cb72cc5adf2ef 100644 --- a/src/core/include/openvino/op/tile.hpp +++ b/src/core/include/openvino/op/tile.hpp @@ -23,7 +23,6 @@ class OPENVINO_API Tile : public Op { /// \param repeats The node producing the per-dimension replication factor Tile(const Output& data, const Output& repeats); - bool visit_attributes(AttributeVisitor& visitor) override; void validate_and_infer_types() override; std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; @@ -31,7 +30,7 @@ class OPENVINO_API Tile : public Op { bool evaluate_lower(TensorVector& outputs) const override; bool evaluate_upper(TensorVector& outputs) const override; bool has_evaluate() const override; - bool evaluate(ov::TensorVector& output_values, const ov::TensorVector& input_values) const override; + bool evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const override; bool evaluate_label(TensorLabelVector& output_labels) const override; }; } // namespace v0 diff --git a/src/core/reference/src/op/tile.cpp b/src/core/reference/src/op/tile.cpp index f88e56392a26a8..0a3132e32b4807 100644 --- a/src/core/reference/src/op/tile.cpp +++ b/src/core/reference/src/op/tile.cpp @@ -5,65 +5,56 @@ #include "openvino/reference/tile.hpp" #include -#include #include -#include -using namespace ov; - -namespace { -/// \brief For each axis calculates the product of inner axes -/// If dims has shape (2, 3, 4) then for 2 (first axis) the inner axes would be (3, 4) -/// and for 3 (second axis) it would be (4) -/// If dims has shape(2, 3, 4) then the output vector would be (3 * 4, 4, 1) -/// The outermost axis is not used. For innermost axis it is always 1. -/// \param[in] dims Shape of the output -/// -/// \return Vector containing calculated values for each axis. 
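As a side note to the comment above: the pitch values it describes are just the row-major strides of the output shape, which is why the rewritten reference switches to `row_major_strides`. A small Python sketch of the same computation, using the illustrative (2, 3, 4) shape from the comment, is given here; it is a restatement for clarity, not code from the patch.

```python
# Row-major strides ("pitches") for a shape: for dims (2, 3, 4) -> [12, 4, 1],
# matching the removed create_pitches helper and ov::row_major_strides.
import numpy as np

def pitches(dims):
    strides = [1]
    for d in reversed(dims[1:]):
        strides.append(strides[-1] * d)
    return list(reversed(strides))

print(pitches((2, 3, 4)))           # [12, 4, 1]

# The Tile reference itself behaves like numpy's tile: every axis of the input
# is repeated by the corresponding value in "repeats".
data = np.arange(6).reshape(2, 3)
print(np.tile(data, (2, 2)).shape)  # (4, 6)
```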
-std::vector create_pitches(const Shape& dims) { - std::vector pitch; - pitch.resize(dims.size() - 1); - std::partial_sum(dims.rbegin(), dims.rend() - 1, pitch.rbegin(), std::multiplies()); - pitch.push_back(1); - return pitch; -} -} // namespace - -void reference::tile(const char* arg, - char* out, - const Shape& in_shape, - const Shape& out_shape, - const size_t elem_size, - const std::vector& repeats) { - Shape in_shape_expanded(in_shape); - in_shape_expanded.insert(in_shape_expanded.begin(), out_shape.size() - in_shape.size(), 1); - size_t block_size = 0; - int64_t num_repeats = 0; - const int input_rank = static_cast(in_shape_expanded.size()); - const int64_t last_dim = in_shape_expanded[input_rank - 1]; - const std::vector pitches = create_pitches(out_shape); - const char* copy = nullptr; - - std::vector indices(in_shape_expanded.size() - 1, 0); - size_t axis = indices.size(); +namespace ov { +namespace reference { +/** + * @brief Reference implementation of Tile operator + * + * @param arg Pointer to input data. + * @param out Pointer to output data. + * @param in_shape Input data shape. + * @param out_shape Output data shape. + * @param elem_size Single data element size im bytes. + * @param repeats Vector with repeats values for axes (same rank as out_shape). + */ +void tile(const char* arg, + char* out, + const Shape& in_shape, + const Shape& out_shape, + const size_t elem_size, + const std::vector& repeats) { if (std::all_of(repeats.begin(), repeats.end(), [](int64_t repeat) { return repeat == 0; })) { return; } + decltype(arg) copy_from; + typename std::decay::type block_size; + typename std::decay::type num_repeats; + + auto in_shape_expanded = in_shape; + in_shape_expanded.insert(in_shape_expanded.begin(), out_shape.size() - in_shape.size(), 1); + const auto last_dim = in_shape_expanded.back(); + const auto pitches = row_major_strides(out_shape); + + std::vector indices(in_shape_expanded.size() - 1, 0); + auto axis = indices.size(); + // Copy and repeat data for innermost axis as many times as described in the repeats parameter while (axis <= indices.size()) { block_size = last_dim * elem_size; - memcpy(out, arg, block_size); + std::memcpy(out, arg, block_size); out += block_size; arg += block_size; - copy = out - block_size; - num_repeats = repeats[input_rank - 1] - 1; + copy_from = out - block_size; + num_repeats = repeats.back() - 1; for (int64_t i = 0; i < num_repeats; ++i) { - memcpy(out, copy, block_size); + std::memcpy(out, copy_from, block_size); out += block_size; } @@ -75,14 +66,16 @@ void reference::tile(const char* arg, } indices[axis] = 0; - ptrdiff_t pitch = pitches[axis] * in_shape_expanded[axis]; + auto pitch = pitches[axis] * in_shape_expanded[axis]; block_size = pitch * elem_size; - copy = out - block_size; + copy_from = out - block_size; num_repeats = repeats[axis] - 1; - for (int64_t i = 0; i < num_repeats; i++) { - memcpy(out, copy, block_size); + for (int64_t i = 0; i < num_repeats; ++i) { + std::memcpy(out, copy_from, block_size); out += block_size; } } } } +} // namespace reference +} // namespace ov diff --git a/src/core/src/op/tile.cpp b/src/core/src/op/tile.cpp index 6696ec8676a5f1..f6274a60136ac9 100644 --- a/src/core/src/op/tile.cpp +++ b/src/core/src/op/tile.cpp @@ -4,24 +4,22 @@ #include "openvino/op/tile.hpp" -#include - #include "bound_evaluate.hpp" #include "itt.hpp" #include "openvino/op/util/precision_sensitive_attribute.hpp" #include "openvino/reference/tile.hpp" +#include "tile_shape_inference.hpp" + +namespace ov { +namespace op { 
+namespace v0 { -ov::op::v0::Tile::Tile(const Output& data, const Output& repeats) : Op({data, repeats}) { +Tile::Tile(const Output& data, const Output& repeats) : Op({data, repeats}) { ov::mark_as_precision_sensitive(input(1)); constructor_validate_and_infer_types(); } -bool ov::op::v0::Tile::visit_attributes(ov::AttributeVisitor& visitor) { - OV_OP_SCOPE(v0_Tile_visit_attributes); - return true; -} - -void ov::op::v0::Tile::validate_and_infer_types() { +void Tile::validate_and_infer_types() { OV_OP_SCOPE(v0_Tile_validate_and_infer_types); // Repeats should have integer data type. For now we only allow i64 @@ -30,7 +28,6 @@ void ov::op::v0::Tile::validate_and_infer_types() { repeats_et.is_integral(), "Tile repeats must have any integer element type, but has ", repeats_et); - OPENVINO_SUPPRESS_DEPRECATED_START auto output_shapes = shape_infer(this, get_node_input_partial_shapes(*this)); OPENVINO_SUPPRESS_DEPRECATED_END @@ -40,53 +37,53 @@ void ov::op::v0::Tile::validate_and_infer_types() { set_input_is_relevant_to_shape(1); } -std::shared_ptr ov::op::v0::Tile::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr Tile::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v0_Tile_clone_with_new_inputs); check_new_args_count(this, new_args); return std::make_shared(new_args.at(0), new_args.at(1)); } -bool ov::op::v0::Tile::evaluate(ov::TensorVector& output_values, const ov::TensorVector& input_values) const { +bool Tile::evaluate(TensorVector& outputs, const TensorVector& inputs) const { OV_OP_SCOPE(v0_Tile_evaluate); - const auto& data = input_values[0]; - const auto& axis = input_values[1]; - auto& output = output_values[0]; - auto repeats_val = get_tensor_data_as(axis, ov::util::Cast()); - const auto repeats_rank = repeats_val.size(); - - std::vector input_shapes = {data.get_shape(), axis.get_shape()}; - - const auto& output_shape = shape_infer(this, input_shapes, make_tensor_accessor(input_values)).front().to_shape(); - output.set_shape(output_shape); - repeats_val.insert(repeats_val.begin(), output_shape.size() - repeats_rank, 1); - ov::reference::tile(static_cast(data.data()), - static_cast(output.data()), - data.get_shape(), - output_shape, - data.get_element_type().size(), - repeats_val); + OPENVINO_ASSERT(outputs.size() == 1); + OPENVINO_ASSERT(inputs.size() == 2); + + const auto& d = inputs[0]; + const auto& r = inputs[1]; + auto repeats = get_tensor_data_as(r); + + const std::vector input_shapes{d.get_shape(), r.get_shape()}; + const auto output_shape = shape_infer(this, input_shapes, make_tensor_accessor(inputs)).front().to_shape(); + outputs[0].set_shape(output_shape); + repeats.insert(repeats.begin(), output_shape.size() - repeats.size(), 1); + reference::tile(static_cast(d.data()), + static_cast(outputs[0].data()), + d.get_shape(), + output_shape, + d.get_element_type().size(), + repeats); return true; } -bool ov::op::v0::Tile::has_evaluate() const { +bool Tile::has_evaluate() const { OV_OP_SCOPE(v0_Tile_has_evaluate); return true; } -bool ov::op::v0::Tile::evaluate_lower(ov::TensorVector& output_values) const { +bool Tile::evaluate_lower(TensorVector& output_values) const { OV_OP_SCOPE(v0_Tile_evaluate_lower); return get_input_tensor(1).has_and_set_bound() && default_lower_bound_evaluator(this, output_values); } -bool ov::op::v0::Tile::evaluate_upper(ov::TensorVector& output_values) const { +bool Tile::evaluate_upper(TensorVector& output_values) const { OV_OP_SCOPE(v0_Tile_evaluate_upper); return 
get_input_tensor(1).has_and_set_bound() && default_upper_bound_evaluator(this, output_values); } -bool ov::op::v0::Tile::evaluate_label(TensorLabelVector& output_labels) const { +bool Tile::evaluate_label(TensorLabelVector& output_labels) const { OV_OP_SCOPE(v0_Tile_evaluate_label); OPENVINO_ASSERT(output_labels.size() == 1); @@ -94,3 +91,6 @@ bool ov::op::v0::Tile::evaluate_label(TensorLabelVector& output_labels) const { return get_input_tensor(1).has_and_set_bound() && default_label_evaluator(this, output_labels); OPENVINO_SUPPRESS_DEPRECATED_END } +} // namespace v0 +} // namespace op +} // namespace ov From 9e4ec43f7ca03dce7c1d15c0787c794c657b4c50 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Thu, 12 Oct 2023 15:29:30 +0400 Subject: [PATCH 170/257] Disabled sporadic test_backend.py::OnnxBackendNodeModelTest::test_div_uint8_cpu test (#20419) --- .github/workflows/mac.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/mac.yml b/.github/workflows/mac.yml index 1c21bd87e0a7ae..f1318923b3e0c7 100644 --- a/.github/workflows/mac.yml +++ b/.github/workflows/mac.yml @@ -489,8 +489,8 @@ jobs: run: | python3 -m pytest -s ${{ env.INSTALL_TEST_DIR }}/pyngraph \ --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-Pyngraph.xml \ - --ignore=${{ env.INSTALL_TEST_DIR }}/pyngraph/tests/test_onnx/test_zoo_models.py \ - --ignore=${{ env.INSTALL_TEST_DIR }}/pyngraph/tests/test_onnx/test_backend.py + --ignore=${{ env.INSTALL_TEST_DIR }}/pyngraph/tests_compatibility/test_onnx/test_zoo_models.py \ + --ignore=${{ env.INSTALL_TEST_DIR }}/pyngraph/tests_compatibility/test_onnx/test_backend.py - name: Python API 2.0 Tests run: | From 518a879a83c1dca33bfaa5cad803b7d15cbfcda7 Mon Sep 17 00:00:00 2001 From: Irina Efode Date: Thu, 12 Oct 2023 15:59:42 +0400 Subject: [PATCH 171/257] [CONFORMANCE][PRECOMMIT] Add one sporadically interapted test to expected failures (#20420) --- .../skip_configs/CPU/expected_failures_OP.csv | 1 + 1 file changed, 1 insertion(+) diff --git a/src/tests/test_utils/functional_test_utils/layer_tests_summary/skip_configs/CPU/expected_failures_OP.csv b/src/tests/test_utils/functional_test_utils/layer_tests_summary/skip_configs/CPU/expected_failures_OP.csv index 23d497e22f5d12..84d3e26eb35fc2 100644 --- a/src/tests/test_utils/functional_test_utils/layer_tests_summary/skip_configs/CPU/expected_failures_OP.csv +++ b/src/tests/test_utils/functional_test_utils/layer_tests_summary/skip_configs/CPU/expected_failures_OP.csv @@ -1129,3 +1129,4 @@ conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_IR=PriorBox- conformance_PRelu/ReadIRTest.ImportExport/Op=PRelu.1_Type=f32_IR=20e7e74f55eb5fb78014cce7e0665d6925bbefd708dd9ccff12dbfbea2a330dd_Device=CPU_Shape=static_Config=(),5.69266e-06 conformance_RegionYolo/ReadIRTest.ImportExport/Op=RegionYolo.1_Type=f32_IR=RegionYolo-1_750_Device=CPU_Shape=static_Config=(),5.06332e-06 conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=i32_IR=28f23780d4ca0d40671caf79d5cd9223ad8f6dc2fa5ade2521f3d99586eeeb7f_Device=CPU_Shape=static_Config=(),9.72615e-07 +conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_IR=c301804445f273eef62f41f02204711d9d6e571da28c76ab447d7d90983b0032_Device=CPU_Shape=dynamic_Config=(),0.000113281 From 377e92714905922de0b20979458425b0282c2dd0 Mon Sep 17 00:00:00 2001 From: Vladislav Golubev Date: Thu, 12 Oct 2023 14:34:53 +0200 Subject: [PATCH 172/257] [Snippets] Changed BrgemmCopyB shape inference (#19957) --- .../snippets/include/snippets/op/brgemm.hpp | 8 -- 
.../snippets/include/snippets/op/buffer.hpp | 3 +- .../shape_inference/shape_infer_instances.hpp | 7 ++ src/common/snippets/src/op/brgemm.cpp | 71 ---------------- .../shape_inference/shape_infer_instances.cpp | 72 ++++++++++++++++ .../src/shape_inference/shape_inference.cpp | 2 +- src/plugins/intel_cpu/src/nodes/subgraph.cpp | 2 + .../snippets/x64/op/brgemm_copy_b.cpp | 82 ++++++++----------- .../snippets/x64/op/brgemm_copy_b.hpp | 9 +- .../snippets/x64/op/brgemm_cpu.cpp | 24 +----- .../snippets/x64/op/brgemm_cpu.hpp | 6 -- .../set_brgemm_copy_b_buffers_shape.cpp | 38 +++++++++ .../set_brgemm_copy_b_buffers_shape.hpp | 27 ++++++ .../pass/set_brgemm_cpu_blocking_params.cpp | 24 +----- .../snippets/x64/shape_inference.cpp | 4 +- 15 files changed, 197 insertions(+), 182 deletions(-) create mode 100644 src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/set_brgemm_copy_b_buffers_shape.cpp create mode 100644 src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/set_brgemm_copy_b_buffers_shape.hpp diff --git a/src/common/snippets/include/snippets/op/brgemm.hpp b/src/common/snippets/include/snippets/op/brgemm.hpp index 50cca60bbbc29d..8ba681fb8e9353 100644 --- a/src/common/snippets/include/snippets/op/brgemm.hpp +++ b/src/common/snippets/include/snippets/op/brgemm.hpp @@ -39,14 +39,6 @@ class Brgemm : public MemoryAccess { bool has_evaluate() const override { return false; } - class ShapeInfer : public IShapeInferSnippets { - protected: - std::vector> m_io_layouts; - public: - explicit ShapeInfer(const std::shared_ptr& n); - Result infer(const std::vector& input_shapes) override; - }; - protected: ov::element::Type get_output_type() const; std::vector get_planar_input_shapes(const std::vector>& inputs) const; diff --git a/src/common/snippets/include/snippets/op/buffer.hpp b/src/common/snippets/include/snippets/op/buffer.hpp index 7a644644dd7417..9f522ed3d45688 100644 --- a/src/common/snippets/include/snippets/op/buffer.hpp +++ b/src/common/snippets/include/snippets/op/buffer.hpp @@ -44,9 +44,10 @@ class Buffer : public ov::op::Op { size_t get_id() const { return m_id; } Type get_type() const { return m_type; } - ov::Shape get_allocation_shape() const { return m_shape; } int64_t get_offset() const { return m_offset; } void set_id(size_t id) { m_id = id; } + const ov::Shape& get_allocation_shape() const { return m_shape; } + void set_allocation_shape(const ov::Shape& allocation_shape) { m_shape = allocation_shape; } void set_offset(int64_t offset) { m_offset = offset; } size_t get_byte_size() const; diff --git a/src/common/snippets/include/snippets/shape_inference/shape_infer_instances.hpp b/src/common/snippets/include/snippets/shape_inference/shape_infer_instances.hpp index f673f8ff997558..af69ad905111e8 100644 --- a/src/common/snippets/include/snippets/shape_inference/shape_infer_instances.hpp +++ b/src/common/snippets/include/snippets/shape_inference/shape_infer_instances.hpp @@ -61,5 +61,12 @@ class HorizonOpShapeInfer : public IShapeInferSnippets { Result infer(const std::vector& input_shapes) override; }; +class BrgemmShapeInfer : public IShapeInferSnippets { + std::vector> m_io_layouts; +public: + explicit BrgemmShapeInfer(const std::shared_ptr& n); + Result infer(const std::vector& input_shapes) override; +}; + } // namespace snippets } // namespace ov diff --git a/src/common/snippets/src/op/brgemm.cpp b/src/common/snippets/src/op/brgemm.cpp index 1f415a4f64b57f..b64a4328a83b1c 100644 --- a/src/common/snippets/src/op/brgemm.cpp +++ 
b/src/common/snippets/src/op/brgemm.cpp @@ -188,77 +188,6 @@ ov::PartialShape Brgemm::get_output_partial_shape(const std::vector& n) { - for (const auto& in : n->inputs()) { - const auto& port = lowered::PortDescriptorUtils::get_port_descriptor_ptr(in); - m_io_layouts.push_back(port->get_layout()); - } - m_io_layouts.push_back(get_output_layout(n)); -} - -IShapeInferSnippets::Result Brgemm::ShapeInfer::infer(const std::vector& input_shapes) { - OPENVINO_ASSERT(input_shapes.size() == 2, "BRGEMM expects 2 input shapes for shape inference"); - - // Todo: Ideally we should use the layout stored in PortDescriptors. Can we do it? - const auto& arg0_shape = snippets::utils::get_planar_vdims(input_shapes[0].get(), m_io_layouts[0]); - const auto& arg1_shape = snippets::utils::get_planar_vdims(input_shapes[1].get(), m_io_layouts[1]); - - size_t arg0_rank = arg0_shape.size(), arg1_rank = arg1_shape.size(); - - // temporary shapes to calculate output shape - VectorDims arg0_shape_tmp(arg0_shape), arg1_shape_tmp(arg1_shape); - - // one-dimensional tensors unsqueezing is applied to each input independently. - if (arg0_rank == 1) { - // If the first input is 1D tensor, it is unsqueezed to 2D tensor (row vector) - // by adding axes with size 1 at ROW_INDEX_DIM, to the left of the shape. - // For example {S} will be reshaped to {1, S}. - arg0_shape_tmp.insert(arg0_shape_tmp.begin(), 1); - arg0_rank = arg0_shape_tmp.size(); - } - if (arg1_rank == 1) { - // If the second input is 1D tensor, it is unsqueezed to 2D tensor (column vector) - // by adding axes with size 1 at COL_INDEX_DIM, to the right of the shape. - // For example {S} will be reshaped to {S, 1}. - arg1_shape_tmp.insert(arg1_shape_tmp.end(), 1); - arg1_rank = arg1_shape_tmp.size(); - } - - // add 1 to begin to align shape ranks if needed - if (arg0_rank < arg1_rank) - arg0_shape_tmp.insert(arg0_shape_tmp.begin(), arg1_rank - arg0_rank, 1); - else if (arg0_rank > arg1_rank) - arg1_shape_tmp.insert(arg1_shape_tmp.begin(), arg0_rank - arg1_rank, 1); - - size_t max_rank = arg0_shape_tmp.size(); - VectorDims output_shape(max_rank); - for (size_t i = 0; i < max_rank - 2; ++i) { - if (arg0_shape_tmp[i] == arg1_shape_tmp[i]) { - output_shape[i] = arg0_shape_tmp[i]; - } else { - if (arg0_shape_tmp[i] == 1 || arg0_shape_tmp[i] == DYNAMIC_DIMENSION) - output_shape[i] = arg1_shape_tmp[i]; - else if (arg1_shape_tmp[i] == 1 || arg1_shape_tmp[i] == DYNAMIC_DIMENSION) - output_shape[i] = arg0_shape_tmp[i]; - else - OPENVINO_THROW("Incompatible Brgemm batch dimension"); - } - } - output_shape[output_shape.size() - 2] = arg0_shape_tmp[arg0_shape_tmp.size() - 2]; // M - output_shape[output_shape.size() - 1] = arg1_shape_tmp[arg1_shape_tmp.size() - 1]; // N - - // removing the temporary axes from originally 1D tensors. 
- if (arg0_shape.size() == 1) { - output_shape.erase(output_shape.begin() + output_shape.size() - 2); - } - if (arg1_shape.size() == 1) { - output_shape.erase(output_shape.begin() + output_shape.size() - 1); - } - output_shape = snippets::utils::get_planar_vdims(output_shape, m_io_layouts[2]); - return {{output_shape}, snippets::ShapeInferStatus::success}; -} - } // namespace op } // namespace snippets } // namespace ov diff --git a/src/common/snippets/src/shape_inference/shape_infer_instances.cpp b/src/common/snippets/src/shape_inference/shape_infer_instances.cpp index e8df307a0b93ab..61404d208fd5a7 100644 --- a/src/common/snippets/src/shape_inference/shape_infer_instances.cpp +++ b/src/common/snippets/src/shape_inference/shape_infer_instances.cpp @@ -3,6 +3,7 @@ // #include "snippets/shape_inference/shape_infer_instances.hpp" #include "snippets/snippets_isa.hpp" +#include "snippets/utils.hpp" #include "openvino/op/select.hpp" namespace ov { namespace snippets { @@ -160,5 +161,76 @@ Result HorizonOpShapeInfer::infer(const std::vector& input_shapes return {{output_shapes}, ShapeInferStatus::success}; } +BrgemmShapeInfer::BrgemmShapeInfer(const std::shared_ptr& n) { + for (const auto& in : n->inputs()) { + const auto& port = lowered::PortDescriptorUtils::get_port_descriptor_ptr(in); + m_io_layouts.push_back(port->get_layout()); + } + const auto& port = lowered::PortDescriptorUtils::get_port_descriptor_ptr(n->output(0)); + m_io_layouts.push_back(port->get_layout()); +} + +Result BrgemmShapeInfer::infer(const std::vector& input_shapes) { + OPENVINO_ASSERT(input_shapes.size() == 2 || input_shapes.size() == 3, "BRGEMM expects 2 or 3 input shapes for shape inference"); + + // Todo: Ideally we should use the layout stored in PortDescriptors. Can we do it? + const auto& arg0_shape = ov::snippets::utils::get_planar_vdims(input_shapes[0].get(), m_io_layouts[0]); + const auto& arg1_shape = ov::snippets::utils::get_planar_vdims(input_shapes[1].get(), m_io_layouts[1]); + + size_t arg0_rank = arg0_shape.size(), arg1_rank = arg1_shape.size(); + + // temporary shapes to calculate output shape + VectorDims arg0_shape_tmp(arg0_shape), arg1_shape_tmp(arg1_shape); + + // one-dimensional tensors unsqueezing is applied to each input independently. + if (arg0_rank == 1) { + // If the first input is 1D tensor, it is unsqueezed to 2D tensor (row vector) + // by adding axes with size 1 at ROW_INDEX_DIM, to the left of the shape. + // For example {S} will be reshaped to {1, S}. + arg0_shape_tmp.insert(arg0_shape_tmp.begin(), 1); + arg0_rank = arg0_shape_tmp.size(); + } + if (arg1_rank == 1) { + // If the second input is 1D tensor, it is unsqueezed to 2D tensor (column vector) + // by adding axes with size 1 at COL_INDEX_DIM, to the right of the shape. + // For example {S} will be reshaped to {S, 1}. 
+ arg1_shape_tmp.insert(arg1_shape_tmp.end(), 1); + arg1_rank = arg1_shape_tmp.size(); + } + + // add 1 to begin to align shape ranks if needed + if (arg0_rank < arg1_rank) + arg0_shape_tmp.insert(arg0_shape_tmp.begin(), arg1_rank - arg0_rank, 1); + else if (arg0_rank > arg1_rank) + arg1_shape_tmp.insert(arg1_shape_tmp.begin(), arg0_rank - arg1_rank, 1); + + size_t max_rank = arg0_shape_tmp.size(); + VectorDims output_shape(max_rank); + for (size_t i = 0; i < max_rank - 2; ++i) { + if (arg0_shape_tmp[i] == arg1_shape_tmp[i]) { + output_shape[i] = arg0_shape_tmp[i]; + } else { + if (arg0_shape_tmp[i] == 1 || arg0_shape_tmp[i] == DYNAMIC_DIMENSION) + output_shape[i] = arg1_shape_tmp[i]; + else if (arg1_shape_tmp[i] == 1 || arg1_shape_tmp[i] == DYNAMIC_DIMENSION) + output_shape[i] = arg0_shape_tmp[i]; + else + OPENVINO_THROW("Incompatible Brgemm batch dimension"); + } + } + output_shape[output_shape.size() - 2] = arg0_shape_tmp[arg0_shape_tmp.size() - 2]; // M + output_shape[output_shape.size() - 1] = arg1_shape_tmp[arg1_shape_tmp.size() - 1]; // N + + // removing the temporary axes from originally 1D tensors. + if (arg0_shape.size() == 1) { + output_shape.erase(output_shape.begin() + output_shape.size() - 2); + } + if (arg1_shape.size() == 1) { + output_shape.erase(output_shape.begin() + output_shape.size() - 1); + } + output_shape = ov::snippets::utils::get_planar_vdims(output_shape, m_io_layouts.back()); + return {{output_shape}, snippets::ShapeInferStatus::success}; +} + } // namespace snippets } // namespace ov diff --git a/src/common/snippets/src/shape_inference/shape_inference.cpp b/src/common/snippets/src/shape_inference/shape_inference.cpp index cfc4dc460d4b16..22470a13d3443f 100644 --- a/src/common/snippets/src/shape_inference/shape_inference.cpp +++ b/src/common/snippets/src/shape_inference/shape_inference.cpp @@ -58,11 +58,11 @@ const IShapeInferSnippetsFactory::TRegistry IShapeInferSnippetsFactory::registry SHAPE_INFER_PREDEFINED(op::Kernel, EmptyShapeInfer), SHAPE_INFER_PREDEFINED(op::Nop, EmptyShapeInfer), SHAPE_INFER_OP_SPECIFIC_EXTERNAL(opset1::Select, SelectShapeInfer), + SHAPE_INFER_OP_SPECIFIC_EXTERNAL(op::Brgemm, BrgemmShapeInfer), // Note that Result has no output PortConnectors, so the shape must be empty SHAPE_INFER_PREDEFINED(ov::op::v0::Result, EmptyShapeInfer), // SHAPE_INFER_OP_SPECIFIC(op::LoadReshape), - SHAPE_INFER_OP_SPECIFIC(op::Brgemm), SHAPE_INFER_OP_SPECIFIC(op::BroadcastLoad), SHAPE_INFER_OP_SPECIFIC(op::BroadcastMove), }; diff --git a/src/plugins/intel_cpu/src/nodes/subgraph.cpp b/src/plugins/intel_cpu/src/nodes/subgraph.cpp index dd2c756ba63849..c20ecbea76cdca 100644 --- a/src/plugins/intel_cpu/src/nodes/subgraph.cpp +++ b/src/plugins/intel_cpu/src/nodes/subgraph.cpp @@ -22,6 +22,7 @@ #include "snippets/pass/matmul_to_brgemm.hpp" #include "utils/cpu_utils.hpp" #include "emitters/x64/cpu_generator.hpp" +#include "transformations/snippets/x64/pass/lowered/set_brgemm_copy_b_buffers_shape.hpp" #include "transformations/snippets/x64/pass/lowered/fuse_load_store_and_convert.hpp" #include "transformations/snippets/x64/pass/lowered/brgemm_blocking.hpp" #include "transformations/snippets/x64/pass/mul_add_to_fma.hpp" @@ -618,6 +619,7 @@ void Snippet::SnippetJitExecutor::generate(const jit_snippets_compile_args* jcp) ov::snippets::lowered::pass::PassPipeline control_flow_pipeline; CPU_REGISTER_PASS_X64(control_flow_pipeline, ov::intel_cpu::pass::FuseLoadStoreConvert) + CPU_REGISTER_PASS_X64(control_flow_pipeline, ov::intel_cpu::pass::SetBrgemmCopyBBuffersShape); // 
Note: we need to pass valid shapeInfer factory to generate, so it can be used in OptimizeDomain pass // in all other cases nGraph shape inference will be used until ticket # 113209 (PR 18563) is merged schedule = snippet_for_generation->generate(backend_passes, diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_copy_b.cpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_copy_b.cpp index e16088a15671b6..643b5d74fc963b 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_copy_b.cpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_copy_b.cpp @@ -57,39 +57,34 @@ void BrgemmCopyB::custom_constructor_validate_and_infer_types(std::vectorget_shape()); const auto& element_type = get_input_element_type(0); - const auto& pshape = snippets::utils::get_planar_pshape(input(0)); - validate(pshape, element_type); -} - -void BrgemmCopyB::validate(const ov::PartialShape& pshape, const ov::element::Type& element_type) { - NGRAPH_CHECK(one_of(element_type, element::bf16, element::i8), - "BrgemmCopyB doesn't support element type" + element_type.get_type_name()); - - if (pshape.is_dynamic()) { - set_output_type(0, element_type, ov::PartialShape {ov::Dimension::dynamic()}); - if (is_with_compensations()) { - set_output_type(1, ov::element::f32, ov::PartialShape {ov::Dimension::dynamic()}); - } - return; - } - - const auto shape = pshape.get_shape(); - const auto N = *shape.rbegin(); - const auto K = *(shape.rbegin() + 1); - - set_output_type(0, element_type, ov::PartialShape{ov::Dimension(rnd_up(K, m_brgemmVNNIFactor)), - ov::Dimension(rnd_up(N, m_N_blk))}); + const auto& planar_pshape = snippets::utils::get_planar_pshape(shape, port->get_layout()); + set_output_type(0, element_type, planar_pshape); if (is_with_compensations()) { - set_output_type(1, ov::element::f32, ov::PartialShape{ov::Dimension(rnd_up(N, m_N_blk))}); + set_output_type(1, ov::element::f32, planar_pshape); } + validate(planar_pshape, element_type); +} + +void BrgemmCopyB::validate(const ov::PartialShape& planar_pshape, const ov::element::Type& element_type) { + OPENVINO_ASSERT(one_of(element_type, element::bf16, element::i8), + "BrgemmCopyB doesn't support element type" + element_type.get_type_name()); } void intel_cpu::BrgemmCopyB::compute_block_size_values(const size_t blk_size_k, const size_t blk_size_n) { @@ -98,6 +93,17 @@ void intel_cpu::BrgemmCopyB::compute_block_size_values(const size_t blk_size_k, m_N_blk = blk_size_n != 0 ? 
blk_size_n : *input_shape.rbegin(); } +ov::Shape intel_cpu::BrgemmCopyB::get_data_repacking_shape(const ov::snippets::VectorDims& planar_dims) const { + const auto& N = *planar_dims.rbegin(); + const auto& K = *(planar_dims.rbegin() + 1); + return ov::Shape{rnd_up(K, m_brgemmVNNIFactor), rnd_up(N, m_N_blk)}; +} + +ov::Shape intel_cpu::BrgemmCopyB::get_compensation_shape(const ov::snippets::VectorDims& planar_dims) const { + const auto& N = *planar_dims.rbegin(); + return ov::Shape{rnd_up(N, m_N_blk)}; +} + std::shared_ptr intel_cpu::BrgemmCopyB::clone_with_new_inputs(const OutputVector& new_args) const { INTERNAL_OP_SCOPE(BrgemmRepack_clone_with_new_inputs); check_new_args_count(this, new_args); @@ -120,29 +126,13 @@ BrgemmCopyB::ShapeInfer::ShapeInfer(const std::shared_ptr& n) { OPENVINO_ASSERT(brg_copyb, "Got invalid node in BrgemmCopyB::ShapeInfer"); m_layout = snippets::lowered::PortDescriptorUtils::get_port_descriptor_ptr(n->input(0))->get_layout(); m_num_outs = brg_copyb->get_output_size(); - m_N_blk = brg_copyb->get_n_block_size(); - m_brgemmVNNIFactor = brg_copyb->m_brgemmVNNIFactor; } -snippets::IShapeInferSnippets::Result BrgemmCopyB::ShapeInfer::infer(const std::vector& input_shapes) { +ov::snippets::IShapeInferSnippets::Result BrgemmCopyB::ShapeInfer::infer(const std::vector& input_shapes) { OPENVINO_ASSERT(input_shapes.size() == 1, "Got unexpected number of input shapes"); - const auto& old_shape = input_shapes[0].get(); - snippets::VectorDims planar_shape; - planar_shape.reserve(old_shape.size()); - for (const auto idx : m_layout) - planar_shape.push_back(old_shape[idx]); - const auto N = *planar_shape.rbegin(); - const auto K = *(planar_shape.rbegin() + 1); - OPENVINO_ASSERT(N != DYNAMIC_DIMENSION && K != DYNAMIC_DIMENSION, - "BrgemmCopyB shape infer got dynamic N or K dimension, which is not supported"); - - std::vector new_shapes(m_num_outs); - new_shapes[0].push_back(rnd_up(K, m_brgemmVNNIFactor)); - new_shapes[0].push_back(rnd_up(N, m_N_blk)); - if (m_num_outs == 2) { - new_shapes[1].push_back(rnd_up(N, m_N_blk)); - } - return {new_shapes, snippets::ShapeInferStatus::success}; + const auto planar_shape = ov::snippets::utils::get_planar_vdims(input_shapes[0].get(), m_layout); + std::vector new_shapes(m_num_outs, planar_shape); + return {new_shapes, ov::snippets::ShapeInferStatus::success}; } } // namespace intel_cpu diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_copy_b.hpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_copy_b.hpp index 62703049aeaa38..9274ad026e5f01 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_copy_b.hpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_copy_b.hpp @@ -5,6 +5,7 @@ #pragma once #include "snippets/op/memory_access.hpp" +#include "snippets/shape_types.hpp" #include namespace ov { @@ -43,7 +44,11 @@ class BrgemmCopyB : public snippets::op::MemoryAccess { void set_k_block_size(size_t block_size) { m_K_blk = block_size; } void set_n_block_size(size_t block_size) { m_N_blk = block_size; } + ov::Shape get_data_repacking_shape(const ov::snippets::VectorDims& planar_dims) const; + ov::Shape get_compensation_shape(const ov::snippets::VectorDims& planar_dims) const; + Type get_type() const { return m_type; } + size_t get_brgemm_vnni_factor() const { return m_brgemmVNNIFactor; } element::Type get_src_element_type() const { return m_src_type; } bool is_with_compensations() const { return m_type == Type::WithCompensations; } @@ -55,8 +60,6 @@ class 
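// ---------------------------------------------------------------------------
// [Editor's note] Illustrative sketch only, not part of the patch. The new
// get_data_repacking_shape()/get_compensation_shape() helpers above round the
// planar K and N dimensions up to the VNNI factor and the N block size,
// respectively. A minimal stand-in for the rnd_up() helper they rely on, plus a
// worked example (bf16 weights, i.e. VNNI factor 2, N block size 64):
#include <cstddef>

inline size_t rnd_up_sketch(size_t value, size_t factor) {
    return factor == 0 ? value : (value + factor - 1) / factor * factor;
}
// K = 37, N = 70  ->  data repacking buffer shape {rnd_up(37, 2), rnd_up(70, 64)} = {38, 128}
//                     compensation buffer shape   {rnd_up(70, 64)} = {128}
// ---------------------------------------------------------------------------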
BrgemmCopyB : public snippets::op::MemoryAccess { class ShapeInfer : public snippets::IShapeInferSnippets { std::vector m_layout{}; size_t m_num_outs = 1; - size_t m_N_blk = 64; - size_t m_brgemmVNNIFactor = 1; public: explicit ShapeInfer(const std::shared_ptr& n); Result infer(const std::vector& input_shapes) override; @@ -64,7 +67,7 @@ class BrgemmCopyB : public snippets::op::MemoryAccess { private: void custom_constructor_validate_and_infer_types(std::vector layout_input = {}); - void validate(const ov::PartialShape& pshape, const ov::element::Type& element_type); + void validate(const ov::PartialShape& planar_pshape, const ov::element::Type& element_type); void compute_block_size_values(const size_t blk_size_k, const size_t blk_size_n); Type m_type = Type::OnlyRepacking; diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_cpu.cpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_cpu.cpp index 03e3325376cbf5..20f7fccafe311a 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_cpu.cpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_cpu.cpp @@ -114,21 +114,9 @@ void BrgemmCPU::validate_with_scratchpad(const ov::Shape& shape_b) const { // Additional check for 3rd input if (one_of(m_type, Type::WithCompensations, Type::AMX)) { const auto& pshape = get_input_partial_shape(2); - NGRAPH_CHECK(pshape.is_static(), "BRGEMM Scratch must have static shape"); - const auto shape = pshape.to_shape(); - const auto type = get_input_element_type(2); + OPENVINO_ASSERT(pshape.is_static(), "BRGEMM Scratch must have static shape"); if (is_with_compensations()) { - const auto expected_type = ov::element::f32; - NGRAPH_CHECK(expected_type == type, "BRGEMM Scratch with compensations must have FP32 element type"); - const auto N = *shape_b.rbegin(); - // If N block size is not set, there is no meaning in validating the scratchpad shape - if (m_N_blk != N) { - const auto expected_shape = ov::Shape{rnd_up(N, m_N_blk)}; - NGRAPH_CHECK(expected_shape == shape, "BRGEMM Scratch with compensations must have shape {rnd_up(N, m_N_blk)}"); - } - } else { - NGRAPH_CHECK(ov::shape_size(shape) == SCRATCH_BYTE_SIZE && type == ov::element::u8, - "BRGEMM Scratch for space workplace must be static, have U8 element type and size equal to " + std::to_string(SCRATCH_BYTE_SIZE)); + OPENVINO_ASSERT(get_input_element_type(2) == ov::element::f32, "BRGEMM Scratch with compensations must have FP32 element type"); } } } @@ -181,13 +169,5 @@ size_t BrgemmCPU::get_offset_scratch() const { return get_input_offset(2); } -BrgemmCPU::ShapeInfer::ShapeInfer(const std::shared_ptr& n) : Brgemm::ShapeInfer(n) { - const auto& brg = ov::as_type_ptr(n); - OPENVINO_ASSERT(brg, "Got invalid node in BrgemmCPU::ShapeInfer"); - const auto brgemm_copy = brg->is_with_data_repacking() ? 
brg->get_brgemm_copy() : nullptr; - if (brgemm_copy) - m_io_layouts[1] = snippets::lowered::PortDescriptorUtils::get_port_descriptor_ptr(brgemm_copy->input(0))->get_layout(); -} - } // namespace intel_cpu } // namespace ov diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_cpu.hpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_cpu.hpp index e1957bb66d2be1..bf07b7a8546eac 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_cpu.hpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_cpu.hpp @@ -69,12 +69,6 @@ class BrgemmCPU : public snippets::op::Brgemm { constexpr static size_t SCRATCH_BYTE_SIZE = 32 * 1024; - class ShapeInfer : public Brgemm::ShapeInfer { - public: - explicit ShapeInfer(const std::shared_ptr& n); - }; - - private: void custom_constructor_validate_and_infer_types(std::vector layout_a, std::vector layout_b, std::vector layout_c); void compute_block_size_values(const size_t blk_size_m, const size_t blk_size_k, const size_t blk_size_n); diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/set_brgemm_copy_b_buffers_shape.cpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/set_brgemm_copy_b_buffers_shape.cpp new file mode 100644 index 00000000000000..91bec8aee60d4a --- /dev/null +++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/set_brgemm_copy_b_buffers_shape.cpp @@ -0,0 +1,38 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "snippets/itt.hpp" + +#include "set_brgemm_copy_b_buffers_shape.hpp" +#include "snippets/snippets_isa.hpp" +#include "snippets/utils.hpp" + +#include "transformations/snippets/x64/op/brgemm_copy_b.hpp" + +bool ov::intel_cpu::pass::SetBrgemmCopyBBuffersShape::run(snippets::lowered::LinearIR& linear_ir) { + OV_ITT_SCOPED_TASK(ov::pass::itt::domains::SnippetsTransform, "Snippets::SetBrgemmCopyBBuffersShape") + + auto get_buffer_from_output = [](const snippets::lowered::ExpressionPtr& expr, const size_t out_idx) { + const auto& consumers = expr->get_output_port_connector(out_idx)->get_consumers(); + OPENVINO_ASSERT(consumers.size() == 1, "BrgemmCopyB must have only 1 consumer"); + const auto buffer = ov::as_type_ptr(consumers.begin()->get_expr()->get_node()); + OPENVINO_ASSERT(buffer, "BrgemmCopyB consumer must be Buffer"); + return buffer; + }; + + bool modified = false; + for (const auto& expr : linear_ir) { + if (auto copy_b = ov::as_type_ptr(expr->get_node())) { + const auto buffer = get_buffer_from_output(expr, 0); + const auto& out_desc = expr->get_output_port_descriptor(0); + buffer->set_allocation_shape(copy_b->get_data_repacking_shape(out_desc->get_shape())); + if (copy_b->is_with_compensations()) { + const auto compensations_buffer = get_buffer_from_output(expr, 1); + compensations_buffer->set_allocation_shape(copy_b->get_compensation_shape(out_desc->get_shape())); + } + modified = true; + } + } + return modified; +} diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/set_brgemm_copy_b_buffers_shape.hpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/set_brgemm_copy_b_buffers_shape.hpp new file mode 100644 index 00000000000000..fcac51286e00a6 --- /dev/null +++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/set_brgemm_copy_b_buffers_shape.hpp @@ -0,0 +1,27 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma 
once + +#include "snippets/lowered/pass/pass.hpp" + +namespace ov { +namespace intel_cpu { +namespace pass { + +/** + * @interface SetBrgemmCopyBBuffersShape + * @brief Sets the allocation shape for the Buffers after BrgemmCopyB node using BrgemmCopyB parameters + * @ingroup snippets + */ +class SetBrgemmCopyBBuffersShape: public snippets::lowered::pass::Pass { +public: + SetBrgemmCopyBBuffersShape() = default; + OPENVINO_RTTI("SetBrgemmCopyBBuffersShape", "Pass"); + bool run(snippets::lowered::LinearIR& linear_ir) override; +}; + +} // namespace pass +} // namespace intel_cpu +} // namespace ov diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/set_brgemm_cpu_blocking_params.cpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/set_brgemm_cpu_blocking_params.cpp index db6f34a4e746f1..df88ffa7edcd82 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/set_brgemm_cpu_blocking_params.cpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/set_brgemm_cpu_blocking_params.cpp @@ -22,18 +22,6 @@ namespace ov { namespace intel_cpu { -using namespace snippets::lowered; -namespace { -template -void change_desc_shape(const T& port) { - const auto desc = PortDescriptorUtils::get_port_descriptor_ptr(port); - const auto& shape = port.get_shape(); - if (desc->get_shape() != shape) { - desc->set_shape(shape); - } -} -} // namespace - pass::SetBrgemmCPUBlockingParams::SetBrgemmCPUBlockingParams() { MATCHER_SCOPE(SetBrgemmCPUBlockingParams); @@ -73,7 +61,7 @@ pass::SetBrgemmCPUBlockingParams::SetBrgemmCPUBlockingParams() { const bool isAMXSupported = dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx512_core_amx); const auto precision = brgemm_copy_b->get_src_element_type(); - const auto brgemmVNNIFactor = 4 / precision.size(); + const auto brgemmVNNIFactor = brgemm_copy_b->get_brgemm_vnni_factor(); const bool use_amx = isAMXSupported && precision != ov::element::f32 && (K % brgemmVNNIFactor == 0) && (N % brgemmVNNIFactor == 0); const size_t copy_b_block_size_k = use_amx ? 
brgemm_block_size_k : K; @@ -81,18 +69,8 @@ pass::SetBrgemmCPUBlockingParams::SetBrgemmCPUBlockingParams() { brgemm_copy_b->set_k_block_size(copy_b_block_size_k); brgemm_copy_b->set_n_block_size(copy_b_block_size_n); - // since N block size affects output shapes, the validation must be called explicitly right after the block size changing - brgemm_copy_b->validate_and_infer_types(); - change_desc_shape(brgemm_copy_b->output(0)); - if (brgemm_copy_b->is_with_compensations()) - change_desc_shape(brgemm_copy_b->output(1)); } - brgemm->validate_and_infer_types(); - change_desc_shape(brgemm->input(1)); - if (brgemm->is_with_scratchpad()) - change_desc_shape(brgemm->input(2)); - return false; }; diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/shape_inference.cpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/shape_inference.cpp index d09f3f218e67d9..6bb833262a516c 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/x64/shape_inference.cpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/shape_inference.cpp @@ -28,6 +28,8 @@ ShapeInferPtr CPUShapeInferSnippetsFactory::get_specific_op_shape_infer(const ov { OP::get_type_info_static(), [](const std::shared_ptr& n) { return std::make_shared();} } #define SHAPE_INFER_OP_SPECIFIC(OP) \ { OP::get_type_info_static(), [](const std::shared_ptr& n) { return std::make_shared(n);} } +#define SHAPE_INFER_OP_SPECIFIC_EXTERNAL(OP, InferType) \ + { OP::get_type_info_static(), [](const std::shared_ptr& n) { return std::make_shared(n);} } const CPUShapeInferSnippetsFactory::TRegistry CPUShapeInferSnippetsFactory::specific_ops_registry { SHAPE_INFER_PREDEFINED(ov::intel_cpu::FusedMulAdd, NumpyBroadcastShapeInfer), @@ -36,9 +38,9 @@ const CPUShapeInferSnippetsFactory::TRegistry CPUShapeInferSnippetsFactory::spec SHAPE_INFER_PREDEFINED(ov::intel_cpu::LoadConvertTruncation, PassThroughShapeInfer), SHAPE_INFER_PREDEFINED(ov::intel_cpu::StoreConvertSaturation, PassThroughShapeInfer), SHAPE_INFER_PREDEFINED(ov::intel_cpu::StoreConvertTruncation, PassThroughShapeInfer), + SHAPE_INFER_OP_SPECIFIC_EXTERNAL(ov::intel_cpu::BrgemmCPU, BrgemmShapeInfer), // SHAPE_INFER_OP_SPECIFIC(ov::intel_cpu::BrgemmCopyB), - SHAPE_INFER_OP_SPECIFIC(ov::intel_cpu::BrgemmCPU), }; #undef SHAPE_INFER_OP_SPECIFIC #undef SHAPE_INFER_PREDEFINED From 83c593be3b4feaec926d4bf70030044aeade87d3 Mon Sep 17 00:00:00 2001 From: Evgenya Nugmanova Date: Thu, 12 Oct 2023 17:54:52 +0400 Subject: [PATCH 173/257] Avoid dynamic rank in Snippets (#20387) * Avoid dynamic rank in Snippets * Update src/common/snippets/src/pass/collapse_subgraph.cpp Co-authored-by: Alexandra Sidorova --------- Co-authored-by: Alexandra Sidorova --- src/common/snippets/src/pass/collapse_subgraph.cpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/common/snippets/src/pass/collapse_subgraph.cpp b/src/common/snippets/src/pass/collapse_subgraph.cpp index 9acea3720e9550..35aa8f14308d99 100644 --- a/src/common/snippets/src/pass/collapse_subgraph.cpp +++ b/src/common/snippets/src/pass/collapse_subgraph.cpp @@ -179,7 +179,10 @@ auto is_supported_op(const std::shared_ptr &n) -> bool { auto has_supported_in_out(const std::shared_ptr &n) -> bool { auto supported = [&n](descriptor::Tensor& t) -> bool { - // Todo: int32 isn't supported in general because i32 emitters are required for bit-exact i32 calculations in some cases + // TODO [122585] Need to add dynamic rank support + if (t.get_partial_shape().rank().is_dynamic()) + return false; + // TODO [105804] int32 isn't 
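// ---------------------------------------------------------------------------
// [Editor's note] Illustrative sketch only, not part of the patch. The
// SHAPE_INFER_OP_SPECIFIC_EXTERNAL(OP, InferType) registry macro added above
// maps an operation's type_info to a factory lambda that builds an external
// shape-infer object from the node. For the BrgemmCPU entry it expands to
// roughly the following registry pair (template arguments are an assumption
// here, since the plain-text rendering of the patch dropped angle brackets):
//
//   { ov::intel_cpu::BrgemmCPU::get_type_info_static(),
//     [](const std::shared_ptr<ov::Node>& n) {
//         return std::make_shared<BrgemmShapeInfer>(n);
//     } }
// ---------------------------------------------------------------------------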
supported in general because i32 emitters are required for bit-exact i32 calculations in some cases // So i32 is supported exclusively for transposes and broadcast return TokenizeSnippets::get_supported_element_types().count(t.get_element_type()) != 0 || (t.get_element_type() == ov::element::i32 && From ccb43b77a7bda0b548ef3c8262d002ee138b59b5 Mon Sep 17 00:00:00 2001 From: Oleg Pipikin Date: Thu, 12 Oct 2023 17:15:34 +0200 Subject: [PATCH 174/257] Disable f16 LSTMCellTest and LSTMSequenceTest for Apple arm64 (#20422) --- .../functional/shared_tests_instances/skip_tests_config.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp index 857caffecb713a..3cf22ebff921a3 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp @@ -197,6 +197,9 @@ std::vector disabledTestPatterns() { retVector.emplace_back(R"(.*smoke_TensorIteratorCommon/TensorIteratorTest.Inference.*_modelType=f16_targetDevice=CPU.*)"); retVector.emplace_back(R"(.*smoke_CtcGreedyDecoderBasic/CTCGreedyDecoderLayerTest.Inference.*netPRC=f16.*trgDev=CPU.*)"); retVector.emplace_back(R"(.*CTCGreedyDecoderSeqLenLayerTest.Inference.*dataPRC=f16.*trgDev=CPU.*)"); + // Issue: 122177 + retVector.emplace_back(R"(.*smoke_LSTMCellCommon/LSTMCellTest.Inference.*_modelType=f16.*)"); + retVector.emplace_back(R"(.*smoke_LSTMSequenceCommonZeroClip/LSTMSequenceTest.Inference.*_modelType=f16.*)"); #endif #if defined(OPENVINO_ARCH_X86) From 6519afd4d32d5a974c90cc624815c563946c6583 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Thu, 12 Oct 2023 19:33:24 +0400 Subject: [PATCH 175/257] Removed check for openvini::runtime::c in samples (#20425) --- samples/cpp/CMakeLists.txt | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/samples/cpp/CMakeLists.txt b/samples/cpp/CMakeLists.txt index 5051bc742927e7..de884bb05d279c 100644 --- a/samples/cpp/CMakeLists.txt +++ b/samples/cpp/CMakeLists.txt @@ -214,9 +214,7 @@ macro(ov_add_sample) find_package(Threads REQUIRED) find_package(OpenVINO REQUIRED COMPONENTS Runtime) - - # Conan does not generate openvino::runtime::c target - if(c_sample AND TARGET openvino::runtime::c) + if(c_sample) set(ov_link_libraries openvino::runtime::c) else() set(ov_link_libraries openvino::runtime) From 29475c738eb0d648cbe8aab7e906cc2b01ba04a7 Mon Sep 17 00:00:00 2001 From: Katarzyna Mitrus Date: Thu, 12 Oct 2023 18:18:34 +0200 Subject: [PATCH 176/257] BitwiseElementwise ops init in MO (#20386) --- tools/mo/openvino/tools/mo/ops/elementwise.py | 24 ++++++++++ .../unit_tests/mo/utils/ir_reader/ops_test.py | 47 +++++++++++++++++++ 2 files changed, 71 insertions(+) diff --git a/tools/mo/openvino/tools/mo/ops/elementwise.py b/tools/mo/openvino/tools/mo/ops/elementwise.py index ede0084a3744e4..8a29181c7f1826 100644 --- a/tools/mo/openvino/tools/mo/ops/elementwise.py +++ b/tools/mo/openvino/tools/mo/ops/elementwise.py @@ -246,3 +246,27 @@ def operation(a): if np.issubdtype(a.dtype, np.signedinteger): return float32_array(a.astype(np.float32) ** 0.5) return a ** 0.5 + + +class BitwiseAnd(Elementwise): + op = 'BitwiseAnd' + op_type = 'BitwiseAnd' + version = 'opset13' + + +class BitwiseOr(Elementwise): + op = 'BitwiseOr' + op_type = 'BitwiseOr' + version = 'opset13' + + +class BitwiseXor(Elementwise): + op = 'BitwiseXor' + 
op_type = 'BitwiseXor' + version = 'opset13' + + +class BitwiseNot(UnaryElementwise): + op = 'BitwiseNot' + op_type = 'BitwiseNot' + version = 'opset13' diff --git a/tools/mo/unit_tests/mo/utils/ir_reader/ops_test.py b/tools/mo/unit_tests/mo/utils/ir_reader/ops_test.py index 2df88dab8bdbb7..62cd013ad23093 100644 --- a/tools/mo/unit_tests/mo/utils/ir_reader/ops_test.py +++ b/tools/mo/unit_tests/mo/utils/ir_reader/ops_test.py @@ -6,6 +6,7 @@ import numpy as np from pathlib import Path +import openvino.runtime.opset13 as opset13 import openvino.runtime.opset12 as opset12 import openvino.runtime.opset11 as opset11 import openvino.runtime.opset10 as opset10 @@ -245,3 +246,49 @@ def test_group_norm_12(self): self.assertEqual(gn_node["version"], "opset12") self.assertEqual(gn_node['num_groups'], 1) self.assertEqual(gn_node['epsilon'], 1e-06) + + def test_bitwise_and_13(self): + a = opset13.parameter([4, 1], name="A", dtype=np.int32) + b = opset13.parameter([1, 2], name="B", dtype=np.int32) + + op = opset13.bitwise_and(a, b) + model = Model(op, [a, b]) + graph = TestOps.check_graph_can_save(model, "bitwise_and_model") + op_node = graph.get_op_nodes(op="BitwiseAnd")[0] + self.assertListEqual(op_node.out_port(0).data.get_shape().tolist(), [4, 2]) + self.assertEqual(op_node["version"], "opset13") + self.assertEqual(op_node["auto_broadcast"], "numpy") + + def test_bitwise_or_13(self): + a = opset13.parameter([4, 1], name="A", dtype=np.int32) + b = opset13.parameter([1, 2], name="B", dtype=np.int32) + + op = opset13.bitwise_or(a, b) + model = Model(op, [a, b]) + graph = TestOps.check_graph_can_save(model, "bitwise_or_model") + op_node = graph.get_op_nodes(op="BitwiseOr")[0] + self.assertListEqual(op_node.out_port(0).data.get_shape().tolist(), [4, 2]) + self.assertEqual(op_node["version"], "opset13") + self.assertEqual(op_node["auto_broadcast"], "numpy") + + def test_bitwise_xor_13(self): + a = opset13.parameter([4, 1], name="A", dtype=np.int32) + b = opset13.parameter([1, 2], name="B", dtype=np.int32) + + op = opset13.bitwise_xor(a, b) + model = Model(op, [a, b]) + graph = TestOps.check_graph_can_save(model, "bitwise_xor_model") + op_node = graph.get_op_nodes(op="BitwiseXor")[0] + self.assertListEqual(op_node.out_port(0).data.get_shape().tolist(), [4, 2]) + self.assertEqual(op_node["version"], "opset13") + self.assertEqual(op_node["auto_broadcast"], "numpy") + + def test_bitwise_not_13(self): + a = opset13.parameter([4, 2], name="A", dtype=np.int32) + + op = opset13.bitwise_not(a) + model = Model(op, [a]) + graph = TestOps.check_graph_can_save(model, "bitwise_not_model") + op_node = graph.get_op_nodes(op="BitwiseNot")[0] + self.assertListEqual(op_node.out_port(0).data.get_shape().tolist(), [4, 2]) + self.assertEqual(op_node["version"], "opset13") From 74690d038b9a886345bc98da067cc090686c057b Mon Sep 17 00:00:00 2001 From: Irina Efode Date: Thu, 12 Oct 2023 21:01:04 +0400 Subject: [PATCH 177/257] [CONFORMANCE][TOOLS] Repeat pattern extractor API (#20293) * Prepare API * Refactor api * Move model comparation to separate component * Cover by tests * Move align_in_info to utils * Change arch diagram --- docs/img/subgraphs_dumper_arch_diaram.png | 4 +- .../include/cache/graph_cache.hpp | 9 +- .../include/cache/meta/input_info.hpp | 3 + .../include/cache/meta/meta_info.hpp | 5 +- .../include/matchers/single_op/manager.hpp | 2 + .../include/matchers/single_op/single_op.hpp | 3 + .../include/matchers/subgraph/fused_names.hpp | 4 +- .../include/matchers/subgraph/manager.hpp | 25 +- 
.../matchers/subgraph/repeat_pattern.hpp | 46 +- .../include/matchers/subgraph/subgraph.hpp | 41 +- .../subgraphs_dumper/include/utils/model.hpp | 70 ++-- .../include/utils/model_comparator.hpp | 68 +++ .../subgraphs_dumper/include/utils/node.hpp | 9 + .../subgraphs_dumper/src/cache/cache.cpp | 4 +- .../src/cache/graph_cache.cpp | 129 +++--- .../src/cache/meta/meta_info.cpp | 4 - .../src/matchers/single_op/convolutions.cpp | 7 +- .../src/matchers/single_op/manager.cpp | 8 +- .../src/matchers/single_op/single_op.cpp | 49 +-- .../src/matchers/subgraph/fused_names.cpp | 19 +- .../src/matchers/subgraph/manager.cpp | 114 +---- .../src/matchers/subgraph/repeat_pattern.cpp | 175 ++++++-- .../src/matchers/subgraph/subgraph.cpp | 76 ---- .../subgraphs_dumper/src/utils/model.cpp | 93 +++++ .../src/utils/model_comparator.cpp | 136 ++++++ .../subgraphs_dumper/src/utils/node.cpp | 148 ++++--- .../subgraphs_dumper/tests/cache/cache.cpp | 18 + .../tests/matchers/subgraph/fused_names.cpp | 3 +- .../tests/matchers/subgraph/manager.cpp | 71 +--- .../matchers/subgraph/repeat_pattern.cpp | 82 +++- .../tests/matchers/subgraph/subgraph.cpp | 394 +++++++++--------- .../tests/test_models/model_0.hpp | 19 + .../tests/test_models/model_1.hpp | 29 ++ .../tests/test_models/model_2.hpp | 13 +- .../subgraphs_dumper/tests/utils/model.cpp | 68 ++- .../tests/utils/model_comparator.cpp | 137 ++++++ 36 files changed, 1315 insertions(+), 770 deletions(-) create mode 100644 src/tests/functional/plugin/conformance/subgraphs_dumper/include/utils/model_comparator.hpp delete mode 100644 src/tests/functional/plugin/conformance/subgraphs_dumper/src/matchers/subgraph/subgraph.cpp create mode 100644 src/tests/functional/plugin/conformance/subgraphs_dumper/src/utils/model_comparator.cpp create mode 100644 src/tests/functional/plugin/conformance/subgraphs_dumper/tests/utils/model_comparator.cpp diff --git a/docs/img/subgraphs_dumper_arch_diaram.png b/docs/img/subgraphs_dumper_arch_diaram.png index a870c6ee0ad86b..7976674601780f 100644 --- a/docs/img/subgraphs_dumper_arch_diaram.png +++ b/docs/img/subgraphs_dumper_arch_diaram.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:caa4f76ba61548d1b60d7de1f78fb48dccbf5337117240353a9581f23c88bfa9 -size 216595 +oid sha256:45578db1c9ac5362340ea35fc8fa024e992c8beeb30e984d969ee80217c9031b +size 342214 diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/include/cache/graph_cache.hpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/include/cache/graph_cache.hpp index 130847a58ea8da..6bcedde7f9a114 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/include/cache/graph_cache.hpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/include/cache/graph_cache.hpp @@ -6,6 +6,7 @@ #include "cache/cache.hpp" #include "cache/meta/input_info.hpp" +#include "utils/model_comparator.hpp" #include "matchers/subgraph/manager.hpp" #include "matchers/subgraph/subgraph.hpp" #include "matchers/subgraph/fused_names.hpp" @@ -42,10 +43,12 @@ class GraphCache : public ICache { protected: std::map, MetaInfo> m_graph_cache; - ExtractorsManager m_manager = ExtractorsManager(); - static std::shared_ptr m_cache_instance; // cache byte size uint64_t m_graph_cache_bytesize = 0; + ExtractorsManager m_manager; + ModelComparator::Ptr m_model_comparator = ModelComparator::get(); + std::shared_ptr model_to_update = nullptr; + static std::shared_ptr m_cache_instance; GraphCache(const std::string& device = "") { ExtractorsManager::ExtractorsMap matchers = 
{ @@ -59,7 +62,7 @@ class GraphCache : public ICache { void update_cache(const std::shared_ptr& model, const std::string& model_path, - std::map& input_info, + const std::map& input_info, const std::string& extractor_name, size_t model_op_cnt); }; diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/include/cache/meta/input_info.hpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/include/cache/meta/input_info.hpp index 43e2b2a6356ed1..50f92af4f280f8 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/include/cache/meta/input_info.hpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/include/cache/meta/input_info.hpp @@ -59,6 +59,9 @@ struct InputInfo { } InputInfo operator=(const InputInfo& input_info) { + if (this->is_const != input_info.is_const) { + throw std::runtime_error("Cast Const to Parameter! Impossible to update Input Info!"); + } this->ranges = input_info.ranges; if (ov::shape_size(this->max_shape.get_max_shape()) < ov::shape_size(input_info.max_shape.get_max_shape())) { this->max_shape = input_info.max_shape; diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/include/cache/meta/meta_info.hpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/include/cache/meta/meta_info.hpp index 54625bfac52b39..971e101584cf66 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/include/cache/meta/meta_info.hpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/include/cache/meta/meta_info.hpp @@ -33,7 +33,10 @@ class MetaInfo { const std::string& extractor = "", const std::vector& ignored_inputs = {}); std::map get_input_info() const; - void set_input_info(const std::map& new_in_info) { input_info = new_in_info; }; + void set_input_info(const std::map& new_in_info) { + input_info.clear(); + input_info = new_in_info; + }; std::map get_model_info() const; std::string get_any_extractor() const { return *extractors.begin(); } diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/include/matchers/single_op/manager.hpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/include/matchers/single_op/manager.hpp index 8d8143d86250ba..f8b3540fa64302 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/include/matchers/single_op/manager.hpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/include/matchers/single_op/manager.hpp @@ -20,6 +20,8 @@ class MatchersManager { const std::shared_ptr &ref) const; void set_matchers(const MatchersMap& matchers = {}) { m_matchers = matchers; } + void set_shape_strict_match(bool shape_strict_match); + const MatchersMap& get_matchers() { return m_matchers; } iMatcherConfig::Ptr get_config(const std::shared_ptr &node) const; diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/include/matchers/single_op/single_op.hpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/include/matchers/single_op/single_op.hpp index d6ef95ad99864c..b86edfe06fa25d 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/include/matchers/single_op/single_op.hpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/include/matchers/single_op/single_op.hpp @@ -21,6 +21,7 @@ class SingleOpMatcher { const std::shared_ptr &ref) const; iMatcherConfig::Ptr get_config(const std::shared_ptr &node) const; + void set_strict_shape_match(bool strict_shape_match); protected: virtual void configure(const pugi::xml_document &cfg) {}; @@ -35,6 +36,8 @@ class SingleOpMatcher { const std::shared_ptr 
&ref) const; std::vector default_configs; + // match only shape ranks by default; + bool is_strict_shape_match = false; }; } // namespace subgraph_dumper diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/include/matchers/subgraph/fused_names.hpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/include/matchers/subgraph/fused_names.hpp index 5df31c77baaa0c..d9a78fe51220ce 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/include/matchers/subgraph/fused_names.hpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/include/matchers/subgraph/fused_names.hpp @@ -17,9 +17,7 @@ class FusedNamesExtractor final : public SubgraphExtractor { FusedNamesExtractor(const std::string& device = ""); ~FusedNamesExtractor(); - std::list extract(const std::shared_ptr &model, - bool is_extract_body = true, - bool is_copy_constants = true) override; + std::vector extract(const std::shared_ptr &modele) override; protected: std::unordered_set extract_compiled_model_names(const std::shared_ptr& model); diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/include/matchers/subgraph/manager.hpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/include/matchers/subgraph/manager.hpp index 8634585cf1a2ce..5e960e2d3197aa 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/include/matchers/subgraph/manager.hpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/include/matchers/subgraph/manager.hpp @@ -12,38 +12,19 @@ namespace subgraph_dumper { class ExtractorsManager { public: - // { model, subgraph, model_in_info, subgraph_in_info } - using ExtractedSubgraphTuple = std::tuple, std::shared_ptr, std::map, std::map>; using ExtractorsMap = std::map; explicit ExtractorsManager(const ExtractorsMap& extractors = {}) : m_extractors(extractors) {} - bool match(const std::shared_ptr &model, - const std::shared_ptr &ref_model, - std::map &in_info, - const std::map &in_info_ref); - ExtractedSubgraphTuple is_subgraph(const std::shared_ptr &model, - const std::shared_ptr &ref_model, - const std::map &in_info = {}, - const std::map &in_info_ref = {}); - std::list extract(const std::shared_ptr &model, - bool is_extract_body = true, - bool is_copy_constants = true); + std::vector extract(const std::shared_ptr &model, + bool is_extract_body = true, + bool is_copy_constants = true); void set_extractors(const ExtractorsMap& extractors = {}) { m_extractors = extractors; } ExtractorsMap get_extractors() { return m_extractors; } - std::map align_input_info(const std::shared_ptr& model, - const std::shared_ptr& model_ref, - const std::map &in_info, - const std::map &in_info_ref, - const std::map &matched_op = {}); - protected: ExtractorsMap m_extractors = {}; - - bool match(const std::shared_ptr &model, - const std::shared_ptr &ref); }; } // namespace subgraph_dumper diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/include/matchers/subgraph/repeat_pattern.hpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/include/matchers/subgraph/repeat_pattern.hpp index 874ed35be83662..f38ebfe507f5b7 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/include/matchers/subgraph/repeat_pattern.hpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/include/matchers/subgraph/repeat_pattern.hpp @@ -5,31 +5,45 @@ #pragma once #include + #include "matchers/subgraph/subgraph.hpp" -#include "matchers/single_op/single_op.hpp" -#include "matchers/single_op/convolutions.hpp" -#include 
"matchers/single_op/manager.hpp" namespace ov { namespace tools { namespace subgraph_dumper { class RepeatPatternExtractor final : public SubgraphExtractor { +private: + using InputVector = std::vector>; + using OutputVector = std::vector>; + public: - RepeatPatternExtractor() { - MatchersManager::MatchersMap matchers = { - { "generic_single_op", SingleOpMatcher::Ptr(new SingleOpMatcher) }, - { "convolutions", ConvolutionsMatcher::Ptr(new ConvolutionsMatcher) }, - }; - manager.set_matchers(matchers); - } - - std::list extract(const std::shared_ptr &model, - bool is_extract_body = true, - bool is_copy_constants = true) override; + using PatternBorders = std::pair; + ModelComparator::Ptr model_comparator = ModelComparator::get(); + + std::vector> + get_repeat_pattern_borders(const std::shared_ptr &model); + std::vector> + get_repeat_node_vectors(const std::shared_ptr &model); + + void set_recursive_extraction(bool _is_recursive_extraction); + std::vector extract(const std::shared_ptr &model) override; + +protected: + // {subgraph, node_vector, input_info} + using ExtractedRepeatPattern = std::tuple, ov::NodeVector, std::map>; + bool is_recursive_extraction = true; + + std::list> + find_repeat_patterns(const std::shared_ptr &model, + bool is_save_borders_only = false); + void update_extractor_cache(std::list>& extracted_patterns, + std::list>& secondary_extracted_patterns); + void update_extractor_cache(std::list>& extracted_patterns, + const std::shared_ptr& pattern, + const ov::NodeVector& pattern_node_vector, + const std::map& in_info); -private: - MatchersManager manager; }; } // namespace subgraph_dumper diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/include/matchers/subgraph/subgraph.hpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/include/matchers/subgraph/subgraph.hpp index 5f7dd9d8204b25..04e3b08ee9885f 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/include/matchers/subgraph/subgraph.hpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/include/matchers/subgraph/subgraph.hpp @@ -7,12 +7,8 @@ #include #include "openvino/op/util/op_types.hpp" -#include "common_test_utils/graph_comparator.hpp" -#include "cache/meta/input_info.hpp" -#include "matchers/single_op/single_op.hpp" -#include "matchers/single_op/convolutions.hpp" -#include "matchers/single_op/manager.hpp" +#include "utils/model_comparator.hpp" namespace ov { namespace tools { @@ -20,44 +16,19 @@ namespace subgraph_dumper { class SubgraphExtractor { public: - // { is_subgraph, model, subgraph, matched_ops{ model_op_name, graph_op_name }} - using IsSubgraphTuple = std::tuple, std::shared_ptr, std::map>; using Ptr = std::shared_ptr; - SubgraphExtractor() { - MatchersManager::MatchersMap matchers = { - { "generic_single_op", SingleOpMatcher::Ptr(new SingleOpMatcher) }, - { "convolutions", ConvolutionsMatcher::Ptr(new ConvolutionsMatcher) }, - }; - m_manager.set_matchers(matchers); + virtual std::vector extract(const std::shared_ptr &model) { + return std::vector{}; } - bool match(const std::shared_ptr &model, - const std::shared_ptr &ref_model) const; - IsSubgraphTuple is_subgraph(const std::shared_ptr &model, - const std::shared_ptr &ref_model) const; - - virtual std::list extract(const std::shared_ptr &model, - bool is_extract_body = true, - bool is_copy_constants = true) { - return std::list{}; - }; - void set_extractor_name(const std::string& _extractor_name) { extractor_name = _extractor_name; } + void set_extract_body(bool _is_extract_body) { 
is_extract_body = _is_extract_body; } + void set_save_const(bool _is_save_const) { is_save_const = _is_save_const; } protected: std::string extractor_name = ""; - FunctionsComparator comparator = FunctionsComparator::no_default() - .enable(FunctionsComparator::ATTRIBUTES) - .enable(FunctionsComparator::NODES) - .enable(FunctionsComparator::PRECISIONS); - MatchersManager m_manager = MatchersManager(); - - inline bool is_node_to_skip(const std::shared_ptr& node) const { - return ov::op::util::is_parameter(node) || - ov::op::util::is_constant(node) || - ov::op::util::is_output(node); - } + bool is_extract_body = true, is_save_const = true; }; } // namespace subgraph_dumper diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/include/utils/model.hpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/include/utils/model.hpp index 61ce8ce8d7a637..8b48b2074df711 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/include/utils/model.hpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/include/utils/model.hpp @@ -33,7 +33,7 @@ static std::vector FROTEND_REGEXP = { std::regex(R"(.*__model__)"), #endif #ifdef ENABLE_OV_TF_FRONTEND - std::regex(R"(.*\.pb)"), + std::regex(R"(.*\model.pb)"), #endif #ifdef ENABLE_OV_IR_FRONTEND std::regex(R"(.*\.xml)"), @@ -74,32 +74,24 @@ std::map> cache_models( void save_model_status_to_file(const std::map>& caching_status, const std::string& output_dir); -inline bool is_dynamic_model(const std::shared_ptr& model) { - for (const auto& parameter : model->get_parameters()) { - if (is_dynamic_node(parameter)) { - return true; - } - } - for (const auto& result : model->get_results()) { - if (is_dynamic_node(result)) { - return true; - } - } - return false; -} +bool is_dynamic_model(const std::shared_ptr& model); +std::string get_model_type(const std::shared_ptr& model); -inline std::string get_model_type(const std::shared_ptr& model) { - if (is_dynamic_model(model)) { - return "dynamic"; - } - return "static"; -} +std::map +get_input_info_by_model(const std::shared_ptr& model); -inline ExtractedPattern -generate_model(const std::set>& nodes, +std::map +align_input_info(const std::shared_ptr& model, + const std::shared_ptr& model_ref, + const std::map &in_info, + const std::map &in_info_ref, + const std::map &matched_op = {}); + +inline std::pair, std::map> +generate_model(ov::NodeVector& nodes, std::unordered_set& checked_ops, - const std::string& extractor_name, - bool is_copy_constants = true) { + bool is_copy_constants = true, + bool is_save_only_borders = false) { // map to recover graph using cloned nodes and original connections // { original_node_name, cloned_node } std::unordered_map> cloned_node_map; @@ -214,27 +206,51 @@ generate_model(const std::set>& nodes, // prepare unique model name based on operations from model std::string string_to_hash; for (const auto& op : model->get_ordered_ops()) { + bool is_erase_node = !is_save_only_borders; std::ostringstream result; result << op->get_type_info(); - for (const auto& in : op->inputs()) { + for (size_t i = 0; i < op->inputs().size(); ++i) { + const auto& in = op->input(i); + if (!is_node_to_skip(op->get_input_node_shared_ptr(i))) { + is_erase_node |= true; + } result << in.get_element_type(); result << in.get_partial_shape().rank(); result << in.get_partial_shape().is_static(); } for (const auto& out : op->outputs()) { + for (const auto& target_input : out.get_target_inputs()) { + if (!is_node_to_skip(target_input.get_node()->shared_from_this())) { + 
is_erase_node |= true; + break; + } + } result << out.get_element_type(); result << out.get_partial_shape().rank(); result << out.get_partial_shape().is_static(); } string_to_hash += result.str(); + if (is_erase_node) { + cloned_node_map.erase(op->get_friendly_name()); + } } for (const auto& in : model_input_info) { string_to_hash += (in.second.is_const ? "1" : "0"); } auto h1 = std::hash{}(string_to_hash); model->set_friendly_name(std::to_string(h1)); - - return { model, model_input_info, extractor_name }; + { + auto it = nodes.begin(); + while (it != nodes.end()) { + if (cloned_node_map.count((*it)->get_friendly_name())) { + nodes.erase(it); + } else { + ++it; + } + } + } + + return { model, model_input_info }; } } // namespace subgraph_dumper diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/include/utils/model_comparator.hpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/include/utils/model_comparator.hpp new file mode 100644 index 00000000000000..e3388fe9438ada --- /dev/null +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/include/utils/model_comparator.hpp @@ -0,0 +1,68 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "matchers/single_op/single_op.hpp" +#include "matchers/single_op/convolutions.hpp" +#include "matchers/single_op/manager.hpp" + +namespace ov { +namespace tools { +namespace subgraph_dumper { + +class ModelComparator { +public: + using Ptr = std::shared_ptr; + // { is_match, subgraph, graph, matched_nodes -> {subgraph_op_name, graph_op_name}} + using IsSubgraphTuple = std::tuple, std::shared_ptr, std::map>; + // { model, subgraph, graph, subgraph_in_info, model_in_info, } + using ExtractedSubgraphTuple = std::tuple, std::shared_ptr, std::map, std::map>; + + static std::shared_ptr get(bool in_is_match_shapes = false) { + if (m_instance == nullptr) { + m_instance = std::shared_ptr(new ModelComparator); + } + return m_instance; + } + + IsSubgraphTuple is_subgraph(const std::shared_ptr &model, + const std::shared_ptr &ref_model) const; + + bool match(const std::shared_ptr &node, + const std::shared_ptr &ref_node) const; + bool match(const std::shared_ptr &model, + const std::shared_ptr &ref_model) const; + + std::pair> + match(const std::shared_ptr &model, + const std::shared_ptr &ref_model, + const std::map &in_info, + const std::map &in_info_ref); + ExtractedSubgraphTuple + is_subgraph(const std::shared_ptr &model, + const std::shared_ptr &ref_model, + const std::map &in_info, + const std::map &in_info_ref); + + void set_match_coefficient(float _match_coefficient); + void set_shape_strict_match(bool is_shape_strict_match); + +protected: + MatchersManager m_manager = MatchersManager(); + float match_coefficient = 0.9f; + static std::shared_ptr m_instance; + + ModelComparator() { + MatchersManager::MatchersMap matchers = { + { "generic_single_op", SingleOpMatcher::Ptr(new SingleOpMatcher) }, + { "convolutions", ConvolutionsMatcher::Ptr(new ConvolutionsMatcher) }, + }; + m_manager.set_matchers(matchers); + } +}; + +} // namespace subgraph_dumper +} // namespace tools +} // namespace ov diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/include/utils/node.hpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/include/utils/node.hpp index c679707bf5b3ae..928ebd36935345 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/include/utils/node.hpp +++ 
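// ---------------------------------------------------------------------------
// [Editor's note] Illustrative sketch only, not part of the patch. The new
// ModelComparator singleton declared above centralizes the model-vs-model
// matching that GraphCache and the extractors previously routed through
// ExtractorsManager. A possible usage (model_a/model_b are placeholders):
#include <memory>
#include <tuple>

#include "utils/model_comparator.hpp"

bool models_match(const std::shared_ptr<ov::Model>& model_a,
                  const std::shared_ptr<ov::Model>& model_b) {
    auto comparator = ov::tools::subgraph_dumper::ModelComparator::get();
    comparator->set_shape_strict_match(false);   // compare shapes by rank only
    if (comparator->match(model_a, model_b))
        return true;
    // otherwise check whether one model is a subgraph of the other;
    // the first tuple element of IsSubgraphTuple is the match flag
    return std::get<0>(comparator->is_subgraph(model_a, model_b));
}
// ---------------------------------------------------------------------------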
b/src/tests/functional/plugin/conformance/subgraphs_dumper/include/utils/node.hpp @@ -26,6 +26,9 @@ inline InputInfo::Range get_const_ranges(const std::shared_ptr(min), static_cast(max)); } +InputInfo::Range get_const_ranges(const std::shared_ptr& const_node, + ov::element::Type elem_type); + std::map get_input_info_by_node(const std::shared_ptr& node); // replace all input node by parameters and constants instead of non input mode types @@ -111,6 +114,12 @@ inline size_t get_node_priority_by_version(const std::shared_ptr& node return priority; } + +inline bool is_node_to_skip(const std::shared_ptr& node) { + return ov::op::util::is_parameter(node) || + ov::op::util::is_constant(node) || + ov::op::util::is_output(node); +} } // namespace subgraph_dumper } // namespace tools diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/src/cache/cache.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/src/cache/cache.cpp index 38df8575dd730b..0938de3383667b 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/src/cache/cache.cpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/src/cache/cache.cpp @@ -42,8 +42,8 @@ bool ICache::serialize_model(const std::pair, MetaInf meta.serialize(meta_path); return true; } catch (std::exception &e) { - // std::cout << "[ ERROR ] Failed to serialize model: " << model_name - // << ". Exception: " << e.what() << std::endl; + std::cout << "[ ERROR ] Failed to serialize model: " << model_name + << ". Exception: " << e.what() << std::endl; ov::test::utils::removeFile(xml_path); ov::test::utils::removeFile(bin_path); ov::test::utils::removeFile(meta_path); diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/src/cache/graph_cache.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/src/cache/graph_cache.cpp index 1273f5cc342d1b..51ee4835419c15 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/src/cache/graph_cache.cpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/src/cache/graph_cache.cpp @@ -56,77 +56,92 @@ void GraphCache::update_cache(const std::shared_ptr& model, return; } while (!extracted_patterns.empty()) { - auto it = *extracted_patterns.begin(); + auto it = *extracted_patterns.rbegin(); update_cache(std::get<0>(it), model_meta_data, std::get<1>(it), std::get<2>(it), model_total_op); - extracted_patterns.pop_front(); + extracted_patterns.pop_back(); } } } void GraphCache::update_cache(const std::shared_ptr& extracted_model, const std::string& model_path, - std::map& input_info, + const std::map& input_info, const std::string& extractor_name, size_t model_op_cnt) { auto graph_name = extracted_model->get_friendly_name(); auto this_op_cnt = extracted_model->get_ops().size() - extracted_model->get_parameters().size() - extracted_model->get_results().size(); - std::string serialized_model_path = ""; - for (const auto& extractor : m_manager.get_extractors()) { - auto tmp_serialized_model_path = ov::util::path_join({ m_serialization_dir, m_cache_subdir, extractor.first, graph_name + ".xml" }); - if (ov::util::file_exists(serialized_model_path)) { - serialized_model_path = tmp_serialized_model_path; - break; + std::map updated_input_info; + if (!m_graph_cache.empty() && model_to_update != nullptr) { + auto comparator_res = m_model_comparator->match(extracted_model, model_to_update, + input_info, m_graph_cache.at(model_to_update).get_input_info()); + if (comparator_res.first) { + updated_input_info = comparator_res.second; + } else { + 
model_to_update = nullptr; } } - std::shared_ptr model_to_update = nullptr; - // if cached model was serialized - if (!serialized_model_path.empty()) { - // std::cout << "[ GRAPH CACHE ][ INFO ] Reading cached model: " << serialized_model_path << std::endl; - auto bin_path = ov::test::utils::replaceExt(serialized_model_path, ".bin"); - auto meta_path = ov::test::utils::replaceExt(serialized_model_path, ".meta"); - auto cached_model = ov::test::utils::PluginCache::get().core()->read_model(serialized_model_path); - auto cached_meta = MetaInfo::read_meta_from_file(meta_path); + if (model_to_update == nullptr) { + std::string serialized_model_path = ""; + for (const auto& extractor : m_manager.get_extractors()) { + auto tmp_serialized_model_path = ov::util::path_join({ m_serialization_dir, m_cache_subdir, extractor.first, graph_name + ".xml" }); + if (ov::util::file_exists(serialized_model_path)) { + serialized_model_path = tmp_serialized_model_path; + break; + } + } + // if cached model was serialized + if (!serialized_model_path.empty()) { + // std::cout << "[ GRAPH CACHE ][ INFO ] Reading cached model: " << serialized_model_path << std::endl; + auto bin_path = ov::test::utils::replaceExt(serialized_model_path, ".bin"); + auto meta_path = ov::test::utils::replaceExt(serialized_model_path, ".meta"); + auto cached_model = ov::test::utils::PluginCache::get().core()->read_model(serialized_model_path); + auto cached_meta = MetaInfo::read_meta_from_file(meta_path); - ov::test::utils::removeFile(serialized_model_path); - ov::test::utils::removeFile(bin_path); - ov::test::utils::removeFile(meta_path); + ov::test::utils::removeFile(serialized_model_path); + ov::test::utils::removeFile(bin_path); + ov::test::utils::removeFile(meta_path); - m_graph_cache.insert({ cached_model, cached_meta }); - m_graph_cache_bytesize += cached_model->get_graph_size(); + m_graph_cache.insert({ cached_model, cached_meta }); + m_graph_cache_bytesize += cached_model->get_graph_size(); - if (m_manager.match(extracted_model, cached_model, - input_info, cached_meta.get_input_info())) { - model_to_update = cached_model; - } - } else { - for (const auto& cached_model : m_graph_cache) { - if (m_manager.match(extracted_model, cached_model.first, - input_info, cached_model.second.get_input_info())) { - model_to_update = cached_model.first; - break; - } else { - auto is_subgraph = m_manager.is_subgraph(extracted_model, cached_model.first, - input_info, cached_model.second.get_input_info()); - // in case if one model is subgraph of other to update model meta info and remove subgraph from cache - if (std::get<0>(is_subgraph)) { - std::shared_ptr graph, subgraph; - std::map graph_in_info, subgraph_in_info; - std::tie(std::ignore, graph, subgraph, graph_in_info, subgraph_in_info) = is_subgraph; - if (subgraph == cached_model.first) { - auto meta = m_graph_cache[subgraph]; - meta.set_input_info(graph_in_info); - m_graph_cache.erase(subgraph); - m_graph_cache.insert({graph, meta}); - m_graph_cache_bytesize += (graph->get_graph_size() - subgraph->get_graph_size()); + auto comparator_res = m_model_comparator->match(extracted_model, cached_model, + input_info, cached_meta.get_input_info()); + if (comparator_res.first) { + model_to_update = cached_model; + updated_input_info = comparator_res.second; + } + } else { + for (const auto& cached_model : m_graph_cache) { + auto comparator_res = m_model_comparator->match(extracted_model, cached_model.first, + input_info, cached_model.second.get_input_info()); + if (comparator_res.first) { + 
model_to_update = cached_model.first; + updated_input_info = comparator_res.second; + break; + } else { + auto is_subgraph = m_model_comparator->is_subgraph(extracted_model, cached_model.first, + input_info, cached_model.second.get_input_info()); + // in case if one model is subgraph of other to update model meta info and remove subgraph from cache + if (std::get<0>(is_subgraph)) { + std::shared_ptr graph, subgraph; + std::map graph_in_info, subgraph_in_info; + std::tie(std::ignore, subgraph, graph, subgraph_in_info, graph_in_info) = is_subgraph; + if (subgraph == cached_model.first) { + auto meta = m_graph_cache[subgraph]; + meta.set_input_info(graph_in_info); + m_graph_cache.erase(subgraph); + m_graph_cache.insert({graph, meta}); + m_graph_cache_bytesize += (graph->get_graph_size() - subgraph->get_graph_size()); + } + m_graph_cache[cached_model.first].update(model_path, + subgraph_in_info, + model_op_cnt, + this_op_cnt, + extractor_name); + return; } - m_graph_cache[cached_model.first].update(model_path, - subgraph_in_info, - model_op_cnt, - this_op_cnt, - extractor_name); - return; } } } @@ -134,18 +149,22 @@ void GraphCache::update_cache(const std::shared_ptr& extracted_model, if (model_to_update == nullptr) { MetaInfo meta = MetaInfo(model_path, input_info, model_op_cnt, this_op_cnt, extractor_name); - m_graph_cache.insert({ extracted_model, meta }); + model_to_update = extracted_model; + m_graph_cache.insert({ model_to_update, meta }); m_graph_cache_bytesize += extracted_model->get_graph_size(); return; } - m_graph_cache[model_to_update].update(model_path, input_info, model_op_cnt, this_op_cnt, extractor_name); + m_graph_cache[model_to_update].update(model_path, updated_input_info, model_op_cnt, this_op_cnt, extractor_name); auto cached_model_size = model_to_update->get_graph_size(); auto pattern_model_size = extracted_model->get_graph_size(); if (pattern_model_size < cached_model_size) { m_graph_cache_bytesize -= (cached_model_size - pattern_model_size); auto meta = m_graph_cache[model_to_update]; + auto new_in_info = align_input_info(model_to_update, extracted_model, m_graph_cache.at(model_to_update).get_input_info(), input_info); + meta.set_input_info(new_in_info); m_graph_cache.erase(model_to_update); - m_graph_cache.insert({extracted_model, meta}); + model_to_update = extracted_model; + m_graph_cache.insert({model_to_update, meta}); } } diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/src/cache/meta/meta_info.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/src/cache/meta/meta_info.cpp index 68213db323c7a2..ab8cc56c92e4de 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/src/cache/meta/meta_info.cpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/src/cache/meta/meta_info.cpp @@ -169,7 +169,6 @@ void MetaInfo::update(const std::string& _model_path, size_t _this_op_cnt, const std::string& extractor, const std::vector& ignored_inputs) { - bool is_update_in_info = true; if (input_info.size() != _input_info.size()) { throw std::runtime_error("Incompatible input info!"); } @@ -193,9 +192,6 @@ void MetaInfo::update(const std::string& _model_path, if (!extractor.empty()) { extractors.insert(extractor); } - if (!is_update_in_info) { - return; - } for (const auto& in : _input_info) { if (std::find(ignored_inputs.begin(), ignored_inputs.end(), in.first) != ignored_inputs.begin()) { continue; diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/src/matchers/single_op/convolutions.cpp 
b/src/tests/functional/plugin/conformance/subgraphs_dumper/src/matchers/single_op/convolutions.cpp index 30b99c580df314..3913f98fe5e9cd 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/src/matchers/single_op/convolutions.cpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/src/matchers/single_op/convolutions.cpp @@ -50,8 +50,11 @@ bool ConvolutionsMatcher::match_inputs(const std::shared_ptr &node, bool has_groups = std::dynamic_pointer_cast(node) || std::dynamic_pointer_cast(node); size_t kernel_size_offset = has_groups ? 3 : 2; - auto ref_weights_shape = ref->get_input_tensor(1).get_shape(); - auto cur_weights_shape = node->get_input_tensor(1).get_shape(); + auto ref_weights_shape = ref->get_input_partial_shape(1).get_shape(); + auto cur_weights_shape = node->get_input_partial_shape(1).get_shape(); + if (is_strict_shape_match && ref_weights_shape != cur_weights_shape) { + return false; + } const auto ref_kernel_size = std::vector(ref_weights_shape.begin() + kernel_size_offset, ref_weights_shape.end()); const auto cur_kernel_size = std::vector(cur_weights_shape.begin() + kernel_size_offset, diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/src/matchers/single_op/manager.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/src/matchers/single_op/manager.cpp index 675d808d92b42c..334128e65426ac 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/src/matchers/single_op/manager.cpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/src/matchers/single_op/manager.cpp @@ -17,9 +17,15 @@ iMatcherConfig::Ptr MatchersManager::get_config(const std::shared_ptr return nullptr; } +void MatchersManager::set_shape_strict_match(bool shape_strict_match) { + for (const auto& matcher : m_matchers) { + matcher.second->set_strict_shape_match(shape_strict_match); + } +} + bool MatchersManager::match(const std::shared_ptr &node, const std::shared_ptr &ref) const { - for (const auto &it : m_matchers) { + for (const auto& it : m_matchers) { if (it.second->match(node, ref)) { return true; } diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/src/matchers/single_op/single_op.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/src/matchers/single_op/single_op.cpp index 3e0abda2a936e9..dc81db171f8b99 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/src/matchers/single_op/single_op.cpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/src/matchers/single_op/single_op.cpp @@ -7,6 +7,7 @@ #include "openvino/op/util/op_types.hpp" #include "common_test_utils/graph_comparator.hpp" #include "matchers/single_op/single_op.hpp" +#include "utils/node.hpp" using namespace ov::tools::subgraph_dumper; @@ -24,6 +25,10 @@ iMatcherConfig::Ptr SingleOpMatcher::get_config(const std::shared_ptr return std::make_shared>(); } +void SingleOpMatcher::set_strict_shape_match(bool strict_shape_match) { + is_strict_shape_match = strict_shape_match; +} + bool SingleOpMatcher::match_inputs(const std::shared_ptr &node, const std::shared_ptr &ref) const { if (node->get_input_size() != ref->get_input_size()) { @@ -35,21 +40,17 @@ bool SingleOpMatcher::match_inputs(const std::shared_ptr &node, if (std::find(ignored_ports.begin(), ignored_ports.end(), port_id) != ignored_ports.end()) { continue; } - if (!ov::op::util::is_parameter(node) && !ov::op::util::is_parameter(ref) && - !ov::op::util::is_constant(node) && !ov::op::util::is_constant(ref)) { - const auto &cur_node_input_type = 
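// The convolution matcher above keys the kernel-size comparison off the
// weights layout (grouped convolutions carry an extra leading group dimension,
// so the kernel dims start at index 3 instead of 2), and the manager simply
// fans the strict-shape flag out to every registered matcher.  A minimal
// sketch of the strict vs. rank-only shape check those matchers share,
// assuming a plain vector of dimensions instead of ov::PartialShape
// (-1 marks a dynamic dimension):
#include <algorithm>
#include <cstdint>
#include <vector>

using DimVector = std::vector<int64_t>;

bool shapes_compatible(const DimVector& lhs, const DimVector& rhs, bool strict_shape_match) {
    if (strict_shape_match) {
        return lhs == rhs;                               // exact, dimension by dimension
    }
    if (lhs.size() != rhs.size()) {                      // rank must always agree
        return false;
    }
    auto is_dynamic = [](const DimVector& s) {
        return std::any_of(s.begin(), s.end(), [](int64_t d) { return d < 0; });
    };
    return is_dynamic(lhs) == is_dynamic(rhs);           // static/dynamic status must agree
}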
node->input_value(port_id).get_node_shared_ptr()->get_type_info(); - const auto &ref_node_input_type = ref->input_value(port_id).get_node_shared_ptr()->get_type_info(); - if (cur_node_input_type != ref_node_input_type) { - return false; - } - } - if (node->get_input_tensor(port_id).get_partial_shape().rank() != ref->get_input_tensor(port_id).get_partial_shape().rank()) { + if (node->get_input_element_type(port_id) != ref->get_input_element_type(port_id)) { return false; } - if (node->get_input_tensor(port_id).get_element_type() != ref->get_input_tensor(port_id).get_element_type()) { + const auto& partial_shape = node->get_input_partial_shape(port_id); + const auto& ref_partial_shape = ref->get_input_partial_shape(port_id); + if (is_strict_shape_match && partial_shape != ref_partial_shape) { + return false; + } else if (partial_shape.rank() != ref_partial_shape.rank()) { return false; } - if (node->get_input_partial_shape(port_id).is_dynamic() != ref->get_input_partial_shape(port_id).is_dynamic()) { + if (partial_shape.is_dynamic() != ref_partial_shape.is_dynamic()) { return false; } } @@ -63,20 +64,18 @@ SingleOpMatcher::match_outputs(const std::shared_ptr &node, return false; } for (size_t port_id = 0; port_id < node->get_output_size(); ++port_id) { - if (!ov::op::util::is_output(node) && !ov::op::util::is_output(ref)) { - const auto &cur_node_out_type = node->output(port_id).get_node_shared_ptr()->get_type_info(); - const auto &ref_node_out_type = ref->output(port_id).get_node_shared_ptr()->get_type_info(); - if (cur_node_out_type != ref_node_out_type) { - return false; - } - } - if (node->get_output_tensor(port_id).get_element_type() != ref->get_output_tensor(port_id).get_element_type()) { + if (node->get_output_element_type(port_id) != ref->get_output_element_type(port_id)) { return false; } - if (node->get_output_tensor(port_id).get_partial_shape().is_dynamic() != ref->get_output_tensor(port_id).get_partial_shape().is_dynamic()) { + + const auto& partial_shape = node->get_output_partial_shape(port_id); + const auto& ref_partial_shape = ref->get_output_partial_shape(port_id); + if (partial_shape.is_dynamic() != ref_partial_shape.is_dynamic()) { return false; } - if (node->get_output_tensor(port_id).get_partial_shape().rank()!= ref->get_output_tensor(port_id).get_partial_shape().rank()) { + if (is_strict_shape_match && partial_shape != ref_partial_shape) { + return false; + } else if (partial_shape.rank() != ref_partial_shape.rank()) { return false; } } @@ -98,17 +97,16 @@ bool SingleOpMatcher::match(const std::shared_ptr &node, if (cfg->ignore_matching) { return false; } - if (!same_op_type(node, ref)) { return false; } if (!match_inputs(node, ref)) { return false; } - if (!match_attrs(node, ref) && !ov::op::util::is_parameter(node) && !ov::op::util::is_parameter(ref)) { + if (!match_outputs(node, ref)) { return false; } - if (!match_outputs(node, ref)) { + if (!match_attrs(node, ref) && !is_node_to_skip(node)) { return false; } return true; @@ -121,9 +119,6 @@ bool SingleOpMatcher::same_op_type(const std::shared_ptr &node, SingleOpMatcher::SingleOpMatcher() { default_configs = { - // std::make_shared>(std::vector{}, std::vector{0}), - // std::make_shared>(std::vector{}, - // std::vector{0, 1, 2, 3, 4}), std::make_shared -FusedNamesExtractor::extract(const std::shared_ptr &model, - bool is_extract_body, - bool is_copy_constants) { +std::vector +FusedNamesExtractor::extract(const std::shared_ptr &model) { auto compiled_op_name = extract_compiled_model_names(model); - std::list 
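// After the rework above, SingleOpMatcher::match is a short-circuit chain:
// cheap structural checks (op type, input/output element types and shapes)
// run first, and the attribute comparison runs last and is forgiven for
// Parameter/Constant/Result-like "skip" nodes.  A toy sketch of that ordering
// with a simplified node type (assumed, not the real ov::Node API):
#include <string>

struct ToyNode {
    std::string type;
    std::string shape_signature;   // stands in for input/output types and shapes
    std::string attrs;
    bool skip_attrs = false;       // true for Parameter/Constant/Result-like ops
};

bool match_node(const ToyNode& node, const ToyNode& ref) {
    if (node.type != ref.type)                        return false;
    if (node.shape_signature != ref.shape_signature)  return false;
    if (node.attrs != ref.attrs && !node.skip_attrs)  return false;
    return true;
}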
matched_patterns; + std::vector matched_patterns; std::unordered_set checked_ops; - std::set> nodes; + ov::NodeVector nodes; for (const auto& op : model->get_ordered_ops()) { auto op_name = op->get_friendly_name(); if (is_node_to_skip(op) || checked_ops.count(op_name)) { @@ -71,7 +70,8 @@ FusedNamesExtractor::extract(const std::shared_ptr &model, } if (compiled_op_name.count(op_name)) { try { - matched_patterns.push_back(generate_model(nodes, checked_ops, extractor_name, is_copy_constants)); + auto extracted_pattern = generate_model(nodes, checked_ops, is_save_const); + matched_patterns.push_back({ extracted_pattern.first, extracted_pattern.second, extractor_name }); } catch(std::exception& e) { if (std::string(e.what()).find("Incorrect node number to create model") == std::string::npos) { // std::cout << "[ WARNING ] Impossible to generate network and add to GraphCache: " < &model, } nodes.clear(); } else { - nodes.insert(op); + nodes.push_back(op); } if (is_extract_body) { if (std::dynamic_pointer_cast(op)) { @@ -104,7 +104,8 @@ FusedNamesExtractor::extract(const std::shared_ptr &model, } } try { - matched_patterns.push_back(generate_model(nodes, checked_ops, extractor_name, is_copy_constants)); + auto extracted_pattern = generate_model(nodes, checked_ops, is_save_const); + matched_patterns.push_back({ extracted_pattern.first, extracted_pattern.second, extractor_name }); } catch(std::exception& e) { if (std::string(e.what()).find("Incorrect node number to create model") == std::string::npos) { // std::cout << "[ WARNING ] Impossible to generate network and add to GraphCache: " < &model, - const std::shared_ptr &ref) { - // `match` is not virtual method in base `SubgraphExtractor` class - // we can use function from any `extractor` to avoid of cycle - if (!m_extractors.empty()) { - if (m_extractors.begin()->second->match(model, ref)) { - return true; - } - } - return false; -} - -ExtractorsManager::ExtractedSubgraphTuple -ExtractorsManager::is_subgraph(const std::shared_ptr &model, - const std::shared_ptr &ref_model, - const std::map &in_info, - const std::map &in_info_ref) { - if (!m_extractors.empty()) { - // `is_subgraph` is not virtual method in base `SubgraphExtractor` class - // we can use function from any `extractor` to avoid of cycle - auto extractor_res = m_extractors.begin()->second->is_subgraph(model, ref_model); - if (std::get<0>(extractor_res)) { - std::map graph_in_info, subgraph_in_info; - if (std::get<1>(extractor_res) == model && std::get<2>(extractor_res) == ref_model) { - graph_in_info = in_info; - subgraph_in_info = in_info_ref; - } else if (std::get<1>(extractor_res) == ref_model && std::get<2>(extractor_res) == model) { - graph_in_info = in_info_ref; - subgraph_in_info = in_info; - } else { - throw std::runtime_error("Generated models are incompatible with original ones!"); - } - try { - subgraph_in_info = align_input_info(std::get<2>(extractor_res), std::get<1>(extractor_res), subgraph_in_info, graph_in_info); - } catch(std::exception) { - return { false, nullptr, nullptr, {}, {} }; - } - return { true, std::get<1>(extractor_res), std::get<2>(extractor_res), graph_in_info, subgraph_in_info }; - } - } - return { false, nullptr, nullptr, {}, {} }; -} - -bool ExtractorsManager::match(const std::shared_ptr &model, - const std::shared_ptr &ref, - std::map &in_info, - const std::map &in_info_ref) { - if (match(model, ref)) { - try { - in_info = align_input_info(model, ref, in_info, in_info_ref); - return true; - } catch (std::exception) { - return false; - } - } - return 
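// FusedNamesExtractor::extract above walks the ordered ops, buffers nodes, and
// flushes the buffer into a sub-model every time it reaches an op whose
// friendly name survived compilation, i.e. a fusion boundary.  A sketch of
// that grouping loop, with strings standing in for nodes and the push into the
// result standing in for generate_model():
#include <set>
#include <string>
#include <vector>

std::vector<std::vector<std::string>>
group_by_boundaries(const std::vector<std::string>& ordered_ops,
                    const std::set<std::string>& compiled_op_names) {
    std::vector<std::vector<std::string>> patterns;
    std::vector<std::string> buffer;
    for (const auto& op : ordered_ops) {
        if (compiled_op_names.count(op)) {     // fusion boundary: flush the buffer
            if (!buffer.empty()) {
                patterns.push_back(buffer);    // stands in for generate_model(...)
                buffer.clear();
            }
        } else {
            buffer.push_back(op);              // keep accumulating the fused region
        }
    }
    if (!buffer.empty()) {
        patterns.push_back(buffer);            // trailing pattern after the last boundary
    }
    return patterns;
}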
false; -} - -std::map -ExtractorsManager::align_input_info(const std::shared_ptr& model, - const std::shared_ptr& model_ref, - const std::map& in_info, - const std::map& in_info_ref, - const std::map &matched_op) { - std::map new_input_info = in_info; - bool is_update_required = false; - for (const auto& in_info_item : in_info_ref) { - if (!in_info.count(in_info_item.first)) { - is_update_required = true; - break; - } else if (in_info.at(in_info_item.first).is_const != in_info_item.second.is_const) { - throw std::runtime_error("Impossible to update input info!!!"); - } - } - if (is_update_required) { - // align matched model names - auto ref_model_ops = model_ref->get_ordered_ops(); - auto model_ops = model->get_ordered_ops(); - size_t ref_ordered_ops_size = ref_model_ops.size(); - size_t ordered_ops_size = model_ops.size(); - if (ref_ordered_ops_size != ordered_ops_size && matched_op.empty()) { - throw std::runtime_error("Matched models can not be compared according different op numbers!"); - } - for (size_t i = 0; i < ref_ordered_ops_size; ++i) { - auto model_op_name = i < ordered_ops_size ? model_ops[i]->get_friendly_name() : ""; - auto model_ref_op_name = ref_model_ops[i]->get_friendly_name(); - if (!in_info_ref.count(model_ref_op_name) && !in_info.count(model_op_name)) { - continue; - } - auto input_info = matched_op.empty() ? new_input_info[model_op_name] : in_info_ref.at(model_ref_op_name); - std::string input_name = matched_op.count(model_ref_op_name) ? matched_op.at(model_ref_op_name) : model_op_name; - if (new_input_info.count(input_name)) { - if (input_info.is_const != in_info_ref.at(model_ref_op_name).is_const) { - throw std::runtime_error("Impossible to update input info!!!"); - } - if (!matched_op.empty()) { - input_info = new_input_info.at(input_name); - } - new_input_info.erase(input_name); - } - new_input_info.insert({ model_ref_op_name, input_info }); - } - } - return new_input_info; -} - -std::list +std::vector ExtractorsManager::extract(const std::shared_ptr &model, bool is_extract_body, bool is_copy_constants) { - std::list result; + std::vector result; for (const auto &it : m_extractors) { // extract patterns from original models auto start = std::chrono::high_resolution_clock::now(); it.second->set_extractor_name(it.first); - auto extracted_patterns = it.second->extract(model, is_extract_body, is_copy_constants); + it.second->set_extract_body(is_extract_body); + it.second->set_save_const(is_copy_constants); + auto extracted_patterns = it.second->extract(model); result.insert(result.end(), extracted_patterns.begin(), extracted_patterns.end()); auto end = std::chrono::high_resolution_clock::now(); auto delta = std::chrono::duration_cast(end - start).count(); diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/src/matchers/subgraph/repeat_pattern.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/src/matchers/subgraph/repeat_pattern.cpp index 4331b178a7b037..006714774cc2a0 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/src/matchers/subgraph/repeat_pattern.cpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/src/matchers/subgraph/repeat_pattern.cpp @@ -8,18 +8,118 @@ #include "openvino/op/lstm_cell.hpp" #include "openvino/op/tensor_iterator.hpp" #include "openvino/op/if.hpp" +#include "openvino/op/loop.hpp" #include "matchers/subgraph/repeat_pattern.hpp" #include "utils/model.hpp" +#include "utils/model_comparator.hpp" using namespace ov::tools::subgraph_dumper; -std::list 
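// The manager's extract() above now pushes the extraction options into each
// extractor (body extraction, constant copying) and times every extractor
// before concatenating the results.  A condensed sketch of that loop with an
// assumed extractor interface:
#include <chrono>
#include <functional>
#include <iostream>
#include <map>
#include <string>
#include <vector>

using Pattern = std::string;                                    // stand-in for ExtractedPattern
using Extractor = std::function<std::vector<Pattern>()>;

std::vector<Pattern> extract_all(const std::map<std::string, Extractor>& extractors) {
    std::vector<Pattern> result;
    for (const auto& [name, extractor] : extractors) {
        auto start = std::chrono::high_resolution_clock::now();
        auto extracted = extractor();                           // run one extractor
        result.insert(result.end(), extracted.begin(), extracted.end());
        auto end = std::chrono::high_resolution_clock::now();
        auto delta = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
        std::cout << "[ INFO ] " << name << " took " << delta << " ms" << std::endl;
    }
    return result;
}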
-RepeatPatternExtractor::extract(const std::shared_ptr &model, - bool is_extract_body, - bool is_copy_constants) { +void RepeatPatternExtractor::set_recursive_extraction(bool _is_recursive_extraction) { + is_recursive_extraction = _is_recursive_extraction; +} + +std::vector +RepeatPatternExtractor::extract(const std::shared_ptr &model) { + std::vector extracted_patterns; + for (const auto& pattern : find_repeat_patterns(model)) { + for (const auto& pattern_structure : pattern) { + extracted_patterns.push_back({std::get<0>(pattern_structure), std::get<2>(pattern_structure), extractor_name}); + } + } + return extracted_patterns; +} + +std::vector> +RepeatPatternExtractor::get_repeat_pattern_borders(const std::shared_ptr &model) { + std::vector> extracted_patterns; + for (auto& pattern : find_repeat_patterns(model, true)) { + std::vector same_pattern_borders; + for (const auto& pattern_structure : pattern) { + std::set output_names; + for (const auto& result : std::get<0>(pattern_structure)->get_results()) { + output_names.insert(result->get_input_node_shared_ptr(0)->get_friendly_name()); + } + + RepeatPatternExtractor::InputVector in_vec; + RepeatPatternExtractor::OutputVector out_vec; + for (const auto& node : std::get<1>(pattern_structure)) { + if (output_names.count(node->get_friendly_name())) { + OutputVector node_outputs = node->outputs(); + out_vec.insert(out_vec.end(), node_outputs.begin(), node_outputs.end()); + } else { + for (const auto& input : node->inputs()) { + in_vec.push_back(input); + } + } + } + same_pattern_borders.push_back({in_vec, out_vec}); + } + extracted_patterns.push_back(same_pattern_borders); + } + return extracted_patterns; +} + +std::vector> +RepeatPatternExtractor::get_repeat_node_vectors(const std::shared_ptr &model) { + std::vector> extracted_patterns; + for (const auto& pattern : find_repeat_patterns(model)) { + std::vector same_pattern_nodes; + for (const auto& pattern_structure : pattern) { + same_pattern_nodes.push_back(std::get<1>(pattern_structure)); + } + extracted_patterns.push_back(same_pattern_nodes); + } + return extracted_patterns; +} + +void +RepeatPatternExtractor::update_extractor_cache( + std::list>& extracted_patterns, + const std::shared_ptr& pattern, + const ov::NodeVector& pattern_node_vector, + const std::map& pattern_in_info) { + for (auto& extracted_pattern : extracted_patterns) { + auto& pattern_structure = extracted_pattern.front(); + const auto& cached_pattern = std::get<0>(pattern_structure); + if (model_comparator->match(pattern, cached_pattern)) { + try { + const auto& cached_in_info = std::get<2>(pattern_structure); + align_input_info(pattern, cached_pattern, pattern_in_info, cached_in_info); + extracted_pattern.push_back({ pattern, pattern_node_vector, pattern_in_info }); + return; + } catch(std::exception) {} + } + } + extracted_patterns.push_back({{ pattern, pattern_node_vector, pattern_in_info }}); +} + +void +RepeatPatternExtractor::update_extractor_cache( + std::list>& extracted_patterns, + std::list>& secondary_extracted_patterns) { + auto extern_it = secondary_extracted_patterns.begin(); + while (!secondary_extracted_patterns.empty()) { + auto it = extern_it->rbegin(); + while (!extern_it->empty()) { + auto& pattern_structure = *it; + const auto& pattern = std::get<0>(pattern_structure); + const auto& pattern_node_vector = std::get<1>(pattern_structure); + const auto& pattern_in_info = std::get<2>(pattern_structure); + update_extractor_cache(extracted_patterns, pattern, pattern_node_vector, pattern_in_info); + 
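// update_extractor_cache above buckets every found pattern with the previously
// seen patterns it is equivalent to: if the comparator matches an existing
// bucket's representative (and the input info can be aligned), the pattern
// joins that bucket, otherwise it starts a new one.  Buckets with a single
// occurrence are dropped later, since a "repeat" needs at least two.  A sketch
// with strings standing in for patterns and equality for ModelComparator::match:
#include <list>
#include <string>
#include <vector>

void bucket_pattern(std::list<std::vector<std::string>>& buckets, const std::string& pattern) {
    for (auto& bucket : buckets) {
        if (bucket.front() == pattern) {      // comparator match against the representative
            bucket.push_back(pattern);
            return;
        }
    }
    buckets.push_back({pattern});             // first occurrence of this pattern
}

void drop_single_occurrences(std::list<std::vector<std::string>>& buckets) {
    for (auto it = buckets.begin(); it != buckets.end();) {
        if (it->size() > 1) {
            ++it;                             // a real repeat, keep it
        } else {
            it = buckets.erase(it);           // seen only once, not a pattern
        }
    }
}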
extern_it->pop_back(); + it = extern_it->rbegin(); + } + secondary_extracted_patterns.pop_front(); + } +} + +std::list> +RepeatPatternExtractor::find_repeat_patterns(const std::shared_ptr &model, + bool is_save_borders_only) { + std::list> extracted_patterns; std::unordered_set checked_ops; - std::list to_cache; auto ordered_ops = model->get_ordered_ops(); auto op_cnt = ordered_ops.size(); @@ -31,9 +131,10 @@ RepeatPatternExtractor::extract(const std::shared_ptr &model, continue; } + // find the same nodes std::vector start_node_idx{idx}; for (size_t i = idx + 1; i < op_cnt; ++i) { - if (manager.match(op, ordered_ops[i])) { + if (model_comparator->match(op, ordered_ops[i])) { start_node_idx.push_back(i); } } @@ -57,9 +158,9 @@ RepeatPatternExtractor::extract(const std::shared_ptr &model, if (node_idx == start_node_idx[i] && ref_node_idx == start_node_idx[j]) { nodes[i].insert(node); nodes[j].insert(ref_node); - } else if (manager.match(node, ref_node)) { + } else if (model_comparator->match(node, ref_node)) { // check if we met the same node - if (manager.match(node, op)) { + if (model_comparator->match(node, op)) { break; } if (checked_ops.count(node->get_friendly_name()) || @@ -94,16 +195,26 @@ RepeatPatternExtractor::extract(const std::shared_ptr &model, for (size_t i = 0; i < start_node_idx.size(); ++i) { try { std::unordered_set tmp_checked_ops; - auto extracted_pattern = generate_model(nodes[i], tmp_checked_ops, extractor_name, is_copy_constants); - auto extracted_model = std::get<0>(extracted_pattern); - std::list secondary_patterns; - if (nodes[i].size() > 20) { - secondary_patterns = extract(std::get<0>(extracted_pattern), is_extract_body, is_copy_constants); - } - if (secondary_patterns.size() > 1) { - to_cache.insert(to_cache.end(), secondary_patterns.begin(), secondary_patterns.end()); + // model, in_info, extractor_name + ov::NodeVector nodes_vector(nodes[i].begin(), nodes[i].end()); + auto extracted_pattern = generate_model(nodes_vector, tmp_checked_ops, is_save_const, is_save_borders_only); + auto extracted_model = extracted_pattern.first; + if (is_recursive_extraction && nodes_vector.size() > 20) { + auto secondary_patterns = find_repeat_patterns(extracted_model, is_save_borders_only); + if (!secondary_patterns.empty()) { + tmp_checked_ops.clear(); + update_extractor_cache(extracted_patterns, secondary_patterns); + } else { + update_extractor_cache(extracted_patterns, + extracted_model, + nodes_vector, + extracted_pattern.second); + } } else { - to_cache.push_back(extracted_pattern); + update_extractor_cache(extracted_patterns, + extracted_model, + nodes_vector, + extracted_pattern.second); } nodes[i].clear(); checked_ops.insert(tmp_checked_ops.begin(), tmp_checked_ops.end()); @@ -117,23 +228,39 @@ RepeatPatternExtractor::extract(const std::shared_ptr &model, if (std::dynamic_pointer_cast(op)) { auto ti = ov::as_type_ptr(op); auto ti_body = ti->get_function(); - auto tmp_res = extract(ti_body); - to_cache.insert(to_cache.end(), tmp_res.begin(), tmp_res.end()); + auto secondary_patterns = find_repeat_patterns(ti_body, is_save_borders_only); + update_extractor_cache(extracted_patterns, secondary_patterns); } else if (std::dynamic_pointer_cast(op)) { auto loop = ov::as_type_ptr(op); auto loop_body = loop->get_function(); - auto tmp_res = extract(loop_body); - to_cache.insert(to_cache.end(), tmp_res.begin(), tmp_res.end()); + auto secondary_patterns = find_repeat_patterns(loop_body, is_save_borders_only); + update_extractor_cache(extracted_patterns, secondary_patterns); } else 
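// find_repeat_patterns above starts by collecting, for every op, the indices
// of all later ops the comparator considers equivalent (start_node_idx), and
// only then grows a node set from each start point in lockstep.  A sketch of
// that first grouping step, with strings standing in for ops and equality for
// ModelComparator::match:
#include <string>
#include <unordered_set>
#include <vector>

std::vector<std::vector<size_t>>
group_equivalent_ops(const std::vector<std::string>& ordered_ops) {
    std::vector<std::vector<size_t>> groups;
    std::unordered_set<size_t> checked;
    for (size_t idx = 0; idx < ordered_ops.size(); ++idx) {
        if (checked.count(idx)) {
            continue;                                   // already part of an earlier group
        }
        std::vector<size_t> start_node_idx{idx};
        for (size_t i = idx + 1; i < ordered_ops.size(); ++i) {
            if (ordered_ops[i] == ordered_ops[idx]) {   // comparator match
                start_node_idx.push_back(i);
                checked.insert(i);
            }
        }
        if (start_node_idx.size() > 1) {                // only repeated ops seed a pattern
            groups.push_back(start_node_idx);
        }
    }
    return groups;
}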
if (std::dynamic_pointer_cast(op)) { auto if_op = ov::as_type_ptr(op); std::vector> bodies; for (size_t i = 0; i < if_op->get_internal_subgraphs_size(); i++) { auto if_body = if_op->get_function(i); - auto tmp_res = extract(if_body); - to_cache.insert(to_cache.end(), tmp_res.begin(), tmp_res.end()); + auto secondary_patterns = find_repeat_patterns(if_body, is_save_borders_only); + update_extractor_cache(extracted_patterns, secondary_patterns); } } } } - return to_cache; + + // clean up patterns + { + auto it = extracted_patterns.begin(); + size_t elem_cnt = 0; + while (it != extracted_patterns.end()) { + if (it->size() > 1) { + ++it; + ++elem_cnt; + } else { + extracted_patterns.erase(it); + it = extracted_patterns.begin(); + std::advance(it, elem_cnt); + } + } + } + return extracted_patterns; } diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/src/matchers/subgraph/subgraph.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/src/matchers/subgraph/subgraph.cpp deleted file mode 100644 index 5b0bddc2e183e9..00000000000000 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/src/matchers/subgraph/subgraph.cpp +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include "matchers/subgraph/subgraph.hpp" - -using namespace ov::tools::subgraph_dumper; - -bool -SubgraphExtractor::match(const std::shared_ptr &model, - const std::shared_ptr &ref_model) const { - bool res = comparator.compare(model, ref_model).valid; - if (res) { - return res; - } - std::vector> ordered_ops = model->get_ordered_ops(), - ref_ordered_ops = ref_model->get_ordered_ops(); - if (ordered_ops.size() != ref_ordered_ops.size()) { - return false; - } - size_t matched_op_cnt = 0, total_op_cnt = ordered_ops.size(); - size_t matched_op_cnt_required = round(0.9 * total_op_cnt); - for (size_t i = 0; i < total_op_cnt; ++i) { - if (is_node_to_skip(ordered_ops[i]) && - is_node_to_skip(ref_ordered_ops[i]) || - m_manager.match(ordered_ops[i], ref_ordered_ops[i])) { - ++matched_op_cnt; - } - if (matched_op_cnt >= matched_op_cnt_required) { - return true; - } - } - return false; -} - -inline SubgraphExtractor::IsSubgraphTuple prepare_is_subgraph_result(bool is_subgraph, - const std::shared_ptr& graph, - const std::shared_ptr& subgraph, - const std::map& matched_ops) { - return is_subgraph ? 
- std::make_tuple(is_subgraph, graph, subgraph, matched_ops) : - std::make_tuple(is_subgraph, nullptr, nullptr, std::map()); -} - -SubgraphExtractor::IsSubgraphTuple -SubgraphExtractor::is_subgraph(const std::shared_ptr &model, - const std::shared_ptr &ref_model) const { - std::vector> ordered_ops = model->get_ordered_ops(), - ref_ordered_ops = ref_model->get_ordered_ops(); - bool is_model = ordered_ops.size() > ref_ordered_ops.size(); - ov::NodeVector graph_to_check_ops, subgraph_to_check_ops; - std::shared_ptr graph = nullptr, subgraph = nullptr; - if (is_model) { - graph_to_check_ops = ordered_ops; - subgraph_to_check_ops = ref_ordered_ops; - graph = model; - subgraph = ref_model; - } else { - graph_to_check_ops = ref_ordered_ops; - subgraph_to_check_ops = ordered_ops; - graph = ref_model; - subgraph = model; - } - std::map matched_op_names; - - auto graph_it = graph_to_check_ops.begin(), subgraph_it = subgraph_to_check_ops.begin(); - while (graph_it != graph_to_check_ops.end() && subgraph_it != subgraph_to_check_ops.end()) { - if (m_manager.match(*graph_it, *subgraph_it)) { - matched_op_names.insert({ (*graph_it)->get_friendly_name(), (*subgraph_it)->get_friendly_name()}); - ++subgraph_it; - } - ++graph_it; - } - return prepare_is_subgraph_result(subgraph_it == subgraph_to_check_ops.end(), graph, subgraph, matched_op_names); -} diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/src/utils/model.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/src/utils/model.cpp index 6717961ea1b837..b84a11b0b458c2 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/src/utils/model.cpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/src/utils/model.cpp @@ -70,6 +70,27 @@ find_models(const std::vector &dirs, const std::string& regexp) { return { models, { ModelCacheStatus::NOT_READ, not_read_model } }; } +bool is_dynamic_model(const std::shared_ptr& model) { + for (const auto& parameter : model->get_parameters()) { + if (is_dynamic_node(parameter)) { + return true; + } + } + for (const auto& result : model->get_results()) { + if (is_dynamic_node(result)) { + return true; + } + } + return false; +} + +std::string get_model_type(const std::shared_ptr& model) { + if (is_dynamic_model(model)) { + return "dynamic"; + } + return "static"; +} + std::map> cache_models( std::shared_ptr& cache, const std::vector& models, @@ -115,6 +136,78 @@ std::map> cache_models( return cache_status; } +std::map +get_input_info_by_model(const std::shared_ptr& model) { + std::map in_info; + for (const auto& node : model->get_ordered_ops()) { + InputInfo::Range ranges(DEFAULT_MIN_VALUE, DEFAULT_MAX_VALUE); + bool is_const = false; + if (ov::op::util::is_constant(node)) { + std::shared_ptr constant = std::dynamic_pointer_cast(node); + auto const_ranges = get_const_ranges(constant, + constant->get_default_output().get_element_type()); + ranges = const_ranges; + } else if (!ov::op::util::is_parameter(node)) { + continue; + } + auto partial_shape = node->get_default_output().get_partial_shape(); + in_info.insert({node->get_friendly_name(), + InputInfo(partial_shape, ranges.min, ranges.max, is_const)}); + } + return in_info; +} + +std::map +align_input_info(const std::shared_ptr& model, + const std::shared_ptr& model_ref, + const std::map& in_info, + const std::map& in_info_ref, + const std::map &matched_op) { + bool is_update_required = !matched_op.empty(); + if (!is_update_required) { + for (const auto& ref_item : in_info_ref) { + if (!in_info.count(ref_item.first)) { 
+ is_update_required = true; + break; + } else if (in_info.at(ref_item.first).is_const != ref_item.second.is_const) { + throw std::runtime_error("Impossible to update input info!!!"); + } + } + } + + std::map updated_input_info = in_info_ref; + if (is_update_required) { + // align matched model names + const auto& ref_model_ops = model_ref->get_ordered_ops(); + const auto& model_ops = model->get_ordered_ops(); + size_t ref_ordered_ops_size = ref_model_ops.size(); + size_t ordered_ops_size = model_ops.size(); + if (ref_ordered_ops_size != ordered_ops_size && matched_op.empty()) { + throw std::runtime_error("Matched models can not be compared according different op numbers!"); + } + for (size_t i = 0; i < ordered_ops_size; ++i) { + auto model_op_name = model_ops[i]->get_friendly_name(); + if (!in_info.count(model_op_name)) { + continue; + } + if (!matched_op.empty()) { + if (!matched_op.count(model_op_name)) { + continue; + } + } + auto model_ref_op_name = matched_op.empty() ? ref_model_ops[i]->get_friendly_name() : matched_op.at(model_op_name); + + const auto& in_info_item = in_info.at(model_op_name); + const auto& ref_in_info_item = in_info_ref.at(model_ref_op_name); + if (in_info_item.is_const != ref_in_info_item.is_const) { + throw std::runtime_error("Impossible to update input info!!!"); + } + updated_input_info[model_ref_op_name] = in_info_item; + } + } + return updated_input_info; +} + } // namespace subgraph_dumper } // namespace tools } // namespace ov \ No newline at end of file diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/src/utils/model_comparator.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/src/utils/model_comparator.cpp new file mode 100644 index 00000000000000..027dc61be8e4db --- /dev/null +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/src/utils/model_comparator.cpp @@ -0,0 +1,136 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "utils/model_comparator.hpp" +#include "utils/model.hpp" + +using namespace ov::tools::subgraph_dumper; + +std::shared_ptr ModelComparator::m_instance = nullptr; + +void ModelComparator::set_match_coefficient(float _match_coefficient) { + if (_match_coefficient < 0 || _match_coefficient > 1) { + throw std::runtime_error("[ ERROR ] Match coefficient should be from 0 to 1!"); + } + match_coefficient = _match_coefficient; +} + +void ModelComparator::set_shape_strict_match(bool in_is_shape_strict_match) { + m_manager.set_shape_strict_match(in_is_shape_strict_match); +} + +inline ModelComparator::IsSubgraphTuple +prepare_is_subgraph_result(bool is_subgraph, + const std::shared_ptr& subgraph, + const std::shared_ptr& graph, + const std::map& matched_ops) { + return is_subgraph ? 
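// align_input_info above re-keys a model's input metadata onto the reference
// model's op names, either positionally (when both graphs have the same op
// order) or through the matched-name map produced by is_subgraph, and refuses
// when a constant input would be paired with a non-constant one.  A sketch of
// the map-driven path with a simplified info struct:
#include <map>
#include <stdexcept>
#include <string>

struct Info { bool is_const = false; int min = 0, max = 1; };

std::map<std::string, Info>
align(const std::map<std::string, Info>& in_info,              // keyed by model op names
      const std::map<std::string, Info>& in_info_ref,          // keyed by reference op names
      const std::map<std::string, std::string>& matched_op) {  // model name -> reference name
    std::map<std::string, Info> updated = in_info_ref;
    for (const auto& [model_name, info] : in_info) {
        auto match = matched_op.find(model_name);
        if (match == matched_op.end()) {
            continue;                                           // op has no counterpart
        }
        const auto& ref_name = match->second;
        if (in_info_ref.at(ref_name).is_const != info.is_const) {
            throw std::runtime_error("Impossible to update input info!!!");
        }
        updated[ref_name] = info;                               // re-key onto the reference name
    }
    return updated;
}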
+ std::make_tuple(is_subgraph, subgraph, graph, matched_ops) : + std::make_tuple(is_subgraph, nullptr, nullptr, std::map()); +} + +ModelComparator::IsSubgraphTuple +ModelComparator::is_subgraph(const std::shared_ptr &model, + const std::shared_ptr &ref_model) const { + std::vector> ordered_ops = model->get_ordered_ops(), + ref_ordered_ops = ref_model->get_ordered_ops(); + bool is_model = ordered_ops.size() > ref_ordered_ops.size(); + ov::NodeVector graph_to_check_ops, subgraph_to_check_ops; + std::shared_ptr graph = nullptr, subgraph = nullptr; + if (is_model) { + graph_to_check_ops = ordered_ops; + subgraph_to_check_ops = ref_ordered_ops; + graph = model; + subgraph = ref_model; + } else { + graph_to_check_ops = ref_ordered_ops; + subgraph_to_check_ops = ordered_ops; + graph = ref_model; + subgraph = model; + } + std::map matched_op_names; + + auto graph_it = graph_to_check_ops.begin(), subgraph_it = subgraph_to_check_ops.begin(); + while (graph_it != graph_to_check_ops.end() && subgraph_it != subgraph_to_check_ops.end()) { + if (m_manager.match(*graph_it, *subgraph_it)) { + matched_op_names.insert({ (*subgraph_it)->get_friendly_name(), (*graph_it)->get_friendly_name()}); + ++subgraph_it; + } + ++graph_it; + } + return prepare_is_subgraph_result(subgraph_it == subgraph_to_check_ops.end(), subgraph, graph, matched_op_names); +} + +bool +ModelComparator::match(const std::shared_ptr &node, + const std::shared_ptr &ref_node) const { + return m_manager.match(node, ref_node); +} + +bool +ModelComparator::match(const std::shared_ptr &model, + const std::shared_ptr &ref_model) const { + std::vector> ordered_ops = model->get_ordered_ops(), + ref_ordered_ops = ref_model->get_ordered_ops(); + if (ordered_ops.size() != ref_ordered_ops.size()) { + return false; + } + size_t matched_op_cnt = 0, total_op_cnt = ordered_ops.size(); + size_t matched_op_cnt_required = round(match_coefficient * total_op_cnt); + for (size_t i = 0; i < total_op_cnt; ++i) { + if (m_manager.match(ordered_ops[i], ref_ordered_ops[i])) { + ++matched_op_cnt; + } + if (matched_op_cnt >= matched_op_cnt_required) { + return true; + } + } + return false; +} + +ModelComparator::ExtractedSubgraphTuple +ModelComparator::is_subgraph(const std::shared_ptr &model, + const std::shared_ptr &ref_model, + const std::map &in_info, + const std::map &in_info_ref) { + auto extractor_res = is_subgraph(model, ref_model); + if (std::get<0>(extractor_res)) { + std::map graph_in_info, subgraph_in_info; + std::shared_ptr subgraph = nullptr, graph = nullptr; + // if (model == subgraph && ref_model == graph) + if (std::get<1>(extractor_res) == model && std::get<2>(extractor_res) == ref_model) { + subgraph = model; + subgraph_in_info = in_info; + graph = ref_model; + graph_in_info = in_info_ref; + // else if (subgraph == ref_model && graph = model) + } else if (std::get<1>(extractor_res) == ref_model && std::get<2>(extractor_res) == model) { + subgraph = ref_model; + subgraph_in_info = in_info_ref; + graph = model; + graph_in_info = in_info; + } else { + throw std::runtime_error("Generated models are incompatible with original ones!"); + } + try { + subgraph_in_info = align_input_info(subgraph, graph, subgraph_in_info, graph_in_info); + return { true, subgraph, graph, subgraph_in_info, graph_in_info }; + } catch(std::exception) {} + } + return { false, nullptr, nullptr, {}, {} }; +} + +std::pair> +ModelComparator::match(const std::shared_ptr &model, + const std::shared_ptr &model_ref, + const std::map &in_info, + const std::map &in_info_ref) { + try { + if 
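// ModelComparator above accepts two models as "matching" when at least
// round(match_coefficient * total_op_cnt) of their ordered ops match pairwise
// (with the default 0.9 and 10 ops, 9 per-op matches are enough), and detects
// subgraphs with a two-pointer walk: advance through the bigger graph's
// ordered ops, stepping the smaller graph's pointer only on a match.  A sketch
// of both checks, with strings standing in for ops:
#include <cmath>
#include <string>
#include <vector>

bool models_match(const std::vector<std::string>& ops,
                  const std::vector<std::string>& ref_ops,
                  float match_coefficient = 0.9f) {
    if (ops.size() != ref_ops.size()) {
        return false;
    }
    size_t required = static_cast<size_t>(std::round(match_coefficient * ops.size()));
    size_t matched = 0;
    for (size_t i = 0; i < ops.size(); ++i) {
        if (ops[i] == ref_ops[i] && ++matched >= required) {
            return true;                    // enough per-op matches collected
        }
    }
    return false;
}

bool is_subgraph(const std::vector<std::string>& graph_ops,       // the bigger graph
                 const std::vector<std::string>& subgraph_ops) {  // the candidate subgraph
    auto sub_it = subgraph_ops.begin();
    for (auto graph_it = graph_ops.begin();
         graph_it != graph_ops.end() && sub_it != subgraph_ops.end(); ++graph_it) {
        if (*graph_it == *sub_it) {
            ++sub_it;                       // matched one more subgraph op, keep walking
        }
    }
    return sub_it == subgraph_ops.end();    // every subgraph op found in order
}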
(match(model, model_ref)) { + auto new_input_info = align_input_info(model, model_ref, in_info, in_info_ref); + return {true, new_input_info}; + } + } catch (std::exception) {} + return {false, {}}; +} \ No newline at end of file diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/src/utils/node.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/src/utils/node.cpp index 9df7ea3dc178fb..5c371174b26ef4 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/src/utils/node.cpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/src/utils/node.cpp @@ -7,6 +7,73 @@ namespace ov { namespace tools { namespace subgraph_dumper { +InputInfo::Range get_const_ranges(const std::shared_ptr& const_node, + ov::element::Type elem_type) { + InputInfo::Range ranges(DEFAULT_MIN_VALUE, DEFAULT_MAX_VALUE); + switch (elem_type) { + case ov::element::Type_t::boolean: { + ranges = get_const_ranges(const_node); + break; + } + case ov::element::Type_t::bf16: { + ranges = get_const_ranges(const_node); + break; + } + case ov::element::Type_t::f16: { + ranges = get_const_ranges(const_node); + break; + } + case ov::element::Type_t::f32: { + ranges = get_const_ranges(const_node); + break; + } + case ov::element::Type_t::f64: { + ranges = get_const_ranges(const_node); + break; + } + case ov::element::Type_t::i8: { + ranges = get_const_ranges(const_node); + break; + } + case ov::element::Type_t::i16: { + ranges = get_const_ranges(const_node); + break; + } + case ov::element::Type_t::i32: { + ranges = get_const_ranges(const_node); + break; + } + case ov::element::Type_t::i64: { + ranges = get_const_ranges(const_node); + break; + } + // TODO cast_vector doesn't support u1 now + // case ov::element::Type_t::u1: + // return get_const_ranges(const_node); + case ov::element::Type_t::u8: { + ranges = get_const_ranges(const_node); + break; + } + case ov::element::Type_t::u16: { + ranges = get_const_ranges(const_node); + break; + } + case ov::element::Type_t::u32: { + ranges = get_const_ranges(const_node); + break; + } + case ov::element::Type_t::u64: { + ranges = get_const_ranges(const_node); + break; + } + default: { + std::cout << "Can't get ranges.. 
Unsupported data type" << std::endl; + break; + } + } + return ranges; +} + std::map get_input_info_by_node(const std::shared_ptr& node) { std::map input_info; for (size_t port_id = 0; port_id < node->get_input_size(); ++port_id) { @@ -19,71 +86,12 @@ std::map get_input_info_by_node(const std::shared_ptr(input_node)) { if (ov::shape_size(input_node->get_output_shape(0)) == 0) continue; - auto const_node = - std::dynamic_pointer_cast(input_node); + auto const_node = ov::as_type_ptr(input_node); in_info.is_const = true; - switch (node->get_output_element_type(0)) { - case ov::element::Type_t::boolean: { - in_info.ranges = get_const_ranges(const_node); - break; - } - case ov::element::Type_t::bf16: { - in_info.ranges = get_const_ranges(const_node); - break; - } - case ov::element::Type_t::f16: { - in_info.ranges = get_const_ranges(const_node); - break; - } - case ov::element::Type_t::f32: { - in_info.ranges = get_const_ranges(const_node); - break; - } - case ov::element::Type_t::f64: { - in_info.ranges = get_const_ranges(const_node); - break; - } - case ov::element::Type_t::i8: { - in_info.ranges = get_const_ranges(const_node); - break; - } - case ov::element::Type_t::i16: { - in_info.ranges = get_const_ranges(const_node); - break; - } - case ov::element::Type_t::i32: { - in_info.ranges = get_const_ranges(const_node); - break; - } - case ov::element::Type_t::i64: { - in_info.ranges = get_const_ranges(const_node); - break; - } - // TODO cast_vector doesn't support u1 now - // case ov::element::Type_t::u1: - // return get_const_ranges(const_node); - case ov::element::Type_t::u8: { - in_info.ranges = get_const_ranges(const_node); - break; - } - case ov::element::Type_t::u16: { - in_info.ranges = get_const_ranges(const_node); - break; - } - case ov::element::Type_t::u32: { - in_info.ranges = get_const_ranges(const_node); - break; - } - case ov::element::Type_t::u64: { - in_info.ranges = get_const_ranges(const_node); - break; - } - default: { - std::cout << "Can't get ranges.. Unsupported data type" << std::endl; - break; - }} + in_info.ranges = get_const_ranges(const_node, + const_node->get_default_output().get_element_type()); } - input_info.insert({ input_name, in_info }); + input_info.insert({input_name, in_info}); } return input_info; } @@ -128,9 +136,10 @@ std::shared_ptr clone_node(std::shared_ptr node, std::shared_ptr cloned_node = nullptr; if (!has_parameters && !is_copy_const_node && !inputs.empty()) { cloned_node = clone_node(node, true, true, node_name); - // std::cout << "The operation: " + node->get_friendly_name() + " does not have parameters! Replace first input to parameter!" << std::endl; - auto param = - std::make_shared(cloned_node->get_input_element_type(0), cloned_node->get_input_partial_shape(0)); + // std::cout << "The operation: " + node->get_friendly_name() + " does not have parameters! Replace first input + // to parameter!" 
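// The switch above only dispatches on the element type; the per-type work is a
// templated scan over the constant's values that records the observed min and
// max, which get_input_info_by_node then stores as the input's range.  A
// sketch of that helper over a plain vector (the real version reads the values
// via ov::op::v0::Constant::cast_vector; the fallback defaults are assumed):
#include <algorithm>
#include <vector>

struct Range {
    double min;
    double max;
};

template <typename T>
Range get_value_range(const std::vector<T>& values,
                      Range fallback = {0.0, 1.0}) {   // assumed defaults when empty
    if (values.empty()) {
        return fallback;
    }
    auto [min_it, max_it] = std::minmax_element(values.begin(), values.end());
    return {static_cast<double>(*min_it), static_cast<double>(*max_it)};
}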
<< std::endl; + auto param = std::make_shared(cloned_node->get_input_element_type(0), + cloned_node->get_input_partial_shape(0)); std::string param_name = node_name + "_0"; param->set_friendly_name(param_name); auto node_to_replace = cloned_node->get_input_node_shared_ptr(0); @@ -142,10 +151,11 @@ std::shared_ptr clone_node(std::shared_ptr node, return cloned_node; } -std::shared_ptr convert_const_to_param(const std::shared_ptr& op_to_replace) { +std::shared_ptr convert_const_to_param( + const std::shared_ptr& op_to_replace) { if (op_to_replace->get_byte_size() > 1024) { - auto param = std::make_shared( - op_to_replace->get_output_element_type(0), op_to_replace->get_output_partial_shape(0)); + auto param = std::make_shared(op_to_replace->get_output_element_type(0), + op_to_replace->get_output_partial_shape(0)); param->set_friendly_name(op_to_replace->get_friendly_name()); if (param != nullptr) { ov::replace_node(op_to_replace, param); diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/cache/cache.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/cache/cache.cpp index a0d46c733809d7..0450d05ab1c054 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/cache/cache.cpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/cache/cache.cpp @@ -93,4 +93,22 @@ TEST_F(ICacheUnitTest, serialize_model) { } } +TEST_F(ICacheUnitTest, is_model_large_to_read) { + this->mem_size = 0; + ASSERT_NO_THROW(this->is_model_large_to_read(test_model, test_model_path)); + ASSERT_TRUE(this->is_model_large_to_read(test_model, test_model_path)); + this->mem_size = 1 << 30; + ASSERT_NO_THROW(this->is_model_large_to_read(test_model, test_model_path)); + ASSERT_FALSE(this->is_model_large_to_read(test_model, test_model_path)); +} + +TEST_F(ICacheUnitTest, is_model_large_to_store_const) { + this->mem_size = 0; + ASSERT_NO_THROW(this->is_model_large_to_store_const(test_model)); + ASSERT_TRUE(this->is_model_large_to_store_const(test_model)); + this->mem_size = 1 << 30; + ASSERT_NO_THROW(this->is_model_large_to_store_const(test_model)); + ASSERT_FALSE(this->is_model_large_to_store_const(test_model)); +} + } // namespace \ No newline at end of file diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/matchers/subgraph/fused_names.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/matchers/subgraph/fused_names.cpp index f83c34bbe9ce43..6a287a8e364c64 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/matchers/subgraph/fused_names.cpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/matchers/subgraph/fused_names.cpp @@ -8,6 +8,7 @@ #include "matchers/subgraph/fused_names.hpp" #include "utils/model.hpp" +#include "utils/model_comparator.hpp" #include "test_models/model_0.hpp" #include "test_models/model_1.hpp" @@ -32,7 +33,7 @@ class FusedNamesExtractorTest : public SubgraphsDumperBaseTest { auto it_model_2 = models_2.begin(); while (it_model_1 != models_1.end() || it_model_2 != models_2.end()) { SubgraphExtractor extractor; - ASSERT_TRUE(extractor.match(std::get<0>(*it_model_1), std::get<0>(*it_model_2))); + ASSERT_TRUE(ModelComparator::get()->match(std::get<0>(*it_model_1), std::get<0>(*it_model_2))); auto in_info_1 = std::get<1>(*it_model_1); auto in_info_2 = std::get<1>(*it_model_2); for (const auto& in_info : in_info_1) { diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/matchers/subgraph/manager.cpp 
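// Two size thresholds show up above: constants over 1024 bytes are replaced
// with Parameters when a pattern is cached (so the serialized IR stays small),
// and the new unit tests flip a memory limit between 0 and 1 GiB to drive the
// is_model_large_to_* checks both ways.  A sketch of the constant-inlining
// decision, assuming byte size is element size times element count:
#include <cstddef>

constexpr size_t kMaxInlineConstBytes = 1024;

bool keep_constant_inline(size_t element_size_bytes, size_t element_count) {
    return element_size_bytes * element_count <= kMaxInlineConstBytes;
}

// e.g. a 16x16 f32 constant is exactly 1024 bytes and stays inline,
// while a 17x16 f32 constant (1088 bytes) becomes a Parameter.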
b/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/matchers/subgraph/manager.cpp index 76bbeb4769bc08..fe25e581e71608 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/matchers/subgraph/manager.cpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/matchers/subgraph/manager.cpp @@ -35,31 +35,10 @@ class ExtractorsManagerTest : public ExtractorsManager, test_model_0_0 = std::make_shared(ov::ResultVector{test_res}, ov::ParameterVector{test_parameter}); } - { - std::shared_ptr test_parameter = - std::make_shared(ov::element::f32, ov::Shape{2, 5}); - test_parameter->set_friendly_name("test_parameter_1"); - std::shared_ptr test_abs = - std::make_shared(test_parameter); - std::shared_ptr test_res = - std::make_shared(test_abs); - test_model_0_1 = std::make_shared(ov::ResultVector{test_res}, - ov::ParameterVector{test_parameter}); - } - { - std::shared_ptr test_parameter = - std::make_shared(ov::element::f32, ov::Shape{2, 5}); - std::shared_ptr test_abs = - std::make_shared(test_parameter); - std::shared_ptr test_res = - std::make_shared(test_abs); - test_model_1 = std::make_shared(ov::ResultVector{test_res}, - ov::ParameterVector{test_parameter}); - } } ExtractorsManager::ExtractorsMap test_map; - std::shared_ptr test_model_0_0, test_model_0_1, test_model_1; + std::shared_ptr test_model_0_0; }; TEST_F(ExtractorsManagerTest, constructor) { @@ -78,57 +57,9 @@ TEST_F(ExtractorsManagerTest, get_extractors) { ASSERT_EQ(this->m_extractors, this->get_extractors()); } -TEST_F(ExtractorsManagerTest, match) { - this->set_extractors(test_map); - ASSERT_NO_THROW(this->match(test_model_0_0, test_model_0_1)); - ASSERT_TRUE(this->match(test_model_0_0, test_model_0_1)); - ASSERT_NO_THROW(this->match(test_model_0_0, test_model_1)); - ASSERT_FALSE(this->match(test_model_0_0, test_model_1)); - ASSERT_NO_THROW(this->match(test_model_0_1, test_model_1)); - ASSERT_FALSE(this->match(test_model_0_1, test_model_1)); -} - -TEST_F(ExtractorsManagerTest, is_subgraph) { - this->set_extractors(test_map); - ASSERT_NO_THROW(this->is_subgraph(test_model_0_0, test_model_0_1)); - auto is_subgraph = this->is_subgraph(test_model_0_0, test_model_0_1); - ASSERT_TRUE(std::get<0>(is_subgraph)); - ASSERT_NO_THROW(this->is_subgraph(test_model_0_0, test_model_1)); - ASSERT_FALSE(std::get<0>(this->is_subgraph(test_model_0_0, test_model_1))); - ASSERT_NO_THROW(this->is_subgraph(test_model_0_1, test_model_1)); - ASSERT_FALSE(std::get<0>(this->is_subgraph(test_model_0_1, test_model_1))); -} - -TEST_F(ExtractorsManagerTest, match_with_in_info) { - this->set_extractors(test_map); - std::map test_in_info({{"test_parameter_0", InputInfo()}}), test_in_info_1({{"test_parameter_1", InputInfo({}, 1, 2, true)}}); - ASSERT_NO_THROW(this->match(test_model_0_0, test_model_0_1, test_in_info, test_in_info)); - ASSERT_TRUE(this->match(test_model_0_0, test_model_0_1, test_in_info, test_in_info)); - ASSERT_NO_THROW(this->match(test_model_0_0, test_model_0_1, test_in_info, test_in_info_1)); - ASSERT_FALSE(this->match(test_model_0_0, test_model_0_1, test_in_info, test_in_info_1)); - ASSERT_NO_THROW(this->match(test_model_0_1, test_model_1, test_in_info, test_in_info)); - ASSERT_FALSE(this->match(test_model_0_1, test_model_1, test_in_info, test_in_info)); -} - TEST_F(ExtractorsManagerTest, extract) { this->set_extractors(test_map); ASSERT_NO_THROW(this->extract(test_model_0_0)); } -TEST_F(ExtractorsManagerTest, align_input_info) { - std::map test_in_info({{"test_parameter_0", InputInfo()}}), 
test_in_info_ref({{"test_parameter_1", InputInfo()}}); - ASSERT_NE(test_in_info, test_in_info_ref); - ASSERT_NO_THROW(this->align_input_info(test_model_0_0, test_model_0_1, test_in_info, test_in_info_ref)); - auto c = this->align_input_info(test_model_0_0, test_model_0_1, test_in_info, test_in_info_ref); - ASSERT_EQ(c, test_in_info_ref); -} - -TEST_F(ExtractorsManagerTest, align_input_info_for_subgraphs) { - std::map test_in_info({{"test_parameter_0", InputInfo()}}), test_in_info_ref({{"test_parameter_1", InputInfo()}}); - ASSERT_NE(test_in_info, test_in_info_ref); - ASSERT_NO_THROW(this->align_input_info(test_model_0_0, test_model_0_1, test_in_info, test_in_info_ref, {{"test_parameter_0", "test_parameter_1"}})); - auto c = this->align_input_info(test_model_0_0, test_model_0_1, test_in_info, test_in_info_ref); - ASSERT_EQ(c, test_in_info_ref); -} - } // namespace diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/matchers/subgraph/repeat_pattern.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/matchers/subgraph/repeat_pattern.cpp index 7bb49decaeaab7..e583bca1eef155 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/matchers/subgraph/repeat_pattern.cpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/matchers/subgraph/repeat_pattern.cpp @@ -6,6 +6,7 @@ #include "matchers/subgraph/repeat_pattern.hpp" #include "utils/model.hpp" +#include "utils/model_comparator.hpp" #include "base_test.hpp" #include "test_models/model_0.hpp" @@ -22,13 +23,13 @@ class RepeatPatternExtractorTest : public SubgraphsDumperBaseTest { protected: RepeatPatternExtractor extractor; - bool is_match(const std::list& models, + bool is_match(const std::vector& models, const std::vector>& ref_models) { size_t match_numbers = 0; for (const auto& model : models) { bool is_match = false; for (const auto& ref_model : ref_models) { - if (extractor.match(std::get<0>(model), ref_model)) { + if (ModelComparator::get()->match(std::get<0>(model), ref_model)) { is_match = true; ++match_numbers; break; @@ -40,6 +41,28 @@ class RepeatPatternExtractorTest : public SubgraphsDumperBaseTest { } return match_numbers == models.size(); } + + void sort_node_vec(std::vector>& pattern_vec) { + for (auto& pattern : pattern_vec) { + for (auto& node_vec : pattern) { + std::sort(node_vec.begin(), node_vec.end()); + } + std::sort(pattern.begin(), pattern.end()); + } + std::sort(pattern_vec.begin(), pattern_vec.end()); + } + + // not allowed to sort inputs/outputs according there are not copy constructor + // void sort_borders(std::vector>& pattern_vec) { + // for (auto& pattern : pattern_vec) { + // for (auto& node_vec : pattern) { + // std::sort(node_vec.first.begin(), node_vec.first.end()); + // std::sort(node_vec.second.begin(), node_vec.second.end()); + // } + // std::sort(pattern.begin(), pattern.end()); + // } + // std::sort(pattern_vec.begin(), pattern_vec.end()); + // } }; TEST_F(RepeatPatternExtractorTest, extract_0) { @@ -63,4 +86,59 @@ TEST_F(RepeatPatternExtractorTest, extract_2) { ASSERT_TRUE(is_match(models, ref)); } +TEST_F(RepeatPatternExtractorTest, get_repeat_node_vectors_model_0) { + auto test_model = Model_0(); + auto node_vector = extractor.get_repeat_node_vectors(test_model.get()); + auto ref = test_model.get_ref_node_vector(); + sort_node_vec(node_vector); + sort_node_vec(ref); + ASSERT_EQ(node_vector, ref); +} + +TEST_F(RepeatPatternExtractorTest, get_repeat_node_vectors_model_1) { + auto test_model = Model_1(); + auto node_vector = 
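// The new repeat-pattern tests above sort every node vector (and the outer
// vector of patterns) before comparing against the reference, so the assertion
// does not depend on extraction order; pattern borders stay unsorted because
// ov::Input/ov::Output lack the copy semantics std::sort needs.  A sketch of
// that normalization over a generic nested vector:
#include <algorithm>
#include <vector>

template <typename T>
void sort_nested(std::vector<std::vector<T>>& groups) {
    for (auto& group : groups) {
        std::sort(group.begin(), group.end());   // order inside each group
    }
    std::sort(groups.begin(), groups.end());     // order of the groups themselves
}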
extractor.get_repeat_node_vectors(test_model.get()); + auto ref = test_model.get_ref_node_vector(); + sort_node_vec(node_vector); + sort_node_vec(ref); + ASSERT_EQ(node_vector, ref); +} + +TEST_F(RepeatPatternExtractorTest, get_repeat_node_vectors_model_2) { + auto test_model = Model_2(); + auto node_vector = extractor.get_repeat_node_vectors(test_model.get()); + auto ref = test_model.get_ref_node_vector(); + sort_node_vec(node_vector); + sort_node_vec(ref); + ASSERT_EQ(node_vector, ref); +} + +TEST_F(RepeatPatternExtractorTest, get_repeat_pattern_borders_model_0) { + auto test_model = Model_0(); + auto extracted_borders = extractor.get_repeat_pattern_borders(test_model.get()); + auto ref_borders = test_model.get_ref_node_borders(); + // sort_borders(extracted_borders); + // sort_borders(ref_borders); + ASSERT_EQ(extracted_borders, ref_borders); +} + +TEST_F(RepeatPatternExtractorTest, get_repeat_pattern_borders_model_1) { + auto test_model = Model_1(); + auto extracted_borders = extractor.get_repeat_pattern_borders(test_model.get()); + auto ref_borders = test_model.get_ref_node_borders(); + // sort_borders(extracted_borders); + // sort_borders(ref_borders); + ASSERT_EQ(extracted_borders, ref_borders); +} + +TEST_F(RepeatPatternExtractorTest, get_repeat_pattern_borders_model_2) { + auto test_model = Model_2(); + auto extracted_borders = extractor.get_repeat_pattern_borders(test_model.get()); + auto ref_borders = test_model.get_ref_node_borders(); + // sort_borders(extracted_borders); + // sort_borders(ref_borders); + ASSERT_EQ(extracted_borders, ref_borders); +} + + } // namespace diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/matchers/subgraph/subgraph.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/matchers/subgraph/subgraph.cpp index 7de5706b9e9a06..8819456bdcf2e5 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/matchers/subgraph/subgraph.cpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/matchers/subgraph/subgraph.cpp @@ -1,209 +1,209 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// +// // Copyright (C) 2018-2023 Intel Corporation +// // SPDX-License-Identifier: Apache-2.0 +// // -#include "matchers/subgraph/subgraph.hpp" -#include "base_test.hpp" +// #include "matchers/subgraph/subgraph.hpp" +// #include "base_test.hpp" -#include "openvino/op/abs.hpp" -#include "openvino/op/relu.hpp" -#include "openvino/op/parameter.hpp" -#include "openvino/op/result.hpp" +// #include "openvino/op/abs.hpp" +// #include "openvino/op/relu.hpp" +// #include "openvino/op/parameter.hpp" +// #include "openvino/op/result.hpp" -namespace { +// namespace { -using namespace ov::tools::subgraph_dumper; +// using namespace ov::tools::subgraph_dumper; -// ======================= ExtractorsManagerTest Unit tests ======================= -class SubgraphExtractorTest : public SubgraphExtractor, - public SubgraphsDumperBaseTest { -protected: - void SetUp() override { - SubgraphsDumperBaseTest::SetUp(); - { - std::shared_ptr test_parameter = - std::make_shared(ov::element::f32, ov::Shape{1, 2}); - std::shared_ptr test_abs = - std::make_shared(test_parameter); - std::shared_ptr test_res = - std::make_shared(test_abs); - test_model_0_0 = std::make_shared(ov::ResultVector{test_res}, - ov::ParameterVector{test_parameter}); - } - { - std::shared_ptr test_parameter = - std::make_shared(ov::element::f32, ov::Shape{2, 5}); - std::shared_ptr test_abs = - std::make_shared(test_parameter); 
- std::shared_ptr test_res = - std::make_shared(test_abs); - test_model_0_1 = std::make_shared(ov::ResultVector{test_res}, - ov::ParameterVector{test_parameter}); - } - { - std::shared_ptr test_parameter = - std::make_shared(ov::element::f32, ov::Shape{2, 5}); - std::shared_ptr test_abs = - std::make_shared(test_parameter); - std::shared_ptr test_res = - std::make_shared(test_abs); - test_model_1 = std::make_shared(ov::ResultVector{test_res}, - ov::ParameterVector{test_parameter}); - } - } +// // ======================= ExtractorsManagerTest Unit tests ======================= +// class SubgraphExtractorTest : public SubgraphExtractor, +// public SubgraphsDumperBaseTest { +// protected: +// void SetUp() override { +// SubgraphsDumperBaseTest::SetUp(); +// { +// std::shared_ptr test_parameter = +// std::make_shared(ov::element::f32, ov::Shape{1, 2}); +// std::shared_ptr test_abs = +// std::make_shared(test_parameter); +// std::shared_ptr test_res = +// std::make_shared(test_abs); +// test_model_0_0 = std::make_shared(ov::ResultVector{test_res}, +// ov::ParameterVector{test_parameter}); +// } +// { +// std::shared_ptr test_parameter = +// std::make_shared(ov::element::f32, ov::Shape{2, 5}); +// std::shared_ptr test_abs = +// std::make_shared(test_parameter); +// std::shared_ptr test_res = +// std::make_shared(test_abs); +// test_model_0_1 = std::make_shared(ov::ResultVector{test_res}, +// ov::ParameterVector{test_parameter}); +// } +// { +// std::shared_ptr test_parameter = +// std::make_shared(ov::element::f32, ov::Shape{2, 5}); +// std::shared_ptr test_abs = +// std::make_shared(test_parameter); +// std::shared_ptr test_res = +// std::make_shared(test_abs); +// test_model_1 = std::make_shared(ov::ResultVector{test_res}, +// ov::ParameterVector{test_parameter}); +// } +// } - std::shared_ptr test_model_0_0, test_model_0_1, test_model_1; -}; +// std::shared_ptr test_model_0_0, test_model_0_1, test_model_1; +// }; -TEST_F(SubgraphExtractorTest, match) { - ASSERT_NO_THROW(this->match(test_model_0_0, test_model_0_1)); - ASSERT_TRUE(this->match(test_model_0_0, test_model_0_1)); - ASSERT_NO_THROW(this->match(test_model_0_0, test_model_1)); - ASSERT_FALSE(this->match(test_model_0_0, test_model_1)); - ASSERT_NO_THROW(this->match(test_model_0_1, test_model_1)); - ASSERT_FALSE(this->match(test_model_0_1, test_model_1)); -} +// TEST_F(SubgraphExtractorTest, match) { +// ASSERT_NO_THROW(this->match(test_model_0_0, test_model_0_1)); +// ASSERT_TRUE(this->match(test_model_0_0, test_model_0_1)); +// ASSERT_NO_THROW(this->match(test_model_0_0, test_model_1)); +// ASSERT_FALSE(this->match(test_model_0_0, test_model_1)); +// ASSERT_NO_THROW(this->match(test_model_0_1, test_model_1)); +// ASSERT_FALSE(this->match(test_model_0_1, test_model_1)); +// } -TEST_F(SubgraphExtractorTest, match_90_percent) { - { - std::shared_ptr test_parameter = - std::make_shared(ov::element::f32, ov::Shape{1, 2}); - std::shared_ptr test_abs_0 = - std::make_shared(test_parameter); - std::shared_ptr test_abs_1 = - std::make_shared(test_abs_0); - std::shared_ptr test_abs_2 = - std::make_shared(test_abs_1); - std::shared_ptr test_abs_3 = - std::make_shared(test_abs_2); - std::shared_ptr test_abs_4 = - std::make_shared(test_abs_3); - std::shared_ptr test_abs_5 = - std::make_shared(test_abs_4); - std::shared_ptr test_abs_6 = - std::make_shared(test_abs_5); - std::shared_ptr test_abs_7 = - std::make_shared(test_abs_6); - std::shared_ptr test_abs_8 = - std::make_shared(test_abs_7); - std::shared_ptr test_abs_9 = - 
std::make_shared(test_abs_8); - std::shared_ptr test_abs_10 = - std::make_shared(test_abs_9); - std::shared_ptr test_res = - std::make_shared(test_abs_10); - test_model_0_0 = std::make_shared(ov::ResultVector{test_res}, - ov::ParameterVector{test_parameter}); - } - { - std::shared_ptr test_parameter = - std::make_shared(ov::element::f32, ov::Shape{1, 2}); - std::shared_ptr test_abs_0 = - std::make_shared(test_parameter); - std::shared_ptr test_abs_1 = - std::make_shared(test_abs_0); - std::shared_ptr test_abs_2 = - std::make_shared(test_abs_1); - std::shared_ptr test_abs_3 = - std::make_shared(test_abs_2); - std::shared_ptr test_abs_4 = - std::make_shared(test_abs_3); - std::shared_ptr test_abs_5 = - std::make_shared(test_abs_4); - std::shared_ptr test_abs_6 = - std::make_shared(test_abs_5); - std::shared_ptr test_abs_7 = - std::make_shared(test_abs_6); - std::shared_ptr test_abs_8 = - std::make_shared(test_abs_7); - std::shared_ptr test_abs_9 = - std::make_shared(test_abs_8); - std::shared_ptr test_abs_10 = - std::make_shared(test_abs_9); - std::shared_ptr test_res = - std::make_shared(test_abs_10); - test_model_0_1 = std::make_shared(ov::ResultVector{test_res}, - ov::ParameterVector{test_parameter}); - } - { - std::shared_ptr test_parameter = - std::make_shared(ov::element::f32, ov::Shape{1, 2}); - std::shared_ptr test_abs_0 = - std::make_shared(test_parameter); - std::shared_ptr test_abs_1 = - std::make_shared(test_abs_0); - std::shared_ptr test_abs_2 = - std::make_shared(test_abs_1); - std::shared_ptr test_abs_3 = - std::make_shared(test_abs_2); - std::shared_ptr test_abs_4 = - std::make_shared(test_abs_3); - std::shared_ptr test_abs_5 = - std::make_shared(test_abs_4); - std::shared_ptr test_abs_6 = - std::make_shared(test_abs_5); - std::shared_ptr test_abs_7 = - std::make_shared(test_abs_6); - std::shared_ptr test_abs_8 = - std::make_shared(test_abs_7); - std::shared_ptr test_abs_9 = - std::make_shared(test_abs_8); - std::shared_ptr test_abs_10 = - std::make_shared(test_abs_9); - std::shared_ptr test_res = - std::make_shared(test_abs_10); - test_model_1 = std::make_shared(ov::ResultVector{test_res}, - ov::ParameterVector{test_parameter}); - } - ASSERT_NO_THROW(this->match(test_model_0_0, test_model_0_1)); - ASSERT_TRUE(this->match(test_model_0_0, test_model_0_1)); - ASSERT_NO_THROW(this->match(test_model_0_0, test_model_1)); - ASSERT_FALSE(this->match(test_model_0_0, test_model_1)); - ASSERT_NO_THROW(this->match(test_model_0_1, test_model_1)); - ASSERT_FALSE(this->match(test_model_0_1, test_model_1)); -} +// TEST_F(SubgraphExtractorTest, match_90_percent) { +// { +// std::shared_ptr test_parameter = +// std::make_shared(ov::element::f32, ov::Shape{1, 2}); +// std::shared_ptr test_abs_0 = +// std::make_shared(test_parameter); +// std::shared_ptr test_abs_1 = +// std::make_shared(test_abs_0); +// std::shared_ptr test_abs_2 = +// std::make_shared(test_abs_1); +// std::shared_ptr test_abs_3 = +// std::make_shared(test_abs_2); +// std::shared_ptr test_abs_4 = +// std::make_shared(test_abs_3); +// std::shared_ptr test_abs_5 = +// std::make_shared(test_abs_4); +// std::shared_ptr test_abs_6 = +// std::make_shared(test_abs_5); +// std::shared_ptr test_abs_7 = +// std::make_shared(test_abs_6); +// std::shared_ptr test_abs_8 = +// std::make_shared(test_abs_7); +// std::shared_ptr test_abs_9 = +// std::make_shared(test_abs_8); +// std::shared_ptr test_abs_10 = +// std::make_shared(test_abs_9); +// std::shared_ptr test_res = +// std::make_shared(test_abs_10); +// test_model_0_0 = 
std::make_shared(ov::ResultVector{test_res}, +// ov::ParameterVector{test_parameter}); +// } +// { +// std::shared_ptr test_parameter = +// std::make_shared(ov::element::f32, ov::Shape{1, 2}); +// std::shared_ptr test_abs_0 = +// std::make_shared(test_parameter); +// std::shared_ptr test_abs_1 = +// std::make_shared(test_abs_0); +// std::shared_ptr test_abs_2 = +// std::make_shared(test_abs_1); +// std::shared_ptr test_abs_3 = +// std::make_shared(test_abs_2); +// std::shared_ptr test_abs_4 = +// std::make_shared(test_abs_3); +// std::shared_ptr test_abs_5 = +// std::make_shared(test_abs_4); +// std::shared_ptr test_abs_6 = +// std::make_shared(test_abs_5); +// std::shared_ptr test_abs_7 = +// std::make_shared(test_abs_6); +// std::shared_ptr test_abs_8 = +// std::make_shared(test_abs_7); +// std::shared_ptr test_abs_9 = +// std::make_shared(test_abs_8); +// std::shared_ptr test_abs_10 = +// std::make_shared(test_abs_9); +// std::shared_ptr test_res = +// std::make_shared(test_abs_10); +// test_model_0_1 = std::make_shared(ov::ResultVector{test_res}, +// ov::ParameterVector{test_parameter}); +// } +// { +// std::shared_ptr test_parameter = +// std::make_shared(ov::element::f32, ov::Shape{1, 2}); +// std::shared_ptr test_abs_0 = +// std::make_shared(test_parameter); +// std::shared_ptr test_abs_1 = +// std::make_shared(test_abs_0); +// std::shared_ptr test_abs_2 = +// std::make_shared(test_abs_1); +// std::shared_ptr test_abs_3 = +// std::make_shared(test_abs_2); +// std::shared_ptr test_abs_4 = +// std::make_shared(test_abs_3); +// std::shared_ptr test_abs_5 = +// std::make_shared(test_abs_4); +// std::shared_ptr test_abs_6 = +// std::make_shared(test_abs_5); +// std::shared_ptr test_abs_7 = +// std::make_shared(test_abs_6); +// std::shared_ptr test_abs_8 = +// std::make_shared(test_abs_7); +// std::shared_ptr test_abs_9 = +// std::make_shared(test_abs_8); +// std::shared_ptr test_abs_10 = +// std::make_shared(test_abs_9); +// std::shared_ptr test_res = +// std::make_shared(test_abs_10); +// test_model_1 = std::make_shared(ov::ResultVector{test_res}, +// ov::ParameterVector{test_parameter}); +// } +// ASSERT_NO_THROW(this->match(test_model_0_0, test_model_0_1)); +// ASSERT_TRUE(this->match(test_model_0_0, test_model_0_1)); +// ASSERT_NO_THROW(this->match(test_model_0_0, test_model_1)); +// ASSERT_FALSE(this->match(test_model_0_0, test_model_1)); +// ASSERT_NO_THROW(this->match(test_model_0_1, test_model_1)); +// ASSERT_FALSE(this->match(test_model_0_1, test_model_1)); +// } -TEST_F(SubgraphExtractorTest, extract) { - ASSERT_NO_THROW(this->extract(test_model_0_0)); - ASSERT_NO_THROW(this->extract(test_model_0_1)); - ASSERT_NO_THROW(this->extract(test_model_1)); -} +// TEST_F(SubgraphExtractorTest, extract) { +// ASSERT_NO_THROW(this->extract(test_model_0_0)); +// ASSERT_NO_THROW(this->extract(test_model_0_1)); +// ASSERT_NO_THROW(this->extract(test_model_1)); +// } -TEST_F(SubgraphExtractorTest, is_subgraph) { - auto is_subgraph = this->is_subgraph(test_model_0_0, test_model_0_0); - ASSERT_NO_THROW(this->is_subgraph(test_model_0_0, test_model_0_0)); - ASSERT_TRUE(std::get<0>(is_subgraph)); - ASSERT_NO_THROW(this->is_subgraph(test_model_0_0, test_model_1)); - is_subgraph = this->is_subgraph(test_model_0_0, test_model_1); - ASSERT_FALSE(std::get<0>(is_subgraph)); - ASSERT_NO_THROW(this->is_subgraph(test_model_0_1, test_model_1)); - is_subgraph = this->is_subgraph(test_model_0_1, test_model_1); - ASSERT_FALSE(std::get<0>(is_subgraph)); - { - std::shared_ptr test_parameter = - 
std::make_shared(ov::element::f32, ov::Shape{1, 2}); - std::shared_ptr test_abs_0 = - std::make_shared(test_parameter); - std::shared_ptr test_abs_1 = - std::make_shared(test_abs_0); - std::shared_ptr test_res = - std::make_shared(test_abs_1); - auto big_model_0 = std::make_shared(ov::ResultVector{test_res}, - ov::ParameterVector{test_parameter}); - is_subgraph = this->is_subgraph(test_model_0_0, big_model_0); - ASSERT_NO_THROW(this->is_subgraph(test_model_0_0, big_model_0)); - ASSERT_TRUE(std::get<0>(is_subgraph)); - ASSERT_EQ(std::get<1>(is_subgraph), big_model_0); - ASSERT_EQ(std::get<2>(is_subgraph), test_model_0_0); +// TEST_F(SubgraphExtractorTest, is_subgraph) { +// auto is_subgraph = this->is_subgraph(test_model_0_0, test_model_0_0); +// ASSERT_NO_THROW(this->is_subgraph(test_model_0_0, test_model_0_0)); +// ASSERT_TRUE(std::get<0>(is_subgraph)); +// ASSERT_NO_THROW(this->is_subgraph(test_model_0_0, test_model_1)); +// is_subgraph = this->is_subgraph(test_model_0_0, test_model_1); +// ASSERT_FALSE(std::get<0>(is_subgraph)); +// ASSERT_NO_THROW(this->is_subgraph(test_model_0_1, test_model_1)); +// is_subgraph = this->is_subgraph(test_model_0_1, test_model_1); +// ASSERT_FALSE(std::get<0>(is_subgraph)); +// { +// std::shared_ptr test_parameter = +// std::make_shared(ov::element::f32, ov::Shape{1, 2}); +// std::shared_ptr test_abs_0 = +// std::make_shared(test_parameter); +// std::shared_ptr test_abs_1 = +// std::make_shared(test_abs_0); +// std::shared_ptr test_res = +// std::make_shared(test_abs_1); +// auto big_model_0 = std::make_shared(ov::ResultVector{test_res}, +// ov::ParameterVector{test_parameter}); +// is_subgraph = this->is_subgraph(test_model_0_0, big_model_0); +// ASSERT_NO_THROW(this->is_subgraph(test_model_0_0, big_model_0)); +// ASSERT_TRUE(std::get<0>(is_subgraph)); +// ASSERT_EQ(std::get<1>(is_subgraph), big_model_0); +// ASSERT_EQ(std::get<2>(is_subgraph), test_model_0_0); - is_subgraph = this->is_subgraph(test_model_0_1, big_model_0); - ASSERT_NO_THROW(this->is_subgraph(test_model_0_1, big_model_0)); - ASSERT_TRUE(std::get<0>(is_subgraph)); - ASSERT_EQ(std::get<1>(is_subgraph), big_model_0); - ASSERT_EQ(std::get<2>(is_subgraph), test_model_0_1); - ASSERT_NO_THROW(this->is_subgraph(test_model_1, big_model_0)); - ASSERT_FALSE(std::get<0>(this->is_subgraph(test_model_1, big_model_0))); - } -} +// is_subgraph = this->is_subgraph(test_model_0_1, big_model_0); +// ASSERT_NO_THROW(this->is_subgraph(test_model_0_1, big_model_0)); +// ASSERT_TRUE(std::get<0>(is_subgraph)); +// ASSERT_EQ(std::get<1>(is_subgraph), big_model_0); +// ASSERT_EQ(std::get<2>(is_subgraph), test_model_0_1); +// ASSERT_NO_THROW(this->is_subgraph(test_model_1, big_model_0)); +// ASSERT_FALSE(std::get<0>(this->is_subgraph(test_model_1, big_model_0))); +// } +// } -} // namespace +// } // namespace diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/test_models/model_0.hpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/test_models/model_0.hpp index 43f49506ee40c8..4a37a80bbfdb04 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/test_models/model_0.hpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/test_models/model_0.hpp @@ -11,8 +11,12 @@ #include "openvino/op/relu.hpp" #include "openvino/op/parameter.hpp" #include "openvino/op/result.hpp" +#include "matchers/subgraph/repeat_pattern.hpp" class Model_0 { +private: + using PatternBorders = ov::tools::subgraph_dumper::RepeatPatternExtractor::PatternBorders; + 
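    // PatternBorders presumably aliases a pair of the pattern's input ports and output ports
    // (something along the lines of std::pair<std::vector<ov::Input<ov::Node>>,
    // std::vector<ov::Output<ov::Node>>>), which is consistent with how it is initialized
    // below: {test_abs_0->inputs(), test_relu_0->outputs()}.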
public: Model_0() { // param param @@ -48,6 +52,13 @@ class Model_0 { std::make_shared(test_add_0); model = std::make_shared(ov::ResultVector{test_res}, ov::ParameterVector{test_parameter_0, test_parameter_1}); + ref_nodes = {{{test_abs_0, test_relu_0}, {test_abs_1, test_relu_1}}}; + { + PatternBorders ref_pattern_0 = {test_abs_0->inputs(), test_relu_0->outputs()}, + ref_pattern_1 = {test_abs_1->inputs(), test_relu_1->outputs()}; + std::vector> ref_res = {{ref_pattern_0, ref_pattern_1}}; + ref_borders = std::move(ref_res); + } } std::shared_ptr get() { @@ -72,6 +83,14 @@ class Model_0 { return ref; } + std::vector> + get_ref_node_vector() { return ref_nodes; } + + std::vector> + get_ref_node_borders() { return ref_borders; } + protected: std::shared_ptr model; + std::vector> ref_nodes; + std::vector> ref_borders; }; diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/test_models/model_1.hpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/test_models/model_1.hpp index 5893fb949f774a..96a478abc09c56 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/test_models/model_1.hpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/test_models/model_1.hpp @@ -12,8 +12,12 @@ #include "openvino/op/parameter.hpp" #include "openvino/op/result.hpp" #include "openvino/op/subtract.hpp" +#include "matchers/subgraph/repeat_pattern.hpp" class Model_1 { +private: + using PatternBorders = ov::tools::subgraph_dumper::RepeatPatternExtractor::PatternBorders; + public: Model_1() { // param param param param @@ -119,6 +123,22 @@ class Model_1 { ov::ParameterVector{test_parameter_0, test_parameter_1, test_parameter_0_0, test_parameter_0_1, test_parameter_1_0, test_parameter_1_1}); + + ref_nodes = {{{test_abs_0, test_relu_0}, {test_abs_0_0, test_relu_0_0}}, + {{test_abs_1, test_clamp_1}, {test_abs_0_1, test_clamp_0_1}}, + {{test_multiply_0_1, test_relu_0_1}, {test_multiply_1_1, test_relu_1_1}}}; + { + PatternBorders ref_pattern_0 = {test_abs_0->inputs(), test_relu_0->outputs()}, + ref_pattern_0_0 = {test_abs_0_0->inputs(), test_relu_0_0->outputs()}, + ref_pattern_1 = {test_abs_1->inputs(), test_clamp_1->outputs()}, + ref_pattern_0_1_0 = {test_abs_0_1->inputs(), test_clamp_0_1->outputs()}, + test_pattern_0_1_1 = {test_multiply_0_1->inputs(), test_relu_0_1->outputs()}, + test_pattern_1_1 = {test_multiply_1_1->inputs(), test_relu_1_1->outputs()}; + std::vector> ref_res = {{ref_pattern_0, ref_pattern_0_0}, + {ref_pattern_1, ref_pattern_0_1_0}, + {test_pattern_0_1_1, test_pattern_1_1}}; + ref_borders = std::move(ref_res); + } } std::shared_ptr get() { @@ -166,10 +186,19 @@ class Model_1 { std::make_shared(test_relu_1); auto ref_model = std::make_shared(ov::ResultVector{res}, ov::ParameterVector{test_parameter_1_0, test_parameter_1_1}); + ref.push_back(ref_model); } return ref; } + std::vector> + get_ref_node_vector() { return ref_nodes; } + + std::vector> + get_ref_node_borders() { return ref_borders; } + protected: std::shared_ptr model; + std::vector> ref_nodes; + std::vector> ref_borders; }; diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/test_models/model_2.hpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/test_models/model_2.hpp index fd7a24cbe49e3d..94f11ee192b06f 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/test_models/model_2.hpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/test_models/model_2.hpp @@ -13,6 +13,9 @@ #include 
"openvino/op/result.hpp" class Model_2 { +private: + using PatternBorders = ov::tools::subgraph_dumper::RepeatPatternExtractor::PatternBorders; + public: Model_2() { // param @@ -55,9 +58,17 @@ class Model_2 { } std::vector> get_repeat_pattern_ref() { - return {}; + return std::vector>(); } + std::vector> + get_ref_node_vector() { return ref_nodes; } + + std::vector> + get_ref_node_borders() { return ref_borders; } + protected: std::shared_ptr model; + std::vector> ref_nodes; + std::vector> ref_borders; }; diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/utils/model.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/utils/model.cpp index a5bb560f486e1e..afb4f490b1d0dc 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/utils/model.cpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/utils/model.cpp @@ -4,6 +4,7 @@ #include "openvino/op/util/op_types.hpp" #include "utils/model.hpp" +#include "utils/model_comparator.hpp" #include "matchers/subgraph/subgraph.hpp" #include "test_models/model_0.hpp" #include "test_models/model_1.hpp" @@ -16,11 +17,11 @@ using namespace ov::tools::subgraph_dumper; using ModelUtilsTest = SubgraphsDumperBaseTest; -std::set> +ov::NodeVector get_functional_ops(const std::shared_ptr& model) { - std::set> nodes; + std::vector> nodes; for (const auto& op : model->get_ordered_ops()) { - nodes.insert(op); + nodes.push_back(op); } return nodes; } @@ -31,12 +32,11 @@ TEST_F(ModelUtilsTest, generate_0) { { std::unordered_set checked_ops; auto func_ops = get_functional_ops(test_model); - auto model_with_in_info = generate_model(func_ops, checked_ops, "test_extractor"); + auto model_with_in_info = generate_model(func_ops, checked_ops); recovered_model = std::get<0>(model_with_in_info); } { - SubgraphExtractor extractor; - ASSERT_TRUE(extractor.match(test_model, recovered_model)); + ASSERT_TRUE(ModelComparator::get()->match(test_model, recovered_model)); } } @@ -46,12 +46,11 @@ TEST_F(ModelUtilsTest, generate_1) { { std::unordered_set checked_ops; auto func_ops = get_functional_ops(test_model); - auto model_with_in_info = generate_model(func_ops, checked_ops, "test_extractor"); + auto model_with_in_info = generate_model(func_ops, checked_ops); recovered_model = std::get<0>(model_with_in_info); } { - SubgraphExtractor extractor; - ASSERT_TRUE(extractor.match(test_model, recovered_model)); + ASSERT_TRUE(ModelComparator::get()->match(test_model, recovered_model)); } } @@ -61,14 +60,59 @@ TEST_F(ModelUtilsTest, generate_2) { { std::unordered_set checked_ops; auto func_ops = get_functional_ops(test_model); - auto model_with_in_info = generate_model(func_ops, checked_ops, "extract_model"); + auto model_with_in_info = generate_model(func_ops, checked_ops); recovered_model = std::get<0>(model_with_in_info); auto in_info = std::get<1>(model_with_in_info); } { - SubgraphExtractor extractor; - ASSERT_TRUE(extractor.match(test_model, recovered_model)); + ASSERT_TRUE(ModelComparator::get()->match(test_model, recovered_model)); } } +TEST_F(ModelUtilsTest, align_input_info) { + Model_0 test_model_0, test_model_1; + auto in_info_0 = get_input_info_by_model(test_model_0.get()); + auto in_info_1 = get_input_info_by_model(test_model_1.get()); + ASSERT_NE(in_info_0, in_info_1); + ASSERT_NO_THROW(align_input_info(test_model_0.get(), test_model_1.get(), in_info_0, in_info_1)); + auto in_info_ref = align_input_info(test_model_0.get(), test_model_1.get(), in_info_0, in_info_1); + ASSERT_EQ(in_info_1, 
in_info_ref); +} + +TEST_F(ModelUtilsTest, align_input_info_for_subgraphs) { + Model_0 model_0, model_1; + auto test_model_0 = model_0.get(); + auto test_model_1 = model_1.get(); + auto in_info_0 = get_input_info_by_model(test_model_0); + auto in_info_1 = get_input_info_by_model(test_model_1); + ASSERT_NE(in_info_0, in_info_1); + std::map matched_ops; + auto params_0 = test_model_0->get_parameters(); + auto params_1 = test_model_1->get_parameters(); + size_t params_cnt = params_0.size(); + for (size_t param_id = 0; param_id < params_cnt; ++param_id) { + matched_ops.insert({params_0[param_id]->get_friendly_name(), + params_1[param_id]->get_friendly_name()}); + } + ASSERT_NO_THROW(align_input_info(test_model_0, test_model_1, + in_info_0, in_info_1, + matched_ops)); + auto ref = align_input_info(test_model_0, test_model_1, in_info_0, in_info_1, matched_ops); + ASSERT_EQ(in_info_1, ref); +} + +TEST_F(ModelUtilsTest, get_input_info_by_model) { + Model_1 model; + auto test_model = model.get(); + size_t param_idx = 0; + std::map ref; + for (auto& param : test_model->get_parameters()) { + std::string param_name = "parameter_" + std::to_string(param_idx++); + param->set_friendly_name(param_name); + ref.insert({param_name, InputInfo(param->get_default_output().get_partial_shape())}); + } + auto cur = get_input_info_by_model(test_model); + ASSERT_EQ(cur, ref); +} + } // namespace diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/utils/model_comparator.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/utils/model_comparator.cpp new file mode 100644 index 00000000000000..ca742c55d52046 --- /dev/null +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/utils/model_comparator.cpp @@ -0,0 +1,137 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "matchers/subgraph/subgraph.hpp" +#include "utils/model_comparator.hpp" +#include "base_test.hpp" + +#include "openvino/op/abs.hpp" +#include "openvino/op/relu.hpp" +#include "openvino/op/parameter.hpp" +#include "openvino/op/result.hpp" + +namespace { + +using namespace ov::tools::subgraph_dumper; + +// ======================= ExtractorsManagerTest Unit tests ======================= +class ModelComparatorTest : public SubgraphsDumperBaseTest { +protected: + void SetUp() override { + SubgraphsDumperBaseTest::SetUp(); + { + std::shared_ptr test_parameter = + std::make_shared(ov::element::f32, ov::Shape{1, 2}); + test_parameter->set_friendly_name("test_parameter_0"); + std::shared_ptr test_abs = + std::make_shared(test_parameter); + std::shared_ptr test_res = + std::make_shared(test_abs); + test_model_0_0 = std::make_shared(ov::ResultVector{test_res}, + ov::ParameterVector{test_parameter}); + } + { + std::shared_ptr test_parameter = + std::make_shared(ov::element::f32, ov::Shape{2, 5}); + test_parameter->set_friendly_name("test_parameter_1"); + std::shared_ptr test_abs = + std::make_shared(test_parameter); + std::shared_ptr test_res = + std::make_shared(test_abs); + test_model_0_1 = std::make_shared(ov::ResultVector{test_res}, + ov::ParameterVector{test_parameter}); + } + { + std::shared_ptr test_parameter = + std::make_shared(ov::element::f32, ov::Shape{2, 5}); + std::shared_ptr test_abs = + std::make_shared(test_parameter); + std::shared_ptr test_res = + std::make_shared(test_abs); + test_model_1 = std::make_shared(ov::ResultVector{test_res}, + ov::ParameterVector{test_parameter}); + } + } + + void TearDown() override { + ModelComparator::Ptr 
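        // ModelComparator::get() presumably hands back a shared, process-wide instance (the
        // ModelComparatorTest.get test below checks that two calls return the same pointer),
        // so TearDown() restores the settings individual tests may change: strict shape
        // matching off and the match coefficient back to 0.9.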
model_comparator = ModelComparator::get(); + model_comparator->set_shape_strict_match(false); + model_comparator->set_match_coefficient(0.9f); + } + + std::shared_ptr test_model_0_0, test_model_0_1, test_model_1; +}; + +TEST_F(ModelComparatorTest, get) { + ModelComparator::Ptr model_comparator = nullptr; + ASSERT_NO_THROW(model_comparator = ModelComparator::get()); + ASSERT_EQ(model_comparator, ModelComparator::get()); +} + +TEST_F(ModelComparatorTest, match) { + ModelComparator::Ptr model_comparator = ModelComparator::get(); + ASSERT_NO_THROW(model_comparator->match(test_model_0_0, test_model_0_1)); + ASSERT_TRUE(model_comparator->match(test_model_0_0, test_model_0_1)); + ASSERT_NO_THROW(model_comparator->match(test_model_0_0, test_model_1)); + ASSERT_FALSE(model_comparator->match(test_model_0_0, test_model_1)); + ASSERT_NO_THROW(model_comparator->match(test_model_0_1, test_model_1)); + ASSERT_FALSE(model_comparator->match(test_model_0_1, test_model_1)); +} + +TEST_F(ModelComparatorTest, match_strict_shape) { + ModelComparator::Ptr model_comparator = ModelComparator::get(); + ASSERT_NO_THROW(model_comparator->set_shape_strict_match(true)); + ASSERT_NO_THROW(model_comparator->match(test_model_0_0, test_model_0_1)); + ASSERT_FALSE(model_comparator->match(test_model_0_0, test_model_0_1)); + { + { + std::shared_ptr test_parameter = + std::make_shared(ov::element::f32, ov::Shape{1, 2}); + test_parameter->set_friendly_name("test_parameter_1"); + std::shared_ptr test_abs = + std::make_shared(test_parameter); + std::shared_ptr test_res = + std::make_shared(test_abs); + test_model_0_1 = std::make_shared(ov::ResultVector{test_res}, + ov::ParameterVector{test_parameter}); + } + ASSERT_TRUE(model_comparator->match(test_model_0_0, test_model_0_1)); + } +} + +TEST_F(ModelComparatorTest, match_with_low_coeff) { + ModelComparator::Ptr model_comparator = ModelComparator::get(); + model_comparator->set_match_coefficient(0.5f); + ASSERT_NO_THROW(model_comparator->match(test_model_0_0, test_model_0_1)); + ASSERT_TRUE(model_comparator->match(test_model_0_0, test_model_0_1)); + ASSERT_NO_THROW(model_comparator->match(test_model_0_0, test_model_1)); + ASSERT_TRUE(model_comparator->match(test_model_0_0, test_model_1)); + ASSERT_NO_THROW(model_comparator->match(test_model_0_1, test_model_1)); + ASSERT_TRUE(model_comparator->match(test_model_0_1, test_model_1)); +} + +TEST_F(ModelComparatorTest, match_with_in_info) { + ModelComparator::Ptr model_comparator = ModelComparator::get(); + std::map test_in_info({{"test_parameter_0", InputInfo()}}), + test_in_info_1({{"test_parameter_1", InputInfo({}, 1, 2, true)}}); + ASSERT_NO_THROW(model_comparator->match(test_model_0_0, test_model_0_1, test_in_info, test_in_info)); + ASSERT_TRUE(std::get<0>(model_comparator->match(test_model_0_0, test_model_0_1, test_in_info, test_in_info))); + ASSERT_NO_THROW(model_comparator->match(test_model_0_0, test_model_0_1, test_in_info, test_in_info_1)); + ASSERT_FALSE(std::get<0>(model_comparator->match(test_model_0_0, test_model_0_1, test_in_info, test_in_info_1))); + ASSERT_NO_THROW(model_comparator->match(test_model_0_1, test_model_1, test_in_info, test_in_info)); + ASSERT_FALSE(std::get<0>(model_comparator->match(test_model_0_1, test_model_1, test_in_info, test_in_info))); +} + +TEST_F(ModelComparatorTest, is_subgraph) { + ModelComparator::Ptr model_comparator = ModelComparator::get(); + ASSERT_NO_THROW(model_comparator->is_subgraph(test_model_0_0, test_model_0_1)); + auto is_subgraph = model_comparator->is_subgraph(test_model_0_0, 
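        // is_subgraph() evidently returns a tuple: std::get<0> reports whether one model is a
        // subgraph of the other, and, judging by the retired SubgraphExtractorTest above,
        // std::get<1> and std::get<2> carry the larger and the matched smaller model.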
test_model_0_1); + ASSERT_TRUE(std::get<0>(is_subgraph)); + ASSERT_NO_THROW(model_comparator->is_subgraph(test_model_0_0, test_model_1)); + ASSERT_FALSE(std::get<0>(model_comparator->is_subgraph(test_model_0_0, test_model_1))); + ASSERT_NO_THROW(model_comparator->is_subgraph(test_model_0_1, test_model_1)); + ASSERT_FALSE(std::get<0>(model_comparator->is_subgraph(test_model_0_1, test_model_1))); +} + +} // namespace From fa33693c4ad5b4d17d3ce1fd719268891ea5c512 Mon Sep 17 00:00:00 2001 From: Oleg Pipikin Date: Thu, 12 Oct 2023 19:22:16 +0200 Subject: [PATCH 178/257] Refactor NonZeroLayerTest, NormalizeL2LayerTest, OneHotLayerTest, PadLayerTest (#20318) * Refactor NonZeroLayerTest * Refactor NormalizeL2LayerTest * Refactor OneHotLayerTest * Refactor PadLayerTest * Apply comments --- .../single_layer_tests/nonzero.cpp | 47 +++--- .../single_layer_tests/normalize_l2.cpp | 62 ++++---- .../single_layer_tests/one_hot.cpp | 113 +++++++------- .../single_layer_tests/pad.cpp | 145 +++++++++--------- .../skip_tests_config.cpp | 2 - .../include/single_op_tests/nonzero.hpp | 15 ++ .../include/single_op_tests/normalize_l2.hpp | 15 ++ .../include/single_op_tests/one_hot.hpp | 15 ++ .../shared/include/single_op_tests/pad.hpp | 19 +++ .../shared_test_classes/single_op/nonzero.hpp | 31 ++++ .../single_op/normalize_l2.hpp | 33 ++++ .../shared_test_classes/single_op/one_hot.hpp | 37 +++++ .../shared_test_classes/single_op/pad.hpp | 48 ++++++ .../src/base/utils/generate_inputs.cpp | 12 ++ .../src/single_op/nonzero.cpp | 57 +++++++ .../src/single_op/normalize_l2.cpp | 62 ++++++++ .../src/single_op/one_hot.cpp | 71 +++++++++ .../shared_test_classes/src/single_op/pad.cpp | 85 ++++++++++ 18 files changed, 687 insertions(+), 182 deletions(-) create mode 100644 src/tests/functional/plugin/shared/include/single_op_tests/nonzero.hpp create mode 100644 src/tests/functional/plugin/shared/include/single_op_tests/normalize_l2.hpp create mode 100644 src/tests/functional/plugin/shared/include/single_op_tests/one_hot.hpp create mode 100644 src/tests/functional/plugin/shared/include/single_op_tests/pad.hpp create mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/nonzero.hpp create mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/normalize_l2.hpp create mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/one_hot.hpp create mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/pad.hpp create mode 100644 src/tests/functional/shared_test_classes/src/single_op/nonzero.cpp create mode 100644 src/tests/functional/shared_test_classes/src/single_op/normalize_l2.cpp create mode 100644 src/tests/functional/shared_test_classes/src/single_op/one_hot.cpp create mode 100644 src/tests/functional/shared_test_classes/src/single_op/pad.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/nonzero.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/nonzero.cpp index bbbb7f888dcd2f..629392a5d99829 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/nonzero.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/nonzero.cpp @@ -2,35 +2,34 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "single_layer_tests/nonzero.hpp" +#include "single_op_tests/nonzero.hpp" #include "common_test_utils/test_constants.hpp" #include -using namespace 
ngraph::helpers; -using namespace LayerTestsDefinitions; - namespace { - std::vector> inShapes = { - {1000}, - {4, 1000}, - {2, 4, 1000}, - {2, 4, 4, 1000}, - {2, 4, 4, 2, 1000}, - }; +using ov::test::NonZeroLayerTest; + +std::vector> input_shapes_static = { + {{1000}}, + {{4, 1000}}, + {{2, 4, 1000}}, + {{2, 4, 4, 1000}}, + {{2, 4, 4, 2, 1000}}, +}; - const std::vector inputPrecisions = { - InferenceEngine::Precision::I32, - InferenceEngine::Precision::FP16, - InferenceEngine::Precision::U8, - }; +const std::vector model_types = { + ov::element::i32, + ov::element::f16, + ov::element::u8, +}; - ConfigMap config; +std::map config = {}; - INSTANTIATE_TEST_SUITE_P(smoke_nonzero, NonZeroLayerTest, - ::testing::Combine( - ::testing::ValuesIn(inShapes), - ::testing::ValuesIn(inputPrecisions), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::Values(config)), - NonZeroLayerTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_nonzero, NonZeroLayerTest, + ::testing::Combine( + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_static)), + ::testing::ValuesIn(model_types), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::Values(config)), + NonZeroLayerTest::getTestCaseName); } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/normalize_l2.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/normalize_l2.cpp index 6941437751d567..7fbf1207bc4e29 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/normalize_l2.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/normalize_l2.cpp @@ -4,36 +4,38 @@ #include -#include "single_layer_tests/normalize_l2.hpp" - -using namespace LayerTestsDefinitions; +#include "single_op_tests/normalize_l2.hpp" namespace { -const std::vector netPrecisions = { - InferenceEngine::Precision::FP32, - InferenceEngine::Precision::FP16 +using ov::test::NormalizeL2LayerTest; + +const std::vector model_types = { + ov::element::f32, + ov::element::f16 }; const std::vector eps = {1e-12f, 1e-6f, 1e-3f, 0.1f, 100}; -const std::vector epsMode = { - ngraph::op::EpsMode::ADD, - ngraph::op::EpsMode::MAX, +const std::vector eps_modes = { + ov::op::EpsMode::ADD, + ov::op::EpsMode::MAX, }; /* ============= 1D ============= */ // [SKIPPED][CPU] Unsupported rank, Issue: 35627 -const std::vector> axes_1D = { +const std::vector> axes_1d = { {}, {0} }; +std::vector input_shape_1d_static = {{5}}; + const auto normL2params_1D = testing::Combine( - testing::ValuesIn(axes_1D), + testing::ValuesIn(axes_1d), testing::ValuesIn(eps), - testing::ValuesIn(epsMode), - testing::ValuesIn(std::vector>({{5}})), - testing::ValuesIn(netPrecisions), + testing::ValuesIn(eps_modes), + testing::Values(ov::test::static_shapes_to_test_representation(input_shape_1d_static)), + testing::ValuesIn(model_types), testing::Values(ov::test::utils::DEVICE_CPU) ); @@ -54,12 +56,14 @@ const std::vector> axes_2D = { // {0, 1}, }; +std::vector input_shape_2d_static = {{5, 3}}; + const auto normL2params_2D = testing::Combine( testing::ValuesIn(axes_2D), testing::ValuesIn(eps), - testing::ValuesIn(epsMode), - testing::ValuesIn(std::vector>({{5, 3}})), - testing::ValuesIn(netPrecisions), + testing::ValuesIn(eps_modes), + testing::Values(ov::test::static_shapes_to_test_representation(input_shape_2d_static)), + testing::ValuesIn(model_types), testing::Values(ov::test::utils::DEVICE_CPU) ); @@ -84,12 +88,14 @@ 
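// For context: NormalizeL2 divides the input by its L2 norm computed over the listed axes;
// `eps` is combined with the norm according to the eps mode (ADD or MAX) to avoid division
// by very small values.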
const std::vector> axes_3D = { // {0, 1, 2} }; +std::vector input_shape_3d_static = {{2, 5, 3}}; + const auto normL2params_3D = testing::Combine( testing::ValuesIn(axes_3D), testing::ValuesIn(eps), - testing::ValuesIn(epsMode), - testing::ValuesIn(std::vector>({{2, 5, 3}})), - testing::ValuesIn(netPrecisions), + testing::ValuesIn(eps_modes), + testing::Values(ov::test::static_shapes_to_test_representation(input_shape_3d_static)), + testing::ValuesIn(model_types), testing::Values(ov::test::utils::DEVICE_CPU) ); @@ -117,12 +123,14 @@ const std::vector> axes_4D = { // {0, 1, 2, 3} }; +std::vector input_shape_4d_static = {{2, 3, 10, 5}}; + const auto normL2params_4D = testing::Combine( testing::ValuesIn(axes_4D), testing::ValuesIn(eps), - testing::ValuesIn(epsMode), - testing::ValuesIn(std::vector>({{2, 3, 10, 5}})), - testing::ValuesIn(netPrecisions), + testing::ValuesIn(eps_modes), + testing::Values(ov::test::static_shapes_to_test_representation(input_shape_4d_static)), + testing::ValuesIn(model_types), testing::Values(ov::test::utils::DEVICE_CPU) ); @@ -153,12 +161,14 @@ const std::vector> axes_5D = { {0, 1, 2, 3} }; +std::vector input_shape_5d_static = {{2, 2, 3, 10, 5}}; + const auto normL2params_5D = testing::Combine( testing::ValuesIn(axes_5D), testing::ValuesIn(eps), - testing::ValuesIn(epsMode), - testing::ValuesIn(std::vector>({{2, 2, 3, 10, 5}})), - testing::ValuesIn(netPrecisions), + testing::ValuesIn(eps_modes), + testing::Values(ov::test::static_shapes_to_test_representation(input_shape_5d_static)), + testing::ValuesIn(model_types), testing::Values(ov::test::utils::DEVICE_CPU) ); diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/one_hot.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/one_hot.cpp index 4432eb7b7b2ea3..92272ba76b5bb9 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/one_hot.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/one_hot.cpp @@ -3,96 +3,97 @@ // #include -#include "single_layer_tests/one_hot.hpp" -using namespace LayerTestsDefinitions; +#include "single_op_tests/one_hot.hpp" namespace { -const std::vector netPrecisions = { - InferenceEngine::Precision::I32, +using ov::test::OneHotLayerTest; + +const std::vector model_types = { + ov::element::i32, }; -const std::vector argDepthType_IC = { ngraph::element::i32 }; -const std::vector argDepth_IC = { 1, 5, 1017 }; -const std::vector argSetType_IC = { ngraph::element::i32 }; -const std::vector argOnValue_IC = { 0, 1, -29 }; -const std::vector argOffValue_IC = { 0, 1, -127 }; -const std::vector argAxis_IC = {0}; -const std::vector> inputShapes_IC = {{4, 5}, {3, 7}}; +const std::vector arg_depth_type_ic = { ov::element::i32 }; +const std::vector arg_depth_ic = { 1, 5, 1017 }; +const std::vector arg_set_type_ic = { ov::element::i32 }; +const std::vector arg_on_value_ic = { 0, 1, -29 }; +const std::vector arg_off_value_ic = { 0, 1, -127 }; +const std::vector arg_axis_ic = {0}; +const std::vector> input_shapes_ic = {{{4, 5}}, {{3, 7}}}; -const auto oneHotParams_IC = testing::Combine( - testing::ValuesIn(argDepthType_IC), - testing::ValuesIn(argDepth_IC), - testing::ValuesIn(argSetType_IC), - testing::ValuesIn(argOnValue_IC), - testing::ValuesIn(argOffValue_IC), - testing::ValuesIn(argAxis_IC), - testing::ValuesIn(netPrecisions), - testing::ValuesIn(inputShapes_IC), +const auto oneHotParams_ic = testing::Combine( + testing::ValuesIn(arg_depth_type_ic), 
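    // OneHot parameters in a nutshell: `depth` is the length of the new one-hot dimension
    // inserted at `axis`; positions matching the input index get on_value, the rest get
    // off_value, both of the chosen set type.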
+ testing::ValuesIn(arg_depth_ic), + testing::ValuesIn(arg_set_type_ic), + testing::ValuesIn(arg_on_value_ic), + testing::ValuesIn(arg_off_value_ic), + testing::ValuesIn(arg_axis_ic), + testing::ValuesIn(model_types), + testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_ic)), testing::Values(ov::test::utils::DEVICE_CPU) ); INSTANTIATE_TEST_SUITE_P( smoke_OneHotIntConst, OneHotLayerTest, - oneHotParams_IC, + oneHotParams_ic, OneHotLayerTest::getTestCaseName ); -const std::vector argDepthType_Ax = { ngraph::element::i32 }; -const std::vector argDepth_Ax = { 3 }; -const std::vector argSetType_Ax = { ngraph::element::i32, ngraph::element::f32 }; -const std::vector argOnValue_Ax = { 17 }; -const std::vector argOffValue_Ax = { -3 }; -const std::vector argAxis_Ax = {0, 1, 3, 5, -4, -5}; -const std::vector> inputShapes_Ax = {{4, 8, 5, 3, 2, 9}}; +const std::vector arg_depth_type_ax = { ov::element::i32 }; +const std::vector arg_depth_ax = { 3 }; +const std::vector arg_set_type_ax = { ov::element::i32, ov::element::f32 }; +const std::vector arg_on_value_ax = { 17 }; +const std::vector arg_off_value_ax = { -3 }; +const std::vector arg_axis_ax = {0, 1, 3, 5, -4, -5}; +const std::vector> input_shapes_ax = {{{4, 8, 5, 3, 2, 9}}}; -const auto oneHotParams_Ax = testing::Combine( - testing::ValuesIn(argDepthType_Ax), - testing::ValuesIn(argDepth_Ax), - testing::ValuesIn(argSetType_Ax), - testing::ValuesIn(argOnValue_Ax), - testing::ValuesIn(argOffValue_Ax), - testing::ValuesIn(argAxis_Ax), - testing::ValuesIn(netPrecisions), - testing::ValuesIn(inputShapes_Ax), +const auto oneHotParams_ax = testing::Combine( + testing::ValuesIn(arg_depth_type_ax), + testing::ValuesIn(arg_depth_ax), + testing::ValuesIn(arg_set_type_ax), + testing::ValuesIn(arg_on_value_ax), + testing::ValuesIn(arg_off_value_ax), + testing::ValuesIn(arg_axis_ax), + testing::ValuesIn(model_types), + testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_ax)), testing::Values(ov::test::utils::DEVICE_CPU) ); INSTANTIATE_TEST_SUITE_P( smoke_OneHotAxrng, OneHotLayerTest, - oneHotParams_Ax, + oneHotParams_ax, OneHotLayerTest::getTestCaseName ); -const std::vector argDepthType_T = { ngraph::element::i8, ngraph::element::u8 }; -const std::vector argDepth_T = { 1 }; -const std::vector argSetType_T = { ngraph::element::i8, ngraph::element::u8, - ngraph::element::bf16, ngraph::element::f32 }; -const std::vector argOnValue_T = { 1 }; -const std::vector argOffValue_T = { 1 }; -const std::vector argAxis_T = {-1}; -const std::vector> inputShapes_T = {{2, 2}}; +const std::vector arg_depth_type_t = { ov::element::i8, ov::element::u8 }; +const std::vector arg_depth_t = { 1 }; +const std::vector arg_set_type_t = { ov::element::i8, ov::element::u8, + ov::element::bf16, ov::element::f32 }; +const std::vector arg_on_value_t = { 1 }; +const std::vector arg_off_value_t = { 1 }; +const std::vector arg_axis_t = {-1}; +const std::vector> input_shapes_t = {{{2, 2}}}; -const auto oneHotParams_T = testing::Combine( - testing::ValuesIn(argDepthType_T), - testing::ValuesIn(argDepth_T), - testing::ValuesIn(argSetType_T), - testing::ValuesIn(argOnValue_T), - testing::ValuesIn(argOffValue_T), - testing::ValuesIn(argAxis_T), - testing::ValuesIn(netPrecisions), - testing::ValuesIn(inputShapes_T), +const auto oneHotParams_t = testing::Combine( + testing::ValuesIn(arg_depth_type_t), + testing::ValuesIn(arg_depth_t), + testing::ValuesIn(arg_set_type_t), + testing::ValuesIn(arg_on_value_t), + testing::ValuesIn(arg_off_value_t), + 
testing::ValuesIn(arg_axis_t), + testing::ValuesIn(model_types), + testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_t)), testing::Values(ov::test::utils::DEVICE_CPU) ); INSTANTIATE_TEST_SUITE_P( smoke_OneHotArgType, OneHotLayerTest, - oneHotParams_T, + oneHotParams_t, OneHotLayerTest::getTestCaseName ); } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/pad.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/pad.cpp index 4d73dbf3aa070f..0dc19a7d79044f 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/pad.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/pad.cpp @@ -4,42 +4,45 @@ #include -#include "single_layer_tests/pad.hpp" - -using namespace LayerTestsDefinitions; +#include "single_op_tests/pad.hpp" namespace { -const std::vector netPrecisions = { - InferenceEngine::Precision::FP32, - InferenceEngine::Precision::I32, - InferenceEngine::Precision::FP16, - InferenceEngine::Precision::I16, - InferenceEngine::Precision::U16, - InferenceEngine::Precision::I8, - InferenceEngine::Precision::U8, +using ov::test::PadLayerTest; +using ov::op::PadMode; + +const std::vector model_types = { + ov::element::f32, + ov::element::i32, + ov::element::f16, + ov::element::i16, + ov::element::u16, + ov::element::i8, + ov::element::u8, }; -const std::vector argPadValue = {0.f, 1.f, -1.f, 2.5f}; +const std::vector arg_pad_values = {0.f, 1.f, -1.f, 2.5f}; -const std::vector padMode = { - ngraph::helpers::PadMode::EDGE, - ngraph::helpers::PadMode::REFLECT, - ngraph::helpers::PadMode::SYMMETRIC +const std::vector pad_modes = { + PadMode::EDGE, + PadMode::REFLECT, + PadMode::SYMMETRIC }; -const std::vector> padsBegin1D = {{0}, {1}, {2}, {-2}}; -const std::vector> padsEnd1D = {{0}, {1}, {2}, {-2}}; + +// 1D + +const std::vector> pads_begin_1d = {{0}, {1}, {2}, {-2}}; +const std::vector> pads_end_1d = {{0}, {1}, {2}, {-2}}; + +const std::vector input_shape_1d_static = {{5}}; const auto pad1DConstparams = testing::Combine( - testing::ValuesIn(padsBegin1D), - testing::ValuesIn(padsEnd1D), - testing::ValuesIn(argPadValue), - testing::Values(ngraph::helpers::PadMode::CONSTANT), - testing::ValuesIn(netPrecisions), - testing::Values(InferenceEngine::Precision::UNSPECIFIED), - testing::Values(InferenceEngine::Precision::UNSPECIFIED), - testing::Values(InferenceEngine::Layout::ANY), - testing::Values(std::vector{5}), + testing::ValuesIn(pads_begin_1d), + testing::ValuesIn(pads_end_1d), + testing::ValuesIn(arg_pad_values), + testing::Values(PadMode::CONSTANT), + testing::ValuesIn(model_types), + testing::Values(ov::test::static_shapes_to_test_representation(input_shape_1d_static)), testing::Values(ov::test::utils::DEVICE_CPU) ); @@ -51,15 +54,12 @@ INSTANTIATE_TEST_SUITE_P( ); const auto pad1Dparams = testing::Combine( - testing::ValuesIn(padsBegin1D), - testing::ValuesIn(padsEnd1D), + testing::ValuesIn(pads_begin_1d), + testing::ValuesIn(pads_end_1d), testing::Values(0), - testing::ValuesIn(padMode), - testing::ValuesIn(netPrecisions), - testing::Values(InferenceEngine::Precision::UNSPECIFIED), - testing::Values(InferenceEngine::Precision::UNSPECIFIED), - testing::Values(InferenceEngine::Layout::ANY), - testing::Values(std::vector{5}), + testing::ValuesIn(pad_modes), + testing::ValuesIn(model_types), + testing::Values(ov::test::static_shapes_to_test_representation(input_shape_1d_static)), 
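    // Note how much slimmer the parameter set is after the refactor: the old
    // InferenceEngine::Precision::UNSPECIFIED input/output precisions and Layout::ANY entries
    // are gone (presumably taken care of by ov::test::SubgraphBaseTest), and plain shape
    // vectors are wrapped via static_shapes_to_test_representation().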
testing::Values(ov::test::utils::DEVICE_CPU) ); @@ -70,19 +70,21 @@ INSTANTIATE_TEST_SUITE_P( PadLayerTest::getTestCaseName ); -const std::vector> padsBegin2D = {{0, 0}, {1, 1}, {-2, 0}, {0, 3}}; -const std::vector> padsEnd2D = {{0, 0}, {1, 1}, {0, 1}, {-3, -2}}; + +// 2D + +const std::vector> pads_begin_2d = {{0, 0}, {1, 1}, {-2, 0}, {0, 3}}; +const std::vector> pads_end_2d = {{0, 0}, {1, 1}, {0, 1}, {-3, -2}}; + +const std::vector input_shape_2d_static = {{13, 5}}; const auto pad2DConstparams = testing::Combine( - testing::ValuesIn(padsBegin2D), - testing::ValuesIn(padsEnd2D), - testing::ValuesIn(argPadValue), - testing::Values(ngraph::helpers::PadMode::CONSTANT), - testing::ValuesIn(netPrecisions), - testing::Values(InferenceEngine::Precision::UNSPECIFIED), - testing::Values(InferenceEngine::Precision::UNSPECIFIED), - testing::Values(InferenceEngine::Layout::ANY), - testing::Values(std::vector{13, 5}), + testing::ValuesIn(pads_begin_2d), + testing::ValuesIn(pads_end_2d), + testing::ValuesIn(arg_pad_values), + testing::Values(PadMode::CONSTANT), + testing::ValuesIn(model_types), + testing::Values(ov::test::static_shapes_to_test_representation(input_shape_2d_static)), testing::Values(ov::test::utils::DEVICE_CPU) ); @@ -94,15 +96,12 @@ INSTANTIATE_TEST_SUITE_P( ); const auto pad2Dparams = testing::Combine( - testing::ValuesIn(padsBegin2D), - testing::ValuesIn(padsEnd2D), + testing::ValuesIn(pads_begin_2d), + testing::ValuesIn(pads_end_2d), testing::Values(0), - testing::ValuesIn(padMode), - testing::ValuesIn(netPrecisions), - testing::Values(InferenceEngine::Precision::UNSPECIFIED), - testing::Values(InferenceEngine::Precision::UNSPECIFIED), - testing::Values(InferenceEngine::Layout::ANY), - testing::Values(std::vector{13, 5}), + testing::ValuesIn(pad_modes), + testing::ValuesIn(model_types), + testing::Values(ov::test::static_shapes_to_test_representation(input_shape_2d_static)), testing::Values(ov::test::utils::DEVICE_CPU) ); @@ -113,19 +112,21 @@ INSTANTIATE_TEST_SUITE_P( PadLayerTest::getTestCaseName ); -const std::vector> padsBegin4D = {{0, 0, 0, 0}, {0, 3, 0, 0}, {0, 0, 0, 1}, {0, 0, -1, 1}, {2, 0, 0, 0}, {0, 3, 0, -1}}; -const std::vector> padsEnd4D = {{0, 0, 0, 0}, {0, 3, 0, 0}, {1, 0, 0, 0}, {0, 0, 0, 2}, {1, -3, 0, 0}, {0, 3, 0, -1}}; + +// 4D + +const std::vector> pads_begin_4d = {{0, 0, 0, 0}, {0, 3, 0, 0}, {0, 0, 0, 1}, {0, 0, -1, 1}, {2, 0, 0, 0}, {0, 3, 0, -1}}; +const std::vector> pads_end_4d = {{0, 0, 0, 0}, {0, 3, 0, 0}, {1, 0, 0, 0}, {0, 0, 0, 2}, {1, -3, 0, 0}, {0, 3, 0, -1}}; + +const std::vector input_shape_4d_static = {{3, 5, 10, 11}}; const auto pad4DConstparams = testing::Combine( - testing::ValuesIn(padsBegin4D), - testing::ValuesIn(padsEnd4D), - testing::ValuesIn(argPadValue), - testing::Values(ngraph::helpers::PadMode::CONSTANT), - testing::ValuesIn(netPrecisions), - testing::Values(InferenceEngine::Precision::UNSPECIFIED), - testing::Values(InferenceEngine::Precision::UNSPECIFIED), - testing::Values(InferenceEngine::Layout::ANY), - testing::Values(std::vector{3, 5, 10, 11}), + testing::ValuesIn(pads_begin_4d), + testing::ValuesIn(pads_end_4d), + testing::ValuesIn(arg_pad_values), + testing::Values(PadMode::CONSTANT), + testing::ValuesIn(model_types), + testing::Values(ov::test::static_shapes_to_test_representation(input_shape_4d_static)), testing::Values(ov::test::utils::DEVICE_CPU) ); @@ -137,15 +138,12 @@ INSTANTIATE_TEST_SUITE_P( ); const auto pad4Dparams = testing::Combine( - testing::ValuesIn(padsBegin4D), - testing::ValuesIn(padsEnd4D), + 
testing::ValuesIn(pads_begin_4d), + testing::ValuesIn(pads_end_4d), testing::Values(0), - testing::ValuesIn(padMode), - testing::ValuesIn(netPrecisions), - testing::Values(InferenceEngine::Precision::UNSPECIFIED), - testing::Values(InferenceEngine::Precision::UNSPECIFIED), - testing::Values(InferenceEngine::Layout::ANY), - testing::Values(std::vector{3, 5, 10, 11}), + testing::ValuesIn(pad_modes), + testing::ValuesIn(model_types), + testing::Values(ov::test::static_shapes_to_test_representation(input_shape_4d_static)), testing::Values(ov::test::utils::DEVICE_CPU) ); @@ -155,5 +153,4 @@ INSTANTIATE_TEST_SUITE_P( pad4Dparams, PadLayerTest::getTestCaseName ); - } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp index 3cf22ebff921a3..e629a715b69890 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp @@ -64,8 +64,6 @@ std::vector disabledTestPatterns() { R"(.*BF16NetworkRestore1.*)", R"(.*MobileNet_ssd_with_branching.*)", - // TODO: 57562 No dynamic output shape support - R"(.*NonZeroLayerTest.*)", // Not expected behavior R"(.*Behavior.*InferRequestSetBlobByType.*Batched.*)", R"(.*OVCompiledModelBaseTest.*(CanGetInputsInfoAndCheck|canSetConfigToCompiledModel).*)", diff --git a/src/tests/functional/plugin/shared/include/single_op_tests/nonzero.hpp b/src/tests/functional/plugin/shared/include/single_op_tests/nonzero.hpp new file mode 100644 index 00000000000000..7afb5a97579bee --- /dev/null +++ b/src/tests/functional/plugin/shared/include/single_op_tests/nonzero.hpp @@ -0,0 +1,15 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "shared_test_classes/single_op/nonzero.hpp" + +namespace ov { +namespace test { +TEST_P(NonZeroLayerTest, Inference) { + run(); +} +} // namespace test +} // namespace ov diff --git a/src/tests/functional/plugin/shared/include/single_op_tests/normalize_l2.hpp b/src/tests/functional/plugin/shared/include/single_op_tests/normalize_l2.hpp new file mode 100644 index 00000000000000..aabe69a20654b1 --- /dev/null +++ b/src/tests/functional/plugin/shared/include/single_op_tests/normalize_l2.hpp @@ -0,0 +1,15 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "shared_test_classes/single_op/normalize_l2.hpp" + +namespace ov { +namespace test { +TEST_P(NormalizeL2LayerTest, Inference) { + run(); +} +} // namespace test +} // namespace ov diff --git a/src/tests/functional/plugin/shared/include/single_op_tests/one_hot.hpp b/src/tests/functional/plugin/shared/include/single_op_tests/one_hot.hpp new file mode 100644 index 00000000000000..91242249e8794b --- /dev/null +++ b/src/tests/functional/plugin/shared/include/single_op_tests/one_hot.hpp @@ -0,0 +1,15 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "shared_test_classes/single_op/one_hot.hpp" + +namespace ov { +namespace test { +TEST_P(OneHotLayerTest, Inference) { + run(); +} +} // namespace test +} // namespace ov diff --git a/src/tests/functional/plugin/shared/include/single_op_tests/pad.hpp b/src/tests/functional/plugin/shared/include/single_op_tests/pad.hpp new file mode 100644 index 00000000000000..a1bc55e5208eb7 --- /dev/null +++ 
b/src/tests/functional/plugin/shared/include/single_op_tests/pad.hpp @@ -0,0 +1,19 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "shared_test_classes/single_op/pad.hpp" + +namespace ov { +namespace test { +TEST_P(PadLayerTest, Inference) { + run(); +} + +TEST_P(Pad12LayerTest, Inference) { + run(); +} +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/nonzero.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/nonzero.hpp new file mode 100644 index 00000000000000..740259846b8150 --- /dev/null +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/nonzero.hpp @@ -0,0 +1,31 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include + +#include "shared_test_classes/base/ov_subgraph.hpp" + +namespace ov { +namespace test { +using NonZeroLayerTestParamsSet = typename std::tuple< + std::vector, // Input shapes + ov::element::Type, // Model shape + std::string, // Device name + std::map>; // Additional network configuration + +class NonZeroLayerTest : public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseTest { +public: + static std::string getTestCaseName(const testing::TestParamInfo& obj); + +protected: + void SetUp() override; +}; + +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/normalize_l2.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/normalize_l2.hpp new file mode 100644 index 00000000000000..160bfc89035258 --- /dev/null +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/normalize_l2.hpp @@ -0,0 +1,33 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include + +#include "shared_test_classes/base/ov_subgraph.hpp" + +namespace ov { +namespace test { +using NormalizeL2LayerTestParams = std::tuple< + std::vector, // axes + float, // eps + ov::op::EpsMode, // eps mode + std::vector, // input shape + ov::element::Type, // model type + std::string // target device +>; + +class NormalizeL2LayerTest : public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseTest { +public: + static std::string getTestCaseName(const testing::TestParamInfo& obj); + +protected: + void SetUp() override; +}; +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/one_hot.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/one_hot.hpp new file mode 100644 index 00000000000000..4714c8441603cc --- /dev/null +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/one_hot.hpp @@ -0,0 +1,37 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include + +#include "shared_test_classes/base/ov_subgraph.hpp" + +namespace ov { +namespace test { +typedef std::tuple< + ov::element::Type, // depth type (any integer type) + int64_t, // depth value + ov::element::Type, // On & Off values type (any supported type) + float, // OnValue + float, // OffValue + int64_t, // axis + ov::element::Type, // Model type + std::vector, // Input shapes + std::string // 
Target device name +> oneHotLayerTestParamsSet; + +class OneHotLayerTest : public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseTest { +public: + static std::string getTestCaseName(const testing::TestParamInfo& obj); + +protected: + void SetUp() override; +}; + +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/pad.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/pad.hpp new file mode 100644 index 00000000000000..1e8e2b7a50a144 --- /dev/null +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/pad.hpp @@ -0,0 +1,48 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include + +#include "shared_test_classes/base/ov_subgraph.hpp" +#include "openvino/op/util/attr_types.hpp" + +namespace ov { +namespace test { +typedef std::tuple< + std::vector, // padsBegin + std::vector, // padsEnd + float, // argPadValue + ov::op::PadMode, // padMode + ov::element::Type, // Net precision + std::vector, // Input shapes + std::string // Target device name +> padLayerTestParamsSet; + +class PadLayerTest : public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseTest { +public: + static std::string getTestCaseName(const testing::TestParamInfo& obj); + +protected: + void SetUp() override; + virtual std::shared_ptr create_pad_op(const std::shared_ptr&, + const std::shared_ptr&, + const std::shared_ptr&, + const std::shared_ptr&, + ov::op::PadMode) const; +}; + +class Pad12LayerTest : public PadLayerTest { + std::shared_ptr create_pad_op(const std::shared_ptr&, + const std::shared_ptr&, + const std::shared_ptr&, + const std::shared_ptr&, + ov::op::PadMode) const override; +}; +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/src/base/utils/generate_inputs.cpp b/src/tests/functional/shared_test_classes/src/base/utils/generate_inputs.cpp index b654b41feeb66b..e738e73d879d6e 100644 --- a/src/tests/functional/shared_test_classes/src/base/utils/generate_inputs.cpp +++ b/src/tests/functional/shared_test_classes/src/base/utils/generate_inputs.cpp @@ -1184,6 +1184,18 @@ ov::runtime::Tensor generate(const return tensor; } +ov::runtime::Tensor generate(const + std::shared_ptr& node, + size_t port, + const ov::element::Type& elemType, + const ov::Shape& targetShape) { + if (port == 0) { + InputGenerateData inGenData(-5, 10, 7, 222); + return ov::test::utils::create_and_fill_tensor(elemType, targetShape, inGenData.range, inGenData.start_from, inGenData.resolution, inGenData.seed); + } + return generate(std::dynamic_pointer_cast(node), port, elemType, targetShape); +} + template ov::runtime::Tensor generateInput(const std::shared_ptr& node, size_t port, diff --git a/src/tests/functional/shared_test_classes/src/single_op/nonzero.cpp b/src/tests/functional/shared_test_classes/src/single_op/nonzero.cpp new file mode 100644 index 00000000000000..610148b9127960 --- /dev/null +++ b/src/tests/functional/shared_test_classes/src/single_op/nonzero.cpp @@ -0,0 +1,57 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "shared_test_classes/single_op/nonzero.hpp" + +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/non_zero.hpp" + +namespace ov { +namespace test { + +std::string 
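// getTestCaseName() assembles the gtest instance suffix from the parameters: "IS=" lists each
// input's partial (dynamic) shape, "TS=" the per-iteration static shapes, followed by the
// element type and the target device.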
NonZeroLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { + std::vector shapes; + ov::element::Type model_type; + std::string target_device; + std::map additional_config; + std::tie(shapes, model_type, target_device, additional_config) = obj.param; + + std::ostringstream result; + result << "IS=("; + for (size_t i = 0lu; i < shapes.size(); i++) { + result << ov::test::utils::partialShape2str({shapes[i].first}) << (i < shapes.size() - 1lu ? "_" : ""); + } + result << ")_TS="; + for (size_t i = 0lu; i < shapes.front().second.size(); i++) { + result << "{"; + for (size_t j = 0lu; j < shapes.size(); j++) { + result << ov::test::utils::vec2str(shapes[j].second[i]) << (j < shapes.size() - 1lu ? "_" : ""); + } + result << "}_"; + } + result << "inPRC=" << model_type.get_type_name() << "_"; + result << "targetDevice=" << target_device; + return result.str(); +} + +void NonZeroLayerTest::SetUp() { + std::vector shapes; + ov::element::Type model_type; + std::map additional_config; + std::tie(shapes, model_type, targetDevice, additional_config) = GetParam(); + configuration.insert(additional_config.cbegin(), additional_config.cend()); + init_input_shapes(shapes); + + auto param = std::make_shared(model_type, inputDynamicShapes.front()); + + auto non_zero = std::make_shared(param); + + auto result = std::make_shared(non_zero); + + function = std::make_shared(result, ov::ParameterVector{param}, "non_zero"); +} +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/src/single_op/normalize_l2.cpp b/src/tests/functional/shared_test_classes/src/single_op/normalize_l2.cpp new file mode 100644 index 00000000000000..b3db915a59a035 --- /dev/null +++ b/src/tests/functional/shared_test_classes/src/single_op/normalize_l2.cpp @@ -0,0 +1,62 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "shared_test_classes/single_op/normalize_l2.hpp" + +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/normalize_l2.hpp" + +namespace ov { +namespace test { +std::string NormalizeL2LayerTest::getTestCaseName(const testing::TestParamInfo& obj) { + std::vector axes; + float eps; + ngraph::op::EpsMode eps_mode; + std::vector shapes; + ov::element::Type model_type; + std::string targetDevice; + std::tie(axes, eps, eps_mode, shapes, model_type, targetDevice) = obj.param; + + std::ostringstream result; + result << "IS=("; + for (size_t i = 0lu; i < shapes.size(); i++) { + result << ov::test::utils::partialShape2str({shapes[i].first}) << (i < shapes.size() - 1lu ? "_" : ""); + } + result << ")_TS="; + for (size_t i = 0lu; i < shapes.front().second.size(); i++) { + result << "{"; + for (size_t j = 0lu; j < shapes.size(); j++) { + result << ov::test::utils::vec2str(shapes[j].second[i]) << (j < shapes.size() - 1lu ? 
"_" : ""); + } + result << "}_"; + } + result << "axes=" << ov::test::utils::vec2str(axes) << "_"; + result << "eps=" << eps << "_"; + result << "eps_mode=" << eps_mode << "_"; + result << "netPRC=" << model_type.get_type_name() << "_"; + result << "targetDevice=" << targetDevice; + return result.str(); +} + +void NormalizeL2LayerTest::SetUp() { + std::vector shapes; + std::vector axes; + float eps; + ngraph::op::EpsMode eps_mode; + ov::element::Type model_type; + std::tie(axes, eps, eps_mode, shapes, model_type, targetDevice) = this->GetParam(); + init_input_shapes(shapes); + + auto param = std::make_shared(model_type, inputDynamicShapes.front()); + + auto norm_axes = std::make_shared(ov::element::i64, ov::Shape{axes.size()}, axes); + auto norm = std::make_shared(param, norm_axes, eps, eps_mode); + + auto result = std::make_shared(norm); + function = std::make_shared(result, ov::ParameterVector{param}, "NormalizeL2"); +} +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/src/single_op/one_hot.cpp b/src/tests/functional/shared_test_classes/src/single_op/one_hot.cpp new file mode 100644 index 00000000000000..273c51c00cef9c --- /dev/null +++ b/src/tests/functional/shared_test_classes/src/single_op/one_hot.cpp @@ -0,0 +1,71 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "shared_test_classes/single_op/one_hot.hpp" + +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/one_hot.hpp" + +namespace ov { +namespace test { +std::string OneHotLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { + int64_t axis; + ov::element::Type depth_type, set_type; + int64_t depth_val; + float on_val, off_val; + ov::element::Type model_type; + std::vector shapes; + std::string targetDevice; + + std::tie(depth_type, depth_val, set_type, on_val, off_val, axis, model_type, shapes, targetDevice) = obj.param; + + std::ostringstream result; + result << "IS=("; + for (size_t i = 0lu; i < shapes.size(); i++) { + result << ov::test::utils::partialShape2str({shapes[i].first}) << (i < shapes.size() - 1lu ? "_" : ""); + } + result << ")_TS="; + for (size_t i = 0lu; i < shapes.front().second.size(); i++) { + result << "{"; + for (size_t j = 0lu; j < shapes.size(); j++) { + result << ov::test::utils::vec2str(shapes[j].second[i]) << (j < shapes.size() - 1lu ? 
"_" : ""); + } + result << "}_"; + } + result << "depthType=" << depth_type << "_"; + result << "depth=" << depth_val << "_"; + result << "SetValueType=" << set_type << "_"; + result << "onValue=" << on_val << "_"; + result << "offValue=" << off_val << "_"; + result << "axis=" << axis << "_"; + + result << "netPRC=" << model_type.get_type_name() << "_"; + result << "trgDev=" << targetDevice; + return result.str(); +} + +void OneHotLayerTest::SetUp() { + int64_t axis; + ov::element::Type depth_type, set_type; + int64_t depth_val; + float on_val, off_val; + ov::element::Type model_type; + std::vector shapes; + std::tie(depth_type, depth_val, set_type, on_val, off_val, axis, model_type, shapes, targetDevice) = this->GetParam(); + init_input_shapes(shapes); + + auto param = std::make_shared(model_type, inputDynamicShapes.front()); + + auto depth_const = std::make_shared(depth_type, ov::Shape{}, depth_val); + auto on_value_const = std::make_shared(set_type, ov::Shape{}, on_val); + auto off_value_const = std::make_shared(set_type, ov::Shape{}, off_val); + auto onehot = std::make_shared(param, depth_const, on_value_const, off_value_const, axis); + + auto result = std::make_shared(onehot); + function = std::make_shared(result, ov::ParameterVector{param}, "OneHot"); +} +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/src/single_op/pad.cpp b/src/tests/functional/shared_test_classes/src/single_op/pad.cpp new file mode 100644 index 00000000000000..a62b33ec21263c --- /dev/null +++ b/src/tests/functional/shared_test_classes/src/single_op/pad.cpp @@ -0,0 +1,85 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "shared_test_classes/single_op/pad.hpp" + +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/pad.hpp" + +namespace ov { +namespace test { +std::string PadLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { + ov::element::Type model_type; + std::vector shapes; + std::vector pads_begin, pads_end; + ov::op::PadMode pad_mode; + float arg_pad_value; + std::string target_device; + std::tie(pads_begin, pads_end, arg_pad_value, pad_mode, model_type, shapes, target_device) = obj.param; + + std::ostringstream result; + result << "IS=("; + for (size_t i = 0lu; i < shapes.size(); i++) { + result << ov::test::utils::partialShape2str({shapes[i].first}) << (i < shapes.size() - 1lu ? "_" : ""); + } + result << ")_TS="; + for (size_t i = 0lu; i < shapes.front().second.size(); i++) { + result << "{"; + for (size_t j = 0lu; j < shapes.size(); j++) { + result << ov::test::utils::vec2str(shapes[j].second[i]) << (j < shapes.size() - 1lu ? 
"_" : ""); + } + result << "}_"; + } + result << "PadsBegin=" << ov::test::utils::vec2str(pads_begin) << "_"; + result << "PadsEnd=" << ov::test::utils::vec2str(pads_end) << "_"; + if (pad_mode == ov::op::PadMode::CONSTANT) { + result << "Value=" << arg_pad_value << "_"; + } + result << "PadMode=" << pad_mode << "_"; + result << "ModelType=" << model_type.get_type_name() << "_"; + result << "TrgDev=" << target_device; + return result.str(); +} + +void PadLayerTest::SetUp() { + ov::element::Type model_type; + std::vector shapes; + std::vector pads_begin, pads_end; + ov::op::PadMode pad_mode; + float arg_pad_value; + std::tie(pads_begin, pads_end, arg_pad_value, pad_mode, model_type, shapes, targetDevice) = this->GetParam(); + init_input_shapes(shapes); + + auto param = std::make_shared(model_type, inputDynamicShapes.front()); + + auto pads_begin_const = std::make_shared(ov::element::i64, ov::Shape{pads_begin.size()}, pads_begin.data()); + auto pads_end_const = std::make_shared(ov::element::i64, ov::Shape{pads_end.size()}, pads_end.data()); + auto arg_pad_value_const = std::make_shared(model_type, ov::Shape{}, &arg_pad_value); + + auto pad = create_pad_op(param, pads_begin_const, pads_end_const, arg_pad_value_const, pad_mode); + + auto result = std::make_shared(pad); + + function = std::make_shared(result, ov::ParameterVector{param}, "pad"); +} + +std::shared_ptr PadLayerTest::create_pad_op(const std::shared_ptr& data, + const std::shared_ptr& pads_begin, + const std::shared_ptr& pads_end, + const std::shared_ptr& arg_pad_value, + ov::op::PadMode pad_mode) const { + return std::make_shared(data, pads_begin, pads_end, arg_pad_value, pad_mode); +} + +std::shared_ptr Pad12LayerTest::create_pad_op(const std::shared_ptr& data, + const std::shared_ptr& pads_begin, + const std::shared_ptr& pads_end, + const std::shared_ptr& arg_pad_value, + ov::op::PadMode pad_mode) const { + return std::make_shared(data, pads_begin, pads_end, arg_pad_value, pad_mode); +} +} // namespace test +} // namespace ov From cb61ad46bdfecfd17c87669950e2cef3593d365c Mon Sep 17 00:00:00 2001 From: Oleg Pipikin Date: Thu, 12 Oct 2023 20:27:55 +0200 Subject: [PATCH 179/257] Refactor CumSumLayerTest, DeformablePSROIPoolingLayerTest, DepthToSpaceLayerTest (#19870) * Refactor CumSumLayerTest * Refactor DeformablePSROIPoolingLayerTest * Refactor DepthToSpaceLayerTest --- .../single_layer_tests/cum_sum.cpp | 74 ++++++------ .../deformable_psroi_pooling.cpp | 36 +++--- .../single_layer_tests/depth_to_space.cpp | 55 +++++---- .../single_layer_tests/roi_pooling.cpp | 9 +- .../dynamic/roi_pooling.cpp | 9 +- .../include/single_op_tests/cum_sum.hpp | 15 +++ .../deformable_psroi_pooling.hpp | 15 +++ .../single_op_tests/depth_to_space.hpp | 15 +++ .../shared_test_classes/single_op/cum_sum.hpp | 33 ++++++ .../single_op/deformable_psroi_pooling.hpp | 39 +++++++ .../single_op/depth_to_space.hpp | 33 ++++++ .../src/base/utils/generate_inputs.cpp | 22 ++++ .../src/single_op/cum_sum.cpp | 54 +++++++++ .../single_op/deformable_psroi_pooling.cpp | 110 ++++++++++++++++++ .../src/single_op/depth_to_space.cpp | 72 ++++++++++++ .../include/common_test_utils/data_utils.hpp | 52 ++------- .../common_test_utils/src/data_utils.cpp | 82 +++++++++++++ 17 files changed, 600 insertions(+), 125 deletions(-) create mode 100644 src/tests/functional/plugin/shared/include/single_op_tests/cum_sum.hpp create mode 100644 src/tests/functional/plugin/shared/include/single_op_tests/deformable_psroi_pooling.hpp create mode 100644 
src/tests/functional/plugin/shared/include/single_op_tests/depth_to_space.hpp create mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/cum_sum.hpp create mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/deformable_psroi_pooling.hpp create mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/depth_to_space.hpp create mode 100644 src/tests/functional/shared_test_classes/src/single_op/cum_sum.cpp create mode 100644 src/tests/functional/shared_test_classes/src/single_op/deformable_psroi_pooling.cpp create mode 100644 src/tests/functional/shared_test_classes/src/single_op/depth_to_space.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/cum_sum.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/cum_sum.cpp index f55650c696bc2a..19440f5ae88fe7 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/cum_sum.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/cum_sum.cpp @@ -4,27 +4,28 @@ #include -#include "single_layer_tests/cum_sum.hpp" +#include "single_op_tests/cum_sum.hpp" #include "common_test_utils/test_constants.hpp" -using namespace LayerTestsDefinitions; - -const std::vector> shapes = { - {16}, - {9, 15}, - {16, 10, 12}, - {5, 14, 5, 7}, - {7, 8, 6, 7, 13}, - {2, 3, 4, 2, 3, 5}, - {4, 3, 6, 2, 3, 4, 5, 2, 3, 4}, +namespace { +using ov::test::CumSumLayerTest; + +const std::vector> shapes_static = { + {{16}}, + {{9, 15}}, + {{16, 10, 12}}, + {{5, 14, 5, 7}}, + {{7, 8, 6, 7, 13}}, + {{2, 3, 4, 2, 3, 5}}, + {{4, 3, 6, 2, 3, 4, 5, 2, 3, 4}}, }; -const std::vector inputPrecision = { - InferenceEngine::Precision::I8, - InferenceEngine::Precision::U8, - InferenceEngine::Precision::I16, - InferenceEngine::Precision::I32, - InferenceEngine::Precision::FP32 +const std::vector model_types = { + ov::element::i8, + ov::element::u8, + ov::element::i16, + ov::element::i32, + ov::element::f32 }; const std::vector axes = { 0, 1, 2, 3, 4, 5, 6}; @@ -34,8 +35,8 @@ const std::vector exclusive = {true, false}; const std::vector reverse = {true, false}; const auto testCasesNegativeAxis = ::testing::Combine( - ::testing::Values(std::vector{4, 16, 3, 6, 5, 2}), - ::testing::Values(InferenceEngine::Precision::FP32), + ::testing::Values(ov::test::static_shapes_to_test_representation({{4, 16, 3, 6, 5, 2}})), + ::testing::Values(ov::element::f32), ::testing::ValuesIn(negativeAxes), ::testing::ValuesIn(exclusive), ::testing::ValuesIn(reverse), @@ -43,8 +44,8 @@ const auto testCasesNegativeAxis = ::testing::Combine( ); const auto testCasesAxis_0 = ::testing::Combine( - ::testing::ValuesIn(shapes), - ::testing::ValuesIn(inputPrecision), + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(shapes_static)), + ::testing::ValuesIn(model_types), ::testing::Values(axes[0]), ::testing::ValuesIn(exclusive), ::testing::ValuesIn(reverse), @@ -52,8 +53,9 @@ const auto testCasesAxis_0 = ::testing::Combine( ); const auto testCasesAxis_1 = ::testing::Combine( - ::testing::ValuesIn(std::vector>(shapes.begin() + 1, shapes.end())), - ::testing::ValuesIn(inputPrecision), + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation( + std::vector>(shapes_static.begin() + 1, shapes_static.end()))), + ::testing::ValuesIn(model_types), ::testing::Values(axes[1]), ::testing::ValuesIn(exclusive), ::testing::ValuesIn(reverse), @@ -61,8 +63,9 
@@ const auto testCasesAxis_1 = ::testing::Combine( ); const auto testCasesAxis_2 = ::testing::Combine( - ::testing::ValuesIn(std::vector>(shapes.begin() + 2, shapes.end())), - ::testing::ValuesIn(inputPrecision), + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation( + std::vector>(shapes_static.begin() + 2, shapes_static.end()))), + ::testing::ValuesIn(model_types), ::testing::Values(axes[2]), ::testing::ValuesIn(exclusive), ::testing::ValuesIn(reverse), @@ -70,8 +73,9 @@ const auto testCasesAxis_2 = ::testing::Combine( ); const auto testCasesAxis_3 = ::testing::Combine( - ::testing::ValuesIn(std::vector>(shapes.begin() + 3, shapes.end())), - ::testing::ValuesIn(inputPrecision), + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation( + std::vector>(shapes_static.begin() + 3, shapes_static.end()))), + ::testing::ValuesIn(model_types), ::testing::Values(axes[3]), ::testing::ValuesIn(exclusive), ::testing::ValuesIn(reverse), @@ -79,8 +83,9 @@ const auto testCasesAxis_3 = ::testing::Combine( ); const auto testCasesAxis_4 = ::testing::Combine( - ::testing::ValuesIn(std::vector>(shapes.begin() + 4, shapes.end())), - ::testing::ValuesIn(inputPrecision), + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation( + std::vector>(shapes_static.begin() + 4, shapes_static.end()))), + ::testing::ValuesIn(model_types), ::testing::Values(axes[4]), ::testing::ValuesIn(exclusive), ::testing::ValuesIn(reverse), @@ -88,8 +93,9 @@ const auto testCasesAxis_4 = ::testing::Combine( ); const auto testCasesAxis_5 = ::testing::Combine( - ::testing::ValuesIn(std::vector>(shapes.begin() + 5, shapes.end())), - ::testing::ValuesIn(inputPrecision), + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation( + std::vector>(shapes_static.begin() + 5, shapes_static.end()))), + ::testing::ValuesIn(model_types), ::testing::Values(axes[5]), ::testing::ValuesIn(exclusive), ::testing::ValuesIn(reverse), @@ -97,8 +103,9 @@ const auto testCasesAxis_5 = ::testing::Combine( ); const auto testCasesAxis_6 = ::testing::Combine( - ::testing::ValuesIn(std::vector>(shapes.begin() + 6, shapes.end())), - ::testing::ValuesIn(inputPrecision), + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation( + std::vector>(shapes_static.begin() + 6, shapes_static.end()))), + ::testing::ValuesIn(model_types), ::testing::Values(axes[6]), ::testing::ValuesIn(exclusive), ::testing::ValuesIn(reverse), @@ -113,3 +120,4 @@ INSTANTIATE_TEST_SUITE_P(smoke_TestsCumSum_axis_3, CumSumLayerTest, testCasesAxi INSTANTIATE_TEST_SUITE_P(smoke_TestsCumSum_axis_4, CumSumLayerTest, testCasesAxis_4, CumSumLayerTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_TestsCumSum_axis_5, CumSumLayerTest, testCasesAxis_5, CumSumLayerTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_TestsCumSum_axis_6, CumSumLayerTest, testCasesAxis_6, CumSumLayerTest::getTestCaseName); +} // namespace \ No newline at end of file diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/deformable_psroi_pooling.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/deformable_psroi_pooling.cpp index e277e417c645f6..785aba8c1a1ef8 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/deformable_psroi_pooling.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/deformable_psroi_pooling.cpp @@ -4,17 +4,21 @@ #include -#include "single_layer_tests/deformable_psroi_pooling.hpp" +#include 
"single_op_tests/deformable_psroi_pooling.hpp" #include "common_test_utils/test_constants.hpp" -using namespace LayerTestsDefinitions; - namespace { +using ov::test::DeformablePSROIPoolingLayerTest; + + std::vector> shapes_static { + //dataShape, roisShape, offsetsShape + {{3, 8, 16, 16}, {10, 5}}, + {{1, 8, 67, 32}, {10, 5}}, + {{3, 8, 16, 16}, {10, 5}, {10, 2, 2, 2}}, + {{1, 8, 67, 32}, {10, 5}, {10, 2, 2, 2}}, + }; + const auto deformablePSROIParams = ::testing::Combine( - ::testing::ValuesIn(std::vector>{{3, 8, 16, 16}, {1, 8, 67, 32}}), // data input shape - ::testing::Values(std::vector{10, 5}), // rois input shape - // Empty offsets shape means test without optional third input - ::testing::ValuesIn(std::vector>{{}, {10, 2, 2, 2}}), // offsets input shape ::testing::Values(2), // output_dim ::testing::Values(2), // group_size ::testing::ValuesIn(std::vector{1.0f, 0.5f, 0.0625f}), // spatial scale @@ -24,17 +28,20 @@ namespace { const auto deformablePSROICases_test_params = ::testing::Combine( deformablePSROIParams, - ::testing::Values(InferenceEngine::Precision::FP32), // Net precision - ::testing::Values(ov::test::utils::DEVICE_CPU)); // Device name + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(shapes_static)), + ::testing::Values(ov::element::f32), + ::testing::Values(ov::test::utils::DEVICE_CPU)); INSTANTIATE_TEST_SUITE_P(smoke_TestsDeformablePSROIPooling, DeformablePSROIPoolingLayerTest, deformablePSROICases_test_params, DeformablePSROIPoolingLayerTest::getTestCaseName); + std::vector> shapes_advanced_static { + //dataShape, roisShape, offsetsShape + {{2, 441, 63, 38}, {30, 5}, {30, 2, 3, 3}} + }; + const auto deformablePSROIParams_advanced = ::testing::Combine( - ::testing::ValuesIn(std::vector>{{2, 441, 63, 38}}), // data input shape - ::testing::Values(std::vector{30, 5}), // rois input shape - ::testing::Values(std::vector{30, 2, 3, 3}), // offsets input shape ::testing::Values(49), // output_dim ::testing::Values(3), // group_size ::testing::ValuesIn(std::vector{0.0625}), // spatial scale @@ -44,8 +51,9 @@ namespace { const auto deformablePSROICases_test_params_advanced = ::testing::Combine( deformablePSROIParams_advanced, - ::testing::Values(InferenceEngine::Precision::FP32), // Net precision - ::testing::Values(ov::test::utils::DEVICE_CPU)); // Device name + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(shapes_advanced_static)), + ::testing::Values(ov::element::f32), + ::testing::Values(ov::test::utils::DEVICE_CPU)); INSTANTIATE_TEST_SUITE_P(smoke_TestsDeformablePSROIPooling_advanced, DeformablePSROIPoolingLayerTest, deformablePSROICases_test_params_advanced, DeformablePSROIPoolingLayerTest::getTestCaseName); diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/depth_to_space.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/depth_to_space.cpp index aca6f2e621de51..5e84ede53312b5 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/depth_to_space.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/depth_to_space.cpp @@ -2,20 +2,17 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include - -#include "single_layer_tests/depth_to_space.hpp" +#include "single_op_tests/depth_to_space.hpp" #include "common_test_utils/test_constants.hpp" -using namespace LayerTestsDefinitions; -using namespace ngraph::opset3; - namespace { -const std::vector inputPrecisions = { - 
InferenceEngine::Precision::FP32, - InferenceEngine::Precision::U8, - InferenceEngine::Precision::I16, +using ov::test::DepthToSpaceLayerTest; +using ov::op::v0::DepthToSpace; + +const std::vector model_types = { + ov::element::f32, + ov::element::u8, + ov::element::i16, }; const std::vector modes = { @@ -23,14 +20,22 @@ const std::vector modes = { DepthToSpace::DepthToSpaceMode::DEPTH_FIRST }; -const std::vector> inputShapesBS2 = { - {1, 4, 1, 1}, {1, 4, 2, 2}, {1, 4, 3, 3}, {2, 32, 3, 3}, {2, 16, 5, 4}, - {1, 8, 1, 1, 1}, {1, 8, 2, 2, 2}, {1, 8, 3, 3, 3}, {2, 32, 3, 3, 3}, {2, 16, 5, 4, 6} +const std::vector> input_shapes_bs2_static = { + {{1, 4, 1, 1}}, + {{1, 4, 2, 2}}, + {{1, 4, 3, 3}}, + {{2, 32, 3, 3}}, + {{2, 16, 5, 4}}, + {{1, 8, 1, 1, 1}}, + {{1, 8, 2, 2, 2}}, + {{1, 8, 3, 3, 3}}, + {{2, 32, 3, 3, 3}}, + {{2, 16, 5, 4, 6}} }; const auto DepthToSpaceBS2 = ::testing::Combine( - ::testing::ValuesIn(inputShapesBS2), - ::testing::ValuesIn(inputPrecisions), + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_bs2_static)), + ::testing::ValuesIn(model_types), ::testing::ValuesIn(modes), ::testing::Values(1, 2), ::testing::Values(ov::test::utils::DEVICE_CPU) @@ -38,14 +43,22 @@ const auto DepthToSpaceBS2 = ::testing::Combine( INSTANTIATE_TEST_SUITE_P(smoke_DepthToSpaceBS2, DepthToSpaceLayerTest, DepthToSpaceBS2, DepthToSpaceLayerTest::getTestCaseName); -const std::vector> inputShapesBS3 = { - {1, 9, 1, 1}, {1, 9, 2, 2}, {1, 9, 3, 3}, {2, 36, 3, 3}, {2, 27, 5, 4}, - {1, 27, 1, 1, 1}, {1, 27, 2, 2, 2}, {1, 27, 3, 3, 3}, {2, 108, 3, 3, 3}, {2, 54, 5, 4, 6} +const std::vector> input_shapes_bs3_static = { + {{1, 9, 1, 1}}, + {{1, 9, 2, 2}}, + {{1, 9, 3, 3}}, + {{2, 36, 3, 3}}, + {{2, 27, 5, 4}}, + {{1, 27, 1, 1, 1}}, + {{1, 27, 2, 2, 2}}, + {{1, 27, 3, 3, 3}}, + {{2, 108, 3, 3, 3}}, + {{2, 54, 5, 4, 6}} }; const auto DepthToSpaceBS3 = ::testing::Combine( - ::testing::ValuesIn(inputShapesBS3), - ::testing::ValuesIn(inputPrecisions), + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_bs3_static)), + ::testing::ValuesIn(model_types), ::testing::ValuesIn(modes), ::testing::Values(1, 3), ::testing::Values(ov::test::utils::DEVICE_CPU) diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/roi_pooling.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/roi_pooling.cpp index f8c2235f02cb09..c5d3047452a122 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/roi_pooling.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/roi_pooling.cpp @@ -155,12 +155,9 @@ class ROIPoolingCPULayerTest : public testing::WithParamInterface(tensor, feat_map_shape[0] - 1, height, width, 1.f, is_roi_max_mode); - break; - } - case ngraph::element::bf16: { - ov::test::utils::fill_data_roi(tensor, feat_map_shape[0] - 1, height, width, 1.f, is_roi_max_mode); + case ov::element::f32: + case ov::element::bf16: { + ov::test::utils::fill_data_roi(tensor, feat_map_shape[0] - 1, height, width, 1.f, is_roi_max_mode); break; } default: diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/roi_pooling.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/roi_pooling.cpp index eee9451c206413..34ac60f2752c08 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/roi_pooling.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/roi_pooling.cpp @@ -148,12 +148,9 @@ class ROIPoolingLayerGPUTest : public 
testing::WithParamInterface(tensor, feat_map_shape[0] - 1, height, width, 1.f, is_roi_max_mode); - break; - } - case ngraph::element::bf16: { - ov::test::utils::fill_data_roi(tensor, feat_map_shape[0] - 1, height, width, 1.f, is_roi_max_mode); + case ov::element::f32: + case ov::element::bf16: { + ov::test::utils::fill_data_roi(tensor, feat_map_shape[0] - 1, height, width, 1.f, is_roi_max_mode); break; } default: diff --git a/src/tests/functional/plugin/shared/include/single_op_tests/cum_sum.hpp b/src/tests/functional/plugin/shared/include/single_op_tests/cum_sum.hpp new file mode 100644 index 00000000000000..0d7c57a9341eef --- /dev/null +++ b/src/tests/functional/plugin/shared/include/single_op_tests/cum_sum.hpp @@ -0,0 +1,15 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "shared_test_classes/single_op/cum_sum.hpp" + +namespace ov { +namespace test { +TEST_P(CumSumLayerTest, Inference) { + run(); +}; +} // namespace test +} // namespace ov diff --git a/src/tests/functional/plugin/shared/include/single_op_tests/deformable_psroi_pooling.hpp b/src/tests/functional/plugin/shared/include/single_op_tests/deformable_psroi_pooling.hpp new file mode 100644 index 00000000000000..65063df0f5e545 --- /dev/null +++ b/src/tests/functional/plugin/shared/include/single_op_tests/deformable_psroi_pooling.hpp @@ -0,0 +1,15 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "shared_test_classes/single_op/deformable_psroi_pooling.hpp" + +namespace ov { +namespace test { +TEST_P(DeformablePSROIPoolingLayerTest, Inference) { + run(); +} +} // namespace test +} // namespace ov diff --git a/src/tests/functional/plugin/shared/include/single_op_tests/depth_to_space.hpp b/src/tests/functional/plugin/shared/include/single_op_tests/depth_to_space.hpp new file mode 100644 index 00000000000000..a7f0552d6028a6 --- /dev/null +++ b/src/tests/functional/plugin/shared/include/single_op_tests/depth_to_space.hpp @@ -0,0 +1,15 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "shared_test_classes/single_op/depth_to_space.hpp" + +namespace ov { +namespace test { +TEST_P(DepthToSpaceLayerTest, Inference) { + run(); +}; +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/cum_sum.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/cum_sum.hpp new file mode 100644 index 00000000000000..035bff4de8c67e --- /dev/null +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/cum_sum.hpp @@ -0,0 +1,33 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include + +#include "shared_test_classes/base/ov_subgraph.hpp" + +namespace ov { +namespace test { + +typedef std::tuple< + std::vector, // Input shapes + ov::element::Type, // Model type + int64_t, // Axis + bool, // Exclusive + bool, // Reverse + std::string> cumSumParams; // Device name + +class CumSumLayerTest : public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseTest { +public: + static std::string getTestCaseName(const testing::TestParamInfo& obj); + +protected: + void SetUp() override; +}; + +} // namespace test +} // namespace ov diff --git 
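// Illustrative sketch (not part of this patch): with the CumSumLayerTest fixture
// declared above, a plugin's test list is expected to instantiate the suite roughly
// as below. The suite name, shapes and axis value are assumptions chosen for the
// example; only the parameter order follows the cumSumParams tuple shown above.
#include "single_op_tests/cum_sum.hpp"
#include "common_test_utils/test_constants.hpp"

namespace {
using ov::test::CumSumLayerTest;

INSTANTIATE_TEST_SUITE_P(smoke_CumSum_example, CumSumLayerTest,
    ::testing::Combine(
        ::testing::Values(ov::test::static_shapes_to_test_representation(
            std::vector<ov::Shape>{{16, 10, 12}})),       // input shapes -> std::vector<InputShape>
        ::testing::Values(ov::element::f32),              // model type
        ::testing::Values(int64_t{1}),                    // axis
        ::testing::Values(false),                         // exclusive
        ::testing::Values(false),                         // reverse
        ::testing::Values(ov::test::utils::DEVICE_CPU)),  // target device
    CumSumLayerTest::getTestCaseName);
}  // namespace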
a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/deformable_psroi_pooling.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/deformable_psroi_pooling.hpp new file mode 100644 index 00000000000000..b3cc9ea739a779 --- /dev/null +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/deformable_psroi_pooling.hpp @@ -0,0 +1,39 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include + +#include "shared_test_classes/base/ov_subgraph.hpp" + +namespace ov { +namespace test { +using deformablePSROISpecificParams = std::tuple< + int64_t, // output_dim + int64_t, // group_size + float, // spatial_scale + std::vector, // spatial_bins_x_y + float, // trans_std + int64_t>; // part_size + +using deformablePSROILayerTestParams = std::tuple< + deformablePSROISpecificParams, + std::vector, // data input shape + ov::element::Type, // Net type + std::string>; // Device name + +class DeformablePSROIPoolingLayerTest : public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseTest { +public: + static std::string getTestCaseName(const testing::TestParamInfo& obj); + +protected: + void SetUp() override; +}; + +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/depth_to_space.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/depth_to_space.hpp new file mode 100644 index 00000000000000..40a7632aaa5098 --- /dev/null +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/depth_to_space.hpp @@ -0,0 +1,33 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include + +#include "shared_test_classes/base/ov_subgraph.hpp" +#include "openvino/op/depth_to_space.hpp" + +namespace ov { +namespace test { +using depthToSpaceParamsTuple = typename std::tuple< + std::vector, // Input shape + ov::element::Type, // Model type + ov::op::v0::DepthToSpace::DepthToSpaceMode, // Mode + std::size_t, // Block size + std::string>; // Device name> + +class DepthToSpaceLayerTest : public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseTest { +public: + static std::string getTestCaseName(const testing::TestParamInfo &obj); + +protected: + void SetUp() override; +}; + +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/src/base/utils/generate_inputs.cpp b/src/tests/functional/shared_test_classes/src/base/utils/generate_inputs.cpp index e738e73d879d6e..139678a602fd21 100644 --- a/src/tests/functional/shared_test_classes/src/base/utils/generate_inputs.cpp +++ b/src/tests/functional/shared_test_classes/src/base/utils/generate_inputs.cpp @@ -855,6 +855,28 @@ ov::runtime::Tensor generate(const std::shared_ptr& nod return generate(std::dynamic_pointer_cast(node), port, elemType, targetShape); } +ov::runtime::Tensor generate(const + std::shared_ptr& node, + size_t port, + const ov::element::Type& elemType, + const ov::Shape& targetShape) { + if (port == 1) { + ov::Tensor tensor(elemType, targetShape); + auto data_input_shape = node->input(0).get_shape(); + const auto batch_distrib = data_input_shape[0] - 1; + const auto height = data_input_shape[2] / node->get_spatial_scale(); + const auto width = data_input_shape[3] / node->get_spatial_scale(); + + 
ov::test::utils::fill_data_roi(tensor, batch_distrib, height, width, 1.0f, true); + return tensor; + } else if (port == 2) { + ov::Tensor tensor(elemType, targetShape); + ov::test::utils::fill_tensor_random(tensor, 1.8, -0.9); + return tensor; + } + return generate(std::static_pointer_cast(node), port, elemType, targetShape); +} + ov::runtime::Tensor generate(const std::shared_ptr& node, size_t port, diff --git a/src/tests/functional/shared_test_classes/src/single_op/cum_sum.cpp b/src/tests/functional/shared_test_classes/src/single_op/cum_sum.cpp new file mode 100644 index 00000000000000..7adccf0aaff1aa --- /dev/null +++ b/src/tests/functional/shared_test_classes/src/single_op/cum_sum.cpp @@ -0,0 +1,54 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "shared_test_classes/single_op/cum_sum.hpp" + +namespace ov { +namespace test { +std::string CumSumLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { + std::vector shapes; + ov::element::Type model_type; + int64_t axis; + bool exclusive, reverse; + std::string targetDevice; + std::tie(shapes, model_type, axis, exclusive, reverse, targetDevice) = obj.param; + + std::ostringstream result; + result << "IS=("; + for (size_t i = 0lu; i < shapes.size(); i++) { + result << ov::test::utils::partialShape2str({shapes[i].first}) << (i < shapes.size() - 1lu ? "_" : ""); + } + result << ")_TS="; + for (size_t i = 0lu; i < shapes.front().second.size(); i++) { + result << "{"; + for (size_t j = 0lu; j < shapes.size(); j++) { + result << ov::test::utils::vec2str(shapes[j].second[i]) << (j < shapes.size() - 1lu ? "_" : ""); + } + result << "}_"; + } + result << "Precision=" << model_type.get_type_name() << "_"; + result << "Axis=" << axis << "_"; + result << "Exclusive=" << (exclusive ? "TRUE" : "FALSE") << "_"; + result << "Reverse=" << (reverse ? 
"TRUE" : "FALSE") << "_"; + result << "TargetDevice=" << targetDevice; + return result.str(); +} + +void CumSumLayerTest::SetUp() { + std::vector shapes; + ov::element::Type model_type; + bool exclusive, reverse; + int64_t axis; + std::tie(shapes, model_type, axis, exclusive, reverse, targetDevice) = this->GetParam(); + init_input_shapes(shapes); + + const auto param = std::make_shared(model_type, inputDynamicShapes.front()); + const auto axis_node = std::make_shared(ov::element::Type_t::i64, ov::Shape{}, std::vector{axis}); + const auto cum_sum = std::make_shared(param, axis_node, exclusive, reverse); + + auto result = std::make_shared(cum_sum); + function = std::make_shared(result, ov::ParameterVector{param}, "cumsum"); +} +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/src/single_op/deformable_psroi_pooling.cpp b/src/tests/functional/shared_test_classes/src/single_op/deformable_psroi_pooling.cpp new file mode 100644 index 00000000000000..42722c8d4dad01 --- /dev/null +++ b/src/tests/functional/shared_test_classes/src/single_op/deformable_psroi_pooling.cpp @@ -0,0 +1,110 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "shared_test_classes/single_op/deformable_psroi_pooling.hpp" + +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/deformable_psroi_pooling.hpp" + +namespace ov { +namespace test { + +std::string DeformablePSROIPoolingLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { + std::vector shapes; + ov::element::Type model_type; + int64_t outputDim; + int64_t groupSize; + float spatialScale; + std::vector spatialBinsXY; + float trans_std; + int64_t part_size; + std::string target_device; + deformablePSROISpecificParams opParams; + + std::tie(opParams, shapes, model_type, target_device) = obj.param; + std::tie(outputDim, groupSize, spatialScale, spatialBinsXY, + trans_std, part_size) = opParams; + + std::ostringstream result; + result << "IS=("; + for (size_t i = 0lu; i < shapes.size(); i++) { + result << ov::test::utils::partialShape2str({shapes[i].first}) << (i < shapes.size() - 1lu ? "_" : ""); + } + result << ")_TS="; + for (size_t i = 0lu; i < shapes.front().second.size(); i++) { + result << "{"; + for (size_t j = 0lu; j < shapes.size(); j++) { + result << ov::test::utils::vec2str(shapes[j].second[i]) << (j < shapes.size() - 1lu ? 
"_" : ""); + } + result << "}_"; + } + result << "out_dim=" << outputDim << "_"; + result << "group_size=" << groupSize << "_"; + result << "scale=" << spatialScale << "_"; + result << "bins_x=" << spatialBinsXY[0] << "_"; + result << "bins_y=" << spatialBinsXY[1] << "_"; + result << "trans_std=" << trans_std << "_"; + result << "part_size=" << part_size << "_"; + result << "prec=" << model_type.get_type_name() << "_"; + result << "dev=" << target_device; + return result.str(); +} + +void DeformablePSROIPoolingLayerTest::SetUp() { + std::vector shapes; + ov::element::Type model_type; + int64_t outputDim; + int64_t groupSize; + std::string mode = "bilinear_deformable"; + std::vector spatialBinsXY; + float trans_std, spatial_scale; + int64_t part_size; + deformablePSROISpecificParams opParams; + + std::tie(opParams, shapes, model_type, targetDevice) = this->GetParam(); + std::tie(outputDim, groupSize, spatial_scale, spatialBinsXY, trans_std, part_size) = opParams; + init_input_shapes(shapes); + + ov::ParameterVector params; + std::shared_ptr defomablePSROIPooling; + + if (2 == inputDynamicShapes.size()) { // Test without optional third input (offsets) + params = ov::ParameterVector{std::make_shared(model_type, inputDynamicShapes[0]), + std::make_shared(model_type, inputDynamicShapes[1])}; + + defomablePSROIPooling = std::make_shared(params[0], + params[1], + outputDim, + spatial_scale, + groupSize, + mode, + spatialBinsXY[0], + spatialBinsXY[1], + trans_std, + part_size); + } else { + params = ov::ParameterVector{std::make_shared(model_type, inputDynamicShapes[0]), + std::make_shared(model_type, inputDynamicShapes[1]), + std::make_shared(model_type, inputDynamicShapes[2])}; + + defomablePSROIPooling = std::make_shared(params[0], + params[1], + params[2], + outputDim, + spatial_scale, + groupSize, + mode, + spatialBinsXY[0], + spatialBinsXY[1], + trans_std, + part_size); + } + + auto result = std::make_shared(defomablePSROIPooling); + function = std::make_shared(result, params, "deformable_psroi_pooling"); +} +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/src/single_op/depth_to_space.cpp b/src/tests/functional/shared_test_classes/src/single_op/depth_to_space.cpp new file mode 100644 index 00000000000000..d4f45a617a8747 --- /dev/null +++ b/src/tests/functional/shared_test_classes/src/single_op/depth_to_space.cpp @@ -0,0 +1,72 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "shared_test_classes/single_op/depth_to_space.hpp" + +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/depth_to_space.hpp" + +namespace ov { +namespace test { +using ov::op::v0::DepthToSpace; + +static inline std::string DepthToSpaceModeToString(const DepthToSpace::DepthToSpaceMode& mode) { + static std::map names = { + {DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST, "BLOCKS_FIRST"}, + {DepthToSpace::DepthToSpaceMode::DEPTH_FIRST, "DEPTH_FIRST"}, + }; + + auto i = names.find(mode); + if (i != names.end()) + return i->second; + else + throw std::runtime_error("Unsupported DepthToSpaceMode"); +} + +std::string DepthToSpaceLayerTest::getTestCaseName(const testing::TestParamInfo &obj) { + std::vector shapes; + DepthToSpace::DepthToSpaceMode mode; + std::size_t block_size; + ov::element::Type model_type; + std::string device_name; + std::tie(shapes, model_type, mode, block_size, device_name) = obj.param; + + std::ostringstream result; + result << 
"IS=("; + for (size_t i = 0lu; i < shapes.size(); i++) { + result << ov::test::utils::partialShape2str({shapes[i].first}) << (i < shapes.size() - 1lu ? "_" : ""); + } + result << ")_TS="; + for (size_t i = 0lu; i < shapes.front().second.size(); i++) { + result << "{"; + for (size_t j = 0lu; j < shapes.size(); j++) { + result << ov::test::utils::vec2str(shapes[j].second[i]) << (j < shapes.size() - 1lu ? "_" : ""); + } + result << "}_"; + } + result << "inPrc=" << model_type.get_type_name() << "_"; + result << "M=" << DepthToSpaceModeToString(mode) << "_"; + result << "BS=" << block_size << "_"; + result << "targetDevice=" << device_name << "_"; + return result.str(); +} + +void DepthToSpaceLayerTest::SetUp() { + std::vector shapes; + DepthToSpace::DepthToSpaceMode mode; + std::size_t block_size; + ov::element::Type model_type; + std::tie(shapes, model_type, mode, block_size, targetDevice) = this->GetParam(); + init_input_shapes(shapes); + + auto param = std::make_shared(model_type, inputDynamicShapes.front()); + auto d2s = std::make_shared(param, mode, block_size); + auto result = std::make_shared(d2s); + + function = std::make_shared(result, ov::ParameterVector{param}, "DepthToSpace"); +} +} // namespace test +} // namespace ov diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/data_utils.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/data_utils.hpp index 55994c7f6b90ae..4d2e5006eeb72d 100644 --- a/src/tests/test_utils/common_test_utils/include/common_test_utils/data_utils.hpp +++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/data_utils.hpp @@ -158,52 +158,14 @@ inline void fill_data_roi(InferenceEngine::Blob::Ptr& blob, fill_roi_raw_ptr(data, blob->size(), range, height, width, omega, is_roi_max_mode, seed); } -template -inline void fill_data_roi(ov::runtime::Tensor& tensor, - const uint32_t range, - const int height, - const int width, - const float omega, - const bool is_roi_max_mode, - const int seed = 1) { - using T = typename InferenceEngine::PrecisionTrait::value_type; - auto* data = static_cast(tensor.data()); - std::default_random_engine random(seed); - std::uniform_int_distribution distribution(0, range); +void fill_data_roi(ov::runtime::Tensor& tensor, + const uint32_t range, + const int height, + const int width, + const float omega, + const bool is_roi_max_mode, + const int seed = 1); - const int max_y = (is_roi_max_mode) ? (height - 1) : 1; - const int max_x = (is_roi_max_mode) ? (width - 1) : 1; - - float center_h = (max_y) / 2.0f; - float center_w = (max_x) / 2.0f; - - for (size_t i = 0; i < tensor.get_size(); i += 5) { - data[i] = static_cast(distribution(random)); - const float x0 = (center_w + width * 0.3f * sin(static_cast(i + 1) * omega)); - const float x1 = (center_w + width * 0.3f * sin(static_cast(i + 3) * omega)); - data[i + 1] = static_cast(is_roi_max_mode ? std::floor(x0) : x0); - data[i + 3] = static_cast(is_roi_max_mode ? std::floor(x1) : x1); - if (data[i + 3] < data[i + 1]) { - std::swap(data[i + 1], data[i + 3]); - } - if (data[i + 1] < 0) - data[i + 1] = 0; - if (data[i + 3] > max_x) - data[i + 3] = static_cast(max_x); - - const float y0 = (center_h + height * 0.3f * sin(static_cast(i + 2) * omega)); - const float y1 = (center_h + height * 0.3f * sin(static_cast(i + 4) * omega)); - data[i + 2] = static_cast(is_roi_max_mode ? std::floor(y0) : y0); - data[i + 4] = static_cast(is_roi_max_mode ? 
std::floor(y1) : y1); - if (data[i + 4] < data[i + 2]) { - std::swap(data[i + 2], data[i + 4]); - } - if (data[i + 2] < 0) - data[i + 2] = 0; - if (data[i + 4] > max_y) - data[i + 4] = static_cast(max_y); - } -} OPENVINO_SUPPRESS_DEPRECATED_END template diff --git a/src/tests/test_utils/common_test_utils/src/data_utils.cpp b/src/tests/test_utils/common_test_utils/src/data_utils.cpp index 9991b976b467f6..ed956e7f860fd4 100644 --- a/src/tests/test_utils/common_test_utils/src/data_utils.cpp +++ b/src/tests/test_utils/common_test_utils/src/data_utils.cpp @@ -250,6 +250,88 @@ size_t byte_size(const InferenceEngine::TensorDesc& tdesc) { } OPENVINO_SUPPRESS_DEPRECATED_END +template +inline void fill_data_roi_impl(ov::runtime::Tensor& tensor, + const uint32_t range, + const int height, + const int width, + const float omega, + const bool is_roi_max_mode, + const int seed = 1) { + using T = typename ov::fundamental_type_for; + auto* data = static_cast(tensor.data()); + std::default_random_engine random(seed); + std::uniform_int_distribution distribution(0, range); + + const int max_y = (is_roi_max_mode) ? (height - 1) : 1; + const int max_x = (is_roi_max_mode) ? (width - 1) : 1; + + float center_h = (max_y) / 2.0f; + float center_w = (max_x) / 2.0f; + + for (size_t i = 0; i < tensor.get_size(); i += 5) { + data[i] = static_cast(distribution(random)); + const float x0 = (center_w + width * 0.3f * sin(static_cast(i + 1) * omega)); + const float x1 = (center_w + width * 0.3f * sin(static_cast(i + 3) * omega)); + data[i + 1] = static_cast(is_roi_max_mode ? std::floor(x0) : x0); + data[i + 3] = static_cast(is_roi_max_mode ? std::floor(x1) : x1); + if (data[i + 3] < data[i + 1]) { + std::swap(data[i + 1], data[i + 3]); + } + if (data[i + 1] < 0) + data[i + 1] = 0; + if (data[i + 3] > max_x) + data[i + 3] = static_cast(max_x); + + const float y0 = (center_h + height * 0.3f * sin(static_cast(i + 2) * omega)); + const float y1 = (center_h + height * 0.3f * sin(static_cast(i + 4) * omega)); + data[i + 2] = static_cast(is_roi_max_mode ? std::floor(y0) : y0); + data[i + 4] = static_cast(is_roi_max_mode ? 
std::floor(y1) : y1); + if (data[i + 4] < data[i + 2]) { + std::swap(data[i + 2], data[i + 4]); + } + if (data[i + 2] < 0) + data[i + 2] = 0; + if (data[i + 4] > max_y) + data[i + 4] = static_cast(max_y); + } +} + +void fill_data_roi(ov::runtime::Tensor& tensor, + const uint32_t range, + const int height, + const int width, + const float omega, + const bool is_roi_max_mode, + const int seed) { +#define CASE(X) \ + case X: \ + fill_data_roi_impl(tensor, range, height, width, omega, is_roi_max_mode, seed); \ + break; + + auto element_type = tensor.get_element_type(); + switch (element_type) { + CASE(ov::element::f64) + CASE(ov::element::f32) + CASE(ov::element::f16) + CASE(ov::element::bf16) + CASE(ov::element::u1) + CASE(ov::element::u4) + CASE(ov::element::u8) + CASE(ov::element::u32) + CASE(ov::element::u16) + CASE(ov::element::u64) + CASE(ov::element::i4) + CASE(ov::element::i8) + CASE(ov::element::i16) + CASE(ov::element::i32) + CASE(ov::element::i64) + default: + OPENVINO_THROW("Wrong precision specified: ", element_type); + } +#undef CASE +} + void fill_data_with_broadcast(ov::Tensor& tensor, ov::Tensor& values) { constexpr size_t MAX_N_DIMS = 7; // Suppose it's enough From 56d74a82cbf629ec6bae1554dbef4fe3a0321af8 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Fri, 13 Oct 2023 02:59:04 +0400 Subject: [PATCH 180/257] Relocatable developer package (#20327) * Merge Linux CC + static build + clang compiler * Improvements * Removed ie prefixes from cmake scripts * Fixes for NPU * Initial relocatable OpenVINO Developer package * Improvements * Try to fix * improvements * Export a lot of headers * Removed NVIDIA pipeline; make it to be a job * Fixes * Fixes 2 * Try ilya-lavrenov repo * Clean-up * More imrpovements * Even more improvements * Override export, install * Override export, install * Disable pythonwheel generation for relocatable OV dev package * Fixed issues with versions * Fixed android build * Fixed android build * Fixed NPU build * Update src/bindings/python/CMakeLists.txt --- .github/workflows/build_doc.yml | 4 +- .github/workflows/linux.yml | 136 ++++++++++++++++- .../linux_conditional_compilation.yml | 2 + .github/workflows/linux_cuda.yml | 143 ------------------ .github/workflows/windows.yml | 1 + .../windows_conditional_compilation.yml | 1 + CMakeLists.txt | 73 ++++----- cmake/dependencies.cmake | 14 +- .../OpenVINODeveloperScriptsConfig.cmake | 20 ++- .../add_target_helpers.cmake | 11 +- .../compile_flags/os_flags.cmake | 10 +- cmake/developer_package/options.cmake | 2 +- cmake/developer_package/packaging/nsis.cmake | 2 + .../packaging/packaging.cmake | 2 - cmake/developer_package/version.cmake | 87 +++++------ cmake/extra_modules.cmake | 127 +++++++++++----- ...renceEngineDeveloperPackageConfig.cmake.in | 4 +- .../OpenVINODeveloperPackageConfig.cmake.in | 8 +- ...DeveloperPackageConfigRelocatable.cmake.in | 76 ++++++++++ samples/CMakeLists.txt | 3 +- .../cpp/common/format_reader/CMakeLists.txt | 2 +- samples/cpp/common/utils/CMakeLists.txt | 2 +- src/bindings/c/src/CMakeLists.txt | 1 + src/bindings/python/CMakeLists.txt | 37 +++-- .../src/compatibility/pyngraph/CMakeLists.txt | 70 ++++----- .../python/src/pyopenvino/CMakeLists.txt | 131 ++++++++-------- src/cmake/openvino.cmake | 22 +-- .../conditional_compilation/CMakeLists.txt | 15 +- src/common/itt/CMakeLists.txt | 8 +- .../offline_transformations/CMakeLists.txt | 21 ++- src/common/preprocessing/src/CMakeLists.txt | 2 +- src/common/snippets/CMakeLists.txt | 5 +- src/common/transformations/CMakeLists.txt | 12 +- 
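# Illustrative sketch (not part of this patch): an out-of-tree module, such as the
# NVIDIA plugin built later in this workflow, is expected to consume the relocatable
# developer package roughly like this. The project and target names are assumptions;
# the package location is the directory passed via -DOpenVINODeveloperPackage_DIR in
# the new NVIDIA_Plugin job below.
cmake_minimum_required(VERSION 3.13)
project(my_ov_module)

# Resolved from OpenVINODeveloperPackage_DIR=<install>/developer_package/cmake
find_package(OpenVINODeveloperPackage REQUIRED)

add_library(my_ov_module SHARED src/my_ov_module.cpp)
# Link against the exported runtime targets as needed by the module
target_link_libraries(my_ov_module PRIVATE openvino::runtime)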
.../transformations/tests/CMakeLists.txt | 2 +- src/common/util/CMakeLists.txt | 18 ++- src/core/CMakeLists.txt | 20 ++- src/core/builder/CMakeLists.txt | 13 +- src/core/reference/CMakeLists.txt | 12 +- src/core/shape_inference/CMakeLists.txt | 14 +- .../tests/frontend/shared/CMakeLists.txt | 6 +- src/inference/CMakeLists.txt | 7 +- .../auto/tests/functional/CMakeLists.txt | 2 +- .../helpers/single_layer_common.hpp | 4 - .../intel_gpu/tests/unit/CMakeLists.txt | 10 +- src/plugins/template/CMakeLists.txt | 2 - src/plugins/template/backend/CMakeLists.txt | 36 +++-- src/plugins/template/src/CMakeLists.txt | 2 +- .../template/tests/functional/CMakeLists.txt | 2 +- .../subgraphs_dumper/CMakeLists.txt | 6 +- .../conformance_infra/CMakeLists.txt | 4 +- .../functional/plugin/shared/CMakeLists.txt | 9 +- .../shared_test_classes/CMakeLists.txt | 9 +- .../ov_helpers/ov_lpt_models/CMakeLists.txt | 11 +- src/tests/ov_helpers/ov_models/CMakeLists.txt | 11 +- .../ov_snippets_models/CMakeLists.txt | 16 +- .../common_test_utils/CMakeLists.txt | 27 +--- .../functional_test_utils/CMakeLists.txt | 18 ++- .../test_utils/unit_test_utils/CMakeLists.txt | 12 +- thirdparty/dependencies.cmake | 46 ++++-- thirdparty/gtest/CMakeLists.txt | 34 +++-- thirdparty/ittapi/CMakeLists.txt | 6 +- 61 files changed, 785 insertions(+), 628 deletions(-) delete mode 100644 .github/workflows/linux_cuda.yml create mode 100644 cmake/templates/OpenVINODeveloperPackageConfigRelocatable.cmake.in diff --git a/.github/workflows/build_doc.yml b/.github/workflows/build_doc.yml index b7e688d55f8c4a..fca75e99dc4109 100644 --- a/.github/workflows/build_doc.yml +++ b/.github/workflows/build_doc.yml @@ -17,8 +17,8 @@ jobs: - name: Clone OpenVINO uses: actions/checkout@v4 with: - submodules: true - lfs: true + submodules: 'true' + lfs: 'true' - name: Install apt-get dependencies uses: awalsh128/cache-apt-pkgs-action@v1.3.0 diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index bf602387ae8c6d..06d64236f3c8c0 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -50,6 +50,7 @@ jobs: OPENVINO_CONTRIB_REPO: /__w/openvino/openvino/openvino_contrib INSTALL_DIR: /__w/openvino/openvino/openvino_install INSTALL_TEST_DIR: /__w/openvino/openvino/tests_install + DEVELOPER_PACKAGE_DIR: /__w/openvino/openvino/developer_package_install BUILD_DIR: /__w/openvino/openvino/openvino_build CCACHE_DIR: /mount/caches/ccache/ubuntu20_x86_64_Release CCACHE_TEMPDIR: /__w/openvino/openvino/ccache_temp @@ -74,6 +75,7 @@ jobs: repository: 'openvinotoolkit/openvino_contrib' path: ${{ env.OPENVINO_CONTRIB_REPO }} submodules: 'true' + ref: 'master' # # Dependencies @@ -150,6 +152,7 @@ jobs: run: | cmake -DCMAKE_INSTALL_PREFIX=${INSTALL_DIR} -P ${BUILD_DIR}/cmake_install.cmake cmake -DCMAKE_INSTALL_PREFIX=${INSTALL_TEST_DIR} -DCOMPONENT=tests -P ${BUILD_DIR}/cmake_install.cmake + cmake -DCMAKE_INSTALL_PREFIX=${DEVELOPER_PACKAGE_DIR} -DCOMPONENT=developer_package -P ${BUILD_DIR}/cmake_install.cmake cmake -DCMAKE_INSTALL_PREFIX=${INSTALL_DIR} -DCOMPONENT=python_wheels -P ${BUILD_DIR}/cmake_install.cmake - name: Pack Artifacts @@ -164,6 +167,10 @@ jobs: tar -czvf ${BUILD_DIR}/openvino_package.tar.gz * popd + pushd ${DEVELOPER_PACKAGE_DIR} + tar -czvf ${BUILD_DIR}/openvino_developer_package.tar.gz * + popd + pushd ${INSTALL_TEST_DIR} tar -czvf ${BUILD_DIR}/openvino_tests.tar.gz * popd @@ -203,6 +210,14 @@ jobs: path: ${{ env.BUILD_DIR }}/openvino_package.tar.gz if-no-files-found: 'error' + - name: Upload openvino developer package + if: 
${{ always() }} + uses: actions/upload-artifact@v3 + with: + name: openvino_developer_package + path: ${{ env.BUILD_DIR }}/openvino_developer_package.tar.gz + if-no-files-found: 'error' + - name: Upload openvino debian packages if: ${{ always() }} uses: actions/upload-artifact@v3 @@ -493,7 +508,6 @@ jobs: ONNX_RUNTIME_BUILD_DIR: /__w/openvino/openvino/onnxruntime/build steps: - - name: Fetch install_build_dependencies.sh uses: actions/checkout@v4 with: @@ -501,6 +515,7 @@ jobs: install_build_dependencies.sh sparse-checkout-cone-mode: false path: ${{ env.OPENVINO_REPO }} + ref: 'master' - name: Install git run: | @@ -1292,3 +1307,122 @@ jobs: path: | ${{ env.INSTALL_TEST_DIR }}/TEST*.html if-no-files-found: 'error' + + NVIDIA_Plugin: + name: NVIDIA plugin + needs: Build + defaults: + run: + shell: bash + runs-on: aks-linux-16-cores + container: + image: openvinogithubactions.azurecr.io/dockerhub/nvidia/cuda:11.8.0-runtime-ubuntu20.04 + volumes: + - /mount/caches:/mount/caches + env: + CMAKE_BUILD_TYPE: 'Release' + CMAKE_GENERATOR: 'Ninja Multi-Config' + CMAKE_CUDA_COMPILER_LAUNCHER: ccache + CMAKE_CXX_COMPILER_LAUNCHER: ccache + CMAKE_C_COMPILER_LAUNCHER: ccache + INSTALL_DIR: /__w/openvino/openvino/install + OPENVINO_DEVELOPER_PACKAGE: /__w/openvino/openvino/install/developer_package + OPENVINO_REPO: /__w/openvino/openvino/openvino + OPENVINO_CONTRIB_REPO: /__w/openvino/openvino/openvino_contrib + NVIDIA_BUILD_DIR: /__w/openvino/openvino/nvidia_plugin_build + DEBIAN_FRONTEND: 'noninteractive' + CCACHE_DIR: /mount/caches/ccache/ubuntu20_x86_64_Release + CCACHE_TEMPDIR: /__w/openvino/openvino/ccache_temp + CCACHE_MAXSIZE: 50G + + steps: + - name: Install Prerequisites + run: apt update && apt install -y git ca-certificates + + - name: Download OpenVINO package + uses: actions/download-artifact@v3 + with: + name: openvino_package + path: ${{ env.INSTALL_DIR }} + + - name: Download OpenVINO Developer package + uses: actions/download-artifact@v3 + with: + name: openvino_developer_package + path: ${{ env.INSTALL_DIR }} + + - name: Extract OpenVINO packages + run: | + pushd ${INSTALL_DIR} + tar -xzf openvino_package.tar.gz -C ${INSTALL_DIR} + popd + + pushd ${INSTALL_DIR} + tar -xzf openvino_developer_package.tar.gz -C ${INSTALL_DIR} + popd + + # TODO: replace with sparse checkout below + - name: Clone OpenVINO + uses: actions/checkout@v4 + with: + path: ${{ env.OPENVINO_REPO }} + + - name: Fetch install_build_dependencies.sh + if: ${{ 'false' }} + uses: actions/checkout@v4 + with: + sparse-checkout: | + install_build_dependencies.sh + sparse-checkout-cone-mode: false + path: ${{ env.OPENVINO_REPO }} + + - name: Clone OpenVINO Contrib + uses: actions/checkout@v4 + with: + repository: 'openvinotoolkit/openvino_contrib' + path: ${{ env.OPENVINO_CONTRIB_REPO }} + ref: 'master' + + # + # Dependencies + # + + - name: Install build dependencies + run: | + ${OPENVINO_REPO}/install_build_dependencies.sh + apt -y --no-install-recommends install software-properties-common + + - name: Install CUDA + run: | + wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/cuda-ubuntu2004.pin + mv cuda-ubuntu2004.pin /etc/apt/preferences.d/cuda-repository-pin-600 + + apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/3bf863cc.pub + add-apt-repository "deb https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/ /" + apt update + apt install -y \ + libcudnn8=8.9.4.*-1+cuda11.8 \ + libcudnn8-dev=8.9.4.*-1+cuda11.8 \ + 
libcudnn8-samples=8.9.4.*-1+cuda11.8 \ + cuda-runtime-11-8 \ + cuda-11-8 \ + libcutensor1=1.6.1.5-1 \ + libcutensor-dev=1.6.1.5-1 \ + cuda-drivers=520.61.05-1 + + # + # Build + # + + - name: Cmake & Build - NVIDIA Plugin + run: | + source ${INSTALL_DIR}/setupvars.sh + cmake \ + -DOpenVINODeveloperPackage_DIR=${OPENVINO_DEVELOPER_PACKAGE}/cmake \ + -DCMAKE_COMPILE_WARNING_AS_ERROR=OFF \ + -S ${OPENVINO_CONTRIB_REPO}/modules/nvidia_plugin \ + -B ${NVIDIA_BUILD_DIR} + cmake --build ${NVIDIA_BUILD_DIR} --parallel --config ${{ env.CMAKE_BUILD_TYPE }} --verbose -- ov_nvidia_func_tests ov_nvidia_unit_tests + + - name: Show ccache stats + run: ccache --show-stats diff --git a/.github/workflows/linux_conditional_compilation.yml b/.github/workflows/linux_conditional_compilation.yml index b8567d57a6a6cd..8d71ec531c130c 100644 --- a/.github/workflows/linux_conditional_compilation.yml +++ b/.github/workflows/linux_conditional_compilation.yml @@ -73,6 +73,7 @@ jobs: repository: 'openvinotoolkit/testdata' path: ${{ env.MODELS_PATH }} lfs: 'true' + ref: 'master' # # Dependencies @@ -230,6 +231,7 @@ jobs: repository: 'openvinotoolkit/testdata' path: ${{ env.MODELS_PATH }} lfs: 'true' + ref: 'master' - name: Download selective build statistics package uses: actions/download-artifact@v3 diff --git a/.github/workflows/linux_cuda.yml b/.github/workflows/linux_cuda.yml deleted file mode 100644 index 7fd90dac00dcf4..00000000000000 --- a/.github/workflows/linux_cuda.yml +++ /dev/null @@ -1,143 +0,0 @@ -name: Linux NVIDIA Plugin (Ubuntu 20.04) -on: - workflow_dispatch: - pull_request: - paths-ignore: - - '**/docs/**' - - 'docs/**' - - '**/**.md' - - '**.md' - - '**/layer_tests_summary/**' - - '**/conformance/**' - push: - paths-ignore: - - '**/docs/**' - - 'docs/**' - - '**/**.md' - - '**.md' - - '**/layer_tests_summary/**' - - '**/conformance/**' - branches: - - master - -concurrency: - # github.ref is not unique in post-commit - group: ${{ github.event_name == 'push' && github.run_id || github.ref }}-linux-nvidia - cancel-in-progress: true - -jobs: - Build: - defaults: - run: - shell: bash - runs-on: aks-linux-16-cores - container: - image: openvinogithubactions.azurecr.io/dockerhub/nvidia/cuda:11.8.0-runtime-ubuntu20.04 - volumes: - - /mount/caches:/mount/caches - env: - CMAKE_BUILD_TYPE: 'Release' - CMAKE_GENERATOR: 'Ninja Multi-Config' - CMAKE_CUDA_COMPILER_LAUNCHER: ccache - CMAKE_CXX_COMPILER_LAUNCHER: ccache - CMAKE_C_COMPILER_LAUNCHER: ccache - OPENVINO_REPO: /__w/openvino/openvino/openvino - OPENVINO_CONTRIB_REPO: /__w/openvino/openvino/openvino_contrib - OV_BUILD_DIR: /__w/openvino/openvino/openvino_build - NVIDIA_BUILD_DIR: /__w/openvino/openvino/nvidia_plugin_build - DEBIAN_FRONTEND: 'noninteractive' - CCACHE_DIR: /mount/caches/ccache/ubuntu20_x86_64_Release - CCACHE_TEMPDIR: /__w/openvino/openvino/ccache_temp - CCACHE_MAXSIZE: 50G - steps: - - - name: Install Prerequisites - run: | - apt update - apt install -y git curl git git-lfs unzip wget - - - name: Clone OpenVINO - uses: actions/checkout@v4 - with: - path: ${{ env.OPENVINO_REPO }} - submodules: 'true' - - - name: Clone OpenVINO Contrib - uses: actions/checkout@v4 - with: - repository: 'openvinotoolkit/openvino_contrib' - path: ${{ env.OPENVINO_CONTRIB_REPO }} - ref: 'master' - - # - # Dependencies - # - - - name: Install build dependencies - run: | - ${OPENVINO_REPO}/install_build_dependencies.sh - - apt -y --no-install-recommends install unzip wget software-properties-common - - - name: Install CUDA - run: | - wget 
https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/cuda-ubuntu2004.pin - mv cuda-ubuntu2004.pin /etc/apt/preferences.d/cuda-repository-pin-600 - - apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/3bf863cc.pub - add-apt-repository "deb https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/ /" - apt update - apt install -y \ - libcudnn8=8.9.4.*-1+cuda11.8 \ - libcudnn8-dev=8.9.4.*-1+cuda11.8 \ - libcudnn8-samples=8.9.4.*-1+cuda11.8 \ - cuda-runtime-11-8 \ - cuda-11-8 \ - libcutensor1=1.6.1.5-1 \ - libcutensor-dev=1.6.1.5-1 \ - cuda-drivers=520.61.05-1 - - # - # Build - # - - - name: CMake configure - run: | - cmake \ - -G "${{ env.CMAKE_GENERATOR }}" \ - -DENABLE_CPPLINT=OFF \ - -DENABLE_NCC_STYLE=OFF \ - -DENABLE_SYSTEM_PUGIXML=ON \ - -DENABLE_SYSTEM_OPENCL=ON \ - -DENABLE_STRICT_DEPENDENCIES=OFF \ - -DCMAKE_BUILD_TYPE=${{ env.CMAKE_BUILD_TYPE }} \ - -DENABLE_INTEL_CPU=OFF \ - -DENABLE_INTEL_GPU=OFF \ - -DENABLE_INTEL_GNA=OFF \ - -DENABLE_OV_TF_FRONTEND=OFF \ - -DENABLE_OV_TF_LITE_FRONTEND=OFF \ - -DENABLE_OV_PADDLE_FRONTEND=OFF \ - -DENABLE_OV_PYTORCH_FRONTEND=OFF \ - -DENABLE_OV_ONNX_FRONTEND=OFF \ - -DENABLE_PYTHON=OFF \ - -DENABLE_TESTS=ON \ - -DCPACK_GENERATOR=TGZ \ - -DCMAKE_COMPILE_WARNING_AS_ERROR=ON \ - -S ${OPENVINO_REPO} \ - -B ${OV_BUILD_DIR} - - - name: Build - OpenVINO - run: | - cmake --build ${OV_BUILD_DIR} --parallel --config ${{ env.CMAKE_BUILD_TYPE }} --verbose --target ov_dev_targets - - - name: Cmake & Build - NVIDIA Plugin - run: | - cmake \ - -DOpenVINODeveloperPackage_DIR=${OV_BUILD_DIR} \ - -DCMAKE_COMPILE_WARNING_AS_ERROR=OFF \ - -S ${OPENVINO_CONTRIB_REPO}/modules/nvidia_plugin \ - -B ${NVIDIA_BUILD_DIR} - cmake --build ${NVIDIA_BUILD_DIR} --parallel --config ${{ env.CMAKE_BUILD_TYPE }} --verbose -- ov_nvidia_func_tests ov_nvidia_unit_tests - - - name: Show ccache stats - run: ccache --show-stats diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 493a1e47ba6e0a..1e5e0b8946680f 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -56,6 +56,7 @@ jobs: with: repository: 'openvinotoolkit/openvino_contrib' path: 'openvino_contrib' + ref: 'master' # # Dependencies diff --git a/.github/workflows/windows_conditional_compilation.yml b/.github/workflows/windows_conditional_compilation.yml index fabd763e1a3f2d..93f947ee071df1 100644 --- a/.github/workflows/windows_conditional_compilation.yml +++ b/.github/workflows/windows_conditional_compilation.yml @@ -67,6 +67,7 @@ jobs: repository: 'openvinotoolkit/testdata' path: 'testdata' lfs: 'true' + ref: 'master' # # Dependencies diff --git a/CMakeLists.txt b/CMakeLists.txt index ea3de7994f722e..82277e5c875cfb 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -81,54 +81,55 @@ file(REMOVE "${CMAKE_BINARY_DIR}/ngraphTargets.cmake") file(REMOVE "${CMAKE_BINARY_DIR}/InferenceEngineTargets.cmake") file(REMOVE "${CMAKE_BINARY_DIR}/OpenVINOTargets.cmake") -# remove exported developer targets to force its regeneration -macro(ov_clean_dev_targets) - foreach(component IN LISTS openvino_export_components) - file(REMOVE "${CMAKE_BINARY_DIR}/${component}_dev_targets.cmake") - file(REMOVE "${CMAKE_BINARY_DIR}/ov_${component}_dev_targets.cmake") - unset(${component} CACHE) - endforeach() - unset(openvino_export_components CACHE) +# remove exported developer targets files to force its regeneration +macro(ov_clean_developer_package_targets) + file(REMOVE 
"${CMAKE_BINARY_DIR}/inference_engine_developer_package_targets.cmake") + file(REMOVE "${CMAKE_BINARY_DIR}/openvino_developer_package_targets.cmake") + unset(_OPENVINO_DEVELOPER_PACKAGE_TARGETS CACHE) unset(openvino_installed_targets CACHE) endmacro() -ov_clean_dev_targets() +ov_clean_developer_package_targets() -# -# Build -# +function(ov_developer_package_export_targets) + cmake_parse_arguments(EXPORT "" "TARGET;INSTALL_DESTIONATION" "INSTALL_INCLUDE_DIRECTORIES" ${ARGN}) -function(openvino_developer_export_targets) - cmake_parse_arguments(EXPORT "" "COMPONENT" "TARGETS" ${ARGN}) + # to allow exporting of aliased targets with the original names + if(TARGET "${EXPORT_TARGET}") + get_target_property(original_name ${EXPORT_TARGET} ALIASED_TARGET) + if(TARGET "${original_name}") + # replace target with its original name + set(EXPORT_TARGET ${original_name}) + endif() + list(APPEND _OPENVINO_DEVELOPER_PACKAGE_TARGETS ${EXPORT_TARGET}) - if(EXPORT_UNPARSED_ARGUMENTS) - message(FATAL_ERROR "openvino_developer_export_targets has unparsed arguments: ${EXPORT_UNPARSED_ARGUMENTS}") - endif() + if(EXPORT_INSTALL_INCLUDE_DIRECTORIES) + if(NOT EXPORT_INSTALL_DESTIONATION) + set(EXPORT_INSTALL_DESTIONATION "developer_package/include/${EXPORT_TARGET}") + endif() - set(${EXPORT_COMPONENT} "${${EXPORT_COMPONENT}};${EXPORT_TARGETS}") + target_include_directories(${EXPORT_TARGET} INTERFACE "$") - # to allow exporting of aliased targets with the original names - foreach(target_name IN LISTS ${EXPORT_COMPONENT}) - if(TARGET "${target_name}") - get_target_property(original_name ${target_name} ALIASED_TARGET) - if(TARGET "${original_name}") - list(REMOVE_ITEM ${EXPORT_COMPONENT} ${target_name}) - list(APPEND ${EXPORT_COMPONENT} ${original_name}) - endif() + foreach(install_dir IN LISTS EXPORT_INSTALL_INCLUDE_DIRECTORIES) + install(DIRECTORY "${install_dir}" + DESTINATION "${EXPORT_INSTALL_DESTIONATION}" + COMPONENT developer_package EXCLUDE_FROM_ALL) + endforeach() endif() - endforeach() - - list(REMOVE_DUPLICATES ${EXPORT_COMPONENT}) - set(${EXPORT_COMPONENT} "${${EXPORT_COMPONENT}}" CACHE INTERNAL - "A list of OpenVINO ${EXPORT_COMPONENT} exported targets" FORCE) + else() + message(FATAL_ERROR "Internal error: ${target_name} does not represent a cmake target") + endif() - list(APPEND openvino_export_components ${EXPORT_COMPONENT}) - list(REMOVE_DUPLICATES openvino_export_components) - set(openvino_export_components "${openvino_export_components}" CACHE INTERNAL - "A list of OpenVINO exported components" FORCE) + list(REMOVE_DUPLICATES _OPENVINO_DEVELOPER_PACKAGE_TARGETS) + set(_OPENVINO_DEVELOPER_PACKAGE_TARGETS "${_OPENVINO_DEVELOPER_PACKAGE_TARGETS}" CACHE INTERNAL + "A list of OpenVINO Developer Package exported targets" FORCE) endfunction() -# add target with processed tests model zoo +# +# Build +# + if(ENABLE_TESTS) + # add target with processed tests model zoo include(cmake/test_model_zoo.cmake) endif() diff --git a/cmake/dependencies.cmake b/cmake/dependencies.cmake index 1d4210f300b058..5c86bdea57620c 100644 --- a/cmake/dependencies.cmake +++ b/cmake/dependencies.cmake @@ -5,7 +5,7 @@ cmake_policy(SET CMP0054 NEW) # TODO: fix it, outside of source dir MO cannot find TBB dependency -set_temp_directory(TEMP "${CMAKE_SOURCE_DIR}") +ov_set_temp_directory(TEMP "${CMAKE_SOURCE_DIR}") ## Intel OMP package if(THREADING STREQUAL "OMP") @@ -71,12 +71,16 @@ function(ov_download_tbb) if(NOT DEFINED ENV{TBBROOT} AND (DEFINED ENV{TBB_DIR} OR DEFINED TBB_DIR)) if(DEFINED ENV{TBB_DIR}) - set(TEMP_ROOT $ENV{TBB_DIR}) 
- elseif (DEFINED TBB_DIR) - set(TEMP_ROOT ${TBB_DIR}) + set(TBB_DIR "$ENV{TBB_DIR}") endif() + set(TEMP_ROOT "${TBB_DIR}") while(NOT EXISTS "${TEMP_ROOT}/include") - get_filename_component(TEMP_ROOT ${TEMP_ROOT} PATH) + get_filename_component(TEMP_ROOT_PARENT ${TEMP_ROOT} PATH) + if(TEMP_ROOT_PARENT STREQUAL TEMP_ROOT) + # to prevent recursion + message(FATAL_ERROR "${TBB_DIR} does not contain 'include' folder. Please, unset TBB_DIR") + endif() + set(TEMP_ROOT "${TEMP_ROOT_PARENT}") endwhile() set(TBBROOT ${TEMP_ROOT}) endif() diff --git a/cmake/developer_package/OpenVINODeveloperScriptsConfig.cmake b/cmake/developer_package/OpenVINODeveloperScriptsConfig.cmake index 3996f373156d89..bc512b9b229b02 100644 --- a/cmake/developer_package/OpenVINODeveloperScriptsConfig.cmake +++ b/cmake/developer_package/OpenVINODeveloperScriptsConfig.cmake @@ -27,11 +27,12 @@ endmacro() set(OLD_CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH}) set(CMAKE_MODULE_PATH "${OpenVINODeveloperScripts_DIR}") -function(set_ci_build_number) - set(repo_root "${CMAKE_SOURCE_DIR}") +function(ov_set_ci_build_number) include(version) - foreach(var CI_BUILD_NUMBER OpenVINO_VERSION OpenVINO_SOVERSION OpenVINO_VERSION_SUFFIX OpenVINO_VERSION_BUILD - OpenVINO_VERSION_MAJOR OpenVINO_VERSION_MINOR OpenVINO_VERSION_PATCH) + ov_parse_ci_build_number("${CMAKE_SOURCE_DIR}") + + foreach(var CI_BUILD_NUMBER OpenVINO_VERSION OpenVINO_SOVERSION OpenVINO_VERSION_SUFFIX + OpenVINO_VERSION_MAJOR OpenVINO_VERSION_MINOR OpenVINO_VERSION_PATCH OpenVINO_VERSION_BUILD) if(NOT DEFINED ${var}) message(FATAL_ERROR "${var} version component is not defined") endif() @@ -44,7 +45,7 @@ ov_set_if_not_defined(Python3_FIND_STRATEGY LOCATION) include(features) -set_ci_build_number() +ov_set_ci_build_number() # # Detect target @@ -139,10 +140,13 @@ endif() # allow to override default OUTPUT_ROOT root if(NOT DEFINED OUTPUT_ROOT) - if(NOT DEFINED OpenVINO_SOURCE_DIR) - message(FATAL_ERROR "OpenVINO_SOURCE_DIR is not defined") + if(DEFINED OpenVINO_SOURCE_DIR) + # For BW compatiblity, when extra modules are built separately + # but still write its artifacts to OpenVINO source directory + set(OUTPUT_ROOT ${OpenVINO_SOURCE_DIR}) + else() + set(OUTPUT_ROOT ${CMAKE_SOURCE_DIR}) endif() - set(OUTPUT_ROOT ${OpenVINO_SOURCE_DIR}) endif() # Enable postfixes for Debug/Release builds diff --git a/cmake/developer_package/add_target_helpers.cmake b/cmake/developer_package/add_target_helpers.cmake index c52b393d7bbe74..d07011ae210cd3 100644 --- a/cmake/developer_package/add_target_helpers.cmake +++ b/cmake/developer_package/add_target_helpers.cmake @@ -9,7 +9,6 @@ ov_add_target( NAME core_lib ADD_CPPLINT ADD_CLANG_FORMAT - DEVELOPER_PACKAGE TYPE ROOT ${CMAKE_CURRENT_SOURCE_DIR} ADDITIONAL_SOURCE_DIRS @@ -44,9 +43,6 @@ function(ov_add_target) NAME # name of target ROOT # root directory to be used for recursive search of source files ) - set(oneValueOptionalArgs - DEVELOPER_PACKAGE # Enables exporting of the target through the developer package - ) set(multiValueArgs INCLUDES # Extra include directories LINK_LIBRARIES # Link libraries (in form of target name or file name) @@ -58,7 +54,7 @@ function(ov_add_target) LINK_LIBRARIES_WHOLE_ARCHIVE # list of static libraries to link, each object file should be used and not discarded LINK_FLAGS # list of extra commands to linker ) - cmake_parse_arguments(ARG "${options}" "${oneValueRequiredArgs};${oneValueOptionalArgs}" "${multiValueArgs}" ${ARGN} ) + cmake_parse_arguments(ARG "${options}" "${oneValueRequiredArgs}" "${multiValueArgs}" ${ARGN} 
) # sanity checks foreach(argName IN LISTS oneValueRequiredArgs) @@ -128,11 +124,6 @@ function(ov_add_target) # code style ov_add_clang_format_target(${ARG_NAME}_clang FOR_TARGETS ${ARG_NAME}) endif() - if (ARG_DEVELOPER_PACKAGE) - # developer package - openvino_developer_export_targets(COMPONENT ${ARG_DEVELOPER_PACKAGE} - TARGETS ${ARG_NAME}) - endif() if(WIN32) # Provide default compile pdb name equal to target name set_target_properties(${ARG_NAME} PROPERTIES COMPILE_PDB_NAME ${ARG_NAME}) diff --git a/cmake/developer_package/compile_flags/os_flags.cmake b/cmake/developer_package/compile_flags/os_flags.cmake index 7d98b40c3ce81d..c0c878e0183eb0 100644 --- a/cmake/developer_package/compile_flags/os_flags.cmake +++ b/cmake/developer_package/compile_flags/os_flags.cmake @@ -329,15 +329,15 @@ endif() file(RELATIVE_PATH OV_RELATIVE_BIN_PATH ${CMAKE_CURRENT_BINARY_DIR} ${CMAKE_SOURCE_DIR}) -if(${CMAKE_VERSION} VERSION_LESS "3.20") - file(TO_NATIVE_PATH ${OpenVINO_SOURCE_DIR} OV_NATIVE_PROJECT_ROOT_DIR) +if(CMAKE_VERSION VERSION_LESS 3.20) + file(TO_NATIVE_PATH ${CMAKE_SOURCE_DIR} OV_NATIVE_PROJECT_ROOT_DIR) file(TO_NATIVE_PATH ${OV_RELATIVE_BIN_PATH} NATIVE_OV_RELATIVE_BIN_PATH) else() - cmake_path(NATIVE_PATH OpenVINO_SOURCE_DIR OV_NATIVE_PROJECT_ROOT_DIR) + cmake_path(NATIVE_PATH CMAKE_SOURCE_DIR OV_NATIVE_PROJECT_ROOT_DIR) cmake_path(NATIVE_PATH OV_RELATIVE_BIN_PATH NATIVE_OV_RELATIVE_BIN_PATH) endif() -file(RELATIVE_PATH OV_NATIVE_PARENT_PROJECT_ROOT_DIR "${OpenVINO_SOURCE_DIR}/.." ${OpenVINO_SOURCE_DIR}) +file(RELATIVE_PATH OV_NATIVE_PARENT_PROJECT_ROOT_DIR "${CMAKE_SOURCE_DIR}/.." ${CMAKE_SOURCE_DIR}) if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") # @@ -392,7 +392,7 @@ if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") if(NOT DEFINED ENV{GITHUB_ACTIONS}) add_compile_options( "$<$:/d1trimfile:${OV_NATIVE_PROJECT_ROOT_DIR}\\>" - "$<$:/d1trimfile:${OpenVINO_SOURCE_DIR}/>") + "$<$:/d1trimfile:${CMAKE_SOURCE_DIR}/>") endif() # diff --git a/cmake/developer_package/options.cmake b/cmake/developer_package/options.cmake index 75b9c886894631..4506d85a027f92 100644 --- a/cmake/developer_package/options.cmake +++ b/cmake/developer_package/options.cmake @@ -43,7 +43,7 @@ macro(ov_option_enum variable description value) endmacro() function (ov_print_enabled_features) - if(NOT COMMAND set_ci_build_number) + if(NOT COMMAND ov_set_ci_build_number) message(FATAL_ERROR "CI_BUILD_NUMBER is not set yet") endif() diff --git a/cmake/developer_package/packaging/nsis.cmake b/cmake/developer_package/packaging/nsis.cmake index 901e34f97820bb..4174037af74f39 100644 --- a/cmake/developer_package/packaging/nsis.cmake +++ b/cmake/developer_package/packaging/nsis.cmake @@ -5,6 +5,8 @@ macro(ov_nsis_specific_settings) # installation directory set(CPACK_PACKAGE_INSTALL_DIRECTORY "Intel") + # License to be embedded in the installer + set(CPACK_RESOURCE_FILE_LICENSE "${OpenVINO_SOURCE_DIR}/LICENSE") # TODO: provide icons # set(CPACK_NSIS_MUI_ICON "") diff --git a/cmake/developer_package/packaging/packaging.cmake b/cmake/developer_package/packaging/packaging.cmake index 505565f55da5d7..2279580040f736 100644 --- a/cmake/developer_package/packaging/packaging.cmake +++ b/cmake/developer_package/packaging/packaging.cmake @@ -193,8 +193,6 @@ macro(ov_cpack) set(CPACK_PACKAGE_CONTACT "OpenVINO Developers ") set(CPACK_VERBATIM_VARIABLES ON) set(CPACK_COMPONENTS_ALL ${ARGN}) - # TODO: set proper license file for Windows installer - set(CPACK_RESOURCE_FILE_LICENSE "${OpenVINO_SOURCE_DIR}/LICENSE") # default permissions for directories creation 
set(CMAKE_INSTALL_DEFAULT_DIRECTORY_PERMISSIONS diff --git a/cmake/developer_package/version.cmake b/cmake/developer_package/version.cmake index 0353e3a52a8617..1b71befe448b76 100644 --- a/cmake/developer_package/version.cmake +++ b/cmake/developer_package/version.cmake @@ -4,50 +4,51 @@ find_package(Git QUIET) -function (branchName VAR) - if(NOT DEFINED repo_root) - message(FATAL_ERROR "repo_root is not defined") - endif() +function(ov_branch_name VAR REPO_ROOT) if(GIT_FOUND) execute_process( COMMAND ${GIT_EXECUTABLE} rev-parse --abbrev-ref HEAD - WORKING_DIRECTORY ${repo_root} + WORKING_DIRECTORY ${REPO_ROOT} OUTPUT_VARIABLE GIT_BRANCH OUTPUT_STRIP_TRAILING_WHITESPACE) set (${VAR} ${GIT_BRANCH} PARENT_SCOPE) endif() endfunction() -function (commitHash VAR) - if(NOT DEFINED repo_root) - message(FATAL_ERROR "repo_root is not defined") - endif() +function(ov_commit_hash VAR REPO_ROOT) if(GIT_FOUND) execute_process( COMMAND ${GIT_EXECUTABLE} rev-parse --short=11 HEAD - WORKING_DIRECTORY ${repo_root} + WORKING_DIRECTORY ${REPO_ROOT} OUTPUT_VARIABLE GIT_COMMIT_HASH OUTPUT_STRIP_TRAILING_WHITESPACE) set (${VAR} ${GIT_COMMIT_HASH} PARENT_SCOPE) endif() endfunction() -function (commitNumber VAR) - if(NOT DEFINED repo_root) - message(FATAL_ERROR "repo_root is not defined") - endif() +function(ov_commit_number VAR REPO_ROOT) if(GIT_FOUND) execute_process( COMMAND ${GIT_EXECUTABLE} rev-list --count --first-parent HEAD - WORKING_DIRECTORY ${repo_root} + WORKING_DIRECTORY ${REPO_ROOT} OUTPUT_VARIABLE GIT_COMMIT_NUMBER OUTPUT_STRIP_TRAILING_WHITESPACE) set (${VAR} ${GIT_COMMIT_NUMBER} PARENT_SCOPE) + else() + # set zeros since git is not available + set (${VAR} "000" PARENT_SCOPE) endif() endfunction() -macro(ov_parse_ci_build_number) - set(OpenVINO_VERSION_BUILD 000) +macro(ov_parse_ci_build_number repo_root) + # provides OpenVINO version + # 1. If CI_BUILD_NUMBER is defined, parses this information + # 2. 
Otherwise, either: + # - parses openvino/core/version.hpp + # - takes from OpenVINOConfig-version.cmake in case of relocatable Developer package + if (DEFINED ENV{CI_BUILD_NUMBER}) + set(CI_BUILD_NUMBER $ENV{CI_BUILD_NUMBER}) + endif() if(CI_BUILD_NUMBER MATCHES "^([0-9]+)\.([0-9]+)\.([0-9]+)\-([0-9]+)\-.*") set(OpenVINO_VERSION_MAJOR ${CMAKE_MATCH_1}) @@ -63,12 +64,9 @@ macro(ov_parse_ci_build_number) message(FATAL_ERROR "Failed to parse CI_BUILD_NUMBER which is ${CI_BUILD_NUMBER}") endif() - if(NOT DEFINED repo_root) - message(FATAL_ERROR "repo_root is not defined") - endif() - - macro(ov_get_hpp_version) + function(ov_compare_version_with_headers) if(NOT DEFINED OpenVINO_SOURCE_DIR) + # if we are not in OpenVINO source tree, let's ignore this comparison return() endif() @@ -101,30 +99,29 @@ macro(ov_parse_ci_build_number) endif() endforeach() - # detect commit number - commitNumber(OpenVINO_VERSION_BUILD_HPP) - if(OpenVINO_VERSION_BUILD STREQUAL "000" AND DEFINED OpenVINO_VERSION_BUILD_HPP) - set(OpenVINO_VERSION_BUILD "${OpenVINO_VERSION_BUILD_HPP}") - else() - set(OpenVINO_VERSION_BUILD_HPP "${OpenVINO_VERSION_BUILD}") - endif() - - set(ov_hpp_version_is_found ON) - endmacro() - - # detect OpenVINO version via openvino/core/version.hpp and ie_version.hpp - ov_get_hpp_version() - - if(ov_hpp_version_is_found) - foreach(var OpenVINO_VERSION_MAJOR OpenVINO_VERSION_MINOR OpenVINO_VERSION_PATCH OpenVINO_VERSION_BUILD) + foreach(var OpenVINO_VERSION_MAJOR OpenVINO_VERSION_MINOR OpenVINO_VERSION_PATCH) if(DEFINED ${var} AND NOT ${var} EQUAL ${var}_HPP) message(FATAL_ERROR "${var} parsed from CI_BUILD_NUMBER (${${var}}) \ and from openvino/core/version.hpp (${${var}_HPP}) are different") else() # CI_BUILD_NUMBER is not defined well, take info from openvino/core/version.hpp as a baseline - set(${var} ${${var}_HPP}) + set(${var} ${${var}_HPP} PARENT_SCOPE) endif() endforeach() + endfunction() + + # detect OpenVINO version via openvino/core/version.hpp and ie_version.hpp + ov_compare_version_with_headers() + + # detect commit number + ov_commit_number(OpenVINO_VERSION_BUILD_FROM_GIT "${repo_root}") + + if(OpenVINO_VERSION_BUILD AND NOT OpenVINO_VERSION_BUILD STREQUAL OpenVINO_VERSION_BUILD_FROM_GIT) + # TODO: replace with FATAL_ERROR once NPU version will be discussed + message(WARNING "OpenVINO_VERSION_BUILD parsed from CI_BUILD_NUMBER (${OpenVINO_VERSION_BUILD}) \ + and determined by git (${OpenVINO_VERSION_BUILD_FROM_GIT}) are different") + else() + set(OpenVINO_VERSION_BUILD "${OpenVINO_VERSION_BUILD_FROM_GIT}") endif() set(OpenVINO_SOVERSION "${OpenVINO_VERSION_MAJOR}${OpenVINO_VERSION_MINOR}${OpenVINO_VERSION_PATCH}") @@ -140,8 +137,8 @@ macro(ov_parse_ci_build_number) if(NOT the_whole_version_is_defined_by_ci) # create CI_BUILD_NUMBER - branchName(GIT_BRANCH) - commitHash(GIT_COMMIT_HASH) + ov_branch_name(GIT_BRANCH "${repo_root}") + ov_commit_hash(GIT_COMMIT_HASH "${repo_root}") if(NOT GIT_BRANCH STREQUAL "master") set(GIT_BRANCH_POSTFIX "-${GIT_BRANCH}") @@ -157,14 +154,6 @@ macro(ov_parse_ci_build_number) endif() endmacro() -# provides OpenVINO version -# 1. If CI_BUILD_NUMBER is defined, parses this information -# 2. Otherwise, parses openvino/core/version.hpp -if (DEFINED ENV{CI_BUILD_NUMBER}) - set(CI_BUILD_NUMBER $ENV{CI_BUILD_NUMBER}) -endif() -ov_parse_ci_build_number() - macro (addVersionDefines FILE) message(WARNING "'addVersionDefines' is deprecated. 
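# [Editor's illustration, not part of this patch]
# Worked example for the ov_parse_ci_build_number() regex above; the exact string
# is a hypothetical value, only its shape follows the regex
# "^MAJOR.MINOR.PATCH-BUILD-...":
#
#   CI_BUILD_NUMBER = "2023.2.0-12690-abcdef012345"
#     -> OpenVINO_VERSION_MAJOR = 2023
#     -> OpenVINO_VERSION_MINOR = 2
#     -> OpenVINO_VERSION_PATCH = 0
#     -> OpenVINO_VERSION_BUILD = 12690
#   and OpenVINO_SOVERSION is then composed as "202320".
#
# When CI_BUILD_NUMBER is not set, the macro falls back to the version parsed from
# openvino/core/version.hpp, and OpenVINO_VERSION_BUILD is taken from the commit
# count reported by ov_commit_number() (or "000" when git is unavailable).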
Please, use 'ov_add_version_defines'") diff --git a/cmake/extra_modules.cmake b/cmake/extra_modules.cmake index afc1dc335b3e55..6c392fcc6eed12 100644 --- a/cmake/extra_modules.cmake +++ b/cmake/extra_modules.cmake @@ -9,13 +9,10 @@ function(ie_generate_dev_package_config) set(OpenCV_FOUND OFF) endif() - foreach(component IN LISTS openvino_export_components) - # export all targets with prefix and use them during extra modules build - export(TARGETS ${${component}} NAMESPACE IE:: - APPEND FILE "${CMAKE_BINARY_DIR}/${component}_dev_targets.cmake") - list(APPEND all_dev_targets ${${component}}) - endforeach() - add_custom_target(ie_dev_targets DEPENDS ${all_dev_targets}) + # export all targets with prefix and use them during extra modules build + export(TARGETS ${_OPENVINO_DEVELOPER_PACKAGE_TARGETS} NAMESPACE IE:: + APPEND FILE "${CMAKE_BINARY_DIR}/inference_engine_developer_package_targets.cmake") + add_custom_target(ie_dev_targets DEPENDS ${_OPENVINO_DEVELOPER_PACKAGE_TARGETS}) set(PATH_VARS "OpenVINO_SOURCE_DIR") if(ENABLE_SAMPLES OR ENABLE_TESTS) @@ -44,20 +41,20 @@ function(ov_generate_dev_package_config) set(OpenCV_FOUND OFF) endif() - foreach(component IN LISTS openvino_export_components) - # filter out targets which are installed by OpenVINOConfig.cmake static build case - set(exported_targets) - foreach(target IN LISTS ${component}) - if(NOT target IN_LIST openvino_installed_targets) - list(APPEND exported_targets ${target}) - endif() - endforeach() - # export all developer targets with prefix and use them during extra modules build - export(TARGETS ${exported_targets} NAMESPACE openvino:: - APPEND FILE "${CMAKE_BINARY_DIR}/ov_${component}_dev_targets.cmake") - list(APPEND all_dev_targets ${${component}}) - endforeach() - add_custom_target(ov_dev_targets DEPENDS ${all_dev_targets}) + # create a helper target to build all developer package targets + add_custom_target(ov_dev_targets DEPENDS ${_OPENVINO_DEVELOPER_PACKAGE_TARGETS}) + + # filter out targets which are installed by OpenVINOConfig.cmake static build case + if(openvino_installed_targets) + list(REMOVE_ITEM _OPENVINO_DEVELOPER_PACKAGE_TARGETS ${openvino_installed_targets}) + endif() + # export all developer targets with prefix and use them during extra modules build + export(TARGETS ${_OPENVINO_DEVELOPER_PACKAGE_TARGETS} NAMESPACE openvino:: + APPEND FILE "${CMAKE_BINARY_DIR}/openvino_developer_package_targets.cmake") + + # + # OpenVINODeveloperPackageConfig.cmake for build tree + # set(PATH_VARS "OpenVINO_SOURCE_DIR") if(ENABLE_SAMPLES OR ENABLE_TESTS) @@ -77,38 +74,91 @@ function(ov_generate_dev_package_config) configure_file("${OpenVINO_SOURCE_DIR}/cmake/templates/OpenVINOConfig-version.cmake.in" "${CMAKE_BINARY_DIR}/OpenVINODeveloperPackageConfig-version.cmake" @ONLY) + + # + # OpenVINODeveloperPackageConfig.cmake for installation tree + # + + set(DEV_PACKAGE_ROOT_DIR developer_package) + set(DEV_PACKAGE_CMAKE_DIR ${DEV_PACKAGE_ROOT_DIR}/cmake) + set(DEVELOPER_PACKAGE_COMPONENT developer_package) + set(DEVELOPER_PACKAGE_EXPORT_SET OpenVINODeveloperTargets) + + # create and install main developer package config files + configure_package_config_file("${OpenVINO_SOURCE_DIR}/cmake/templates/OpenVINODeveloperPackageConfigRelocatable.cmake.in" + "${OpenVINO_BINARY_DIR}/share/OpenVINODeveloperPackageConfig.cmake" + INSTALL_DESTINATION ${DEV_PACKAGE_CMAKE_DIR} + NO_CHECK_REQUIRED_COMPONENTS_MACRO) + + configure_file("${OpenVINO_SOURCE_DIR}/cmake/templates/OpenVINOConfig-version.cmake.in" + 
"${OpenVINO_BINARY_DIR}/share/OpenVINODeveloperPackageConfig-version.cmake" + @ONLY) + + install(FILES "${OpenVINO_BINARY_DIR}/share/OpenVINODeveloperPackageConfig.cmake" + "${OpenVINO_BINARY_DIR}/share/OpenVINODeveloperPackageConfig-version.cmake" + DESTINATION ${DEV_PACKAGE_CMAKE_DIR} + COMPONENT ${DEVELOPER_PACKAGE_COMPONENT} + EXCLUDE_FROM_ALL) + + # Install whole 'cmake/developer_package' folder + install(DIRECTORY "${OpenVINODeveloperScripts_DIR}/" + DESTINATION "${DEV_PACKAGE_CMAKE_DIR}" + COMPONENT ${DEVELOPER_PACKAGE_COMPONENT} + EXCLUDE_FROM_ALL) + + # Install CMakeLists.txt to read cache variables from + install(FILES "${OpenVINO_BINARY_DIR}/CMakeCache.txt" + DESTINATION ${DEV_PACKAGE_CMAKE_DIR} + COMPONENT ${DEVELOPER_PACKAGE_COMPONENT} + EXCLUDE_FROM_ALL) + + # install developer package targets + install(TARGETS ${_OPENVINO_DEVELOPER_PACKAGE_TARGETS} EXPORT ${DEVELOPER_PACKAGE_EXPORT_SET} + RUNTIME DESTINATION ${DEV_PACKAGE_ROOT_DIR}/bin COMPONENT ${DEVELOPER_PACKAGE_COMPONENT} EXCLUDE_FROM_ALL + ARCHIVE DESTINATION ${DEV_PACKAGE_ROOT_DIR}/lib COMPONENT ${DEVELOPER_PACKAGE_COMPONENT} EXCLUDE_FROM_ALL + LIBRARY DESTINATION ${DEV_PACKAGE_ROOT_DIR}/lib COMPONENT ${DEVELOPER_PACKAGE_COMPONENT} EXCLUDE_FROM_ALL) + + install(EXPORT ${DEVELOPER_PACKAGE_EXPORT_SET} + FILE OpenVINODeveloperPackageTargets.cmake + NAMESPACE openvino:: + DESTINATION ${DEV_PACKAGE_ROOT_DIR}/cmake + COMPONENT ${DEVELOPER_PACKAGE_COMPONENT} + EXCLUDE_FROM_ALL) + + # Note: that OpenCV and gflags are explicitly not installed to simplify relocatable + # OpenVINO Developer package maintainance. OpenVINO_SOURCE_DIR is also unvailable, because + # relocatable developer package can be used on a different machine where OpenVINO repo is not available endfunction() # # Add extra modules # -function(register_extra_modules) +function(_ov_register_extra_modules) set(InferenceEngineDeveloperPackage_DIR "${CMAKE_CURRENT_BINARY_DIR}/build-modules") set(OpenVINODeveloperPackage_DIR "${CMAKE_BINARY_DIR}/build-modules") set(OpenVINO_DIR "${CMAKE_BINARY_DIR}") - function(generate_fake_dev_package NS) + function(_ov_generate_fake_developer_package NS) if(NS STREQUAL "openvino") set(devconfig_file "${OpenVINODeveloperPackage_DIR}/OpenVINODeveloperPackageConfig.cmake") else() set(devconfig_file "${InferenceEngineDeveloperPackage_DIR}/InferenceEngineDeveloperPackageConfig.cmake") endif() - file(REMOVE "${devconfig_file}") + file(REMOVE "${devconfig_file}") file(WRITE "${devconfig_file}" "\# !! 
AUTOGENERATED: DON'T EDIT !!\n\n") - foreach(targets_list IN LISTS ${openvino_export_components}) - foreach(target IN LISTS targets_list) - file(APPEND "${devconfig_file}" "if(NOT TARGET ${NS}::${target}) - add_library(${NS}::${target} ALIAS ${target}) + foreach(exported_target IN LISTS _OPENVINO_DEVELOPER_PACKAGE_TARGETS) + file(APPEND "${devconfig_file}" "if(NOT TARGET ${NS}::${exported_target}) + add_library(${NS}::${exported_target} ALIAS ${exported_target}) endif()\n") - endforeach() endforeach() endfunction() - generate_fake_dev_package("openvino") - generate_fake_dev_package("IE") + _ov_generate_fake_developer_package("openvino") + # TODO: remove with API 1.0 removal + _ov_generate_fake_developer_package("IE") # detect where OPENVINO_EXTRA_MODULES contains folders with CMakeLists.txt # other folders are supposed to have sub-folders with CMakeLists.txt @@ -155,21 +205,18 @@ endfunction() # Extra modules support # -# this InferenceEngineDeveloperPackageConfig.cmake is not used -# during extra modules build since it's generated after modules -# are configured +# this OpenVINODeveloperPackageConfig.cmake is not used during extra modules build +# since it's generated after modules are configured ie_generate_dev_package_config() ov_generate_dev_package_config() # extra modules must be registered after inference_engine library # and all other OpenVINO Core libraries are creared -# because 'register_extra_modules' creates fake InferenceEngineDeveloperPackageConfig.cmake +# because '_ov_register_extra_modules' creates fake InferenceEngineDeveloperPackageConfig.cmake # with all imported developer targets -register_extra_modules() +_ov_register_extra_modules() -# for static libraries case we need to generate final ov_plugins.hpp -# with all the information about plugins +# we need to generate final ov_plugins.hpp with all the information about plugins ov_generate_plugins_hpp() - -# used for static build +# we need to generate final ov_frontends.hpp with all the information about frontends ov_generate_frontends_hpp() diff --git a/cmake/templates/InferenceEngineDeveloperPackageConfig.cmake.in b/cmake/templates/InferenceEngineDeveloperPackageConfig.cmake.in index e197597487b61c..a98b4207e285d2 100644 --- a/cmake/templates/InferenceEngineDeveloperPackageConfig.cmake.in +++ b/cmake/templates/InferenceEngineDeveloperPackageConfig.cmake.in @@ -88,9 +88,7 @@ endif() _ov_find_tbb() -foreach(component @openvino_export_components@) - include("${CMAKE_CURRENT_LIST_DIR}/${component}_dev_targets.cmake") -endforeach() +include("${CMAKE_CURRENT_LIST_DIR}/inference_engine_developer_package_targets.cmake") if(TARGET IE::ov_core_dev AND NOT TARGET openvino::core::dev) add_library(openvino::core::dev INTERFACE IMPORTED) diff --git a/cmake/templates/OpenVINODeveloperPackageConfig.cmake.in b/cmake/templates/OpenVINODeveloperPackageConfig.cmake.in index f78e31ce635d81..3620bcd091dab5 100644 --- a/cmake/templates/OpenVINODeveloperPackageConfig.cmake.in +++ b/cmake/templates/OpenVINODeveloperPackageConfig.cmake.in @@ -77,13 +77,7 @@ find_dependency(OpenVINO _ov_find_tbb() _ov_find_pugixml() -foreach(component @openvino_export_components@) - # TODO: remove legacy targets from some tests - # string(FIND "${component}" "_legacy" index) - # if (index EQUAL -1) - include("${CMAKE_CURRENT_LIST_DIR}/ov_${component}_dev_targets.cmake") - # endif() -endforeach() +include("${CMAKE_CURRENT_LIST_DIR}/openvino_developer_package_targets.cmake") # inherit OpenCV from main OpenVINO project if enabled if("@OpenCV_FOUND@") diff --git 
a/cmake/templates/OpenVINODeveloperPackageConfigRelocatable.cmake.in b/cmake/templates/OpenVINODeveloperPackageConfigRelocatable.cmake.in new file mode 100644 index 00000000000000..ed9826e663e8d9 --- /dev/null +++ b/cmake/templates/OpenVINODeveloperPackageConfigRelocatable.cmake.in @@ -0,0 +1,76 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +@PACKAGE_INIT@ + +include(CMakeFindDependencyMacro) + +# Variables to export in plugin's projects + +set(ov_options "@OV_OPTIONS@") +list(APPEND ov_options CMAKE_CXX_COMPILER_LAUNCHER CMAKE_C_COMPILER_LAUNCHER + CMAKE_CXX_LINKER_LAUNCHER CMAKE_C_LINKER_LAUNCHER + CMAKE_INSTALL_PREFIX CPACK_GENERATOR) + +if(APPLE) + list(APPEND ov_options CMAKE_OSX_ARCHITECTURES CMAKE_OSX_DEPLOYMENT_TARGET) +endif() + +get_property(_OV_GENERATOR_MULTI_CONFIG GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG) +if(_OV_GENERATOR_MULTI_CONFIG) + list(APPEND ov_options CMAKE_CONFIGURATION_TYPES) + if(CMAKE_GENERATOR MATCHES "^Ninja Multi-Config$") + list(APPEND ov_options CMAKE_DEFAULT_BUILD_TYPE) + endif() +else() + list(APPEND ov_options CMAKE_BUILD_TYPE) +endif() +unset(_OV_GENERATOR_MULTI_CONFIG) + +file(TO_CMAKE_PATH "${CMAKE_CURRENT_LIST_DIR}" cache_path) + +message(STATUS "The following CMake options are exported from OpenVINO Developer package") +message(" ") +foreach(option IN LISTS ov_options) + if(NOT DEFINED "${option}") + load_cache("${cache_path}" READ_WITH_PREFIX "" ${option}) + endif() + message(" ${option}: ${${option}}") +endforeach() +message(" ") + +# Restore TBB installation directory (requires for proper LC_RPATH on macOS with SIP) +load_cache("${cache_path}" READ_WITH_PREFIX "" TBB_INSTALL_DIR) + +# activate generation of plugins.xml +set(ENABLE_PLUGINS_XML ON) + +# Disable warning as error for private components +set(CMAKE_COMPILE_WARNING_AS_ERROR OFF) + +# +# Content +# + +# OpenVINO_DIR is supposed to be set as an environment variable +find_dependency(OpenVINO) + +find_dependency(OpenVINODeveloperScripts + PATHS "${CMAKE_CURRENT_LIST_DIR}" + NO_CMAKE_FIND_ROOT_PATH + NO_DEFAULT_PATH) + +_ov_find_tbb() +_ov_find_pugixml() + +include("${CMAKE_CURRENT_LIST_DIR}/OpenVINODeveloperPackageTargets.cmake") +# +# Extra Compile Flags +# + +# don't fail on strict compilation options in 3rd party modules +ov_dev_package_no_errors() + +# Don't threat deprecated API warnings as errors in 3rd party apps +ov_deprecated_no_errors() diff --git a/samples/CMakeLists.txt b/samples/CMakeLists.txt index ab5abf8024c045..e2aeebc9c35e7f 100644 --- a/samples/CMakeLists.txt +++ b/samples/CMakeLists.txt @@ -5,7 +5,8 @@ add_subdirectory(cpp) add_subdirectory(c) -openvino_developer_export_targets(COMPONENT samples TARGETS format_reader ie_samples_utils) +ov_developer_package_export_targets(TARGET format_reader) +ov_developer_package_export_targets(TARGET ie_samples_utils) # # Install diff --git a/samples/cpp/common/format_reader/CMakeLists.txt b/samples/cpp/common/format_reader/CMakeLists.txt index 89732ca039a363..7be5f6af757501 100644 --- a/samples/cpp/common/format_reader/CMakeLists.txt +++ b/samples/cpp/common/format_reader/CMakeLists.txt @@ -30,7 +30,7 @@ else() target_compile_definitions(${TARGET_NAME} PRIVATE USE_OPENCV) endif() -target_include_directories(${TARGET_NAME} PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" +target_include_directories(${TARGET_NAME} PUBLIC "$" PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/src") set_target_properties(${TARGET_NAME} PROPERTIES FOLDER cpp_samples) diff --git a/samples/cpp/common/utils/CMakeLists.txt 
b/samples/cpp/common/utils/CMakeLists.txt index 108818b94c0c23..f7dd66d67b58fd 100644 --- a/samples/cpp/common/utils/CMakeLists.txt +++ b/samples/cpp/common/utils/CMakeLists.txt @@ -11,7 +11,7 @@ add_library(${TARGET_NAME} STATIC EXCLUDE_FROM_ALL ${SOURCES}) set_target_properties(${TARGET_NAME} PROPERTIES FOLDER "src") target_include_directories(${TARGET_NAME} - PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include") + PUBLIC "$") find_package(OpenVINO REQUIRED COMPONENTS Runtime) diff --git a/src/bindings/c/src/CMakeLists.txt b/src/bindings/c/src/CMakeLists.txt index 8f5eaeac581735..a6a649ada6e990 100644 --- a/src/bindings/c/src/CMakeLists.txt +++ b/src/bindings/c/src/CMakeLists.txt @@ -46,6 +46,7 @@ export(TARGETS ${TARGET_NAME} NAMESPACE openvino:: APPEND FILE "${CMAKE_BINARY_DIR}/OpenVINOTargets.cmake") # install + ov_cpack_add_component(${OV_CPACK_COMP_CORE_C} HIDDEN) ov_cpack_add_component(${OV_CPACK_COMP_CORE_C_DEV} HIDDEN) diff --git a/src/bindings/python/CMakeLists.txt b/src/bindings/python/CMakeLists.txt index 8a572f89a0f47e..89d21c8a7c91f4 100644 --- a/src/bindings/python/CMakeLists.txt +++ b/src/bindings/python/CMakeLists.txt @@ -20,7 +20,18 @@ project(OpenVINOPython DESCRIPTION "OpenVINO Runtime Python bindings") if(NOT DEFINED OpenVINO_SOURCE_DIR) find_package(OpenVINODeveloperPackage REQUIRED PATHS "${InferenceEngineDeveloperPackage_DIR}") + + # we assume that OpenVINODeveloperPackage is generated in OpenVINO build tree set(OpenVINO_BINARY_DIR "${OpenVINODeveloperPackage_DIR}") + # but this can be invalid for cases of OpenVINODeveloperPackage relocatable installation + # so, we need to disable wheen generation for this case + if(NOT EXISTS "${OpenVINO_BINARY_DIR}/cmake_install.cmake") + set(OpenVINODeveloperPackage_RELOCATABLE ON) + endif() +endif() + +if(NOT DEFINED OpenVINODeveloperPackage_RELOCATABLE) + set(OpenVINODeveloperPackage_RELOCATABLE OFF) endif() # @@ -141,11 +152,10 @@ function(ov_check_init_files_alignment init_files) endforeach() endfunction() -set(INIT_FILES_RUNTIME -"${OpenVINOPython_SOURCE_DIR}/src/openvino/__init__.py" -"${OpenVINOPython_SOURCE_DIR}/src/compatibility/openvino/__init__.py" -"${OpenVINO_SOURCE_DIR}/tools/ovc/openvino/__init__.py" -"${OpenVINO_SOURCE_DIR}/tools/benchmark_tool/openvino/__init__.py") +set(INIT_FILES_RUNTIME "${OpenVINOPython_SOURCE_DIR}/src/openvino/__init__.py" + "${OpenVINOPython_SOURCE_DIR}/src/compatibility/openvino/__init__.py" + "${OpenVINOPython_SOURCE_DIR}/../../../tools/ovc/openvino/__init__.py" + "${OpenVINOPython_SOURCE_DIR}/../../../tools/benchmark_tool/openvino/__init__.py") ov_check_init_files_alignment("${INIT_FILES_RUNTIME}") @@ -193,7 +203,7 @@ endif() # this option should not be a part of OpenVINODeveloperPackage # since wheels can be built only together with main OV build -ov_dependent_option(ENABLE_WHEEL "Build wheel packages for PyPI" ${ENABLE_WHEEL_DEFAULT} "ENABLE_PYTHON" OFF) +ov_dependent_option(ENABLE_WHEEL "Build wheel packages for PyPI" ${ENABLE_WHEEL_DEFAULT} "ENABLE_PYTHON;NOT OpenVINODeveloperPackage_RELOCATABLE" OFF) if(NOT ENABLE_PYTHON) if(CMAKE_SOURCE_DIR STREQUAL OpenVINOPython_SOURCE_DIR) @@ -309,15 +319,15 @@ macro(ov_define_setup_py_dependencies) "${CMAKE_CURRENT_SOURCE_DIR}/wheel/setup.py" "${OpenVINOPython_SOURCE_DIR}/requirements.txt" "${OpenVINOPython_SOURCE_DIR}/wheel/readme.txt" - "${OpenVINO_SOURCE_DIR}/LICENSE" - "${OpenVINO_SOURCE_DIR}/licensing/onednn_third-party-programs.txt" - "${OpenVINO_SOURCE_DIR}/licensing/runtime-third-party-programs.txt" - 
"${OpenVINO_SOURCE_DIR}/licensing/tbb_third-party-programs.txt" - "${OpenVINO_SOURCE_DIR}/docs/install_guides/pypi-openvino-rt.md") + "${OpenVINOPython_SOURCE_DIR}/../../../LICENSE" + "${OpenVINOPython_SOURCE_DIR}/../../../licensing/onednn_third-party-programs.txt" + "${OpenVINOPython_SOURCE_DIR}/../../../licensing/runtime-third-party-programs.txt" + "${OpenVINOPython_SOURCE_DIR}/../../../licensing/tbb_third-party-programs.txt" + "${OpenVINOPython_SOURCE_DIR}/../../../docs/install_guides/pypi-openvino-rt.md") if(wheel_pre_release) list(APPEND ov_setup_py_deps - "${OpenVINO_SOURCE_DIR}/docs/install_guides/pre-release-note.md") + "${OpenVINOPython_SOURCE_DIR}/../../../docs/install_guides/pre-release-note.md") endif() endmacro() @@ -394,8 +404,9 @@ if(ENABLE_TESTS) endif() if(OpenVINODeveloperPackage_FOUND) + # TODO: understand whether it's required # provides a callback function to describe each component in repo - include("${OpenVINO_SOURCE_DIR}/cmake/packaging/packaging.cmake") + include("${OpenVINOPython_SOURCE_DIR}/../../../cmake/packaging/packaging.cmake") ov_cpack(${OV_CPACK_COMPONENTS_ALL}) endif() diff --git a/src/bindings/python/src/compatibility/pyngraph/CMakeLists.txt b/src/bindings/python/src/compatibility/pyngraph/CMakeLists.txt index ba20fd76055cac..8b68d5dde8a5e9 100644 --- a/src/bindings/python/src/compatibility/pyngraph/CMakeLists.txt +++ b/src/bindings/python/src/compatibility/pyngraph/CMakeLists.txt @@ -7,27 +7,25 @@ cmake_minimum_required (VERSION 3.13) project (pyngraph) if(NOT DEFINED OpenVINO_SOURCE_DIR) + find_package(OpenVINO REQUIRED) find_package(OpenVINODeveloperPackage QUIET PATHS "${InferenceEngineDeveloperPackage_DIR}") - find_package(OpenVINO REQUIRED) endif() # Python3_VERSION_MAJOR and Python3_VERSION_MINOR are defined in FindPython3 set(pyversion python${Python3_VERSION_MAJOR}.${Python3_VERSION_MINOR}) -if(OpenVINO_SOURCE_DIR) - if(OV_GENERATOR_MULTI_CONFIG) - set(PYTHON_BRIDGE_OUTPUT_DIRECTORY ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/$/python/) - else() - set(PYTHON_BRIDGE_OUTPUT_DIRECTORY ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/python/) - endif() - - set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${PYTHON_BRIDGE_OUTPUT_DIRECTORY}) - set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${PYTHON_BRIDGE_OUTPUT_DIRECTORY}) - set(CMAKE_COMPILE_PDB_OUTPUT_DIRECTORY ${PYTHON_BRIDGE_OUTPUT_DIRECTORY}) - set(CMAKE_PDB_OUTPUT_DIRECTORY ${PYTHON_BRIDGE_OUTPUT_DIRECTORY}) +if(OV_GENERATOR_MULTI_CONFIG) + set(PYTHON_BRIDGE_OUTPUT_DIRECTORY ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/$/python/) +else() + set(PYTHON_BRIDGE_OUTPUT_DIRECTORY ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/python/) endif() +set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${PYTHON_BRIDGE_OUTPUT_DIRECTORY}) +set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${PYTHON_BRIDGE_OUTPUT_DIRECTORY}) +set(CMAKE_COMPILE_PDB_OUTPUT_DIRECTORY ${PYTHON_BRIDGE_OUTPUT_DIRECTORY}) +set(CMAKE_PDB_OUTPUT_DIRECTORY ${PYTHON_BRIDGE_OUTPUT_DIRECTORY}) + # compile options if (CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") @@ -54,43 +52,39 @@ file(GLOB_RECURSE SOURCES *.cpp) pybind11_add_module(_${PROJECT_NAME} MODULE NO_EXTRAS ${SOURCES}) -target_include_directories(_${PROJECT_NAME} PRIVATE "../" "${OpenVINO_SOURCE_DIR}/src/common/transformations/include") +target_include_directories(_${PROJECT_NAME} PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/../") -target_link_libraries(_${PROJECT_NAME} PRIVATE openvino::runtime) +target_link_libraries(_${PROJECT_NAME} PRIVATE openvino::runtime openvino::core::dev) set_target_properties(_${PROJECT_NAME} PROPERTIES INTERPROCEDURAL_OPTIMIZATION_RELEASE ${ENABLE_LTO}) # perform copy 
-if(OpenVINO_SOURCE_DIR) - add_custom_command(TARGET _${PROJECT_NAME} - POST_BUILD - COMMAND ${CMAKE_COMMAND} -E copy_directory ${CMAKE_CURRENT_SOURCE_DIR}/../ngraph ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/ngraph - ) -endif() +add_custom_command(TARGET _${PROJECT_NAME} + POST_BUILD + COMMAND ${CMAKE_COMMAND} -E copy_directory ${CMAKE_CURRENT_SOURCE_DIR}/../ngraph ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/ngraph +) ov_set_apple_rpath(_${PROJECT_NAME} ${OV_CPACK_PYTHONDIR} ${OV_CPACK_RUNTIMEDIR}) # Install -if(OpenVINO_SOURCE_DIR OR OpenVINODeveloperPackage_FOUND) - ov_python_minimal_api(_${PROJECT_NAME}) - ov_add_clang_format_target(_${PROJECT_NAME}_clang FOR_TARGETS _${PROJECT_NAME}) +ov_python_minimal_api(_${PROJECT_NAME}) +ov_add_clang_format_target(_${PROJECT_NAME}_clang FOR_TARGETS _${PROJECT_NAME}) - ov_cpack_add_component(${OV_CPACK_COMP_PYTHON_OPENVINO}_${pyversion} HIDDEN) +ov_cpack_add_component(${OV_CPACK_COMP_PYTHON_OPENVINO}_${pyversion} HIDDEN) - install(TARGETS _${PROJECT_NAME} - DESTINATION ${OV_CPACK_PYTHONDIR} - COMPONENT ${OV_CPACK_COMP_PYTHON_OPENVINO}_${pyversion} - ${OV_CPACK_COMP_PYTHON_OPENVINO_EXCLUDE_ALL}) +install(TARGETS _${PROJECT_NAME} + DESTINATION ${OV_CPACK_PYTHONDIR} + COMPONENT ${OV_CPACK_COMP_PYTHON_OPENVINO}_${pyversion} + ${OV_CPACK_COMP_PYTHON_OPENVINO_EXCLUDE_ALL}) - install(DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/../ngraph - DESTINATION ${OV_CPACK_PYTHONDIR} - COMPONENT ${OV_CPACK_COMP_PYTHON_OPENVINO}_${pyversion} - ${OV_CPACK_COMP_PYTHON_OPENVINO_EXCLUDE_ALL} - USE_SOURCE_PERMISSIONS) +install(DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/../ngraph + DESTINATION ${OV_CPACK_PYTHONDIR} + COMPONENT ${OV_CPACK_COMP_PYTHON_OPENVINO}_${pyversion} + ${OV_CPACK_COMP_PYTHON_OPENVINO_EXCLUDE_ALL} + USE_SOURCE_PERMISSIONS) - install(DIRECTORY ${OpenVINOPython_SOURCE_DIR}/tests_compatibility - DESTINATION tests/${PROJECT_NAME} - COMPONENT tests - EXCLUDE_FROM_ALL) -endif() +install(DIRECTORY ${OpenVINOPython_SOURCE_DIR}/tests_compatibility + DESTINATION tests/${PROJECT_NAME} + COMPONENT tests + EXCLUDE_FROM_ALL) diff --git a/src/bindings/python/src/pyopenvino/CMakeLists.txt b/src/bindings/python/src/pyopenvino/CMakeLists.txt index 99ae9983ee82c5..5566c961d1a57b 100644 --- a/src/bindings/python/src/pyopenvino/CMakeLists.txt +++ b/src/bindings/python/src/pyopenvino/CMakeLists.txt @@ -10,19 +10,17 @@ endif() # Python3_VERSION_MAJOR and Python3_VERSION_MINOR are defined by FindPython3 set(pyversion python${Python3_VERSION_MAJOR}.${Python3_VERSION_MINOR}) -if(OpenVINO_SOURCE_DIR) - if(OV_GENERATOR_MULTI_CONFIG) - set(PYTHON_BRIDGE_OUTPUT_DIRECTORY ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/$/python/openvino) - else() - set(PYTHON_BRIDGE_OUTPUT_DIRECTORY ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/python/openvino) - endif() - - set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${PYTHON_BRIDGE_OUTPUT_DIRECTORY}) - set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${PYTHON_BRIDGE_OUTPUT_DIRECTORY}) - set(CMAKE_COMPILE_PDB_OUTPUT_DIRECTORY ${PYTHON_BRIDGE_OUTPUT_DIRECTORY}) - set(CMAKE_PDB_OUTPUT_DIRECTORY ${PYTHON_BRIDGE_OUTPUT_DIRECTORY}) +if(OV_GENERATOR_MULTI_CONFIG) + set(PYTHON_BRIDGE_OUTPUT_DIRECTORY ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/$/python/openvino) +else() + set(PYTHON_BRIDGE_OUTPUT_DIRECTORY ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/python/openvino) endif() +set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${PYTHON_BRIDGE_OUTPUT_DIRECTORY}) +set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${PYTHON_BRIDGE_OUTPUT_DIRECTORY}) +set(CMAKE_COMPILE_PDB_OUTPUT_DIRECTORY ${PYTHON_BRIDGE_OUTPUT_DIRECTORY}) +set(CMAKE_PDB_OUTPUT_DIRECTORY ${PYTHON_BRIDGE_OUTPUT_DIRECTORY}) + # 
compile options if(OV_COMPILER_IS_APPLECLANG) @@ -64,15 +62,8 @@ list(FILTER SOURCES EXCLUDE REGEX ".*(frontend/(onnx|tensorflow|paddle|pytorch)) pybind11_add_module(${PROJECT_NAME} MODULE NO_EXTRAS ${SOURCES}) -if(TARGET offline_transformations) - set(OFFLINE_TRANSFORMATIONS_LIB offline_transformations) -else() - set(OFFLINE_TRANSFORMATIONS_LIB openvino::offline_transformations) -endif() - target_include_directories(${PROJECT_NAME} PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/..") -target_link_libraries(${PROJECT_NAME} PRIVATE - openvino::core::dev openvino::runtime ${OFFLINE_TRANSFORMATIONS_LIB}) +target_link_libraries(${PROJECT_NAME} PRIVATE openvino::core::dev openvino::runtime openvino::offline_transformations) set_target_properties(${PROJECT_NAME} PROPERTIES INTERPROCEDURAL_OPTIMIZATION_RELEASE ${ENABLE_LTO} OUTPUT_NAME "_pyopenvino") @@ -96,58 +87,56 @@ if(OV_GENERATOR_MULTI_CONFIG) endif() # perform copy -if(OpenVINO_SOURCE_DIR) - add_custom_command(TARGET ${PROJECT_NAME} - POST_BUILD - COMMAND ${CMAKE_COMMAND} -E copy_directory ${OpenVINOPython_SOURCE_DIR}/src/openvino ${CMAKE_LIBRARY_OUTPUT_DIRECTORY} - COMMAND ${CMAKE_COMMAND} -E copy ${OpenVINOPython_SOURCE_DIR}/requirements.txt ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/../requirements.txt - ) -endif() - -if(OpenVINO_SOURCE_DIR OR OpenVINODeveloperPackage_FOUND) - ov_python_minimal_api(${PROJECT_NAME}) - ov_add_clang_format_target(${PROJECT_NAME}_clang FOR_TARGETS ${PROJECT_NAME}) - - ov_cpack_add_component(${OV_CPACK_COMP_PYTHON_OPENVINO}_${pyversion} - HIDDEN) - - install(DIRECTORY ${OpenVINOPython_SOURCE_DIR}/src/openvino - DESTINATION ${OV_CPACK_PYTHONDIR} - COMPONENT ${OV_CPACK_COMP_PYTHON_OPENVINO}_${pyversion} - ${OV_CPACK_COMP_PYTHON_OPENVINO_EXCLUDE_ALL} - USE_SOURCE_PERMISSIONS - PATTERN "test_utils" EXCLUDE - PATTERN "torchvision/requirements.txt" EXCLUDE) - - install(TARGETS ${PROJECT_NAME} - DESTINATION ${OV_CPACK_PYTHONDIR}/openvino - COMPONENT ${OV_CPACK_COMP_PYTHON_OPENVINO}_${pyversion} - ${OV_CPACK_COMP_PYTHON_OPENVINO_EXCLUDE_ALL}) - - ov_set_apple_rpath(${PROJECT_NAME} ${OV_CPACK_PYTHONDIR}/openvino - # path to OpenVINO C++ libraries - ${OV_CPACK_RUNTIMEDIR} - # pyopenvino also depends on TBB because of: - # pyopenvino => openvino::offline_transformations => TBB optimized openvino::reference - ${TBB_LIB_INSTALL_DIR}) - - ov_cpack_add_component(${OV_CPACK_COMP_OPENVINO_REQ_FILES} HIDDEN) - - install(FILES ${OpenVINOPython_SOURCE_DIR}/requirements.txt - DESTINATION ${OV_CPACK_PYTHONDIR} - COMPONENT ${OV_CPACK_COMP_OPENVINO_REQ_FILES} - ${OV_CPACK_COMP_OPENVINO_REQ_FILES_EXCLUDE_ALL}) - - install(FILES ${OpenVINOPython_SOURCE_DIR}/src/openvino/preprocess/torchvision/requirements.txt - DESTINATION ${OV_CPACK_PYTHONDIR}/openvino/preprocess/torchvision - COMPONENT ${OV_CPACK_COMP_OPENVINO_REQ_FILES} - ${OV_CPACK_COMP_OPENVINO_REQ_FILES_EXCLUDE_ALL}) - - install(DIRECTORY ${OpenVINOPython_SOURCE_DIR}/tests - DESTINATION tests/${PROJECT_NAME} - COMPONENT tests - EXCLUDE_FROM_ALL) -endif() +add_custom_command(TARGET ${PROJECT_NAME} + POST_BUILD + COMMAND ${CMAKE_COMMAND} -E copy_directory ${OpenVINOPython_SOURCE_DIR}/src/openvino ${CMAKE_LIBRARY_OUTPUT_DIRECTORY} + COMMAND ${CMAKE_COMMAND} -E copy ${OpenVINOPython_SOURCE_DIR}/requirements.txt ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/../requirements.txt + ) + +ov_python_minimal_api(${PROJECT_NAME}) +ov_add_clang_format_target(${PROJECT_NAME}_clang FOR_TARGETS ${PROJECT_NAME}) + +# install steps + +ov_cpack_add_component(${OV_CPACK_COMP_PYTHON_OPENVINO}_${pyversion} + HIDDEN) + 
+install(DIRECTORY ${OpenVINOPython_SOURCE_DIR}/src/openvino + DESTINATION ${OV_CPACK_PYTHONDIR} + COMPONENT ${OV_CPACK_COMP_PYTHON_OPENVINO}_${pyversion} + ${OV_CPACK_COMP_PYTHON_OPENVINO_EXCLUDE_ALL} + USE_SOURCE_PERMISSIONS + PATTERN "test_utils" EXCLUDE + PATTERN "torchvision/requirements.txt" EXCLUDE) + +install(TARGETS ${PROJECT_NAME} + DESTINATION ${OV_CPACK_PYTHONDIR}/openvino + COMPONENT ${OV_CPACK_COMP_PYTHON_OPENVINO}_${pyversion} + ${OV_CPACK_COMP_PYTHON_OPENVINO_EXCLUDE_ALL}) + +ov_set_apple_rpath(${PROJECT_NAME} ${OV_CPACK_PYTHONDIR}/openvino + # path to OpenVINO C++ libraries + ${OV_CPACK_RUNTIMEDIR} + # pyopenvino also depends on TBB because of: + # pyopenvino => openvino::offline_transformations => TBB optimized openvino::reference + ${TBB_LIB_INSTALL_DIR}) + +ov_cpack_add_component(${OV_CPACK_COMP_OPENVINO_REQ_FILES} HIDDEN) + +install(FILES ${OpenVINOPython_SOURCE_DIR}/requirements.txt + DESTINATION ${OV_CPACK_PYTHONDIR} + COMPONENT ${OV_CPACK_COMP_OPENVINO_REQ_FILES} + ${OV_CPACK_COMP_OPENVINO_REQ_FILES_EXCLUDE_ALL}) + +install(FILES ${OpenVINOPython_SOURCE_DIR}/src/openvino/preprocess/torchvision/requirements.txt + DESTINATION ${OV_CPACK_PYTHONDIR}/openvino/preprocess/torchvision + COMPONENT ${OV_CPACK_COMP_OPENVINO_REQ_FILES} + ${OV_CPACK_COMP_OPENVINO_REQ_FILES_EXCLUDE_ALL}) + +install(DIRECTORY ${OpenVINOPython_SOURCE_DIR}/tests + DESTINATION tests/${PROJECT_NAME} + COMPONENT tests + EXCLUDE_FROM_ALL) if(TARGET ie_wheel) add_dependencies(ie_wheel ${PROJECT_NAME}) diff --git a/src/cmake/openvino.cmake b/src/cmake/openvino.cmake index ba3786cd697a75..7fb6e2fd77bd6b 100644 --- a/src/cmake/openvino.cmake +++ b/src/cmake/openvino.cmake @@ -107,27 +107,27 @@ install(TARGETS ${TARGET_NAME} EXPORT OpenVINOTargets # Add openvino::runtine::dev target # -add_library(${TARGET_NAME}_dev INTERFACE) -add_library(openvino::runtime::dev ALIAS ${TARGET_NAME}_dev) +add_library(openvino_runtime_dev INTERFACE) +add_library(openvino::runtime::dev ALIAS openvino_runtime_dev) -target_include_directories(${TARGET_NAME}_dev INTERFACE +target_include_directories(openvino_runtime_dev INTERFACE $ $ - $) + $>) -target_compile_definitions(${TARGET_NAME}_dev INTERFACE +target_compile_definitions(openvino_runtime_dev INTERFACE $) -target_link_libraries(${TARGET_NAME}_dev INTERFACE ${TARGET_NAME} openvino::core::dev) +target_link_libraries(openvino_runtime_dev INTERFACE ${TARGET_NAME} openvino::core::dev) -# TODO: remove once NPU will use explicltly `ov_set_threading_interface_for` -ov_set_threading_interface_for(${TARGET_NAME}_dev) -set_target_properties(${TARGET_NAME}_dev PROPERTIES EXPORT_NAME runtime::dev) +ov_set_threading_interface_for(openvino_runtime_dev) +set_target_properties(openvino_runtime_dev PROPERTIES EXPORT_NAME runtime::dev) -openvino_developer_export_targets(COMPONENT core TARGETS openvino::runtime::dev) +ov_developer_package_export_targets(TARGET openvino::runtime::dev + INSTALL_INCLUDE_DIRECTORIES "${OpenVINO_SOURCE_DIR}/src/inference/dev_api/") # Install static libraries for case BUILD_SHARED_LIBS=OFF -ov_install_static_lib(${TARGET_NAME}_dev ${OV_CPACK_COMP_CORE}) +ov_install_static_lib(openvino_runtime_dev ${OV_CPACK_COMP_CORE}) # # Install OpenVINO runtime diff --git a/src/common/conditional_compilation/CMakeLists.txt b/src/common/conditional_compilation/CMakeLists.txt index 876558cea5e474..8f5cd90fe22d21 100644 --- a/src/common/conditional_compilation/CMakeLists.txt +++ b/src/common/conditional_compilation/CMakeLists.txt @@ -2,11 +2,12 @@ # SPDX-License-Identifier: 
Apache-2.0 # -set(TARGET_NAME conditional_compilation) +set(TARGET_NAME openvino_conditional_compilation) add_library(${TARGET_NAME} INTERFACE) add_library(openvino::conditional_compilation ALIAS ${TARGET_NAME}) +set_target_properties(${TARGET_NAME} PROPERTIES EXPORT_NAME conditional_compilation) target_link_libraries(${TARGET_NAME} INTERFACE openvino::itt) @@ -23,9 +24,7 @@ elseif(SELECTIVE_BUILD STREQUAL "ON") find_host_package (Python3 REQUIRED COMPONENTS Interpreter) file(TO_CMAKE_PATH ${SELECTIVE_BUILD_STAT} CMAKE_SELECTIVE_BUILD_STAT) - file(GLOB STAT_FILES ${CMAKE_SELECTIVE_BUILD_STAT}) - if(NOT STAT_FILES) message(FATAL_ERROR "SELECTIVE_BUILD_STAT (${SELECTIVE_BUILD_STAT}) path doesn't contain valid csv files!") endif() @@ -56,12 +55,16 @@ elseif(SELECTIVE_BUILD STREQUAL "ON") ov_force_include(${TARGET_NAME} INTERFACE ${GENERATED_HEADER}) endif() -ov_install_static_lib(${TARGET_NAME} ${OV_CPACK_COMP_CORE}) - file(GLOB_RECURSE hdrs ${CMAKE_CURRENT_SOURCE_DIR}/include/*.h ${CMAKE_CURRENT_SOURCE_DIR}/include/*.hpp) ov_add_clang_format_target(${TARGET_NAME}_clang FOR_SOURCES ${hdrs}) -openvino_developer_export_targets(COMPONENT openvino_common TARGETS openvino::conditional_compilation) if(ENABLE_TESTS) add_subdirectory(tests) endif() + +# install & export + +ov_install_static_lib(${TARGET_NAME} ${OV_CPACK_COMP_CORE}) + +ov_developer_package_export_targets(TARGET openvino::conditional_compilation + INSTALL_INCLUDE_DIRECTORIES "${CMAKE_CURRENT_SOURCE_DIR}/include/") diff --git a/src/common/itt/CMakeLists.txt b/src/common/itt/CMakeLists.txt index 512574fd89bc06..4541fa112755a7 100644 --- a/src/common/itt/CMakeLists.txt +++ b/src/common/itt/CMakeLists.txt @@ -36,7 +36,11 @@ endif() target_include_directories(${TARGET_NAME} PUBLIC $) +ov_add_clang_format_target(${TARGET_NAME}_clang FOR_TARGETS ${TARGET_NAME}) + +# install & export + ov_install_static_lib(${TARGET_NAME} ${OV_CPACK_COMP_CORE}) -ov_add_clang_format_target(${TARGET_NAME}_clang FOR_TARGETS ${TARGET_NAME}) -openvino_developer_export_targets(COMPONENT openvino_common TARGETS openvino::itt) +ov_developer_package_export_targets(TARGET openvino::itt + INSTALL_INCLUDE_DIRECTORIES "${CMAKE_CURRENT_SOURCE_DIR}/include/") diff --git a/src/common/offline_transformations/CMakeLists.txt b/src/common/offline_transformations/CMakeLists.txt index 6712f2f28586e3..69335b19be4e7a 100644 --- a/src/common/offline_transformations/CMakeLists.txt +++ b/src/common/offline_transformations/CMakeLists.txt @@ -2,13 +2,13 @@ # SPDX-License-Identifier: Apache-2.0 # -set(TARGET_NAME "offline_transformations") - -file(GLOB_RECURSE LIBRARY_SRC ${CMAKE_CURRENT_SOURCE_DIR}/src/*.cpp) -file(GLOB_RECURSE PUBLIC_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/include/*.hpp) +set(TARGET_NAME "openvino_offline_transformations") set(PUBLIC_HEADERS_DIR "${CMAKE_CURRENT_SOURCE_DIR}/include") +file(GLOB_RECURSE LIBRARY_SRC ${CMAKE_CURRENT_SOURCE_DIR}/src/*.cpp) +file(GLOB_RECURSE PUBLIC_HEADERS ${PUBLIC_HEADERS_DIR}/*.hpp) + # Create named folders for the sources within the .vcproj # Empty name lists them directly under the .vcproj @@ -19,15 +19,20 @@ source_group("include" FILES ${PUBLIC_HEADERS}) add_library(${TARGET_NAME} STATIC ${LIBRARY_SRC} ${PUBLIC_HEADERS}) +add_library(openvino::offline_transformations ALIAS ${TARGET_NAME}) +set_target_properties(${TARGET_NAME} PROPERTIES EXPORT_NAME offline_transformations) + target_link_libraries(${TARGET_NAME} PRIVATE openvino::core::dev openvino::reference openvino::runtime) -target_include_directories(${TARGET_NAME} PUBLIC 
${PUBLIC_HEADERS_DIR} - PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/src") +target_include_directories(${TARGET_NAME} PUBLIC $ + PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/src" + $) add_cpplint_target(${TARGET_NAME}_cpplint FOR_TARGETS ${TARGET_NAME}) ov_add_clang_format_target(${TARGET_NAME}_clang FOR_TARGETS ${TARGET_NAME}) -# developer package +# install & export -openvino_developer_export_targets(COMPONENT core TARGETS ${TARGET_NAME}) +ov_developer_package_export_targets(TARGET ${TARGET_NAME} + INSTALL_INCLUDE_DIRECTORIES "${CMAKE_CURRENT_SOURCE_DIR}/include/") diff --git a/src/common/preprocessing/src/CMakeLists.txt b/src/common/preprocessing/src/CMakeLists.txt index ccab33a652c633..9e3fd2d3789a02 100644 --- a/src/common/preprocessing/src/CMakeLists.txt +++ b/src/common/preprocessing/src/CMakeLists.txt @@ -211,7 +211,7 @@ endif() # developer package -openvino_developer_export_targets(COMPONENT core TARGETS ${TARGET_NAME}) +ov_developer_package_export_targets(TARGET ${TARGET_NAME}) # install diff --git a/src/common/snippets/CMakeLists.txt b/src/common/snippets/CMakeLists.txt index fdc1c83889423d..b3d2db77b77241 100644 --- a/src/common/snippets/CMakeLists.txt +++ b/src/common/snippets/CMakeLists.txt @@ -46,8 +46,9 @@ endif() set_target_properties(${TARGET_NAME} PROPERTIES INTERPROCEDURAL_OPTIMIZATION_RELEASE ${ENABLE_LTO}) -# install +# install & export ov_install_static_lib(${TARGET_NAME} ${OV_CPACK_COMP_CORE}) -openvino_developer_export_targets(COMPONENT ${OV_CPACK_COMP_CORE} TARGETS ${TARGET_NAME}) +ov_developer_package_export_targets(TARGET ${TARGET_NAME} + INSTALL_INCLUDE_DIRECTORIES "${PUBLIC_HEADERS_DIR}/") diff --git a/src/common/transformations/CMakeLists.txt b/src/common/transformations/CMakeLists.txt index 164daec54c2f18..67907b0c265d5c 100644 --- a/src/common/transformations/CMakeLists.txt +++ b/src/common/transformations/CMakeLists.txt @@ -4,11 +4,11 @@ set(TARGET_NAME "inference_engine_transformations") -file(GLOB_RECURSE LIBRARY_SRC ${CMAKE_CURRENT_SOURCE_DIR}/src/*.cpp) -file(GLOB_RECURSE PUBLIC_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/include/*.hpp) - set(PUBLIC_HEADERS_DIR "${CMAKE_CURRENT_SOURCE_DIR}/include") +file(GLOB_RECURSE LIBRARY_SRC ${CMAKE_CURRENT_SOURCE_DIR}/src/*.cpp) +file(GLOB_RECURSE PUBLIC_HEADERS ${PUBLIC_HEADERS_DIR}/*.hpp) + # Create named folders for the sources within the .vcproj # Empty name lists them directly under the .vcproj @@ -27,8 +27,8 @@ ov_build_target_faster(${TARGET_NAME}_obj target_link_libraries(${TARGET_NAME}_obj PRIVATE openvino::reference openvino::itt openvino::builders openvino::core::dev openvino::shape_inference) -target_include_directories(${TARGET_NAME}_obj PRIVATE $ - "${CMAKE_CURRENT_SOURCE_DIR}/src") +target_include_directories(${TARGET_NAME}_obj PRIVATE "${PUBLIC_HEADERS_DIR}" + "${CMAKE_CURRENT_SOURCE_DIR}/src") ov_add_clang_format_target(${TARGET_NAME}_clang FOR_TARGETS ${TARGET_NAME}_obj) @@ -43,7 +43,7 @@ endif() add_library(${TARGET_NAME} INTERFACE) target_include_directories(${TARGET_NAME} INTERFACE - $ + $ $>) target_link_libraries(${TARGET_NAME} INTERFACE openvino::runtime) diff --git a/src/common/transformations/tests/CMakeLists.txt b/src/common/transformations/tests/CMakeLists.txt index 84a4fb6e900edf..7091f5162fea8d 100644 --- a/src/common/transformations/tests/CMakeLists.txt +++ b/src/common/transformations/tests/CMakeLists.txt @@ -15,7 +15,7 @@ ov_add_test_target( LINK_LIBRARIES gmock func_test_utils - offline_transformations + openvino::offline_transformations sharedTestClasses ov_lpt_models ADD_CLANG_FORMAT diff --git 
a/src/common/util/CMakeLists.txt b/src/common/util/CMakeLists.txt index faaab5c26d22bc..49f9d1e19cf163 100644 --- a/src/common/util/CMakeLists.txt +++ b/src/common/util/CMakeLists.txt @@ -4,11 +4,11 @@ set(TARGET_NAME openvino_util) -file(GLOB_RECURSE LIBRARY_SRC ${CMAKE_CURRENT_SOURCE_DIR}/src/*.cpp) -file(GLOB_RECURSE PUBLIC_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/include/*.hpp) - set(UTIL_INCLUDE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/include/) +file(GLOB_RECURSE LIBRARY_SRC ${CMAKE_CURRENT_SOURCE_DIR}/src/*.cpp) +file(GLOB_RECURSE PUBLIC_HEADERS ${UTIL_INCLUDE_DIR}/*.hpp) + if (WIN32) # Remove linux specific files file(GLOB_RECURSE LIN_FILES ${CMAKE_CURRENT_SOURCE_DIR}/src/os/lin/*.cpp @@ -41,13 +41,15 @@ target_link_libraries(${TARGET_NAME} PRIVATE ${CMAKE_DL_LIBS}) if (WIN32) target_link_libraries(${TARGET_NAME} PRIVATE Shlwapi) endif() -target_include_directories(${TARGET_NAME} PUBLIC - $) - -ov_install_static_lib(${TARGET_NAME} ${OV_CPACK_COMP_CORE}) +target_include_directories(${TARGET_NAME} PUBLIC $) ov_add_clang_format_target(${TARGET_NAME}_clang FOR_TARGETS ${TARGET_NAME}) ov_ncc_naming_style(FOR_TARGET ${TARGET_NAME} SOURCE_DIRECTORIES ${UTIL_INCLUDE_DIR}) -openvino_developer_export_targets(COMPONENT core TARGETS ${TARGET_NAME}) +# install & export + +ov_install_static_lib(${TARGET_NAME} ${OV_CPACK_COMP_CORE}) + +ov_developer_package_export_targets(TARGET ${TARGET_NAME} + INSTALL_INCLUDE_DIRECTORIES "${UTIL_INCLUDE_DIR}/") diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt index 869b9a02c49272..d389c1862703bf 100644 --- a/src/core/CMakeLists.txt +++ b/src/core/CMakeLists.txt @@ -36,25 +36,29 @@ source_group("src" FILES ${LIBRARY_SRC}) source_group("include" FILES ${PUBLIC_HEADERS}) # -# Create ov_core_dev library +# Create openvino_core_dev library # -add_library(ov_core_dev INTERFACE) -add_library(openvino::core::dev ALIAS ov_core_dev) +add_library(openvino_core_dev INTERFACE) +add_library(openvino::core::dev ALIAS openvino_core_dev) -target_include_directories(ov_core_dev INTERFACE +target_include_directories(openvino_core_dev INTERFACE $ $ $ $) -target_link_libraries(ov_core_dev INTERFACE openvino::itt openvino::util) +target_link_libraries(openvino_core_dev INTERFACE openvino::itt openvino::util) -set_target_properties(ov_core_dev PROPERTIES EXPORT_NAME core::dev) -openvino_developer_export_targets(COMPONENT core TARGETS openvino::core::dev) +set_target_properties(openvino_core_dev PROPERTIES EXPORT_NAME core::dev) +ov_developer_package_export_targets(TARGET openvino::core::dev + INSTALL_INCLUDE_DIRECTORIES + "${OV_CORE_DEV_API_PATH}/" + "${OpenVINO_SOURCE_DIR}/src/common/transformations/include/" + "${OpenVINO_SOURCE_DIR}/src/common/low_precision_transformations/include/") # Install interface libraries for case BUILD_SHARED_LIBS=OFF -ov_install_static_lib(ov_core_dev ${OV_CPACK_COMP_CORE}) +ov_install_static_lib(openvino_core_dev ${OV_CPACK_COMP_CORE}) # Fix error LNK1248: image size (...) exceeds maximum allowable size (FFFFFFFF) # the symbolic debugging information will be stored in a separate .pdb file. 
diff --git a/src/core/builder/CMakeLists.txt b/src/core/builder/CMakeLists.txt index 64ce45a4870921..ee87ece0365d60 100644 --- a/src/core/builder/CMakeLists.txt +++ b/src/core/builder/CMakeLists.txt @@ -4,11 +4,11 @@ set(TARGET_NAME "openvino_builders") -file(GLOB_RECURSE LIBRARY_SRC ${CMAKE_CURRENT_SOURCE_DIR}/src/*.cpp) -file(GLOB_RECURSE PUBLIC_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/include/*.hpp) - set(BUILDER_INCLUDE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/include/) +file(GLOB_RECURSE LIBRARY_SRC ${CMAKE_CURRENT_SOURCE_DIR}/src/*.cpp) +file(GLOB_RECURSE PUBLIC_HEADERS ${BUILDER_INCLUDE_DIR}/*.hpp) + # Create named folders for the sources within the .vcproj # Empty name lists them directly under the .vcproj @@ -35,8 +35,9 @@ endif() ov_add_clang_format_target(${TARGET_NAME}_clang FOR_TARGETS ${TARGET_NAME}) -ov_install_static_lib(openvino_builders ${OV_CPACK_COMP_CORE}) +# install & export -# developer package +ov_install_static_lib(openvino_builders ${OV_CPACK_COMP_CORE}) -openvino_developer_export_targets(COMPONENT core TARGETS openvino::builders) +ov_developer_package_export_targets(TARGET openvino::builders + INSTALL_INCLUDE_DIRECTORIES "${BUILDER_INCLUDE_DIR}/") diff --git a/src/core/reference/CMakeLists.txt b/src/core/reference/CMakeLists.txt index 4154a1455ffef0..e868c07c391e96 100644 --- a/src/core/reference/CMakeLists.txt +++ b/src/core/reference/CMakeLists.txt @@ -4,11 +4,11 @@ set(TARGET_NAME "openvino_reference") -file(GLOB_RECURSE LIBRARY_SRC ${CMAKE_CURRENT_SOURCE_DIR}/src/*.cpp) -file(GLOB_RECURSE PUBLIC_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/include/*.hpp) - set(REF_IMPL_INCLUDE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/include") +file(GLOB_RECURSE LIBRARY_SRC ${CMAKE_CURRENT_SOURCE_DIR}/src/*.cpp) +file(GLOB_RECURSE PUBLIC_HEADERS ${REF_IMPL_INCLUDE_DIR}/*.hpp) + # Create named folders for the sources within the .vcproj # Empty name lists them directly under the .vcproj @@ -46,7 +46,9 @@ target_link_libraries(${TARGET_NAME} PRIVATE Threads::Threads openvino::core::de ov_add_clang_format_target(${TARGET_NAME}_clang FOR_TARGETS ${TARGET_NAME}) +# install & export + ov_install_static_lib(${TARGET_NAME} ${OV_CPACK_COMP_CORE}) -# developer package -openvino_developer_export_targets(COMPONENT core TARGETS openvino::reference) +ov_developer_package_export_targets(TARGET openvino::reference + INSTALL_INCLUDE_DIRECTORIES "${REF_IMPL_INCLUDE_DIR}/") diff --git a/src/core/shape_inference/CMakeLists.txt b/src/core/shape_inference/CMakeLists.txt index b04f0cf8573b85..db862ac520d0b5 100644 --- a/src/core/shape_inference/CMakeLists.txt +++ b/src/core/shape_inference/CMakeLists.txt @@ -4,11 +4,11 @@ set(TARGET_NAME "openvino_shape_inference") -file(GLOB_RECURSE LIBRARY_SRC ${CMAKE_CURRENT_SOURCE_DIR}/src/*.cpp) -file(GLOB_RECURSE PUBLIC_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/include/*.hpp) - set(SHAPE_INFER_INCLUDE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/include") +file(GLOB_RECURSE LIBRARY_SRC ${CMAKE_CURRENT_SOURCE_DIR}/src/*.cpp) +file(GLOB_RECURSE PUBLIC_HEADERS ${SHAPE_INFER_INCLUDE_DIR}/*.hpp) + # Create named folders for the sources within the .vcproj # Empty name lists them directly under the .vcproj @@ -24,7 +24,7 @@ set_target_properties(${TARGET_NAME} PROPERTIES EXPORT_NAME shape_inference) target_include_directories(${TARGET_NAME} PUBLIC $ $ - $>) + $) ov_add_clang_format_target(${TARGET_NAME}_clang FOR_TARGETS ${TARGET_NAME}) @@ -32,7 +32,9 @@ if(NOT BUILD_SHARED_LIBS) target_compile_definitions(${TARGET_NAME} PUBLIC OPENVINO_STATIC_LIBRARY) endif() +# developer package + 
ov_install_static_lib(${TARGET_NAME} ${OV_CPACK_COMP_CORE}) -# developer package -openvino_developer_export_targets(COMPONENT core TARGETS ${TARGET_NAME}) +ov_developer_package_export_targets(TARGET ${TARGET_NAME} + INSTALL_INCLUDE_DIRECTORIES "${SHAPE_INFER_INCLUDE_DIR}/") diff --git a/src/frontends/tests/frontend/shared/CMakeLists.txt b/src/frontends/tests/frontend/shared/CMakeLists.txt index f5d2809205db0e..f413e359afb738 100644 --- a/src/frontends/tests/frontend/shared/CMakeLists.txt +++ b/src/frontends/tests/frontend/shared/CMakeLists.txt @@ -17,8 +17,10 @@ target_include_directories(${TARGET_NAME} PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/../ target_link_libraries(${TARGET_NAME} PUBLIC - offline_transformations - common_test_utils func_test_utils openvino::util + openvino::offline_transformations + common_test_utils + func_test_utils + openvino::util openvino::runtime PRIVATE cnpy) diff --git a/src/inference/CMakeLists.txt b/src/inference/CMakeLists.txt index 3e61c370d20482..11ad85b3740d6a 100644 --- a/src/inference/CMakeLists.txt +++ b/src/inference/CMakeLists.txt @@ -199,8 +199,8 @@ if(WIN32) set_target_properties(${TARGET_NAME}_s PROPERTIES COMPILE_PDB_NAME ${TARGET_NAME}_s) endif() -target_link_libraries(${TARGET_NAME}_s PRIVATE openvino::itt ${CMAKE_DL_LIBS} ngraph - frontend_common::static inference_engine_transformations openvino::pugixml) +target_link_libraries(${TARGET_NAME}_s PRIVATE openvino::itt ${CMAKE_DL_LIBS} + openvino::runtime::dev openvino::pugixml) target_compile_definitions(${TARGET_NAME}_s PUBLIC USE_STATIC_IE) @@ -215,7 +215,8 @@ set_target_properties(${TARGET_NAME}_obj # Export for developer package -openvino_developer_export_targets(COMPONENT core_legacy TARGETS ${TARGET_NAME}_plugin_api) +ov_developer_package_export_targets(TARGET ${TARGET_NAME}_s) +ov_developer_package_export_targets(TARGET ${TARGET_NAME}_plugin_api) # Install static libraries for case BUILD_SHARED_LIBS=OFF diff --git a/src/plugins/auto/tests/functional/CMakeLists.txt b/src/plugins/auto/tests/functional/CMakeLists.txt index cd239db8806120..b15afe68b96660 100644 --- a/src/plugins/auto/tests/functional/CMakeLists.txt +++ b/src/plugins/auto/tests/functional/CMakeLists.txt @@ -31,4 +31,4 @@ ov_add_test_target( ) target_compile_definitions(${TARGET_NAME} PRIVATE ${COMPILE_DEFINITIONS}) -set_ie_threading_interface_for(${TARGET_NAME}) \ No newline at end of file +ov_set_threading_interface_for(${TARGET_NAME}) \ No newline at end of file diff --git a/src/plugins/intel_gna/tests/deprecated/helpers/single_layer_common.hpp b/src/plugins/intel_gna/tests/deprecated/helpers/single_layer_common.hpp index 03cf9af92fbaa7..765846056930f8 100644 --- a/src/plugins/intel_gna/tests/deprecated/helpers/single_layer_common.hpp +++ b/src/plugins/intel_gna/tests/deprecated/helpers/single_layer_common.hpp @@ -20,10 +20,6 @@ # include # define REPLACE_WITH_STR(SRC, PATTERN, STR) SRC = std::regex_replace(SRC, std::regex(PATTERN), STR) # define FIND_STR(SRC, PATTERN) std::regex_search(SRC, std::regex(PATTERN)) -#elif defined USE_BOOST_RE -# include -# define REPLACE_WITH_STR(SRC, PATTERN, STR) SRC = boost::regex_replace(SRC, boost::regex(PATTERN), STR) -# define FIND_STR(SRC, PATTERN) boost::regex_search(SRC, boost::regex(PATTERN)) #else # error "Cannot implement regex" # define REPLACE_WITH_STR(SRC, PATTERN, STR) diff --git a/src/plugins/intel_gpu/tests/unit/CMakeLists.txt b/src/plugins/intel_gpu/tests/unit/CMakeLists.txt index 3dda088627b833..1230e57effbd74 100644 --- a/src/plugins/intel_gpu/tests/unit/CMakeLists.txt +++ 
b/src/plugins/intel_gpu/tests/unit/CMakeLists.txt @@ -23,14 +23,12 @@ file(GLOB_RECURSE SOURCES_MAIN ) if (NOT ENABLE_ONEDNN_FOR_GPU) - set(EXCLUDE_DIR "/onednn/") - foreach (SOURCE_FILE ${SOURCES_MAIN}) - string (FIND ${SOURCE_FILE} ${EXCLUDE_DIR} EXCLUDE_DIR_FOUND) - if (NOT ${EXCLUDE_DIR_FOUND} EQUAL -1) - message (Exclude : ${SOURCE_FILE}) + set(EXCLUDE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/onednn/") + foreach (SOURCE_FILE IN LISTS SOURCES_MAIN) + if (SOURCE_FILE MATCHES "${EXCLUDE_DIR}.*") list (REMOVE_ITEM SOURCES_MAIN ${SOURCE_FILE}) endif () - endforeach(SOURCE_FILE) + endforeach() endif() if (MSVC) diff --git a/src/plugins/template/CMakeLists.txt b/src/plugins/template/CMakeLists.txt index 4a3691186302aa..47cbb954d63dbd 100644 --- a/src/plugins/template/CMakeLists.txt +++ b/src/plugins/template/CMakeLists.txt @@ -7,8 +7,6 @@ cmake_minimum_required(VERSION 3.13) project(OpenVINOTemplatePlugin) -set(TEMPLATE_PLUGIN_SOURCE_DIR ${OpenVINOTemplatePlugin_SOURCE_DIR}) - find_package(OpenVINODeveloperPackage REQUIRED) ov_option(ENABLE_TEMPLATE_REGISTRATION "Enables registration of TEMPLATE plugin" OFF) diff --git a/src/plugins/template/backend/CMakeLists.txt b/src/plugins/template/backend/CMakeLists.txt index 2836d0c34b6c4f..0dc03242b554af 100644 --- a/src/plugins/template/backend/CMakeLists.txt +++ b/src/plugins/template/backend/CMakeLists.txt @@ -2,12 +2,11 @@ # SPDX-License-Identifier: Apache-2.0 # +set(TARGET_NAME openvino_interpreter_backend) + add_definitions(-DIN_OV_COMPONENT) -ov_deprecated_no_errors() -file(GLOB OPS_SRC - "${CMAKE_CURRENT_SOURCE_DIR}/ops/*.cpp" - ) +file(GLOB OPS_SRC "${CMAKE_CURRENT_SOURCE_DIR}/ops/*.cpp") set (SRC backend.cpp @@ -24,31 +23,38 @@ if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") ov_add_compiler_flags(/wd4018) endif() -add_library(interpreter_backend STATIC EXCLUDE_FROM_ALL ${OPS_SRC} ${SRC}) -add_library(openvino::interpreter_backend ALIAS interpreter_backend) +add_library(${TARGET_NAME} STATIC EXCLUDE_FROM_ALL ${OPS_SRC} ${SRC}) + +add_library(openvino::interpreter_backend ALIAS ${TARGET_NAME}) +set_target_properties(${TARGET_NAME} PROPERTIES EXPORT_NAME interpreter_backend) if(CMAKE_COMPILER_IS_GNUCXX) ov_add_compiler_flags(-Wno-missing-declarations) endif() -ov_build_target_faster(interpreter_backend UNITY) +ov_build_target_faster(${TARGET_NAME} UNITY) -target_compile_definitions(interpreter_backend +target_compile_definitions(${TARGET_NAME} PRIVATE SHARED_LIB_PREFIX="${CMAKE_SHARED_LIBRARY_PREFIX}" SHARED_LIB_SUFFIX="${OV_BUILD_POSTFIX}${CMAKE_SHARED_LIBRARY_SUFFIX}" ) -target_link_libraries(interpreter_backend PRIVATE openvino::builders openvino::reference openvino::util openvino::runtime::dev openvino::shape_inference) +target_link_libraries(${TARGET_NAME} PRIVATE openvino::builders openvino::reference openvino::util openvino::runtime::dev openvino::shape_inference) -target_include_directories(interpreter_backend PUBLIC $ $) +target_include_directories(${TARGET_NAME} PUBLIC $ + $ + $) file(GLOB_RECURSE all_backends_src "${CMAKE_CURRENT_SOURCE_DIR}/*.cpp" "${CMAKE_CURRENT_SOURCE_DIR}/*.hpp") -ov_add_clang_format_target(interpreter_backend_clang FOR_SOURCES ${all_backends_src}) +ov_add_clang_format_target(${TARGET_NAME}_clang FOR_SOURCES ${all_backends_src}) -# developer package +# install & export -openvino_developer_export_targets(COMPONENT core TARGETS interpreter_backend) +ov_install_static_lib(${TARGET_NAME} ${OV_CPACK_COMP_CORE}) -# install +ov_developer_package_export_targets(TARGET openvino::interpreter_backend) 
-ov_install_static_lib(interpreter_backend ${OV_CPACK_COMP_CORE}) +install(DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/" + DESTINATION developer_package/include/${TARGET_NAME} + COMPONENT developer_package EXCLUDE_FROM_ALL + FILES_MATCHING PATTERN "*.hpp") diff --git a/src/plugins/template/src/CMakeLists.txt b/src/plugins/template/src/CMakeLists.txt index f382f90d11b450..effc4831f18947 100644 --- a/src/plugins/template/src/CMakeLists.txt +++ b/src/plugins/template/src/CMakeLists.txt @@ -26,7 +26,7 @@ ov_mark_target_as_cc(${TARGET_NAME}) target_include_directories(${TARGET_NAME} PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}" - "${TEMPLATE_PLUGIN_SOURCE_DIR}/include") + "${OpenVINOTemplatePlugin_SOURCE_DIR}/include") # link common OpenVINO Runtime libraries target_link_libraries(${TARGET_NAME} PRIVATE diff --git a/src/plugins/template/tests/functional/CMakeLists.txt b/src/plugins/template/tests/functional/CMakeLists.txt index eb634d4f91f955..18296710d64a28 100644 --- a/src/plugins/template/tests/functional/CMakeLists.txt +++ b/src/plugins/template/tests/functional/CMakeLists.txt @@ -18,7 +18,7 @@ ov_add_test_target( openvino::funcSharedTests openvino::runtime::dev INCLUDES - "${TEMPLATE_PLUGIN_SOURCE_DIR}/include" + "${OpenVINOTemplatePlugin_SOURCE_DIR}/include" "${CMAKE_CURRENT_SOURCE_DIR}/op_reference" ADD_CLANG_FORMAT LABELS diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/CMakeLists.txt b/src/tests/functional/plugin/conformance/subgraphs_dumper/CMakeLists.txt index 6f981d0702c96d..7174d96449e0ae 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/CMakeLists.txt +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/CMakeLists.txt @@ -18,7 +18,7 @@ ov_add_target( ROOT ${CMAKE_CURRENT_SOURCE_DIR}/src INCLUDES PRIVATE - ${CMAKE_CURRENT_SOURCE_DIR}/include + "$" LINK_LIBRARIES PRIVATE ${LIBRARIES} @@ -34,7 +34,7 @@ ov_add_target( ROOT "${CMAKE_CURRENT_SOURCE_DIR}/src" INCLUDES PUBLIC - ${CMAKE_CURRENT_SOURCE_DIR}/include + "$" LINK_LIBRARIES PUBLIC ${LIBRARIES} @@ -46,4 +46,4 @@ ov_add_target( ADD_CPPLINT ) -ov_build_target_faster(${TARGET_NAME} UNITY) \ No newline at end of file +ov_build_target_faster(${TARGET_NAME} UNITY) diff --git a/src/tests/functional/plugin/conformance/test_runner/conformance_infra/CMakeLists.txt b/src/tests/functional/plugin/conformance/test_runner/conformance_infra/CMakeLists.txt index 0f25d3218e1b92..5f9dc01189a443 100644 --- a/src/tests/functional/plugin/conformance/test_runner/conformance_infra/CMakeLists.txt +++ b/src/tests/functional/plugin/conformance/test_runner/conformance_infra/CMakeLists.txt @@ -14,8 +14,8 @@ ov_add_target( ADD_CPPLINT INCLUDES PUBLIC - ${CMAKE_CURRENT_SOURCE_DIR}/include - ${OpenVINO_SOURCE_DIR}/src/tests/functional/plugin/conformance/subgraphs_dumper_new/include/cache/meta/ + "$" + "$" LINK_LIBRARIES PUBLIC gflags diff --git a/src/tests/functional/plugin/shared/CMakeLists.txt b/src/tests/functional/plugin/shared/CMakeLists.txt index c75d2938d6badf..3715abb9bb059d 100644 --- a/src/tests/functional/plugin/shared/CMakeLists.txt +++ b/src/tests/functional/plugin/shared/CMakeLists.txt @@ -52,13 +52,11 @@ ov_add_target( ADDITIONAL_SOURCE_DIRS ${CMAKE_CURRENT_SOURCE_DIR}/src ADD_CPPLINT - DEVELOPER_PACKAGE - tests EXCLUDED_SOURCE_PATHS ${EXCLUDED_SOURCE_PATHS} DEFINES ${DEFINES} INCLUDES PUBLIC - ${PUBLIC_HEADERS_DIR} + "$" LINK_LIBRARIES PUBLIC openvino::pugixml @@ -86,3 +84,8 @@ if (ENABLE_INTEL_CPU) "${CMAKE_CURRENT_SOURCE_DIR}/src/behavior/ov_executable_network/get_metric.cpp" PROPERTIES COMPILE_DEFINITIONS 
ENABLE_INTEL_CPU=1) endif() + +# install & export + +ov_developer_package_export_targets(TARGET ${TARGET_NAME} + INSTALL_INCLUDE_DIRECTORIES "${PUBLIC_HEADERS_DIR}/") diff --git a/src/tests/functional/shared_test_classes/CMakeLists.txt b/src/tests/functional/shared_test_classes/CMakeLists.txt index 35d09840c09770..a4f46b241437b0 100644 --- a/src/tests/functional/shared_test_classes/CMakeLists.txt +++ b/src/tests/functional/shared_test_classes/CMakeLists.txt @@ -9,11 +9,9 @@ ov_add_target( TYPE STATIC ROOT "${CMAKE_CURRENT_SOURCE_DIR}/include" ADD_CPPLINT - DEVELOPER_PACKAGE - tests INCLUDES PUBLIC - "${CMAKE_CURRENT_SOURCE_DIR}/include" + "$" ADDITIONAL_SOURCE_DIRS ${CMAKE_CURRENT_SOURCE_DIR}/src LINK_LIBRARIES @@ -25,3 +23,8 @@ ov_add_target( ov_build_target_faster(${TARGET_NAME} PCH PRIVATE "src/precomp.hpp" ) + +# install & export + +ov_developer_package_export_targets(TARGET ${TARGET_NAME} + INSTALL_INCLUDE_DIRECTORIES "${CMAKE_CURRENT_SOURCE_DIR}/include/") diff --git a/src/tests/ov_helpers/ov_lpt_models/CMakeLists.txt b/src/tests/ov_helpers/ov_lpt_models/CMakeLists.txt index f2b4514c5b0d32..7eda3438659f0d 100644 --- a/src/tests/ov_helpers/ov_lpt_models/CMakeLists.txt +++ b/src/tests/ov_helpers/ov_lpt_models/CMakeLists.txt @@ -12,7 +12,7 @@ ov_add_target( ROOT ${PUBLIC_HEADERS_DIR} INCLUDES PUBLIC - ${PUBLIC_HEADERS_DIR} + "$" ADDITIONAL_SOURCE_DIRS ${CMAKE_CURRENT_SOURCE_DIR}/src LINK_LIBRARIES @@ -20,13 +20,14 @@ ov_add_target( ov_models openvino::runtime::dev ADD_CPPLINT - DEPENDENCIES - ov_models - DEVELOPER_PACKAGE - tests ) ov_build_target_faster(${TARGET_NAME} UNITY PCH PRIVATE "src/precomp.hpp" ) + +# install & export + +ov_developer_package_export_targets(TARGET ${TARGET_NAME} + INSTALL_INCLUDE_DIRECTORIES "${PUBLIC_HEADERS_DIR}/") diff --git a/src/tests/ov_helpers/ov_models/CMakeLists.txt b/src/tests/ov_helpers/ov_models/CMakeLists.txt index 6d2989f94af734..69631bd82ba2a0 100644 --- a/src/tests/ov_helpers/ov_models/CMakeLists.txt +++ b/src/tests/ov_helpers/ov_models/CMakeLists.txt @@ -12,22 +12,25 @@ ov_add_target( ROOT ${PUBLIC_HEADERS_DIR} INCLUDES PUBLIC - ${PUBLIC_HEADERS_DIR} + "$" ADDITIONAL_SOURCE_DIRS ${CMAKE_CURRENT_SOURCE_DIR}/src LINK_LIBRARIES PUBLIC openvino::runtime openvino::reference - interpreter_backend + openvino::interpreter_backend openvino::runtime::dev common_test_utils ADD_CLANG_FORMAT - DEVELOPER_PACKAGE - tests ) ov_build_target_faster(${TARGET_NAME} UNITY PCH PRIVATE "src/precomp.hpp" ) + +# install & export + +ov_developer_package_export_targets(TARGET ${TARGET_NAME} + INSTALL_INCLUDE_DIRECTORIES "${PUBLIC_HEADERS_DIR}/") diff --git a/src/tests/ov_helpers/ov_snippets_models/CMakeLists.txt b/src/tests/ov_helpers/ov_snippets_models/CMakeLists.txt index 69cd602bb5eab5..24f1efae26a9e8 100644 --- a/src/tests/ov_helpers/ov_snippets_models/CMakeLists.txt +++ b/src/tests/ov_helpers/ov_snippets_models/CMakeLists.txt @@ -5,8 +5,6 @@ set(TARGET_NAME ov_snippets_models) set(PUBLIC_HEADERS_DIR "${CMAKE_CURRENT_SOURCE_DIR}/include") -set(SNIPPETS_INCLUDES "$/include") -set(COMMON_TEST_UTILS_INCLUDES "$") ov_add_target( NAME ${TARGET_NAME} @@ -14,11 +12,8 @@ ov_add_target( ROOT ${PUBLIC_HEADERS_DIR} INCLUDES PUBLIC - ${PUBLIC_HEADERS_DIR} - ${COMMON_TEST_UTILS_INCLUDES} - PRIVATE - ${SNIPPETS_INCLUDES} - + "$" + "$" ADDITIONAL_SOURCE_DIRS ${CMAKE_CURRENT_SOURCE_DIR}/src LINK_LIBRARIES @@ -28,11 +23,14 @@ ov_add_target( openvino::snippets ov_lpt_models ADD_CPPLINT - DEVELOPER_PACKAGE - tests ) ov_build_target_faster(${TARGET_NAME} UNITY PCH PRIVATE 
"src/precomp.hpp" ) + +# install & export + +ov_developer_package_export_targets(TARGET ${TARGET_NAME} + INSTALL_INCLUDE_DIRECTORIES "${PUBLIC_HEADERS_DIR}/") diff --git a/src/tests/test_utils/common_test_utils/CMakeLists.txt b/src/tests/test_utils/common_test_utils/CMakeLists.txt index abf36d4fa3864d..1112ccd08558af 100644 --- a/src/tests/test_utils/common_test_utils/CMakeLists.txt +++ b/src/tests/test_utils/common_test_utils/CMakeLists.txt @@ -23,8 +23,6 @@ function(add_common_utils ADD_TARGET_NAME) EXCLUDED_SOURCE_PATHS ${TARGET_EXCLUDED_SOURCE_PATHS} ADD_CLANG_FORMAT - DEVELOPER_PACKAGE - tests LINK_LIBRARIES PUBLIC gtest @@ -38,12 +36,14 @@ function(add_common_utils ADD_TARGET_NAME) openvino::shape_inference INCLUDES PUBLIC - "${CMAKE_CURRENT_SOURCE_DIR}/include" + "$" PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/src" - ) + ov_developer_package_export_targets(TARGET ${ADD_TARGET_NAME} + INSTALL_INCLUDE_DIRECTORIES "${CMAKE_CURRENT_SOURCE_DIR}/include/") + if(ENABLE_CONFORMANCE_PGQL) target_compile_definitions(${ADD_TARGET_NAME} PUBLIC ENABLE_CONFORMANCE_PGQL) endif() @@ -58,31 +58,14 @@ function(add_common_utils ADD_TARGET_NAME) PCH PRIVATE "src/precomp.hpp" ) - # detecting regex support - if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.9) - set(USE_BOOST_RE ON) - endif() - if (USE_BOOST_RE) - target_compile_definitions(${ADD_TARGET_NAME} PUBLIC USE_BOOST_RE) - - debug_message(STATUS "Adding boost dependency") - if (CMAKE_VERBOSE_MAKEFILE) - set(Boost_DEBUG on) - endif () - find_package(Boost REQUIRED COMPONENTS regex) - target_link_libraries(${ADD_TARGET_NAME} PUBLIC ${Boost_REGEX_LIBRARY}) - target_include_directories(${ADD_TARGET_NAME} PUBLIC ${Boost_INCLUDE_DIRS}) - endif () - target_include_directories(${ADD_TARGET_NAME} PUBLIC $ PRIVATE $) - target_include_directories(${ADD_TARGET_NAME} SYSTEM PUBLIC ${OV_TESTS_ROOT}/test_utils) + target_include_directories(${ADD_TARGET_NAME} SYSTEM PUBLIC "$") target_compile_definitions(${ADD_TARGET_NAME} PUBLIC ${ARGN}) - endfunction() # Keep old name so that library can be used from NPU repo diff --git a/src/tests/test_utils/functional_test_utils/CMakeLists.txt b/src/tests/test_utils/functional_test_utils/CMakeLists.txt index c990febcd6a0b2..e1148d82ee1132 100644 --- a/src/tests/test_utils/functional_test_utils/CMakeLists.txt +++ b/src/tests/test_utils/functional_test_utils/CMakeLists.txt @@ -9,11 +9,9 @@ ov_add_target( TYPE STATIC ROOT ${CMAKE_CURRENT_SOURCE_DIR} ADD_CLANG_FORMAT - DEVELOPER_PACKAGE - tests INCLUDES PUBLIC - "${CMAKE_CURRENT_SOURCE_DIR}/include" + "$" ADDITIONAL_SOURCE_DIRS ${CMAKE_CURRENT_SOURCE_DIR}/src LINK_LIBRARIES @@ -24,13 +22,17 @@ ov_add_target( PRIVATE ov_models openvino::pugixml - INCLUDES - PUBLIC - $ ) -install(DIRECTORY layer_tests_summary DESTINATION tests/functional_test_utils COMPONENT tests EXCLUDE_FROM_ALL) - ov_build_target_faster(${TARGET_NAME} PCH PRIVATE "src/precomp.hpp" ) + +# install & export + +install(DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/layer_tests_summary" + DESTINATION tests/functional_test_utils + COMPONENT tests EXCLUDE_FROM_ALL) + +ov_developer_package_export_targets(TARGET ${TARGET_NAME} + INSTALL_INCLUDE_DIRECTORIES "${CMAKE_CURRENT_SOURCE_DIR}/include/") diff --git a/src/tests/test_utils/unit_test_utils/CMakeLists.txt b/src/tests/test_utils/unit_test_utils/CMakeLists.txt index 7b8607d6bae28c..dd2bb77aa52224 100644 --- a/src/tests/test_utils/unit_test_utils/CMakeLists.txt +++ b/src/tests/test_utils/unit_test_utils/CMakeLists.txt @@ -15,14 +15,20 @@ 
ov_add_target( TYPE STATIC ROOT ${CMAKE_CURRENT_SOURCE_DIR} ADD_CLANG_FORMAT - DEVELOPER_PACKAGE - tests LINK_LIBRARIES PUBLIC common_test_utils_s inference_engine_s gmock + DEPENDENCIES + mock_engine INCLUDES PUBLIC - "${CMAKE_CURRENT_SOURCE_DIR}/.." + "$" ) + +# install & export + +ov_developer_package_export_targets(TARGET ${TARGET_NAME} + INSTALL_INCLUDE_DIRECTORIES "${CMAKE_CURRENT_SOURCE_DIR}/../unit_test_utils" + INSTALL_DESTIONATION "developer_package/include/unit_test_utils/unit_test_utils") diff --git a/thirdparty/dependencies.cmake b/thirdparty/dependencies.cmake index 8ef6c5bf1c51ea..1524378287fdfa 100644 --- a/thirdparty/dependencies.cmake +++ b/thirdparty/dependencies.cmake @@ -51,9 +51,6 @@ if(X86_64 OR X86 OR UNIVERSAL2) # conan creates alias xbyak::xbyak, no extra steps are required else() add_subdirectory(thirdparty/xbyak EXCLUDE_FROM_ALL) - # export and install xbyak - openvino_developer_export_targets(COMPONENT openvino_common TARGETS xbyak::xbyak) - ov_install_static_lib(xbyak ${OV_CPACK_COMP_CORE}) endif() endif() @@ -269,12 +266,32 @@ if(NOT TARGET openvino::pugixml) function(ov_build_pugixml) function(ov_build_pugixml_static) set(BUILD_SHARED_LIBS OFF) + function(install) + cmake_parse_arguments(_install "" "EXPORT" "" ${ARGV}) + if(_install_EXPORT STREQUAL "pugixml-targets") + # does nothing! + # we need to override 'export' command to prevent cmake issue with multiple + # export sets for pugixml-target. Currently, it's installed only by OpenVINO + else() + _install(${ARGV}) + endif() + endfunction() + function(export) + cmake_parse_arguments(_export "" "EXPORT" "" ${ARGV}) + if(_export_EXPORT STREQUAL "pugixml-targets") + # does nothing! + # we need to override 'export' command to prevent cmake issue with multiple + # export sets for pugixml-target. 
Currently, it's installed only by OpenVINO + else() + _export(${ARGV}) + endif() + endfunction() add_subdirectory(thirdparty/pugixml EXCLUDE_FROM_ALL) endfunction() ov_build_pugixml_static() set_property(TARGET pugixml-static PROPERTY EXPORT_NAME pugixml) add_library(openvino::pugixml ALIAS pugixml-static) - openvino_developer_export_targets(COMPONENT openvino_common TARGETS openvino::pugixml) + ov_developer_package_export_targets(TARGET openvino::pugixml) ov_install_static_lib(pugixml-static ${OV_CPACK_COMP_CORE}) endfunction() @@ -300,7 +317,7 @@ if(ENABLE_GAPI_PREPROCESSING) add_subdirectory(thirdparty/ade EXCLUDE_FROM_ALL) set_target_properties(ade PROPERTIES FOLDER thirdparty) - openvino_developer_export_targets(COMPONENT openvino_common TARGETS ade) + ov_developer_package_export_targets(TARGET ade) ov_install_static_lib(ade ${OV_CPACK_COMP_CORE}) endif() @@ -316,7 +333,7 @@ if(ENABLE_GAPI_PREPROCESSING) endif() set_target_properties(fluid PROPERTIES FOLDER thirdparty) - openvino_developer_export_targets(COMPONENT openvino_common TARGETS fluid) + ov_developer_package_export_targets(TARGET fluid) ov_install_static_lib(fluid ${OV_CPACK_COMP_CORE}) endif() @@ -369,7 +386,7 @@ if(ENABLE_SAMPLES OR ENABLE_TESTS) if(NOT TARGET gflags) add_subdirectory(thirdparty/gflags EXCLUDE_FROM_ALL) - openvino_developer_export_targets(COMPONENT openvino_common TARGETS gflags) + ov_developer_package_export_targets(TARGET gflags) endif() endif() @@ -391,8 +408,14 @@ if(ENABLE_TESTS) endforeach() else() add_subdirectory(thirdparty/gtest EXCLUDE_FROM_ALL) - openvino_developer_export_targets(COMPONENT tests - TARGETS gmock gmock_main gtest gtest_main) + # install & export + set(googletest_root "${CMAKE_CURRENT_SOURCE_DIR}/thirdparty/gtest/gtest") + ov_developer_package_export_targets(TARGET gtest_main + INSTALL_INCLUDE_DIRECTORIES "${googletest_root}/googletest/include/") + ov_developer_package_export_targets(TARGET gtest + INSTALL_INCLUDE_DIRECTORIES "${googletest_root}/googletest/include/") + ov_developer_package_export_targets(TARGET gmock + INSTALL_INCLUDE_DIRECTORIES "${googletest_root}/googlemock/include/") endif() endif() @@ -585,8 +608,9 @@ if(ENABLE_SAMPLES) else() add_subdirectory(thirdparty/json EXCLUDE_FROM_ALL) - # this is required only because of NPU plugin reused this - openvino_developer_export_targets(COMPONENT openvino_common TARGETS nlohmann_json) + # this is required only because of NPU plugin reused this: export & install + ov_developer_package_export_targets(TARGET nlohmann_json + INSTALL_INCLUDE_DIRECTORIES "${OpenVINO_SOURCE_DIR}/thirdparty/json/nlohmann_json/include") # for nlohmann library versions older than v3.0.0 if(NOT TARGET nlohmann_json::nlohmann_json) diff --git a/thirdparty/gtest/CMakeLists.txt b/thirdparty/gtest/CMakeLists.txt index f527552903c1d7..585b80934bc4ce 100644 --- a/thirdparty/gtest/CMakeLists.txt +++ b/thirdparty/gtest/CMakeLists.txt @@ -14,16 +14,31 @@ set(INSTALL_GTEST OFF CACHE BOOL "" FORCE) add_subdirectory(gtest EXCLUDE_FROM_ALL) -get_target_property(gtest_include_dirs gtest INTERFACE_INCLUDE_DIRECTORIES) -set_target_properties(gtest PROPERTIES INTERFACE_SYSTEM_INCLUDE_DIRECTORIES "${gtest_include_dirs}") +function(_ov_gtest_filter_install_interface TARGET TYPE) + set(final_include_dirs "$") + + get_target_property(include_dirs ${TARGET} INTERFACE_INCLUDE_DIRECTORIES) + foreach(include_dir IN LISTS include_dirs) + if(NOT include_dir MATCHES ".*INSTALL_INTERFACE.*") + # remove leading and trailing parts of generator expressions + string(REPLACE "$" "" 
include_dir "${include_dir}") + # wrap to BUILD_INTERFACE again + list(APPEND final_include_dirs "$") + endif() + endforeach() -get_target_property(gmock_include_dirs gtest INTERFACE_INCLUDE_DIRECTORIES) -set_target_properties(gmock PROPERTIES INTERFACE_SYSTEM_INCLUDE_DIRECTORIES - "${gmock_include_dirs};${gmock_SOURCE_DIR}/include") + set_target_properties(${TARGET} PROPERTIES + INTERFACE_INCLUDE_DIRECTORIES "${final_include_dirs}" + INTERFACE_SYSTEM_INCLUDE_DIRECTORIES "${final_include_dirs}") +endfunction() -set(gtest_targets gtest gtest_main gmock gmock_main) +_ov_gtest_filter_install_interface(gtest gtest) +_ov_gtest_filter_install_interface(gtest_main gtest) +_ov_gtest_filter_install_interface(gmock gmock) +_ov_gtest_filter_install_interface(gmock_main gmock) -foreach(target IN LISTS gtest_targets) +foreach(target gtest gtest_main gmock gmock_main) # If we have specified /Z7 option, remove -Zi option which comes from gtest if (CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") get_target_property(_target_cxx_flags ${target} COMPILE_OPTIONS) @@ -41,8 +56,7 @@ foreach(target IN LISTS gtest_targets) target_compile_options(${target} PRIVATE -Wno-deprecated-copy) endif() endif() + # disable warnings ov_disable_all_warnings(${target}) + set_target_properties(${target} PROPERTIES FOLDER thirdparty) endforeach() - -set_target_properties(${gtest_targets} - PROPERTIES FOLDER thirdparty) diff --git a/thirdparty/ittapi/CMakeLists.txt b/thirdparty/ittapi/CMakeLists.txt index fe821f74feaaac..14e18f8bbced68 100644 --- a/thirdparty/ittapi/CMakeLists.txt +++ b/thirdparty/ittapi/CMakeLists.txt @@ -4,8 +4,8 @@ if(DEFINED INTEL_VTUNE_DIR OR DEFINED ENV{INTEL_VTUNE_DIR}) find_package(ITT - PATHS "${OpenVINO_SOURCE_DIR}/src/common/itt/cmake" - NO_DEFAULT_PATH) + PATHS "${OpenVINO_SOURCE_DIR}/src/common/itt/cmake" + NO_DEFAULT_PATH) if(NOT ITT_FOUND) message(WARNING "Profiling option enabled, but no ITT library was found under INTEL_VTUNE_DIR") endif() @@ -25,6 +25,6 @@ else() # create alias ittapi::ittnotify add_library(ittapi::ittnotify ALIAS ittnotify) - openvino_developer_export_targets(COMPONENT openvino_common TARGETS ittapi::ittnotify) + ov_developer_package_export_targets(TARGET ittapi::ittnotify) ov_install_static_lib(ittnotify ${OV_CPACK_COMP_CORE}) endif() From 5170350cf5004ebe109fe07c582e55401105d81c Mon Sep 17 00:00:00 2001 From: Wilson Seok Date: Fri, 13 Oct 2023 12:39:35 +0900 Subject: [PATCH 181/257] [GPU] fix issue of optimized out reorder during event sync (#20223) * fix confliction * remove debug code * fix unit test failure * update condition of reorder check * apply the condition for only opt out reorder user --- src/plugins/intel_gpu/src/graph/primitive_inst.cpp | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/plugins/intel_gpu/src/graph/primitive_inst.cpp b/src/plugins/intel_gpu/src/graph/primitive_inst.cpp index eea18ca1fe6b79..44b1fec8bb0963 100644 --- a/src/plugins/intel_gpu/src/graph/primitive_inst.cpp +++ b/src/plugins/intel_gpu/src/graph/primitive_inst.cpp @@ -685,6 +685,7 @@ void primitive_inst::do_runtime_skip_reorder() { GPU_DEBUG_TRACE_DETAIL << "[do runtime skip reorder] update shape for user " << u->id() << std::endl; u->update_shape(); u->update_shape_done_by_other = true; + if (u->_impl_params->get_input_layout() == u->_impl_params->get_output_layout()) { std::function>)> update_memory_dependencies; update_memory_dependencies = [&](std::vector> users) { @@ -699,6 +700,10 @@ void primitive_inst::do_runtime_skip_reorder() { update_memory_dependencies(u->get_user_insts()); 
u->set_can_be_optimized(true); + // Opt out reorder which has _needs_completion_event = true causes syncronization failed in dGPU. + if (_needs_completion_event == false && u->_needs_completion_event == true) { + _needs_completion_event = true; + } GPU_DEBUG_TRACE_DETAIL << "[do runtime skip reorder] set user " << u->id() << " as can_be_optimized" << std::endl; } else { GPU_DEBUG_TRACE_DETAIL << "[do runtime skip reorder] user " << u->id() << " cannot be optimized" << std::endl; From b7b5d4cd936ef461e9e5410889b613d20446d777 Mon Sep 17 00:00:00 2001 From: Nesterov Alexander Date: Fri, 13 Oct 2023 07:34:07 +0200 Subject: [PATCH 182/257] [ARM CPU] Update TBB ACL Scheduler (#18885) --- .../src/nodes/executors/acl/acl_eltwise.cpp | 10 +++ .../nodes/executors/acl/acl_ie_scheduler.cpp | 77 +++++++++++++++++++ .../nodes/executors/acl/acl_ie_scheduler.hpp | 31 ++++++++ src/plugins/intel_cpu/src/plugin.cpp | 38 +++++++++ src/plugins/intel_cpu/src/plugin.h | 14 ++++ 5 files changed, 170 insertions(+) create mode 100644 src/plugins/intel_cpu/src/nodes/executors/acl/acl_ie_scheduler.cpp create mode 100644 src/plugins/intel_cpu/src/nodes/executors/acl/acl_ie_scheduler.hpp diff --git a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_eltwise.cpp b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_eltwise.cpp index cb5911e90836fb..f22004a0d3e154 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_eltwise.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_eltwise.cpp @@ -10,6 +10,11 @@ namespace intel_cpu { using namespace arm_compute; +static std::mutex & get_mtx_ifunc() { + static std::mutex mtx_ifunc; + return mtx_ifunc; +} + inline VectorDims reshape_sizes(VectorDims dims) { const size_t MAX_NUM_SHAPE = arm_compute::MAX_DIMS; VectorDims result_dims(MAX_NUM_SHAPE - 1); @@ -494,6 +499,11 @@ bool AclEltwiseExecutor::init(const EltwiseAttrs &eltwiseAttrs, const std::vecto default: IE_THROW() << "Unsupported operation type for ACL Eltwise executor: " << static_cast(aclEltwiseAttrs.algorithm); } + + // We get a problem (seg. faults, data race etc) for eltwise operations when we use several configure(...) functions in parallel. 
+ // We created issue about this problem here: https://github.com/ARM-software/ComputeLibrary/issues/1073 + // TODO: change it when we will get an answer to our question in issue + std::lock_guard _lock {get_mtx_ifunc()}; ifunc = exec_func(); return true; } diff --git a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_ie_scheduler.cpp b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_ie_scheduler.cpp new file mode 100644 index 00000000000000..c617363aefebf6 --- /dev/null +++ b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_ie_scheduler.cpp @@ -0,0 +1,77 @@ +// Copyright (C) 2020-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "acl_ie_scheduler.hpp" + +#include "arm_compute/core/CPP/ICPPKernel.h" +#include "arm_compute/core/Error.h" +#include "arm_compute/core/Helpers.h" +#include + +namespace ov { +namespace intel_cpu { + +using namespace arm_compute; + +ACLScheduler::ACLScheduler() = default; + +unsigned int ACLScheduler::num_threads() const { + return parallel_get_num_threads(); +} + +void ACLScheduler::set_num_threads(unsigned int num_threads) {} + +void ACLScheduler::schedule_custom(ICPPKernel *kernel, const Hints &hints, const Window &window, ITensorPack &tensors) { + const Window & max_window = window; + const unsigned int num_iterations = max_window.num_iterations_total(); + const auto _num_threads = std::min(num_iterations, static_cast(parallel_get_num_threads())); + + if (num_iterations == 0) { + return; + } + + std::function main_run; + if (tensors.empty()) { + main_run = [&](const Window &window, const ThreadInfo &info) { + kernel->run(window, info); + }; + } else { + main_run = [&](const Window &window, const ThreadInfo &info) { + kernel->run_op(tensors, window, info); + }; + } + + if (!kernel->is_parallelisable() || _num_threads == 1) { + ThreadInfo info; + info.cpu_info = &cpu_info(); + main_run(max_window, info); + } else { + const auto num_windows = _num_threads; + const auto hints_split_dimension = hints.split_dimension(); + + InferenceEngine::parallel_for(num_windows, [&](int wid) { + Window win = max_window.split_window(hints_split_dimension, wid, num_windows); + win.validate(); + main_run(win, {wid, static_cast(_num_threads), &cpu_info()}); + }); + } +} + +void ACLScheduler::schedule(ICPPKernel *kernel, const Hints &hints) { + ITensorPack tensors; + schedule_custom(kernel, hints, kernel->window(), tensors); +} + +void ACLScheduler::schedule_op(ICPPKernel *kernel, const Hints &hints, const Window &window, ITensorPack &tensors) { + schedule_custom(kernel, hints, window, tensors); +} + +void ACLScheduler::run_workloads(std::vector &workloads) { + InferenceEngine::parallel_for(workloads.size(), [&](int wid) { + workloads[wid]({wid, static_cast(parallel_get_num_threads()), &cpu_info()}); + }); +} + +} // namespace intel_cpu +} // namespace ov diff --git a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_ie_scheduler.hpp b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_ie_scheduler.hpp new file mode 100644 index 00000000000000..1148f4ad5edd69 --- /dev/null +++ b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_ie_scheduler.hpp @@ -0,0 +1,31 @@ +// Copyright (C) 2020-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include +#include "support/Mutex.h" + +namespace ov { +namespace intel_cpu { + +using namespace arm_compute; + +class ACLScheduler final : public IScheduler { +public: + ACLScheduler(); + ~ACLScheduler() override = default; + std::uint32_t num_threads() const 
override; + void set_num_threads(unsigned int num_threads) override; + void schedule(ICPPKernel *kernel, const Hints &hints) override; + void schedule_op(ICPPKernel *kernel, const Hints &hints, const Window &window, ITensorPack &tensors) override; +protected: + void run_workloads(std::vector &workloads) override; +private: + void schedule_custom(ICPPKernel *kernel, const Hints &hints, const Window &window, ITensorPack &tensors); +}; +} // namespace intel_cpu +} // namespace ov diff --git a/src/plugins/intel_cpu/src/plugin.cpp b/src/plugins/intel_cpu/src/plugin.cpp index ddf14ef59a7eab..96be8734ec0dce 100644 --- a/src/plugins/intel_cpu/src/plugin.cpp +++ b/src/plugins/intel_cpu/src/plugin.cpp @@ -38,6 +38,11 @@ #include #include +#if defined(OV_CPU_WITH_ACL) +#include "nodes/executors/acl/acl_ie_scheduler.hpp" +#include "arm_compute/runtime/CPP/CPPScheduler.h" +#endif + using namespace InferenceEngine; #define IE_CPU_PLUGIN_THROW(...) IE_THROW(__VA_ARGS__) << "CPU plugin: " @@ -137,11 +142,44 @@ class CPUSpecialSetup { }; #endif // __linux__ +#if defined(OV_CPU_WITH_ACL) +std::mutex Engine::SchedulerGuard::mutex; +std::weak_ptr Engine::SchedulerGuard::ptr; + +Engine::SchedulerGuard::SchedulerGuard() { +#if IE_THREAD == IE_THREAD_SEQ + // To save state for ACL cores in single-thread mode + arm_compute::Scheduler::set(arm_compute::Scheduler::Type::ST); +#else + arm_compute::Scheduler::set(std::make_shared()); +#endif +} + +std::shared_ptr Engine::SchedulerGuard::instance() { + std::lock_guard lock{SchedulerGuard::mutex}; + auto scheduler_guard_ptr = SchedulerGuard::ptr.lock(); + if (scheduler_guard_ptr == nullptr) { + SchedulerGuard::ptr = scheduler_guard_ptr = std::make_shared(); + } + return scheduler_guard_ptr; +} + +Engine::SchedulerGuard::~SchedulerGuard() { + // To save the state of scheduler after ACLScheduler has been executed + // TODO: find out the cause of the state + std::lock_guard lock{this->dest_mutex}; + arm_compute::Scheduler::set(arm_compute::Scheduler::Type::ST); +} +#endif + Engine::Engine() : deviceFullName(getDeviceFullName()), specialSetup(new CPUSpecialSetup) { _pluginName = "CPU"; extensionManager->AddExtension(std::make_shared()); +#if defined(OV_CPU_WITH_ACL) + scheduler_guard = SchedulerGuard::instance(); +#endif } Engine::~Engine() { diff --git a/src/plugins/intel_cpu/src/plugin.h b/src/plugins/intel_cpu/src/plugin.h index 20c6d315a2c623..3e9d616dcec02c 100644 --- a/src/plugins/intel_cpu/src/plugin.h +++ b/src/plugins/intel_cpu/src/plugin.h @@ -63,6 +63,20 @@ class Engine : public InferenceEngine::IInferencePlugin { const std::string deviceFullName; std::shared_ptr specialSetup; + +#if defined(OV_CPU_WITH_ACL) + struct SchedulerGuard { + SchedulerGuard(); + ~SchedulerGuard(); + static std::shared_ptr instance(); + static std::mutex mutex; + // separate mutex for saving ACLScheduler state in destructor + mutable std::mutex dest_mutex; + static std::weak_ptr ptr; + }; + + std::shared_ptr scheduler_guard; +#endif }; } // namespace intel_cpu From 157041e6f8ac8b547052e145753bdad306647349 Mon Sep 17 00:00:00 2001 From: Ivan Novoselov Date: Fri, 13 Oct 2023 08:51:54 +0100 Subject: [PATCH 183/257] [CPU] Improve Commit slider's handling of non-decodable characters (#20149) --- .../intel_cpu/tools/commit_slider/utils/helpers.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/src/plugins/intel_cpu/tools/commit_slider/utils/helpers.py b/src/plugins/intel_cpu/tools/commit_slider/utils/helpers.py index c30c5773467b4f..3adf6e65025af4 100644 --- 
a/src/plugins/intel_cpu/tools/commit_slider/utils/helpers.py +++ b/src/plugins/intel_cpu/tools/commit_slider/utils/helpers.py @@ -191,14 +191,10 @@ def runCommandList(commit, cfgData, enforceClean=False): ) proc = subprocess.Popen( formattedCmd, cwd=cwd, stdout=subprocess.PIPE, - stderr=subprocess.STDOUT + stderr=subprocess.STDOUT, + encoding="utf-8", errors="replace" ) for line in proc.stdout: - # decode if line is byte-type - try: - line = line.decode("utf-8") - except (UnicodeDecodeError, AttributeError): - pass sys.stdout.write(line) commitLogger.info(line) if "catchMsg" in cmd: From e289e8282dba15ab778b96a6d5e4d33d6dfeb1ea Mon Sep 17 00:00:00 2001 From: Ilya Churaev Date: Fri, 13 Oct 2023 13:19:53 +0400 Subject: [PATCH 184/257] Migrated all subgraph tests after quantization graphs (#20381) * Migrated all subgraph tests after quantization graphs * Fixed skip tests config * Add alias for NPU * Try to fix NPU * Remove unsupported precisions * Remove GNA test * Remove more tests --- .../skip_tests_config.cpp | 4 +- .../subgraph_tests/range_add.cpp | 131 ++++++++---------- .../subgraph_tests/relu_shape_of.cpp | 24 ++-- .../reshape_squeeze_reshape_relu.cpp | 71 +++++----- .../subgraph_tests/split_conv_concat.cpp | 28 ++-- .../subgraph_tests/variadic_split_pad.cpp | 72 +++++----- .../reshape_squeeze_reshape_relu.cpp | 49 ------- .../subgraph_tests/split_conv_concat.cpp | 22 --- .../reshape_squeeze_reshape_relu.cpp | 78 +++++------ .../subgraph_tests/split_conv_concat.cpp | 25 ++-- .../include/subgraph_tests/range_add.hpp | 10 +- .../include/subgraph_tests/relu_shape_of.hpp | 8 +- .../reshape_squeeze_reshape_relu.hpp | 10 +- .../subgraph_tests/split_conv_concat.hpp | 13 +- .../subgraph_tests/variadic_split_pad.hpp | 10 +- .../shared_test_classes/base/ov_subgraph.hpp | 6 + .../subgraph/range_add.hpp | 35 +++-- .../subgraph/relu_shape_of.hpp | 29 ++-- .../subgraph/reshape_squeeze_reshape_relu.hpp | 44 +++--- .../subgraph/split_conv_concat.hpp | 28 +++- .../subgraph/variadic_split_pad.hpp | 42 +++--- .../src/subgraph/range_add.cpp | 79 +++++------ .../src/subgraph/relu_shape_of.cpp | 51 +++---- .../subgraph/reshape_squeeze_reshape_relu.cpp | 82 +++++------ .../src/subgraph/split_conv_concat.cpp | 100 ++++++++++--- .../src/subgraph/variadic_split_pad.cpp | 42 +++--- 26 files changed, 547 insertions(+), 546 deletions(-) delete mode 100644 src/plugins/intel_gna/tests/functional/shared_tests_instances/subgraph_tests/reshape_squeeze_reshape_relu.cpp delete mode 100644 src/plugins/intel_gna/tests/functional/shared_tests_instances/subgraph_tests/split_conv_concat.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp index e629a715b69890..47c789928db4ea 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp @@ -24,8 +24,8 @@ std::vector disabledTestPatterns() { R"(.*(QuantGroupConv3D).*)", // TODO: Issue: 34518 R"(.*RangeLayerTest.*)", - R"(.*(RangeAddSubgraphTest).*Start=1.2.*Stop=(5.2|-5.2).*Step=(0.1|-0.1).*netPRC=FP16.*)", - R"(.*(RangeNumpyAddSubgraphTest).*netPRC=FP16.*)", + R"(.*(RangeAddSubgraphTest).*Start=1.2.*Stop=(5.2|-5.2).*Step=(0.1|-0.1).*ET=f16.*)", + R"(.*(RangeNumpyAddSubgraphTest).*ET=f16.*)", // TODO: Issue: 43793 R"(.*InferRequestPreprocessDynamicallyInSetBlobTest.*iPRC=0.*_iLT=1.*)", 
R"(.*InferRequestPreprocessDynamicallyInSetBlobTest.*oPRC=0.*_oLT=1.*)", diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/range_add.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/range_add.cpp index 409ba17f27e68f..5b19f8b34d7f81 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/range_add.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/range_add.cpp @@ -2,97 +2,76 @@ // SPDX-License-Identifier: Apache-2.0 // -#include - #include "subgraph_tests/range_add.hpp" -#include "common_test_utils/test_constants.hpp" -using namespace SubgraphTestsDefinitions; +#include + +using namespace ov::test; namespace { -const std::vector positiveStart = { 1.0f, 1.2f }; -const std::vector positiveStop = { 5.0f, 5.2f }; -const std::vector positiveStep = { 1.0f, 0.1f }; +const std::vector positiveStart = {1.0f, 1.2f}; +const std::vector positiveStop = {5.0f, 5.2f}; +const std::vector positiveStep = {1.0f, 0.1f}; -const std::vector negativeStart = { 1.0f, 1.2f }; -const std::vector negativeStop = { -5.0f, -5.2f }; -const std::vector negativeStep = { -1.0f, -0.1f }; +const std::vector negativeStart = {1.0f, 1.2f}; +const std::vector negativeStop = {-5.0f, -5.2f}; +const std::vector negativeStep = {-1.0f, -0.1f}; -const std::vector trunc_start = { 1.2f, 1.9f }; -const std::vector trunc_stop = { 11.4f, 11.8f }; -const std::vector trunc_step = { 1.3f, 2.8f }; +const std::vector trunc_start = {1.2f, 1.9f}; +const std::vector trunc_stop = {11.4f, 11.8f}; +const std::vector trunc_step = {1.3f, 2.8f}; -const std::vector netPrecisions = { - InferenceEngine::Precision::FP32, - InferenceEngine::Precision::FP16 // "[NOT_IMPLEMENTED] Input image format FP16 is not supported yet... +const std::vector element_types = { + ov::element::f32, + ov::element::f16 // "[NOT_IMPLEMENTED] Input image format FP16 is not supported yet... 
}; // ------------------------------ V0 ------------------------------ -INSTANTIATE_TEST_SUITE_P(smoke_BasicPositive, RangeAddSubgraphTest, - ::testing::Combine( - ::testing::ValuesIn(positiveStart), - ::testing::ValuesIn(positiveStop), - ::testing::ValuesIn(positiveStep), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - RangeAddSubgraphTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_BasicPositive, + RangeAddSubgraphTest, + ::testing::Combine(::testing::ValuesIn(positiveStart), + ::testing::ValuesIn(positiveStop), + ::testing::ValuesIn(positiveStep), + ::testing::ValuesIn(element_types), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + RangeAddSubgraphTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_BasicNegative, RangeAddSubgraphTest, - ::testing::Combine( - ::testing::ValuesIn(negativeStart), - ::testing::ValuesIn(negativeStop), - ::testing::ValuesIn(negativeStep), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - RangeAddSubgraphTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_BasicNegative, + RangeAddSubgraphTest, + ::testing::Combine(::testing::ValuesIn(negativeStart), + ::testing::ValuesIn(negativeStop), + ::testing::ValuesIn(negativeStep), + ::testing::ValuesIn(element_types), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + RangeAddSubgraphTest::getTestCaseName); // ------------------------------ V4 ------------------------------ -INSTANTIATE_TEST_SUITE_P(smoke_BasicPositive, RangeNumpyAddSubgraphTest, - ::testing::Combine( - ::testing::ValuesIn(positiveStart), - ::testing::ValuesIn(positiveStop), - ::testing::ValuesIn(positiveStep), - ::testing::ValuesIn(netPrecisions), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - RangeNumpyAddSubgraphTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_BasicPositive, + RangeNumpyAddSubgraphTest, + ::testing::Combine(::testing::ValuesIn(positiveStart), + ::testing::ValuesIn(positiveStop), + ::testing::ValuesIn(positiveStep), + ::testing::ValuesIn(element_types), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + RangeNumpyAddSubgraphTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_BasicNegative, RangeNumpyAddSubgraphTest, - ::testing::Combine( - ::testing::ValuesIn(negativeStart), - ::testing::ValuesIn(negativeStop), - ::testing::ValuesIn(negativeStep), - ::testing::ValuesIn(netPrecisions), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - RangeNumpyAddSubgraphTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_BasicNegative, + RangeNumpyAddSubgraphTest, + ::testing::Combine(::testing::ValuesIn(negativeStart), + ::testing::ValuesIn(negativeStop), + 
::testing::ValuesIn(negativeStep), + ::testing::ValuesIn(element_types), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + RangeNumpyAddSubgraphTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_BasicTruncateInputs, RangeNumpyAddSubgraphTest, - ::testing::Combine( - ::testing::ValuesIn(trunc_start), - ::testing::ValuesIn(trunc_stop), - ::testing::ValuesIn(trunc_step), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(InferenceEngine::Precision::I32), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - RangeNumpyAddSubgraphTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_BasicTruncateInputs, + RangeNumpyAddSubgraphTest, + ::testing::Combine(::testing::ValuesIn(trunc_start), + ::testing::ValuesIn(trunc_stop), + ::testing::ValuesIn(trunc_step), + ::testing::ValuesIn(element_types), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + RangeNumpyAddSubgraphTest::getTestCaseName); } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/relu_shape_of.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/relu_shape_of.cpp index 18ad81aecc25d2..00559d09144d2d 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/relu_shape_of.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/relu_shape_of.cpp @@ -2,22 +2,20 @@ // SPDX-License-Identifier: Apache-2.0 // -#include - #include "subgraph_tests/relu_shape_of.hpp" -#include "common_test_utils/test_constants.hpp" -using namespace SubgraphTestsDefinitions; +#include + +using namespace ov::test; namespace { - const std::vector netPrecisions = { - InferenceEngine::Precision::I32 - }; +const std::vector input_types = {ov::element::i32}; - INSTANTIATE_TEST_SUITE_P(smoke_Check, ReluShapeOfSubgraphTest, - ::testing::Combine( - ::testing::ValuesIn(netPrecisions), - ::testing::Values(std::vector({20, 10, 10, 10})), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ReluShapeOfSubgraphTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_Check, + ReluShapeOfSubgraphTest, + ::testing::Combine(::testing::ValuesIn(input_types), + ::testing::Values(ov::element::i64), + ::testing::Values(ov::Shape{20, 10, 10, 10}), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ReluShapeOfSubgraphTest::getTestCaseName); } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/reshape_squeeze_reshape_relu.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/reshape_squeeze_reshape_relu.cpp index 79743431999170..fa82dc5baa7f1e 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/reshape_squeeze_reshape_relu.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/reshape_squeeze_reshape_relu.cpp @@ -2,47 +2,46 @@ // SPDX-License-Identifier: Apache-2.0 // -#include #include "subgraph_tests/reshape_squeeze_reshape_relu.hpp" -#include "common_test_utils/test_constants.hpp" -using namespace SubgraphTestsDefinitions; +#include + +using namespace ov::test; namespace { - std::vector inputs{ - {{1, 1, 3}, {0, 1}}, - {{1, 1, 3}, {0}}, - {{1, 1, 3}, {1}}, - {{1, 3, 1}, {0, 2}}, - {{1, 3, 1}, {0}}, - {{1, 3, 1}, {2}}, - {{3, 1, 1}, {1, 2}}, - {{3, 1, 1}, {1}}, - {{3, 1, 1}, {2}}, - {{4, 1, 3, 1}, {1, 3}}, - {{4, 1, 
1, 3}, {1, 2}}, - {{1, 4, 1, 3}, {0, 2}}, - {{1, 3, 5, 2, 1}, {0, 4}}, - {{3, 1, 2, 4, 4, 3}, {1}}, - {{1, 1, 1, 1, 1, 3}, {0, 1, 2, 3, 4}}, - {{1, 1, 1, 1, 1, 3}, {1, 3}}, - {{1}, {0}}, - }; +std::vector inputs{ + {{1, 1, 3}, {0, 1}}, + {{1, 1, 3}, {0}}, + {{1, 1, 3}, {1}}, + {{1, 3, 1}, {0, 2}}, + {{1, 3, 1}, {0}}, + {{1, 3, 1}, {2}}, + {{3, 1, 1}, {1, 2}}, + {{3, 1, 1}, {1}}, + {{3, 1, 1}, {2}}, + {{4, 1, 3, 1}, {1, 3}}, + {{4, 1, 1, 3}, {1, 2}}, + {{1, 4, 1, 3}, {0, 2}}, + {{1, 3, 5, 2, 1}, {0, 4}}, + {{3, 1, 2, 4, 4, 3}, {1}}, + {{1, 1, 1, 1, 1, 3}, {0, 1, 2, 3, 4}}, + {{1, 1, 1, 1, 1, 3}, {1, 3}}, + {{1}, {0}}, +}; - std::vector netPrecisions = {InferenceEngine::Precision::FP32, - InferenceEngine::Precision::FP16, - }; +std::vector input_types = { + ov::element::f32, + ov::element::f16, +}; - const std::vector opTypes = { - ngraph::helpers::SqueezeOpType::SQUEEZE, - ngraph::helpers::SqueezeOpType::UNSQUEEZE - }; +const std::vector opTypes = {ov::test::utils::SqueezeOpType::SQUEEZE, + ov::test::utils::SqueezeOpType::UNSQUEEZE}; - INSTANTIATE_TEST_SUITE_P(smoke_reshape_squeeze_reshape_relu, ReshapeSqueezeReshapeRelu, - ::testing::Combine( - ::testing::ValuesIn(inputs), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(opTypes)), - ReshapeSqueezeReshapeRelu::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_reshape_squeeze_reshape_relu, + ReshapeSqueezeReshapeRelu, + ::testing::Combine(::testing::ValuesIn(inputs), + ::testing::ValuesIn(input_types), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::ValuesIn(opTypes)), + ReshapeSqueezeReshapeRelu::getTestCaseName); } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/split_conv_concat.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/split_conv_concat.cpp index 8e347ad75c31d4..1d60e68187709e 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/split_conv_concat.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/split_conv_concat.cpp @@ -2,28 +2,20 @@ // SPDX-License-Identifier: Apache-2.0 // -#include - #include "subgraph_tests/split_conv_concat.hpp" -#include "common_test_utils/test_constants.hpp" -using namespace SubgraphTestsDefinitions; +#include + +using namespace ov::test; namespace { -const std::vector netPrecisions = { - InferenceEngine::Precision::FP32, - InferenceEngine::Precision::FP16 -}; +const std::vector input_types = {ov::element::f32, ov::element::f16}; -INSTANTIATE_TEST_SUITE_P(smoke_NoReshape, SplitConvConcat, - ::testing::Combine( - ::testing::ValuesIn(netPrecisions), - ::testing::Values(InferenceEngine::SizeVector({1, 6, 40, 40})), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - SplitConvConcat::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_NoReshape, + SplitConvConcat, + ::testing::Combine(::testing::ValuesIn(input_types), + ::testing::Values(ov::Shape{1, 6, 40, 40}), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + SplitConvConcat::getTestCaseName); } // namespace - - - - diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/variadic_split_pad.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/variadic_split_pad.cpp index 7386a46ef3868a..27d63ad27ff852 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/variadic_split_pad.cpp +++ 
b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/variadic_split_pad.cpp @@ -4,60 +4,52 @@ #include "subgraph_tests/variadic_split_pad.hpp" -using namespace SubgraphTestsDefinitions; +using namespace ov::test; namespace { -const std::vector netPrecision = { - InferenceEngine::Precision::FP32 -}; +const std::vector input_type = {ov::element::f32}; -const std::vector shapes = { - {1, 8, 3, 2}, - {3, 8, 8, 8}, +const std::vector shapes = { + {1, 8, 3, 2}, + {3, 8, 8, 8}, }; const std::vector> connectedIndexes = { - {0}, - {0, 2}, - {0, 1, 3}, - {0, 1, 1, 0}, - {0, 0, 0, 1}, + {0}, + {0, 2}, + {0, 1, 3}, + {0, 1, 1, 0}, + {0, 0, 0, 1}, }; -const std::vector> numSplits = { - {2, 2, 2, 2}, - {1, 2, 4, 1}, - {3, 2, 2, 1} -}; +const std::vector> numSplits = {{2, 2, 2, 2}, {1, 2, 4, 1}, {3, 2, 2, 1}}; const std::vector> padsBegin = { - {0, 0, 0, 0}, - {0, 0, 1, 1}, + {0, 0, 0, 0}, + {0, 0, 1, 1}, }; const std::vector> padsEnd = { - {0, 0, 0, 0}, - {0, 0, 1, 1}, -}; - -const std::vector padMode = { - ngraph::helpers::PadMode::CONSTANT, - ngraph::helpers::PadMode::EDGE, - ngraph::helpers::PadMode::REFLECT, - ngraph::helpers::PadMode::SYMMETRIC + {0, 0, 0, 0}, + {0, 0, 1, 1}, }; -INSTANTIATE_TEST_SUITE_P(smoke_CPU, VariadicSplitPad, - ::testing::Combine( - ::testing::ValuesIn(shapes), - ::testing::Values(1), - ::testing::ValuesIn(numSplits), - ::testing::ValuesIn(connectedIndexes), - ::testing::ValuesIn(padsBegin), - ::testing::ValuesIn(padsEnd), - ::testing::ValuesIn(padMode), - ::testing::ValuesIn(netPrecision), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - VariadicSplitPad::getTestCaseName); +const std::vector padMode = {ov::op::PadMode::CONSTANT, + ov::op::PadMode::EDGE, + ov::op::PadMode::REFLECT, + ov::op::PadMode::SYMMETRIC}; + +INSTANTIATE_TEST_SUITE_P(smoke_CPU, + VariadicSplitPad, + ::testing::Combine(::testing::ValuesIn(shapes), + ::testing::Values(1), + ::testing::ValuesIn(numSplits), + ::testing::ValuesIn(connectedIndexes), + ::testing::ValuesIn(padsBegin), + ::testing::ValuesIn(padsEnd), + ::testing::ValuesIn(padMode), + ::testing::ValuesIn(input_type), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + VariadicSplitPad::getTestCaseName); } // namespace diff --git a/src/plugins/intel_gna/tests/functional/shared_tests_instances/subgraph_tests/reshape_squeeze_reshape_relu.cpp b/src/plugins/intel_gna/tests/functional/shared_tests_instances/subgraph_tests/reshape_squeeze_reshape_relu.cpp deleted file mode 100644 index f702ba9b3ec5f7..00000000000000 --- a/src/plugins/intel_gna/tests/functional/shared_tests_instances/subgraph_tests/reshape_squeeze_reshape_relu.cpp +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "subgraph_tests/reshape_squeeze_reshape_relu.hpp" - -#include - -#include "common_test_utils/test_constants.hpp" - -using namespace SubgraphTestsDefinitions; - -namespace { -std::vector inputs{ - {{1, 1, 3}, {0, 1}}, - {{1, 1, 3}, {0}}, - {{1, 1, 3}, {1}}, - {{1, 3, 1}, {0, 2}}, - {{1, 3, 1}, {0}}, - {{1, 3, 1}, {2}}, - {{3, 1, 1}, {1, 2}}, - {{3, 1, 1}, {1}}, - {{3, 1, 1}, {2}}, - {{4, 1, 3, 1}, {1, 3}}, - {{4, 1, 1, 3}, {1, 2}}, - {{1, 4, 1, 3}, {0, 2}}, - {{1, 3, 5, 2, 1}, {0, 4}}, - {{3, 1, 2, 4, 4, 3}, {1}}, - {{1, 1, 1, 1, 1, 3}, {0, 1, 2, 3, 4}}, - {{1, 1, 1, 1, 1, 3}, {1, 3}}, - {{1}, {0}}, -}; - -std::vector netPrecisions = { - InferenceEngine::Precision::FP32, - InferenceEngine::Precision::FP16, -}; - -const std::vector opTypes = 
{ngraph::helpers::SqueezeOpType::SQUEEZE, - ngraph::helpers::SqueezeOpType::UNSQUEEZE}; - -INSTANTIATE_TEST_SUITE_P(smoke_reshape_squeeze_reshape_relu, - ReshapeSqueezeReshapeRelu, - ::testing::Combine(::testing::ValuesIn(inputs), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_GNA), - ::testing::ValuesIn(opTypes)), - ReshapeSqueezeReshapeRelu::getTestCaseName); -} // namespace diff --git a/src/plugins/intel_gna/tests/functional/shared_tests_instances/subgraph_tests/split_conv_concat.cpp b/src/plugins/intel_gna/tests/functional/shared_tests_instances/subgraph_tests/split_conv_concat.cpp deleted file mode 100644 index 1d9cea34c783ac..00000000000000 --- a/src/plugins/intel_gna/tests/functional/shared_tests_instances/subgraph_tests/split_conv_concat.cpp +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "subgraph_tests/split_conv_concat.hpp" - -#include - -#include "common_test_utils/test_constants.hpp" - -using namespace SubgraphTestsDefinitions; -const std::vector netPrecisions = {InferenceEngine::Precision::FP32, - InferenceEngine::Precision::FP16}; - -std::vector> inputShapes = {{1, 32, 1, 130}, {1, 64, 1, 170}, {1, 32, 1, 1026}}; - -INSTANTIATE_TEST_SUITE_P(smoke_SplitConvConcat, - SplitConvConcat, - ::testing::Combine(::testing::ValuesIn(netPrecisions), - ::testing::ValuesIn(inputShapes), - ::testing::Values(ov::test::utils::DEVICE_GNA)), - SplitConvConcat::getTestCaseName); diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/reshape_squeeze_reshape_relu.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/reshape_squeeze_reshape_relu.cpp index 872140ceea815f..78cef86f3b0c09 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/reshape_squeeze_reshape_relu.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/reshape_squeeze_reshape_relu.cpp @@ -2,48 +2,48 @@ // SPDX-License-Identifier: Apache-2.0 // -#include #include "subgraph_tests/reshape_squeeze_reshape_relu.hpp" + +#include + #include "common_test_utils/test_constants.hpp" -using namespace SubgraphTestsDefinitions; +using namespace ov::test; namespace { - std::vector inputs_squeeze { - {{1, 1, 3}, {0, 1}}, - {{1, 1, 3}, {1}}, - {{1, 3, 1}, {0, 2}}, - {{3, 1, 1}, {1}}, - {{1, 4, 1, 3}, {0, 2}}, - {{3, 1, 2, 4, 4, 3}, {1}}, - {{1, 1, 1, 1, 1, 3}, {0, 1, 2, 3, 4}}, - {{1}, {0}}, - }; - - std::vector inputs_unsqueeze{ - {{1}, {0}}, - {{1}, {0, 1}}, - {{1}, {0, 1, 2}}, - {{1, 2, 3}, {0}}, - {{1, 1, 3}, {1, 2}}, - {{1, 4, 1, 3}, {0, 2}}, - }; - - std::vector netPrecisions = {InferenceEngine::Precision::FP32, - InferenceEngine::Precision::FP16, - }; - - - const std::vector opTypes = { - ngraph::helpers::SqueezeOpType::SQUEEZE, - ngraph::helpers::SqueezeOpType::UNSQUEEZE - }; - - INSTANTIATE_TEST_SUITE_P(smoke_reshape_squeeze_reshape_relu, ReshapeSqueezeReshapeRelu, - ::testing::Combine( - ::testing::ValuesIn(inputs_squeeze), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_GPU), - ::testing::ValuesIn(opTypes)), - ReshapeSqueezeReshapeRelu::getTestCaseName); +std::vector inputs_squeeze{ + {{1, 1, 3}, {0, 1}}, + {{1, 1, 3}, {1}}, + {{1, 3, 1}, {0, 2}}, + {{3, 1, 1}, {1}}, + {{1, 4, 1, 3}, {0, 2}}, + {{3, 1, 2, 4, 4, 3}, {1}}, + {{1, 1, 1, 1, 1, 3}, {0, 1, 2, 3, 4}}, + {{1}, {0}}, +}; + +std::vector inputs_unsqueeze{ + {{1}, {0}}, + {{1}, {0, 1}}, + {{1}, {0, 
1, 2}}, + {{1, 2, 3}, {0}}, + {{1, 1, 3}, {1, 2}}, + {{1, 4, 1, 3}, {0, 2}}, +}; + +std::vector input_types = { + ov::element::f32, + ov::element::f16, +}; + +const std::vector opTypes = {ov::test::utils::SqueezeOpType::SQUEEZE, + ov::test::utils::SqueezeOpType::UNSQUEEZE}; + +INSTANTIATE_TEST_SUITE_P(smoke_reshape_squeeze_reshape_relu, + ReshapeSqueezeReshapeRelu, + ::testing::Combine(::testing::ValuesIn(inputs_squeeze), + ::testing::ValuesIn(input_types), + ::testing::Values(ov::test::utils::DEVICE_GPU), + ::testing::ValuesIn(opTypes)), + ReshapeSqueezeReshapeRelu::getTestCaseName); } // namespace diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/split_conv_concat.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/split_conv_concat.cpp index 3d5a1af905130e..5857c2188cfdb2 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/split_conv_concat.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/split_conv_concat.cpp @@ -2,26 +2,21 @@ // SPDX-License-Identifier: Apache-2.0 // -#include - #include "subgraph_tests/split_conv_concat.hpp" -#include "common_test_utils/test_constants.hpp" -using namespace SubgraphTestsDefinitions; +#include + +using namespace ov::test; namespace { -const std::vector netPrecisions = { - InferenceEngine::Precision::FP32, - InferenceEngine::Precision::FP16 -}; +const std::vector input_types = {ov::element::f32, ov::element::f16}; -INSTANTIATE_TEST_SUITE_P(smoke_NoReshape, SplitConvConcat, - ::testing::Combine( - ::testing::ValuesIn(netPrecisions), - ::testing::Values(std::vector({1, 6, 40, 40})), - ::testing::Values(ov::test::utils::DEVICE_GPU)), - SplitConvConcat::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_NoReshape, + SplitConvConcat, + ::testing::Combine(::testing::ValuesIn(input_types), + ::testing::Values(ov::Shape{1, 6, 40, 40}), + ::testing::Values(ov::test::utils::DEVICE_GPU)), + SplitConvConcat::getTestCaseName); } // namespace - diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/range_add.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/range_add.hpp index 197fe81621e5f8..ce16e5850744a5 100644 --- a/src/tests/functional/plugin/shared/include/subgraph_tests/range_add.hpp +++ b/src/tests/functional/plugin/shared/include/subgraph_tests/range_add.hpp @@ -6,14 +6,16 @@ #include "shared_test_classes/subgraph/range_add.hpp" -namespace SubgraphTestsDefinitions { +namespace ov { +namespace test { TEST_P(RangeAddSubgraphTest, CompareWithRefs) { - Run(); + run(); } TEST_P(RangeNumpyAddSubgraphTest, CompareWithRefs) { - Run(); + run(); } -} // namespace SubgraphTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/relu_shape_of.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/relu_shape_of.hpp index b2391ef6c04dd8..e26e8837f80279 100644 --- a/src/tests/functional/plugin/shared/include/subgraph_tests/relu_shape_of.hpp +++ b/src/tests/functional/plugin/shared/include/subgraph_tests/relu_shape_of.hpp @@ -6,10 +6,12 @@ #include "shared_test_classes/subgraph/relu_shape_of.hpp" -namespace SubgraphTestsDefinitions { +namespace ov { +namespace test { TEST_P(ReluShapeOfSubgraphTest, CompareWithRefs) { - Run(); + run(); } -} // namespace SubgraphTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/reshape_squeeze_reshape_relu.hpp 
b/src/tests/functional/plugin/shared/include/subgraph_tests/reshape_squeeze_reshape_relu.hpp index 331ee30f7e515d..7b1c841d7d17b7 100644 --- a/src/tests/functional/plugin/shared/include/subgraph_tests/reshape_squeeze_reshape_relu.hpp +++ b/src/tests/functional/plugin/shared/include/subgraph_tests/reshape_squeeze_reshape_relu.hpp @@ -6,10 +6,12 @@ #include "shared_test_classes/subgraph/reshape_squeeze_reshape_relu.hpp" -namespace SubgraphTestsDefinitions { +namespace ov { +namespace test { -TEST_P(ReshapeSqueezeReshapeRelu, CompareWithRefs){ - Run(); +TEST_P(ReshapeSqueezeReshapeRelu, CompareWithRefs) { + run(); }; -} // namespace SubgraphTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/split_conv_concat.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/split_conv_concat.hpp index c121683977f898..5fd61dd48f738a 100644 --- a/src/tests/functional/plugin/shared/include/subgraph_tests/split_conv_concat.hpp +++ b/src/tests/functional/plugin/shared/include/subgraph_tests/split_conv_concat.hpp @@ -6,14 +6,17 @@ #include "shared_test_classes/subgraph/split_conv_concat.hpp" -namespace SubgraphTestsDefinitions { +namespace ov { +namespace test { TEST_P(SplitConvConcat, CompareWithRefImpl) { - Run(); + run(); }; -TEST_P(SplitConvConcat, QueryNetwork) { - QueryNetwork(); +TEST_P(SplitConvConcat, QueryModel) { + query_model(); } -} // namespace SubgraphTestsDefinitions +} // namespace test +} // namespace ov + diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/variadic_split_pad.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/variadic_split_pad.hpp index 9b9d4fb77839ea..8f1eec6cbf6214 100644 --- a/src/tests/functional/plugin/shared/include/subgraph_tests/variadic_split_pad.hpp +++ b/src/tests/functional/plugin/shared/include/subgraph_tests/variadic_split_pad.hpp @@ -6,10 +6,12 @@ #include "shared_test_classes/subgraph/variadic_split_pad.hpp" -namespace SubgraphTestsDefinitions { +namespace ov { +namespace test { -TEST_P(VariadicSplitPad, CompareWithRefs){ - Run(); +TEST_P(VariadicSplitPad, CompareWithRefs) { + run(); }; -} // namespace SubgraphTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/base/ov_subgraph.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/base/ov_subgraph.hpp index 05867b81d67c8c..5ca0b6531a39f3 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/base/ov_subgraph.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/base/ov_subgraph.hpp @@ -20,6 +20,12 @@ using ElementType = ov::element::Type_t; using Config = ov::AnyMap; using TargetDevice = std::string; +typedef std::tuple + BasicParams; + class SubgraphBaseTest : public ov::test::TestsCommon { public: virtual void run(); diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/range_add.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/range_add.hpp index 18fe4775ebbea7..6139db3244fc83 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/range_add.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/range_add.hpp @@ -4,36 +4,45 @@ #pragma once -#include +#include #include +#include #include -#include -#include "shared_test_classes/base/layer_test_utils.hpp" -#include "ov_models/builders.hpp" 
+#include "shared_test_classes/base/ov_subgraph.hpp" -#include "shared_test_classes/single_layer/range.hpp" +namespace ov { +namespace test { -namespace SubgraphTestsDefinitions { +typedef std::tuple + RangeParams; // ------------------------------ V0 ------------------------------ -class RangeAddSubgraphTest : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { +class RangeAddSubgraphTest : public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseStaticTest { public: - static std::string getTestCaseName(const testing::TestParamInfo& obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); + protected: void SetUp() override; }; // ------------------------------ V4 ------------------------------ -class RangeNumpyAddSubgraphTest : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { +class RangeNumpyAddSubgraphTest : public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseStaticTest { public: - static std::string getTestCaseName(const testing::TestParamInfo& obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); + protected: void SetUp() override; }; -} // namespace SubgraphTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/relu_shape_of.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/relu_shape_of.hpp index c84f0e2d8292e5..6127de67d74ab1 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/relu_shape_of.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/relu_shape_of.hpp @@ -4,23 +4,26 @@ #pragma once -#include -#include -#include -#include +#include "shared_test_classes/base/ov_subgraph.hpp" -#include "shared_test_classes/base/layer_test_utils.hpp" -#include "shared_test_classes/single_layer/shape_of.hpp" +namespace ov { +namespace test { -#include "ov_models/builders.hpp" +typedef std::tuple + ShapeOfParams; -namespace SubgraphTestsDefinitions { - -class ReluShapeOfSubgraphTest : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { +class ReluShapeOfSubgraphTest : public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseStaticTest { public: - static std::string getTestCaseName(const testing::TestParamInfo& obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); + protected: void SetUp() override; }; -} // namespace SubgraphTestsDefinitions + +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/reshape_squeeze_reshape_relu.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/reshape_squeeze_reshape_relu.hpp index eb4811e08cf353..b2c69f96d946c0 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/reshape_squeeze_reshape_relu.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/reshape_squeeze_reshape_relu.hpp @@ -4,30 +4,32 @@ #pragma once -#include #include +#include #include -#include -#include "shared_test_classes/base/layer_test_utils.hpp" -#include "ov_models/builders.hpp" -#include "ov_models/utils/ov_helpers.hpp" - -namespace SubgraphTestsDefinitions { -using ShapeAxesTuple = std::pair, std::vector>; - -using ReshapeSqueezeReshapeReluTuple = typename std::tuple< - 
ShapeAxesTuple, // Input shapes & squeeze_indices - InferenceEngine::Precision, // Network precision - std::string, // Device name - ngraph::helpers::SqueezeOpType // SqueezeOpType ->; - -class ReshapeSqueezeReshapeRelu - : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { + +#include "common_test_utils/test_enums.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" + +namespace ov { +namespace test { + +using ShapeAxesTuple = std::pair>; + +using ReshapeSqueezeReshapeReluTuple = typename std::tuple; + +class ReshapeSqueezeReshapeRelu : public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseStaticTest { public: - static std::string getTestCaseName(const testing::TestParamInfo &obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); + protected: void SetUp() override; }; -} // namespace SubgraphTestsDefinitions + +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/split_conv_concat.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/split_conv_concat.hpp index a02822a50b9374..d74865a6bb0c6b 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/split_conv_concat.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/split_conv_concat.hpp @@ -4,19 +4,37 @@ #pragma once +#include +#include #include #include -#include -#include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ov_models/utils/ov_helpers.hpp" -#include "ov_models/builders.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" + +namespace ov { +namespace test { + +class SplitConvConcatBase : public ov::test::SubgraphBaseStaticTest { +protected: + void configure_test(const ov::test::BasicParams& param); +}; + +class SplitConvConcat : public testing::WithParamInterface, virtual public SplitConvConcatBase { +public: + static std::string getTestCaseName(const testing::TestParamInfo& obj); + +protected: + void SetUp() override; +}; + +} // namespace test +} // namespace ov namespace SubgraphTestsDefinitions { class SplitConvConcat : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { + virtual public ov::test::SplitConvConcatBase { public: static std::string getTestCaseName(const testing::TestParamInfo& obj); diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/variadic_split_pad.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/variadic_split_pad.hpp index 6cfbf94286902d..d9744f8b2ab8cc 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/variadic_split_pad.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/variadic_split_pad.hpp @@ -4,33 +4,35 @@ #pragma once -#include #include +#include #include -#include "shared_test_classes/base/layer_test_utils.hpp" -#include "ov_models/builders.hpp" - -namespace SubgraphTestsDefinitions { +#include "shared_test_classes/base/ov_subgraph.hpp" -typedef std::tuple< - InferenceEngine::SizeVector, // Input shapes - size_t, // Axis - std::vector, // Split number - std::vector, // Index connected layer - std::vector, // Pad begin - std::vector, // Pad end - ngraph::helpers::PadMode, // Pad mode - InferenceEngine::Precision, // Network precision - std::string // Device name -> SplitPadTuple; +namespace ov { +namespace test { +typedef 
std::tuple, // Split number + std::vector, // Index connected layer + std::vector, // Pad begin + std::vector, // Pad end + ov::op::PadMode, // Pad mode + ov::element::Type, // Input element type + std::string // Device name + > + SplitPadTuple; -class VariadicSplitPad: public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon{ +class VariadicSplitPad : public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseStaticTest { public: - static std::string getTestCaseName(const testing::TestParamInfo &obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); + protected: void SetUp() override; }; -} // namespace SubgraphTestsDefinitions + +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/src/subgraph/range_add.cpp b/src/tests/functional/shared_test_classes/src/subgraph/range_add.cpp index 86cfab9864b6cf..129b19667baca8 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/range_add.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/range_add.cpp @@ -4,84 +4,79 @@ #include "shared_test_classes/subgraph/range_add.hpp" -namespace SubgraphTestsDefinitions { +#include "ov_models/builders.hpp" + +namespace ov { +namespace test { // ------------------------------ V0 ------------------------------ -std::string RangeAddSubgraphTest::getTestCaseName(const testing::TestParamInfo& obj) { - InferenceEngine::Precision netPrecision; - InferenceEngine::Precision inPrc, outPrc; - InferenceEngine::Layout inLayout, outLayout; +std::string RangeAddSubgraphTest::getTestCaseName(const testing::TestParamInfo& obj) { + ov::element::Type input_type; float start, stop, step; std::string targetDevice; - std::tie(start, stop, step, netPrecision, inPrc, outPrc, inLayout, outLayout, targetDevice) = obj.param; + std::tie(start, stop, step, input_type, targetDevice) = obj.param; std::ostringstream result; const char separator = '_'; result << "Start=" << start << separator; result << "Stop=" << stop << separator; result << "Step=" << step << separator; - result << "netPRC=" << netPrecision.name() << separator; + result << "ET=" << input_type << separator; result << "targetDevice=" << targetDevice; return result.str(); } void RangeAddSubgraphTest::SetUp() { - InferenceEngine::Precision netPrecision; + ov::element::Type element_type; float start, stop, step; - std::tie(start, stop, step, netPrecision, inPrc, outPrc, inLayout, outLayout, targetDevice) = GetParam(); - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - - auto startConstant = std::make_shared(ngPrc, ngraph::Shape{}, start); - auto stopConstant = std::make_shared(ngPrc, ngraph::Shape{}, stop); - auto stepConstant = std::make_shared(ngPrc, ngraph::Shape{}, step); - auto range = std::make_shared(startConstant, stopConstant, stepConstant); - - ov::ParameterVector params{std::make_shared(ngPrc, range->get_shape())}; - auto eltwise = ngraph::builder::makeEltwise(params.front(), range, ngraph::helpers::EltwiseTypes::ADD); - const ngraph::ResultVector results{std::make_shared(eltwise)}; - function = std::make_shared(results, params, "RangeEltwise"); + std::tie(start, stop, step, element_type, targetDevice) = GetParam(); + + auto startConstant = std::make_shared(element_type, ov::Shape{}, start); + auto stopConstant = std::make_shared(element_type, ov::Shape{}, stop); + auto stepConstant = std::make_shared(element_type, ov::Shape{}, step); + auto range = std::make_shared(startConstant, stopConstant, 
stepConstant); + + ov::ParameterVector params{std::make_shared(element_type, range->get_shape())}; + auto eltwise = ngraph::builder::makeEltwise(params.front(), range, ov::test::utils::EltwiseTypes::ADD); + const ov::ResultVector results{std::make_shared(eltwise)}; + function = std::make_shared(results, params, "RangeEltwise"); } // ------------------------------ V4 ------------------------------ -std::string RangeNumpyAddSubgraphTest::getTestCaseName(const testing::TestParamInfo& obj) { - InferenceEngine::Precision netPrc; - InferenceEngine::Precision constPrc; - InferenceEngine::Precision outPrc; - InferenceEngine::Layout inLayout, outLayout; +std::string RangeNumpyAddSubgraphTest::getTestCaseName(const testing::TestParamInfo& obj) { + ov::element::Type element_type; float start, stop, step; std::string targetDevice; - std::tie(start, stop, step, constPrc, netPrc, outPrc, inLayout, outLayout, targetDevice) = obj.param; + std::tie(start, stop, step, element_type, targetDevice) = obj.param; std::ostringstream result; const char separator = '_'; result << "Start=" << start << separator; result << "Stop=" << stop << separator; result << "Step=" << step << separator; - result << "constPRC=" << constPrc.name() << separator; - result << "netPRC=" << netPrc.name() << separator; + result << "ET=" << element_type << separator; result << "targetDevice=" << targetDevice; return result.str(); } void RangeNumpyAddSubgraphTest::SetUp() { - InferenceEngine::Precision netPrc; - InferenceEngine::Precision constPrc; + ov::element::Type element_type; float start, stop, step; - std::tie(start, stop, step, constPrc, netPrc, outPrc, inLayout, outLayout, targetDevice) = GetParam(); - auto ngConstPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(constPrc); - auto ngNetPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrc); + std::tie(start, stop, step, element_type, targetDevice) = GetParam(); - auto startConstant = std::make_shared(ngConstPrc, ngraph::Shape{}, start); - auto stopConstant = std::make_shared(ngConstPrc, ngraph::Shape{}, stop); - auto stepConstant = std::make_shared(ngConstPrc, ngraph::Shape{}, step); - auto range = std::make_shared(startConstant, stopConstant, stepConstant, ngNetPrc); + auto startConstant = std::make_shared(element_type, ov::Shape{}, start); + auto stopConstant = std::make_shared(element_type, ov::Shape{}, stop); + auto stepConstant = std::make_shared(element_type, ov::Shape{}, step); + auto range = std::make_shared(startConstant, stopConstant, stepConstant, element_type); - ov::ParameterVector params{std::make_shared(ngNetPrc, range->get_shape())}; + ov::ParameterVector params{std::make_shared(element_type, range->get_shape())}; - auto eltwise = ngraph::builder::makeEltwise(params.front(), range, ngraph::helpers::EltwiseTypes::ADD); - const ngraph::ResultVector results{std::make_shared(eltwise)}; - function = std::make_shared(results, params, "RangeEltwise"); + auto eltwise = ngraph::builder::makeEltwise(params.front(), range, ov::test::utils::EltwiseTypes::ADD); + const ov::ResultVector results{std::make_shared(eltwise)}; + function = std::make_shared(results, params, "RangeEltwise"); } -} // namespace SubgraphTestsDefinitions + +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/src/subgraph/relu_shape_of.cpp b/src/tests/functional/shared_test_classes/src/subgraph/relu_shape_of.cpp index b23f32f4d5b99f..bbb78339d04e6f 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/relu_shape_of.cpp +++ 
b/src/tests/functional/shared_test_classes/src/subgraph/relu_shape_of.cpp @@ -4,29 +4,32 @@ #include "shared_test_classes/subgraph/relu_shape_of.hpp" -namespace SubgraphTestsDefinitions { +namespace ov { +namespace test { - std::string ReluShapeOfSubgraphTest::getTestCaseName(const testing::TestParamInfo& obj) { - InferenceEngine::SizeVector inputShapes; - InferenceEngine::Precision inputPrecision; - std::string targetDevice; - std::tie(inputPrecision, inputShapes, targetDevice) = obj.param; - std::ostringstream result; - result << "IS=" << ov::test::utils::vec2str(inputShapes) << "_"; - result << "Precision=" << inputPrecision.name() << "_"; - result << "TargetDevice=" << targetDevice; - return result.str(); - } +std::string ReluShapeOfSubgraphTest::getTestCaseName(const testing::TestParamInfo& obj) { + ov::Shape inputShapes; + ov::element::Type element_type, output_type; + std::string targetDevice; + std::tie(element_type, output_type, inputShapes, targetDevice) = obj.param; + std::ostringstream result; + result << "IS=" << ov::test::utils::vec2str(inputShapes) << "_"; + result << "IET=" << element_type << "_"; + result << "OET=" << output_type << "_"; + result << "TargetDevice=" << targetDevice; + return result.str(); +} - void ReluShapeOfSubgraphTest::SetUp() { - InferenceEngine::SizeVector inputShapes; - InferenceEngine::Precision inputPrecision; - std::tie(inputPrecision, inputShapes, targetDevice) = this->GetParam(); - auto inType = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecision); - ov::ParameterVector param {std::make_shared(inType, ov::Shape(inputShapes))}; - auto relu = std::make_shared(param[0]); - auto shapeOf = std::make_shared(relu, inType); - const ngraph::ResultVector results{std::make_shared(shapeOf)}; - function = std::make_shared(results, param, "ReluShapeOf"); - } -} // namespace SubgraphTestsDefinitions +void ReluShapeOfSubgraphTest::SetUp() { + ov::Shape inputShapes; + ov::element::Type element_type, output_type; + std::tie(element_type, output_type, inputShapes, targetDevice) = this->GetParam(); + ov::ParameterVector param{std::make_shared(element_type, ov::Shape(inputShapes))}; + auto relu = std::make_shared(param[0]); + auto shapeOf = std::make_shared(relu, output_type); + const ov::ResultVector results{std::make_shared(shapeOf)}; + function = std::make_shared(results, param, "ReluShapeOf"); +} + +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/src/subgraph/reshape_squeeze_reshape_relu.cpp b/src/tests/functional/shared_test_classes/src/subgraph/reshape_squeeze_reshape_relu.cpp index 098f1d12a5ed89..1d0c680a55408e 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/reshape_squeeze_reshape_relu.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/reshape_squeeze_reshape_relu.cpp @@ -2,46 +2,50 @@ // SPDX-License-Identifier: Apache-2.0 // -#include #include "shared_test_classes/subgraph/reshape_squeeze_reshape_relu.hpp" -namespace SubgraphTestsDefinitions { - std::string ReshapeSqueezeReshapeRelu::getTestCaseName(const testing::TestParamInfo &obj) { - ShapeAxesTuple squeezeShape; - InferenceEngine::Precision netPrecision; - std::string targetName; - ngraph::helpers::SqueezeOpType opType; - std::tie(squeezeShape, netPrecision, targetName, opType) = obj.param; - std::ostringstream results; - results << "OpType=" << opType; - results << "IS=" << ov::test::utils::vec2str(squeezeShape.first) << "_"; - results << "indices=" << ov::test::utils::vec2str(squeezeShape.second) << "_"; - 
results << "netPRC=" << netPrecision.name() << "_"; - results << "targetDevice=" << targetName << "_"; - return results.str(); - } +#include "ov_models/builders.hpp" - void ReshapeSqueezeReshapeRelu::SetUp() { - ShapeAxesTuple squeezeShape; - InferenceEngine::Precision netPrecision; - ngraph::helpers::SqueezeOpType opType; - std::tie(squeezeShape, netPrecision, targetDevice, opType) = this->GetParam(); - const std::size_t input_dim = InferenceEngine::details::product(squeezeShape.first); - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - std::vector shape_input{1, input_dim}; - ov::ParameterVector input {std::make_shared(ngPrc, ov::Shape(shape_input))}; - auto reshape1_pattern = std::make_shared(ngraph::element::i64, - ngraph::Shape{squeezeShape.first.size()}, - squeezeShape.first); - auto reshape1 = std::make_shared(input[0], reshape1_pattern, false); - auto squeeze = ngraph::builder::makeSqueezeUnsqueeze(reshape1, ngraph::element::i64, squeezeShape.second, opType); - auto reshape2_pattern = std::make_shared(ngraph::element::i64, - ngraph::Shape{2}, - std::vector{1, input_dim}); - auto reshape2 = std::make_shared(squeeze, reshape2_pattern, false); - auto func = std::make_shared(reshape2); - std::string squeezeType; +namespace ov { +namespace test { - function = std::make_shared(func, input, "reshape_squeeze_reshape_relu"); - } -} // namespace SubgraphTestsDefinitions +std::string ReshapeSqueezeReshapeRelu::getTestCaseName( + const testing::TestParamInfo& obj) { + ShapeAxesTuple squeezeShape; + ov::element::Type element_type; + std::string targetName; + ov::test::utils::SqueezeOpType opType; + std::tie(squeezeShape, element_type, targetName, opType) = obj.param; + std::ostringstream results; + results << "OpType=" << opType; + results << "IS=" << ov::test::utils::vec2str(squeezeShape.first) << "_"; + results << "indices=" << ov::test::utils::vec2str(squeezeShape.second) << "_"; + results << "netPRC=" << element_type << "_"; + results << "targetDevice=" << targetName << "_"; + return results.str(); +} + +void ReshapeSqueezeReshapeRelu::SetUp() { + ShapeAxesTuple squeezeShape; + ov::element::Type element_type; + ov::test::utils::SqueezeOpType opType; + std::tie(squeezeShape, element_type, targetDevice, opType) = this->GetParam(); + const size_t input_dim = ov::shape_size(squeezeShape.first); + std::vector shape_input{1, input_dim}; + ov::ParameterVector input{std::make_shared(element_type, ov::Shape(shape_input))}; + auto reshape1_pattern = std::make_shared(ov::element::i64, + ov::Shape{squeezeShape.first.size()}, + squeezeShape.first); + auto reshape1 = std::make_shared(input[0], reshape1_pattern, false); + auto squeeze = ngraph::builder::makeSqueezeUnsqueeze(reshape1, ov::element::i64, squeezeShape.second, opType); + auto reshape2_pattern = + std::make_shared(ov::element::i64, ov::Shape{2}, std::vector{1, input_dim}); + auto reshape2 = std::make_shared(squeeze, reshape2_pattern, false); + auto func = std::make_shared(reshape2); + std::string squeezeType; + + function = std::make_shared(func, input, "reshape_squeeze_reshape_relu"); +} + +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/src/subgraph/split_conv_concat.cpp b/src/tests/functional/shared_test_classes/src/subgraph/split_conv_concat.cpp index 60ad615b7567a1..7dc009d0022fff 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/split_conv_concat.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/split_conv_concat.cpp @@ -4,30 +4,40 
@@ #include "shared_test_classes/subgraph/split_conv_concat.hpp" -namespace SubgraphTestsDefinitions { +#include "common_test_utils/data_utils.hpp" +#include "ie_common.h" +#include "ov_models/builders.hpp" +#include "shared_test_classes/base/layer_test_utils.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" -std::string SplitConvConcat::getTestCaseName(const testing::TestParamInfo& obj) { - InferenceEngine::Precision netPrecision; - InferenceEngine::SizeVector inputShapes, newInputShapes; +namespace ov { +namespace test { + +std::string SplitConvConcat::getTestCaseName(const testing::TestParamInfo& obj) { + ov::element::Type element_type; + ov::Shape inputShapes; std::string targetDevice; - std::tie(netPrecision, inputShapes, targetDevice) = obj.param; + std::tie(element_type, inputShapes, targetDevice) = obj.param; std::ostringstream result; result << "IS=" << ov::test::utils::vec2str(inputShapes) << "_"; - result << "netPRC=" << netPrecision.name() << "_"; + result << "ET=" << element_type << "_"; result << "targetDevice=" << targetDevice; return result.str(); } void SplitConvConcat::SetUp() { - std::vector inputShape; - InferenceEngine::Precision netPrecision; - std::tie(netPrecision, inputShape, targetDevice) = this->GetParam(); - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); + configure_test(this->GetParam()); +} + +void SplitConvConcatBase::configure_test(const ov::test::BasicParams& param) { + ov::Shape inputShape; + ov::element::Type element_type; + std::tie(element_type, inputShape, targetDevice) = param; - ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; + ov::ParameterVector params{std::make_shared(element_type, ov::Shape(inputShape))}; - auto split = ngraph::builder::makeSplit(params[0], ngPrc, 2, 1); + auto split = ngraph::builder::makeSplit(params[0], element_type, 2, 1); std::vector filterWeights1; std::vector filterWeights2; @@ -35,17 +45,65 @@ void SplitConvConcat::SetUp() { filterWeights1 = ov::test::utils::generate_float_numbers(8 * inputShape[1] / 2 * 3, -0.2f, 0.2f); filterWeights2 = ov::test::utils::generate_float_numbers(8 * inputShape[1] / 2 * 3, -0.2f, 0.2f); } - auto conv1 = ngraph::builder::makeConvolution(split->output(0), ngPrc, {1, 3}, {1, 1}, {0, 0}, {0, 0}, {1, 1}, - ngraph::op::PadType::VALID, 8, false, filterWeights1); - auto relu1 = std::make_shared(conv1); + auto conv1 = ngraph::builder::makeConvolution(split->output(0), + element_type, + {1, 3}, + {1, 1}, + {0, 0}, + {0, 0}, + {1, 1}, + ov::op::PadType::VALID, + 8, + false, + filterWeights1); + auto relu1 = std::make_shared(conv1); + + auto conv2 = ngraph::builder::makeConvolution(split->output(1), + element_type, + {1, 3}, + {1, 1}, + {0, 0}, + {0, 0}, + {1, 1}, + ov::op::PadType::VALID, + 8, + false, + filterWeights2); + auto relu2 = std::make_shared(conv2); + auto concat = std::make_shared(ov::OutputVector{relu1->output(0), relu2->output(0)}, 1); + + ov::ResultVector results{std::make_shared(concat)}; + function = std::make_shared(results, params, "SplitConvConcat"); +} + +} // namespace test +} // namespace ov + +namespace SubgraphTestsDefinitions { - auto conv2 = ngraph::builder::makeConvolution(split->output(1), ngPrc, {1, 3}, {1, 1}, {0, 0}, {0, 0}, {1, 1}, - ngraph::op::PadType::VALID, 8, false, filterWeights2); - auto relu2 = std::make_shared(conv2); - auto concat = std::make_shared(ngraph::OutputVector{relu1->output(0), relu2->output(0)}, 1); +std::string SplitConvConcat::getTestCaseName(const testing::TestParamInfo& obj) { + 
InferenceEngine::Precision precision; + InferenceEngine::SizeVector inputShapes; + std::string targetDevice; + std::tie(precision, inputShapes, targetDevice) = obj.param; + auto element_type = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(precision); + + std::ostringstream result; + result << "IS=" << ov::test::utils::vec2str(inputShapes) << "_"; + result << "ET=" << element_type << "_"; + result << "targetDevice=" << targetDevice; + return result.str(); +} + +void SplitConvConcat::SetUp() { + InferenceEngine::Precision precision; + InferenceEngine::SizeVector inputShapes; + std::tie(precision, inputShapes, targetDevice) = this->GetParam(); + auto element_type = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(precision); + ov::Shape shape = inputShapes; - ngraph::ResultVector results{std::make_shared(concat)}; - function = std::make_shared(results, params, "SplitConvConcat"); + ov::test::BasicParams param(element_type, shape, targetDevice); + configure_test(param); } } // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/subgraph/variadic_split_pad.cpp b/src/tests/functional/shared_test_classes/src/subgraph/variadic_split_pad.cpp index 13d1c9c542c5cb..8c7906c275e3c1 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/variadic_split_pad.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/variadic_split_pad.cpp @@ -4,48 +4,54 @@ #include "shared_test_classes/subgraph/variadic_split_pad.hpp" -namespace SubgraphTestsDefinitions { +#include "ov_models/builders.hpp" -std::string VariadicSplitPad::getTestCaseName(const testing::TestParamInfo &obj) { - InferenceEngine::SizeVector inputShape; +namespace ov { +namespace test { + +std::string VariadicSplitPad::getTestCaseName(const testing::TestParamInfo& obj) { + ov::Shape input_shape; int64_t axis; std::vector numSplits, connectIndexes; std::vector padsBegin, padsEnd; - ngraph::helpers::PadMode padMode; - InferenceEngine::Precision netPrecision; + ov::op::PadMode padMode; + ov::element::Type element_type; std::string targetName; - std::tie(inputShape, axis, numSplits, connectIndexes, padsBegin, padsEnd, padMode, netPrecision, targetName) = obj.param; + std::tie(input_shape, axis, numSplits, connectIndexes, padsBegin, padsEnd, padMode, element_type, targetName) = + obj.param; std::ostringstream results; - results << "IS=" << ov::test::utils::vec2str(inputShape) << "_"; + results << "IS=" << ov::test::utils::vec2str(input_shape) << "_"; results << "Axis=" << axis << "_"; results << "NumSplits=" << ov::test::utils::vec2str(numSplits) << "_"; results << "ConnectIndexes=" << ov::test::utils::vec2str(connectIndexes) << "_"; results << "padsBegin=" << ov::test::utils::vec2str(padsBegin) << "_"; results << "padsEnd=" << ov::test::utils::vec2str(padsEnd) << "_"; results << "PadMode=" << padMode << "_"; - results << "netPRC=" << netPrecision.name() << "_"; + results << "netPRC=" << element_type << "_"; results << "targetDevice=" << targetName << "_"; return results.str(); } void VariadicSplitPad::SetUp() { - InferenceEngine::SizeVector inputs; + ov::Shape input_shape; int64_t axis; std::vector numSplits, connectIndexes; std::vector padBegin, padEnd; - ngraph::helpers::PadMode padMode; - InferenceEngine::Precision netPrecision; - std::tie(inputs, axis, numSplits, connectIndexes, padBegin, padEnd, padMode, netPrecision, targetDevice) = this->GetParam(); - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector input 
{std::make_shared(ngPrc, ov::Shape(inputs))};
+    ov::op::PadMode padMode;
+    ov::element::Type element_type;
+    std::tie(input_shape, axis, numSplits, connectIndexes, padBegin, padEnd, padMode, element_type, targetDevice) =
+        this->GetParam();
+    ov::ParameterVector input{std::make_shared(element_type, ov::Shape(input_shape))};
     auto split = ngraph::builder::makeVariadicSplit(input[0], numSplits, axis);
-    ngraph::ResultVector results;
+    ov::ResultVector results;
     for (size_t i : connectIndexes) {
         auto pad = ngraph::builder::makePad(split->output(i), padBegin, padEnd, 0, padMode);
-        results.push_back(std::make_shared(pad));
+        results.push_back(std::make_shared(pad));
     }
-    function = std::make_shared(results, input, "variadic_split_pad");
+    function = std::make_shared(results, input, "variadic_split_pad");
 }
-} // namespace SubgraphTestsDefinitions
+
+} // namespace test
+} // namespace ov

From e866bfef429a9508229cfef3f790e820edef7d8f Mon Sep 17 00:00:00 2001
From: Ilya Churaev
Date: Fri, 13 Oct 2023 13:35:13 +0400
Subject: [PATCH 185/257] Migrate subgraph tests till quantized subgraph (#20351)

* Migrate subgraph tests till quantized subgraph
* Fixed code style
* Fixed code style
* Try to fix build
* Remove run method
* Remove GNA test
---
 .../subgraph_tests/mul_conv_fusion.cpp        | 467 +++++++++---------
 .../subgraph_tests/multiply_add.cpp           |  34 +-
 .../subgraph_tests/mvn_multiply_add.cpp       | 126 +++--
 .../subgraph_tests/parameter_result.cpp       |   6 -
 .../subgraph_tests/perm_conv_perm_concat.cpp  |  49 +-
 .../subgraph_tests/perm_conv_perm_concat.cpp  |  45 --
 .../subgraph_tests/multiply_add.cpp           |  35 +-
 .../subgraph_tests/perm_conv_perm_concat.cpp  |  47 +-
 .../subgraph_tests/mul_conv_fusion.hpp        |   9 +-
 .../include/subgraph_tests/multiply_add.hpp   |   8 +-
 .../subgraph_tests/mvn_multiply_add.hpp       |  10 +-
 .../subgraph_tests/perm_conv_perm_concat.hpp  |   8 +-
 .../subgraph/mul_conv_fusion.hpp              |  45 +-
 .../subgraph/multiply_add.hpp                 |  32 +-
 .../subgraph/mvn_multiply_add.hpp             |  39 +-
 .../subgraph/perm_conv_perm_concat.hpp        |  39 +-
 .../src/subgraph/mul_conv_fusion.cpp          | 127 +++--
 .../src/subgraph/multiply_add.cpp             |  46 +-
 .../src/subgraph/mvn_multiply_add.cpp         |  48 +-
 .../src/subgraph/perm_conv_perm_concat.cpp    | 118 +++--
 20 files changed, 655 insertions(+), 683 deletions(-)
 delete mode 100644 src/plugins/intel_gna/tests/functional/shared_tests_instances/subgraph_tests/perm_conv_perm_concat.cpp

diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/mul_conv_fusion.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/mul_conv_fusion.cpp
index 480a538c6c68ad..4ab6ddfbaefdb9 100644
--- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/mul_conv_fusion.cpp
+++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/mul_conv_fusion.cpp
@@ -2,263 +2,268 @@
 // SPDX-License-Identifier: Apache-2.0
 //
+#include "subgraph_tests/mul_conv_fusion.hpp"
+
 #include
-#include "subgraph_tests/mul_conv_fusion.hpp"
 #include "common_test_utils/test_constants.hpp"
-#include
-
-using namespace SubgraphTestsDefinitions;
+using namespace ov::test;
 namespace {
-    const std::vector types{ngraph::element::f32};
+const std::vector types{ov::element::f32};
-    const std::vector const_shapes_fprop_1d{
-        {},
-        {1},
-        {1, 1},
-        {8, 1},
-        {1, 1, 1},
-        {1, 8, 1},
-    };
+const std::vector const_shapes_fprop_1d{
+    {},
+    {1},
+    {1, 1},
+    {8, 1},
+    {1, 1, 1},
+    {1, 8, 1},
+};
-    INSTANTIATE_TEST_SUITE_P(smoke_Convolution_1D, MulConvFusion,
-                             ::testing::Combine(
-
::testing::Values(ngraph::opset8::Convolution::get_type_info_static()), - ::testing::Values(ngraph::Shape{1, 8, 64}), - ::testing::Values(ngraph::Shape{64, 8, 1}), - ::testing::ValuesIn(const_shapes_fprop_1d), - ::testing::ValuesIn(types), - ::testing::Values(false), // Positive test - ::testing::Values(ov::test::utils::DEVICE_CPU)), - MulConvFusion::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_Convolution_1D, + MulConvFusion, + ::testing::Combine(::testing::Values(ov::op::v1::Convolution::get_type_info_static()), + ::testing::Values(ov::Shape{1, 8, 64}), + ::testing::Values(ov::Shape{64, 8, 1}), + ::testing::ValuesIn(const_shapes_fprop_1d), + ::testing::ValuesIn(types), + ::testing::Values(false), // Positive test + ::testing::Values(ov::test::utils::DEVICE_CPU)), + MulConvFusion::getTestCaseName); - const std::vector const_shapes_fprop_2d{ - {}, - {1}, - {1, 1}, - {1, 1, 1}, - {8, 1, 1}, - {1, 1, 1, 1}, - {1, 8, 1, 1}, - }; +const std::vector const_shapes_fprop_2d{ + {}, + {1}, + {1, 1}, + {1, 1, 1}, + {8, 1, 1}, + {1, 1, 1, 1}, + {1, 8, 1, 1}, +}; - INSTANTIATE_TEST_SUITE_P(smoke_Convolution_2D, MulConvFusion, - ::testing::Combine( - ::testing::Values(ngraph::opset8::Convolution::get_type_info_static()), - ::testing::Values(ngraph::Shape{2, 8, 14, 14}), - ::testing::Values(ngraph::Shape{2, 8, 7, 7}), - ::testing::ValuesIn(const_shapes_fprop_2d), - ::testing::ValuesIn(types), - ::testing::Values(false), // Positive test - ::testing::Values(ov::test::utils::DEVICE_CPU)), - MulConvFusion::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_Convolution_2D, + MulConvFusion, + ::testing::Combine(::testing::Values(ov::op::v1::Convolution::get_type_info_static()), + ::testing::Values(ov::Shape{2, 8, 14, 14}), + ::testing::Values(ov::Shape{2, 8, 7, 7}), + ::testing::ValuesIn(const_shapes_fprop_2d), + ::testing::ValuesIn(types), + ::testing::Values(false), // Positive test + ::testing::Values(ov::test::utils::DEVICE_CPU)), + MulConvFusion::getTestCaseName); - const std::vector const_shapes_fprop_2d_kernel_same_as_input{ - {7}, - {1, 7}, - {1, 1, 7}, - {8, 1, 7}, - {1, 1, 1, 7}, - {1, 8, 1, 7}, - {7, 1}, - {1, 7, 1}, - {8, 7, 1}, - {1, 1, 7, 1}, - {1, 8, 7, 1}, - {1, 1, 7, 7}, - {1, 8, 7, 7}, - }; +const std::vector const_shapes_fprop_2d_kernel_same_as_input{ + {7}, + {1, 7}, + {1, 1, 7}, + {8, 1, 7}, + {1, 1, 1, 7}, + {1, 8, 1, 7}, + {7, 1}, + {1, 7, 1}, + {8, 7, 1}, + {1, 1, 7, 1}, + {1, 8, 7, 1}, + {1, 1, 7, 7}, + {1, 8, 7, 7}, +}; - INSTANTIATE_TEST_SUITE_P(smoke_Convolution_2D_kernel_same_as_input, MulConvFusion, - ::testing::Combine( - ::testing::Values(ngraph::opset8::Convolution::get_type_info_static()), - ::testing::Values(ngraph::Shape{2, 8, 7, 7}), - ::testing::Values(ngraph::Shape{3, 8, 7, 7}), - ::testing::ValuesIn(const_shapes_fprop_2d_kernel_same_as_input), - ::testing::ValuesIn(types), - ::testing::Values(false), // Positive test - ::testing::Values(ov::test::utils::DEVICE_CPU)), - MulConvFusion::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_Convolution_2D_kernel_same_as_input, + MulConvFusion, + ::testing::Combine(::testing::Values(ov::op::v1::Convolution::get_type_info_static()), + ::testing::Values(ov::Shape{2, 8, 7, 7}), + ::testing::Values(ov::Shape{3, 8, 7, 7}), + ::testing::ValuesIn(const_shapes_fprop_2d_kernel_same_as_input), + ::testing::ValuesIn(types), + ::testing::Values(false), // Positive test + ::testing::Values(ov::test::utils::DEVICE_CPU)), + MulConvFusion::getTestCaseName); - const std::vector const_shapes_conv_bprop{ - {}, - {1}, - {1, 1}, - {1, 1, 1}, - 
{1, 1, 1, 1}, - {3, 1, 1}, - {1, 3, 1, 1}, - }; - INSTANTIATE_TEST_SUITE_P(smoke_ConvolutionBackpropData_2D, MulConvFusion, - ::testing::Combine( - ::testing::Values(ngraph::opset8::ConvolutionBackpropData::get_type_info_static()), - ::testing::Values(ngraph::Shape{4, 3, 64, 64}), - ::testing::Values(ngraph::Shape{3, 20, 3, 3}), - ::testing::ValuesIn(const_shapes_conv_bprop), - ::testing::ValuesIn(types), - ::testing::Values(false), // Positive test - ::testing::Values(ov::test::utils::DEVICE_CPU)), - MulConvFusion::getTestCaseName); +const std::vector const_shapes_conv_bprop{ + {}, + {1}, + {1, 1}, + {1, 1, 1}, + {1, 1, 1, 1}, + {3, 1, 1}, + {1, 3, 1, 1}, +}; +INSTANTIATE_TEST_SUITE_P( + smoke_ConvolutionBackpropData_2D, + MulConvFusion, + ::testing::Combine(::testing::Values(ov::op::v1::ConvolutionBackpropData::get_type_info_static()), + ::testing::Values(ov::Shape{4, 3, 64, 64}), + ::testing::Values(ov::Shape{3, 20, 3, 3}), + ::testing::ValuesIn(const_shapes_conv_bprop), + ::testing::ValuesIn(types), + ::testing::Values(false), // Positive test + ::testing::Values(ov::test::utils::DEVICE_CPU)), + MulConvFusion::getTestCaseName); - const std::vector const_shapes_group_conv{ - {}, - {1}, - {1, 1}, - {1, 1, 1}, - {12, 1, 1}, - {1, 1, 1, 1}, - {1, 12, 1, 1}, - }; +const std::vector const_shapes_group_conv{ + {}, + {1}, + {1, 1}, + {1, 1, 1}, + {12, 1, 1}, + {1, 1, 1, 1}, + {1, 12, 1, 1}, +}; - INSTANTIATE_TEST_SUITE_P(smoke_GroupConvolution_2D, MulConvFusion, - ::testing::Combine( - ::testing::Values(ngraph::opset8::GroupConvolution::get_type_info_static()), - ::testing::Values(ngraph::Shape{2, 12, 14, 14}), - ::testing::Values(ngraph::Shape{4, 5, 3, 7, 7}), - ::testing::ValuesIn(const_shapes_group_conv), - ::testing::ValuesIn(types), - ::testing::Values(false), // Positive test - ::testing::Values(ov::test::utils::DEVICE_CPU)), - MulConvFusion::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_GroupConvolution_2D, + MulConvFusion, + ::testing::Combine(::testing::Values(ov::op::v1::GroupConvolution::get_type_info_static()), + ::testing::Values(ov::Shape{2, 12, 14, 14}), + ::testing::Values(ov::Shape{4, 5, 3, 7, 7}), + ::testing::ValuesIn(const_shapes_group_conv), + ::testing::ValuesIn(types), + ::testing::Values(false), // Positive test + ::testing::Values(ov::test::utils::DEVICE_CPU)), + MulConvFusion::getTestCaseName); - const std::vector const_shapes_group_conv_kernel_same_as_input{ - {14}, - {1, 14}, - {1, 1, 14}, - {12, 1, 14}, - {1, 1, 1, 14}, - {1, 12, 1, 14}, - {14, 1}, - {1, 14, 1}, - {12, 14, 1}, - {1, 1, 14, 1}, - {1, 12, 14, 1}, - {1, 1, 14, 14}, - {1, 12, 14, 14}, - }; +const std::vector const_shapes_group_conv_kernel_same_as_input{ + {14}, + {1, 14}, + {1, 1, 14}, + {12, 1, 14}, + {1, 1, 1, 14}, + {1, 12, 1, 14}, + {14, 1}, + {1, 14, 1}, + {12, 14, 1}, + {1, 1, 14, 1}, + {1, 12, 14, 1}, + {1, 1, 14, 14}, + {1, 12, 14, 14}, +}; - INSTANTIATE_TEST_SUITE_P(smoke_GroupConvolution_2D_kernel_same_as_input, MulConvFusion, - ::testing::Combine( - ::testing::Values(ngraph::opset8::GroupConvolution::get_type_info_static()), - ::testing::Values(ngraph::Shape{2, 12, 14, 14}), - ::testing::Values(ngraph::Shape{4, 5, 3, 14, 14}), - ::testing::ValuesIn(const_shapes_group_conv_kernel_same_as_input), - ::testing::ValuesIn(types), - ::testing::Values(false), // Positive test - ::testing::Values(ov::test::utils::DEVICE_CPU)), - MulConvFusion::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_GroupConvolution_2D_kernel_same_as_input, + MulConvFusion, + 
::testing::Combine(::testing::Values(ov::op::v1::GroupConvolution::get_type_info_static()), + ::testing::Values(ov::Shape{2, 12, 14, 14}), + ::testing::Values(ov::Shape{4, 5, 3, 14, 14}), + ::testing::ValuesIn(const_shapes_group_conv_kernel_same_as_input), + ::testing::ValuesIn(types), + ::testing::Values(false), // Positive test + ::testing::Values(ov::test::utils::DEVICE_CPU)), + MulConvFusion::getTestCaseName); - const std::vector const_shapes_group_conv_bprop{ - {}, - {1}, - {1, 1}, - {1, 1, 1}, - {12, 1, 1}, - {1, 1, 1, 1}, - {1, 12, 1, 1}, - }; +const std::vector const_shapes_group_conv_bprop{ + {}, + {1}, + {1, 1}, + {1, 1, 1}, + {12, 1, 1}, + {1, 1, 1, 1}, + {1, 12, 1, 1}, +}; - INSTANTIATE_TEST_SUITE_P(smoke_GroupConvolutionBackpropData_2D, MulConvFusion, - ::testing::Combine( - ::testing::Values(ngraph::opset8::GroupConvolutionBackpropData::get_type_info_static()), - ::testing::Values(ngraph::Shape{2, 12, 10, 10}), - ::testing::Values(ngraph::Shape{4, 3, 5, 2, 2}), - ::testing::ValuesIn(const_shapes_group_conv_bprop), - ::testing::ValuesIn(types), - ::testing::Values(false), // Positive test - ::testing::Values(ov::test::utils::DEVICE_CPU)), - MulConvFusion::getTestCaseName); +INSTANTIATE_TEST_SUITE_P( + smoke_GroupConvolutionBackpropData_2D, + MulConvFusion, + ::testing::Combine(::testing::Values(ov::op::v1::GroupConvolutionBackpropData::get_type_info_static()), + ::testing::Values(ov::Shape{2, 12, 10, 10}), + ::testing::Values(ov::Shape{4, 3, 5, 2, 2}), + ::testing::ValuesIn(const_shapes_group_conv_bprop), + ::testing::ValuesIn(types), + ::testing::Values(false), // Positive test + ::testing::Values(ov::test::utils::DEVICE_CPU)), + MulConvFusion::getTestCaseName); - const std::vector negative_const_shapes{ - {12, 64, 64}, - {2, 1, 1, 1}, - {1, 1, 64, 64}, - {1, 12, 64, 64}, - {2, 12, 64, 64}, - }; +const std::vector negative_const_shapes{ + {12, 64, 64}, + {2, 1, 1, 1}, + {1, 1, 64, 64}, + {1, 12, 64, 64}, + {2, 12, 64, 64}, +}; - INSTANTIATE_TEST_SUITE_P(smoke_NegativeConvolution_2D, MulConvFusion, - ::testing::Combine( - ::testing::Values(ngraph::opset8::Convolution::get_type_info_static()), - ::testing::Values(ngraph::Shape{2, 12, 64, 64}), - ::testing::Values(ngraph::Shape{20, 12, 1, 1}), - ::testing::ValuesIn(negative_const_shapes), - ::testing::ValuesIn(types), - ::testing::Values(true), // Negative test - ::testing::Values(ov::test::utils::DEVICE_CPU)), - MulConvFusion::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_NegativeConvolution_2D, + MulConvFusion, + ::testing::Combine(::testing::Values(ov::op::v1::Convolution::get_type_info_static()), + ::testing::Values(ov::Shape{2, 12, 64, 64}), + ::testing::Values(ov::Shape{20, 12, 1, 1}), + ::testing::ValuesIn(negative_const_shapes), + ::testing::ValuesIn(types), + ::testing::Values(true), // Negative test + ::testing::Values(ov::test::utils::DEVICE_CPU)), + MulConvFusion::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_NegativeConvolutionBackpropData_2D, MulConvFusion, - ::testing::Combine( - ::testing::Values(ngraph::opset8::ConvolutionBackpropData::get_type_info_static()), - ::testing::Values(ngraph::Shape{2, 12, 64, 64}), - ::testing::Values(ngraph::Shape{12, 20, 3, 3}), - ::testing::ValuesIn(negative_const_shapes), - ::testing::ValuesIn(types), - ::testing::Values(true), // Negative test - ::testing::Values(ov::test::utils::DEVICE_CPU)), - MulConvFusion::getTestCaseName); +INSTANTIATE_TEST_SUITE_P( + smoke_NegativeConvolutionBackpropData_2D, + MulConvFusion, + 
::testing::Combine(::testing::Values(ov::op::v1::ConvolutionBackpropData::get_type_info_static()), + ::testing::Values(ov::Shape{2, 12, 64, 64}), + ::testing::Values(ov::Shape{12, 20, 3, 3}), + ::testing::ValuesIn(negative_const_shapes), + ::testing::ValuesIn(types), + ::testing::Values(true), // Negative test + ::testing::Values(ov::test::utils::DEVICE_CPU)), + MulConvFusion::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_NegativeGroupConvolution_2D, MulConvFusion, - ::testing::Combine( - ::testing::Values(ngraph::opset8::GroupConvolution::get_type_info_static()), - ::testing::Values(ngraph::Shape{2, 12, 64, 64}), - ::testing::Values(ngraph::Shape{4, 5, 3, 1, 2}), - ::testing::ValuesIn(negative_const_shapes), - ::testing::ValuesIn(types), - ::testing::Values(true), // Negative test - ::testing::Values(ov::test::utils::DEVICE_CPU)), - MulConvFusion::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_NegativeGroupConvolution_2D, + MulConvFusion, + ::testing::Combine(::testing::Values(ov::op::v1::GroupConvolution::get_type_info_static()), + ::testing::Values(ov::Shape{2, 12, 64, 64}), + ::testing::Values(ov::Shape{4, 5, 3, 1, 2}), + ::testing::ValuesIn(negative_const_shapes), + ::testing::ValuesIn(types), + ::testing::Values(true), // Negative test + ::testing::Values(ov::test::utils::DEVICE_CPU)), + MulConvFusion::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_NegativeGroupConvolutionBackpropData_2D, MulConvFusion, - ::testing::Combine( - ::testing::Values(ngraph::opset8::GroupConvolutionBackpropData::get_type_info_static()), - ::testing::Values(ngraph::Shape{2, 12, 64, 64}), - ::testing::Values(ngraph::Shape{4, 3, 5, 1, 1}), - ::testing::ValuesIn(negative_const_shapes), - ::testing::ValuesIn(types), - ::testing::Values(true), // Negative test - ::testing::Values(ov::test::utils::DEVICE_CPU)), - MulConvFusion::getTestCaseName); +INSTANTIATE_TEST_SUITE_P( + smoke_NegativeGroupConvolutionBackpropData_2D, + MulConvFusion, + ::testing::Combine(::testing::Values(ov::op::v1::GroupConvolutionBackpropData::get_type_info_static()), + ::testing::Values(ov::Shape{2, 12, 64, 64}), + ::testing::Values(ov::Shape{4, 3, 5, 1, 1}), + ::testing::ValuesIn(negative_const_shapes), + ::testing::ValuesIn(types), + ::testing::Values(true), // Negative test + ::testing::Values(ov::test::utils::DEVICE_CPU)), + MulConvFusion::getTestCaseName); - const std::vector negative_const_shapes_kernel_same_as_input{ - {7}, - {1, 7}, - {1, 1, 7}, - {12, 1, 7}, - {1, 1, 1, 7}, - {1, 12, 1, 7}, - {7, 1}, - {1, 7, 1}, - {12, 7, 1}, - {1, 1, 7, 1}, - {1, 12, 7, 1}, - {1, 1, 7, 7}, - {1, 12, 7, 7}, - }; +const std::vector negative_const_shapes_kernel_same_as_input{ + {7}, + {1, 7}, + {1, 1, 7}, + {12, 1, 7}, + {1, 1, 1, 7}, + {1, 12, 1, 7}, + {7, 1}, + {1, 7, 1}, + {12, 7, 1}, + {1, 1, 7, 1}, + {1, 12, 7, 1}, + {1, 1, 7, 7}, + {1, 12, 7, 7}, +}; - INSTANTIATE_TEST_SUITE_P(smoke_NegativeConvolutionBackpropData_2D_kernel_same_as_input, MulConvFusion, - ::testing::Combine( - ::testing::Values(ngraph::opset8::ConvolutionBackpropData::get_type_info_static()), - ::testing::Values(ngraph::Shape{2, 12, 7, 7}), - ::testing::Values(ngraph::Shape{12, 20, 7, 7}), - ::testing::ValuesIn(negative_const_shapes_kernel_same_as_input), - ::testing::ValuesIn(types), - ::testing::Values(true), // Negative test - ::testing::Values(ov::test::utils::DEVICE_CPU)), - MulConvFusion::getTestCaseName); +INSTANTIATE_TEST_SUITE_P( + smoke_NegativeConvolutionBackpropData_2D_kernel_same_as_input, + MulConvFusion, + 
::testing::Combine(::testing::Values(ov::op::v1::ConvolutionBackpropData::get_type_info_static()), + ::testing::Values(ov::Shape{2, 12, 7, 7}), + ::testing::Values(ov::Shape{12, 20, 7, 7}), + ::testing::ValuesIn(negative_const_shapes_kernel_same_as_input), + ::testing::ValuesIn(types), + ::testing::Values(true), // Negative test + ::testing::Values(ov::test::utils::DEVICE_CPU)), + MulConvFusion::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_NegativeGroupConvolutionBackpropData_2D_kernel_same_as_input, MulConvFusion, - ::testing::Combine( - ::testing::Values(ngraph::opset8::GroupConvolutionBackpropData::get_type_info_static()), - ::testing::Values(ngraph::Shape{2, 12, 7, 7}), - ::testing::Values(ngraph::Shape{4, 3, 5, 7, 7}), - ::testing::ValuesIn(negative_const_shapes_kernel_same_as_input), - ::testing::ValuesIn(types), - ::testing::Values(true), // Negative test - ::testing::Values(ov::test::utils::DEVICE_CPU)), - MulConvFusion::getTestCaseName); +INSTANTIATE_TEST_SUITE_P( + smoke_NegativeGroupConvolutionBackpropData_2D_kernel_same_as_input, + MulConvFusion, + ::testing::Combine(::testing::Values(ov::op::v1::GroupConvolutionBackpropData::get_type_info_static()), + ::testing::Values(ov::Shape{2, 12, 7, 7}), + ::testing::Values(ov::Shape{4, 3, 5, 7, 7}), + ::testing::ValuesIn(negative_const_shapes_kernel_same_as_input), + ::testing::ValuesIn(types), + ::testing::Values(true), // Negative test + ::testing::Values(ov::test::utils::DEVICE_CPU)), + MulConvFusion::getTestCaseName); } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/multiply_add.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/multiply_add.cpp index f28990117a39d3..c46e92708949b0 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/multiply_add.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/multiply_add.cpp @@ -2,31 +2,29 @@ // SPDX-License-Identifier: Apache-2.0 // -#include - #include "subgraph_tests/multiply_add.hpp" -using namespace SubgraphTestsDefinitions; +#include + +using namespace ov::test; namespace { -const std::vector netPrecisions = { - InferenceEngine::Precision::FP32 -}; +const std::vector input_type = {ov::element::f32}; -const std::vector> inputShapes = { - {1, 3, 2, 2, 4, 5}, - {1, 3, 2, 2, 2, 4, 5}, - {1, 3, 2, 2, 2, 2, 4, 5}, - {1, 3, 2, 2, 2, 2, 2, 4, 5}, - {1, 3, 2, 2, 2, 2, 2, 2, 4, 5}, +const std::vector inputShapes = { + {1, 3, 2, 2, 4, 5}, + {1, 3, 2, 2, 2, 4, 5}, + {1, 3, 2, 2, 2, 2, 4, 5}, + {1, 3, 2, 2, 2, 2, 2, 4, 5}, + {1, 3, 2, 2, 2, 2, 2, 2, 4, 5}, }; -INSTANTIATE_TEST_SUITE_P(smoke_MultipleAdd_Nd, MultiplyAddLayerTest, - ::testing::Combine( - ::testing::ValuesIn(inputShapes), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - MultiplyAddLayerTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_MultipleAdd_Nd, + MultiplyAddLayerTest, + ::testing::Combine(::testing::ValuesIn(inputShapes), + ::testing::ValuesIn(input_type), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + MultiplyAddLayerTest::getTestCaseName); } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/mvn_multiply_add.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/mvn_multiply_add.cpp index 4fe0990bfba66a..f27050440a7cf9 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/mvn_multiply_add.cpp +++ 
b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/mvn_multiply_add.cpp @@ -4,91 +4,75 @@ #include "subgraph_tests/mvn_multiply_add.hpp" -using namespace SubgraphTestsDefinitions; -using namespace InferenceEngine; +using namespace ov::test; namespace { -const std::vector netPrecision = { - Precision::FP32 -}; +const std::vector netPrecision = {ov::element::f32}; -std::vector idxPrecision = { - Precision::I64 -}; +std::vector idxPrecision = {ov::element::i64}; -const std::vector acrossChannels = { - true, - false -}; +const std::vector acrossChannels = {true, false}; -const std::vector normalizeVariance = { - true, - false -}; +const std::vector normalizeVariance = {true, false}; -const std::vector epsilon = { - 0.000000001f -}; +const std::vector epsilon = {0.000000001f}; -const std::vector epsMode = { - "inside_sqrt", - "outside_sqrt" -}; +const std::vector epsMode = {"inside_sqrt", "outside_sqrt"}; -const std::vector> shapes_1D = { - std::pair{ SizeVector{5}, SizeVector{5}}, - std::pair{ SizeVector{64}, SizeVector{64}}, +const std::vector> shapes_1D = { + std::pair{ov::Shape{5}, ov::Shape{5}}, + std::pair{ov::Shape{64}, ov::Shape{64}}, }; -const std::vector> shapes_2D = { - std::pair{ SizeVector{1, 5}, SizeVector{1, 5}}, - std::pair{ SizeVector{2, 17}, SizeVector{1, 17}}, - std::pair{ SizeVector{9, 64}, SizeVector{1, 64}}, - std::pair{ SizeVector{5, 15}, SizeVector{1, 15}}, +const std::vector> shapes_2D = { + std::pair{ov::Shape{1, 5}, ov::Shape{1, 5}}, + std::pair{ov::Shape{2, 17}, ov::Shape{1, 17}}, + std::pair{ov::Shape{9, 64}, ov::Shape{1, 64}}, + std::pair{ov::Shape{5, 15}, ov::Shape{1, 15}}, }; -const std::vector> shapes_3D = { - std::pair{ SizeVector{1, 5, 8}, SizeVector{1, 5, 8}}, - std::pair{ SizeVector{2, 17, 9}, SizeVector{1, 1, 9}}, - std::pair{ SizeVector{1, 1, 10}, SizeVector{1, 1, 10}}, - std::pair{ SizeVector{2, 3, 3}, SizeVector{2, 3, 3}}, +const std::vector> shapes_3D = { + std::pair{ov::Shape{1, 5, 8}, ov::Shape{1, 5, 8}}, + std::pair{ov::Shape{2, 17, 9}, ov::Shape{1, 1, 9}}, + std::pair{ov::Shape{1, 1, 10}, ov::Shape{1, 1, 10}}, + std::pair{ov::Shape{2, 3, 3}, ov::Shape{2, 3, 3}}, }; -INSTANTIATE_TEST_SUITE_P(smoke_MVNMultiplyAdd_1D, MVNMultiplyAdd, - ::testing::Combine( - ::testing::ValuesIn(shapes_1D), - ::testing::ValuesIn(netPrecision), - ::testing::ValuesIn(idxPrecision), - ::testing::Values(std::vector{0}), - ::testing::ValuesIn(normalizeVariance), - ::testing::ValuesIn(epsilon), - ::testing::ValuesIn(epsMode), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - MVNMultiplyAdd::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_MVNMultiplyAdd_2D, MVNMultiplyAdd, - ::testing::Combine( - ::testing::ValuesIn(shapes_2D), - ::testing::ValuesIn(netPrecision), - ::testing::ValuesIn(idxPrecision), - ::testing::Values(std::vector{1}), - ::testing::ValuesIn(normalizeVariance), - ::testing::ValuesIn(epsilon), - ::testing::ValuesIn(epsMode), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - MVNMultiplyAdd::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_MVNMultiplyAdd_3D, MVNMultiplyAdd, - ::testing::Combine( - ::testing::ValuesIn(shapes_3D), - ::testing::ValuesIn(netPrecision), - ::testing::ValuesIn(idxPrecision), - ::testing::Values(std::vector{2}), - ::testing::ValuesIn(normalizeVariance), - ::testing::ValuesIn(epsilon), - ::testing::ValuesIn(epsMode), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - MVNMultiplyAdd::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_MVNMultiplyAdd_1D, + MVNMultiplyAdd, + 
::testing::Combine(::testing::ValuesIn(shapes_1D), + ::testing::ValuesIn(netPrecision), + ::testing::ValuesIn(idxPrecision), + ::testing::Values(std::vector{0}), + ::testing::ValuesIn(normalizeVariance), + ::testing::ValuesIn(epsilon), + ::testing::ValuesIn(epsMode), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + MVNMultiplyAdd::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_MVNMultiplyAdd_2D, + MVNMultiplyAdd, + ::testing::Combine(::testing::ValuesIn(shapes_2D), + ::testing::ValuesIn(netPrecision), + ::testing::ValuesIn(idxPrecision), + ::testing::Values(std::vector{1}), + ::testing::ValuesIn(normalizeVariance), + ::testing::ValuesIn(epsilon), + ::testing::ValuesIn(epsMode), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + MVNMultiplyAdd::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_MVNMultiplyAdd_3D, + MVNMultiplyAdd, + ::testing::Combine(::testing::ValuesIn(shapes_3D), + ::testing::ValuesIn(netPrecision), + ::testing::ValuesIn(idxPrecision), + ::testing::Values(std::vector{2}), + ::testing::ValuesIn(normalizeVariance), + ::testing::ValuesIn(epsilon), + ::testing::ValuesIn(epsMode), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + MVNMultiplyAdd::getTestCaseName); } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/parameter_result.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/parameter_result.cpp index 7f25b2ef54ef44..a70b3c7bbc3659 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/parameter_result.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/parameter_result.cpp @@ -11,12 +11,6 @@ using namespace ov::test; namespace { -INSTANTIATE_TEST_SUITE_P(smoke_Check, - ParameterResultSubgraphTestLegacyApi, - ::testing::Combine(::testing::Values(ov::test::InputShape{{1, 3, 10, 10}, {}}), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ParameterResultSubgraphTestBase::getTestCaseName); - const std::vector inputShapes = { ov::test::InputShape{{1, 3, 10, 10}, {{1, 3, 10, 10}, {1, 3, 10, 10}}}, ov::test::InputShape{{-1, -1, -1, -1}, {{1, 3, 10, 10}, {2, 5, 3, 10}, {1, 3, 10, 10}, {1, 3, 10, 10}}}, diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/perm_conv_perm_concat.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/perm_conv_perm_concat.cpp index 4fe713ebc44187..8617d1e68c2742 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/perm_conv_perm_concat.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/perm_conv_perm_concat.cpp @@ -2,42 +2,45 @@ // SPDX-License-Identifier: Apache-2.0 // -#include #include "subgraph_tests/perm_conv_perm_concat.hpp" -#include "common_test_utils/test_constants.hpp" + +#include + namespace { -std::vector> input_shapes { +std::vector input_shapes{ {1, 1, 7, 32}, {1, 1, 8, 16}, }; -std::vector> kernel_shapes { +std::vector kernel_shapes{ {1, 3}, {1, 5}, }; -std::vector output_channels { +std::vector output_channels{ 32, 64, }; -std::vector netPrecisions = { - InferenceEngine::Precision::FP32, - InferenceEngine::Precision::FP16, +std::vector netPrecisions = { + ov::element::f32, + ov::element::f16, }; -std::map additional_config = { -}; -} // namespace - -namespace SubgraphTestsDefinitions { -INSTANTIATE_TEST_SUITE_P(smoke_basic, PermConvPermConcat, - ::testing::Combine( - ::testing::ValuesIn(netPrecisions), - 
::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(input_shapes), - ::testing::ValuesIn(kernel_shapes), - ::testing::ValuesIn(output_channels), - ::testing::Values(additional_config)), - PermConvPermConcat::getTestCaseName); -} // namespace SubgraphTestsDefinitions +ov::AnyMap additional_config = {}; +} // namespace + +namespace ov { +namespace test { + +INSTANTIATE_TEST_SUITE_P(smoke_basic, + PermConvPermConcat, + ::testing::Combine(::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::ValuesIn(input_shapes), + ::testing::ValuesIn(kernel_shapes), + ::testing::ValuesIn(output_channels), + ::testing::Values(additional_config)), + PermConvPermConcat::getTestCaseName); +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_gna/tests/functional/shared_tests_instances/subgraph_tests/perm_conv_perm_concat.cpp b/src/plugins/intel_gna/tests/functional/shared_tests_instances/subgraph_tests/perm_conv_perm_concat.cpp deleted file mode 100644 index 299935865f1ca5..00000000000000 --- a/src/plugins/intel_gna/tests/functional/shared_tests_instances/subgraph_tests/perm_conv_perm_concat.cpp +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "subgraph_tests/perm_conv_perm_concat.hpp" - -#include - -#include "common_test_utils/test_constants.hpp" -namespace { -std::vector> input_shapes{ - {1, 1, 7, 32}, - {1, 1, 8, 16}, -}; - -std::vector> kernel_shapes{ - {1, 3}, - {1, 5}, -}; - -std::vector output_channels{ - 32, - 64, -}; - -std::vector netPrecisions = { - InferenceEngine::Precision::FP32, - InferenceEngine::Precision::FP16, -}; - -std::vector> configs = {{{"GNA_DEVICE_MODE", "GNA_SW_EXACT"}}, - {{"GNA_DEVICE_MODE", "GNA_SW_FP32"}}}; -} // namespace - -namespace SubgraphTestsDefinitions { -INSTANTIATE_TEST_SUITE_P(smoke_basic, - PermConvPermConcat, - ::testing::Combine(::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_GNA), - ::testing::ValuesIn(input_shapes), - ::testing::ValuesIn(kernel_shapes), - ::testing::ValuesIn(output_channels), - ::testing::ValuesIn(configs)), - PermConvPermConcat::getTestCaseName); -} // namespace SubgraphTestsDefinitions diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/multiply_add.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/multiply_add.cpp index d60cbeb5875472..056a85a926aaa3 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/multiply_add.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/multiply_add.cpp @@ -2,32 +2,29 @@ // SPDX-License-Identifier: Apache-2.0 // -#include - #include "subgraph_tests/multiply_add.hpp" -using namespace SubgraphTestsDefinitions; +#include + +using namespace ov::test; namespace { -const std::vector netPrecisions = { - InferenceEngine::Precision::FP32, - InferenceEngine::Precision::FP16 -}; +const std::vector input_type = {ov::element::f32, ov::element::f16}; -const std::vector> inputShapes = { - {1, 3}, - {1, 3, 2}, - {1, 3, 2, 5}, - {1, 3, 2, 5, 4}, - {1, 3, 2, 2, 4, 5}, +const std::vector inputShapes = { + {1, 3}, + {1, 3, 2}, + {1, 3, 2, 5}, + {1, 3, 2, 5, 4}, + {1, 3, 2, 2, 4, 5}, }; -INSTANTIATE_TEST_SUITE_P(smoke_MultipleAdd_Nd, MultiplyAddLayerTest, - ::testing::Combine( - ::testing::ValuesIn(inputShapes), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_GPU)), - 
MultiplyAddLayerTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_MultipleAdd_Nd, + MultiplyAddLayerTest, + ::testing::Combine(::testing::ValuesIn(inputShapes), + ::testing::ValuesIn(input_type), + ::testing::Values(ov::test::utils::DEVICE_GPU)), + MultiplyAddLayerTest::getTestCaseName); } // namespace diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/perm_conv_perm_concat.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/perm_conv_perm_concat.cpp index 105a37a30f2c37..5eda8a003d8971 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/perm_conv_perm_concat.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/perm_conv_perm_concat.cpp @@ -2,42 +2,41 @@ // SPDX-License-Identifier: Apache-2.0 // -#include #include "subgraph_tests/perm_conv_perm_concat.hpp" -#include "common_test_utils/test_constants.hpp" + +#include + namespace { -std::vector> input_shapes { +std::vector input_shapes{ {1, 1, 7, 32}, {1, 1, 8, 16}, }; -std::vector> kernel_shapes { +std::vector kernel_shapes{ {1, 3}, {1, 5}, }; -std::vector output_channels { +std::vector output_channels{ 32, 64, }; -std::vector netPrecisions = { - InferenceEngine::Precision::FP32, -// InferenceEngine::Precision::FP16, -}; +std::vector netPrecisions = {ov::element::f32}; -std::map additional_config = { -}; -} // namespace - -namespace SubgraphTestsDefinitions { - INSTANTIATE_TEST_SUITE_P(smoke_basic, PermConvPermConcat, - ::testing::Combine( - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_GPU), - ::testing::ValuesIn(input_shapes), - ::testing::ValuesIn(kernel_shapes), - ::testing::ValuesIn(output_channels), - ::testing::Values(additional_config)), - PermConvPermConcat::getTestCaseName); -} // namespace SubgraphTestsDefinitions +ov::AnyMap additional_config = {}; +} // namespace + +namespace ov { +namespace test { +INSTANTIATE_TEST_SUITE_P(smoke_basic, + PermConvPermConcat, + ::testing::Combine(::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::test::utils::DEVICE_GPU), + ::testing::ValuesIn(input_shapes), + ::testing::ValuesIn(kernel_shapes), + ::testing::ValuesIn(output_channels), + ::testing::Values(additional_config)), + PermConvPermConcat::getTestCaseName); +} // namespace test +} // namespace ov diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/mul_conv_fusion.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/mul_conv_fusion.hpp index 46d65560b9e1ab..529b22c56e401e 100644 --- a/src/tests/functional/plugin/shared/include/subgraph_tests/mul_conv_fusion.hpp +++ b/src/tests/functional/plugin/shared/include/subgraph_tests/mul_conv_fusion.hpp @@ -6,9 +6,12 @@ #include "shared_test_classes/subgraph/mul_conv_fusion.hpp" -namespace SubgraphTestsDefinitions { +namespace ov { +namespace test { TEST_P(MulConvFusion, CompareWithRefs) { - Run(); + run(); } -} // namespace SubgraphTestsDefinitions + +} // namespace test +} // namespace ov diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/multiply_add.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/multiply_add.hpp index f3d65830592133..93f3600048a90d 100644 --- a/src/tests/functional/plugin/shared/include/subgraph_tests/multiply_add.hpp +++ b/src/tests/functional/plugin/shared/include/subgraph_tests/multiply_add.hpp @@ -6,10 +6,12 @@ #include "shared_test_classes/subgraph/multiply_add.hpp" -namespace SubgraphTestsDefinitions { +namespace ov { 
+namespace test { TEST_P(MultiplyAddLayerTest, CompareWithRefs) { - Run(); + run(); }; -} // namespace SubgraphTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/mvn_multiply_add.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/mvn_multiply_add.hpp index 8b118b1295f140..174dffd25ca873 100644 --- a/src/tests/functional/plugin/shared/include/subgraph_tests/mvn_multiply_add.hpp +++ b/src/tests/functional/plugin/shared/include/subgraph_tests/mvn_multiply_add.hpp @@ -6,10 +6,12 @@ #include "shared_test_classes/subgraph/mvn_multiply_add.hpp" -namespace SubgraphTestsDefinitions { +namespace ov { +namespace test { -TEST_P(MVNMultiplyAdd, CompareWithRefs){ - Run(); +TEST_P(MVNMultiplyAdd, CompareWithRefs) { + run(); }; -} // namespace SubgraphTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/perm_conv_perm_concat.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/perm_conv_perm_concat.hpp index b4ad568c56b462..41582d13ffd009 100644 --- a/src/tests/functional/plugin/shared/include/subgraph_tests/perm_conv_perm_concat.hpp +++ b/src/tests/functional/plugin/shared/include/subgraph_tests/perm_conv_perm_concat.hpp @@ -6,10 +6,12 @@ #include "shared_test_classes/subgraph/perm_conv_perm_concat.hpp" -namespace SubgraphTestsDefinitions { +namespace ov { +namespace test { TEST_P(PermConvPermConcat, CompareWithRefs) { - Run(); + run(); } -} // namespace SubgraphTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/mul_conv_fusion.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/mul_conv_fusion.hpp index 7486a950fffd6b..571a8903c32f75 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/mul_conv_fusion.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/mul_conv_fusion.hpp @@ -4,32 +4,33 @@ #pragma once -#include #include +#include #include -#include "shared_test_classes/base/layer_test_utils.hpp" -#include -#include - -namespace SubgraphTestsDefinitions { - -typedef std::tuple< - ngraph::NodeTypeInfo, // Convolution type - ngraph::Shape, // Input shape - ngraph::Shape, // Weights shape - ngraph::Shape, // Const shape - ngraph::element::Type, // Network precision - bool, // True if test is negative - std::string // Device name - > MulConvFusionParams; - -class MulConvFusion - : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { + +#include "shared_test_classes/base/ov_subgraph.hpp" + +namespace ov { +namespace test { + +typedef std::tuple + MulConvFusionParams; + +class MulConvFusion : public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseStaticTest { public: - static std::string getTestCaseName(const testing::TestParamInfo &obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); protected: void SetUp() override; }; -} // namespace SubgraphTestsDefinitions + +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/multiply_add.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/multiply_add.hpp index 1e016857d4728f..fd93d5a01e3560 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/multiply_add.hpp +++ 
b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/multiply_add.hpp @@ -4,30 +4,28 @@ #pragma once -#include +#include #include +#include #include -#include -#include "shared_test_classes/base/layer_test_utils.hpp" -#include "ov_models/builders.hpp" -#include "ov_models/utils/ov_helpers.hpp" -#include "common_test_utils/test_constants.hpp" -namespace SubgraphTestsDefinitions { +#include "shared_test_classes/base/ov_subgraph.hpp" + +namespace ov { +namespace test { -using MultiplyAddParamsTuple = typename std::tuple< - std::vector, //input shapes - InferenceEngine::Precision, //Network precision - std::string>; //Device name +using MultiplyAddParamsTuple = typename std::tuple; // Device name -class MultiplyAddLayerTest: - public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon{ +class MultiplyAddLayerTest : public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseStaticTest { public: - std::shared_ptr fn; - static std::string getTestCaseName(const testing::TestParamInfo &obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); + protected: void SetUp() override; }; -} // namespace SubgraphTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/mvn_multiply_add.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/mvn_multiply_add.hpp index 800fc2cbb0caa1..f8218c2f04238f 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/mvn_multiply_add.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/mvn_multiply_add.hpp @@ -4,31 +4,34 @@ #pragma once -#include #include +#include #include -#include "shared_test_classes/base/layer_test_utils.hpp" -#include "ov_models/builders.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" -namespace SubgraphTestsDefinitions { +namespace ov { +namespace test { -typedef std::tuple< - std::pair, // Input shape, Constant shape - InferenceEngine::Precision, // Data precision - InferenceEngine::Precision, // Axes precision - std::vector, // Axes - bool, // Normalize variance - float, // Epsilon - std::string, // Epsilon mode - std::string // Device name -> mvnMultiplyAddParams; +typedef std::tuple, // Input shape, Constant shape + ov::element::Type, // Data precision + ov::element::Type, // Axes precision + std::vector, // Axes + bool, // Normalize variance + float, // Epsilon + std::string, // Epsilon mode + std::string // Device name + > + mvnMultiplyAddParams; -class MVNMultiplyAdd: public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon{ +class MVNMultiplyAdd : public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseStaticTest { public: - static std::string getTestCaseName(const testing::TestParamInfo &obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); + protected: void SetUp() override; }; -} // namespace SubgraphTestsDefinitions + +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/perm_conv_perm_concat.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/perm_conv_perm_concat.hpp index b8be31f93271e7..e9b47db5482dee 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/perm_conv_perm_concat.hpp +++ 
b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/perm_conv_perm_concat.hpp @@ -5,32 +5,29 @@ #pragma once #include -#include -#include -#include -#include - -#include "shared_test_classes/base/layer_test_utils.hpp" -#include "ov_models/utils/ov_helpers.hpp" -#include "ov_models/builders.hpp" - -namespace SubgraphTestsDefinitions { -typedef std::tuple< - InferenceEngine::Precision, // Network Precision - std::string, // Target Device - std::array, // Input shape - std::array, // Kernel shape - size_t, // Output channels - std::map // Configuration -> PermConvPermConcatParams; + +#include "shared_test_classes/base/ov_subgraph.hpp" + +namespace ov { +namespace test { + +typedef std::tuple + PermConvPermConcatParams; class PermConvPermConcat : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { + virtual public ov::test::SubgraphBaseStaticTest { public: static std::string getTestCaseName(const testing::TestParamInfo& obj); protected: void SetUp() override; - void Run() override; }; -} // namespace SubgraphTestsDefinitions + +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/src/subgraph/mul_conv_fusion.cpp b/src/tests/functional/shared_test_classes/src/subgraph/mul_conv_fusion.cpp index d16090a2e1c819..411cff4a46ab21 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/mul_conv_fusion.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/mul_conv_fusion.cpp @@ -2,17 +2,20 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "transformations/common_optimizations/mul_conv_fusion.hpp" -#include "ngraph/pass/constant_folding.hpp" #include "shared_test_classes/subgraph/mul_conv_fusion.hpp" + +#include "common_test_utils/graph_comparator.hpp" +#include "openvino/pass/manager.hpp" #include "ov_models/builders.hpp" +#include "transformations/common_optimizations/mul_conv_fusion.hpp" -namespace SubgraphTestsDefinitions { +namespace ov { +namespace test { -std::string MulConvFusion::getTestCaseName(const testing::TestParamInfo &obj) { - ngraph::NodeTypeInfo conv_type; - ngraph::Shape input_shape, weights_shape, const_shape; - ngraph::element::Type precision; +std::string MulConvFusion::getTestCaseName(const testing::TestParamInfo& obj) { + ov::NodeTypeInfo conv_type; + ov::Shape input_shape, weights_shape, const_shape; + ov::element::Type precision; std::string device; std::tie(conv_type, input_shape, weights_shape, const_shape, precision, std::ignore, device) = obj.param; std::ostringstream results; @@ -27,36 +30,43 @@ std::string MulConvFusion::getTestCaseName(const testing::TestParamInfoGetParam(); - auto param = std::make_shared(precision, input_shape); + std::tie(conv_type, input_shape, weights_shape, const_shape, precision, is_negative, targetDevice) = + this->GetParam(); + auto param = std::make_shared(precision, input_shape); auto spatial_dims = input_shape.size() - 2; auto mul_const = ngraph::builder::makeConstant(precision, const_shape, {}, true); - auto mul = std::make_shared(param, mul_const); - ngraph::Shape strides(spatial_dims, 1); + auto mul = std::make_shared(param, mul_const); + ov::Shape strides(spatial_dims, 1); std::vector pad_begin(spatial_dims, 0), pad_end(spatial_dims, 0); auto weights = ngraph::builder::makeConstant(precision, weights_shape, {}, true); - std::shared_ptr conv; - if (conv_type == ngraph::opset8::Convolution::get_type_info_static()) { - conv = std::make_shared(mul, weights, strides, pad_begin, pad_end, strides); - } else if 
(conv_type == ngraph::opset8::GroupConvolution::get_type_info_static()) { - conv = std::make_shared(mul, weights, strides, pad_begin, pad_end, strides); - } else if (conv_type == ngraph::opset8::ConvolutionBackpropData::get_type_info_static()) { - conv = std::make_shared(mul, weights, strides, pad_begin, pad_end, strides); - } else if (conv_type == ngraph::opset8::GroupConvolutionBackpropData::get_type_info_static()) { - conv = std::make_shared(mul, weights, strides, pad_begin, pad_end, strides); + std::shared_ptr conv; + if (conv_type == ov::op::v1::Convolution::get_type_info_static()) { + conv = std::make_shared(mul, weights, strides, pad_begin, pad_end, strides); + } else if (conv_type == ov::op::v1::GroupConvolution::get_type_info_static()) { + conv = std::make_shared(mul, weights, strides, pad_begin, pad_end, strides); + } else if (conv_type == ov::op::v1::ConvolutionBackpropData::get_type_info_static()) { + conv = + std::make_shared(mul, weights, strides, pad_begin, pad_end, strides); + } else if (conv_type == ov::op::v1::GroupConvolutionBackpropData::get_type_info_static()) { + conv = std::make_shared(mul, + weights, + strides, + pad_begin, + pad_end, + strides); } else { OPENVINO_THROW("Unsupported type"); } - function = std::make_shared(ngraph::OutputVector{conv}, ngraph::ParameterVector{param}); - auto cloned_function = ngraph::clone_function(*function); + function = std::make_shared(ov::OutputVector{conv}, ov::ParameterVector{param}); + auto cloned_function = function->clone(); - ngraph::pass::Manager manager; + ov::pass::Manager manager; manager.register_pass(); manager.register_pass(); manager.register_pass(); @@ -65,58 +75,75 @@ void MulConvFusion::SetUp() { bool functions_equal = false; if (!is_negative) { - auto param = std::make_shared(precision, input_shape); - ngraph::Shape strides(spatial_dims, 1); + auto param = std::make_shared(precision, input_shape); + ov::Shape strides(spatial_dims, 1); std::vector pad_begin(spatial_dims, 0), pad_end(spatial_dims, 0); - std::shared_ptr conv; - if (conv_type == ngraph::opset8::Convolution::get_type_info_static()) { - weights = std::make_shared(weights, mul_const); + std::shared_ptr conv; + if (conv_type == ov::op::v1::Convolution::get_type_info_static()) { + weights = std::make_shared(weights, mul_const); weights = ov::get_constant_from_source(weights); ASSERT_NE(nullptr, weights); - conv = std::make_shared(param, weights, strides, pad_begin, pad_end, strides); - } else if (conv_type == ngraph::opset8::GroupConvolution::get_type_info_static()) { + conv = std::make_shared(param, weights, strides, pad_begin, pad_end, strides); + } else if (conv_type == ov::op::v1::GroupConvolution::get_type_info_static()) { const_shape.insert(const_shape.begin(), weights_shape.size() - const_shape.size(), 1); auto G = const_shape[2] > 1 ? 
weights_shape[0] : 1; const_shape[0] = G; const_shape[2] /= G; - auto reshape = std::make_shared(mul_const, - ngraph::op::Constant::create(ngraph::element::u64, ngraph::Shape{const_shape.size()}, const_shape), false); - weights = std::make_shared(weights, reshape); + auto reshape = std::make_shared( + mul_const, + ov::op::v0::Constant::create(ov::element::u64, ov::Shape{const_shape.size()}, const_shape), + false); + weights = std::make_shared(weights, reshape); weights = ov::get_constant_from_source(weights); ASSERT_NE(nullptr, weights); - conv = std::make_shared(param, weights, strides, pad_begin, pad_end, strides); - } else if (conv_type == ngraph::opset8::ConvolutionBackpropData::get_type_info_static()) { + conv = std::make_shared(param, weights, strides, pad_begin, pad_end, strides); + } else if (conv_type == ov::op::v1::ConvolutionBackpropData::get_type_info_static()) { const_shape.insert(const_shape.begin(), weights_shape.size() - const_shape.size(), 1); const_shape[0] = const_shape[1]; const_shape[1] = 1; - auto reshape = std::make_shared(mul_const, - ngraph::op::Constant::create(ngraph::element::u64, ngraph::Shape{const_shape.size()}, const_shape), false); - weights = std::make_shared(weights, reshape); + auto reshape = std::make_shared( + mul_const, + ov::op::v0::Constant::create(ov::element::u64, ov::Shape{const_shape.size()}, const_shape), + false); + weights = std::make_shared(weights, reshape); weights = ov::get_constant_from_source(weights); ASSERT_NE(nullptr, weights); - conv = std::make_shared(param, weights, strides, pad_begin, pad_end, strides); - } else if (conv_type == ngraph::opset8::GroupConvolutionBackpropData::get_type_info_static()) { + conv = std::make_shared(param, + weights, + strides, + pad_begin, + pad_end, + strides); + } else if (conv_type == ov::op::v1::GroupConvolutionBackpropData::get_type_info_static()) { const_shape.insert(const_shape.begin(), weights_shape.size() - const_shape.size(), 1); auto G = const_shape[2] > 1 ? 
weights_shape[0] : 1; const_shape[0] = G; const_shape[1] = const_shape[2] / G; const_shape[2] = 1; - auto reshape = std::make_shared(mul_const, - ngraph::op::Constant::create(ngraph::element::u64, ngraph::Shape{const_shape.size()}, const_shape), false); - weights = std::make_shared(weights, reshape); + auto reshape = std::make_shared( + mul_const, + ov::op::v0::Constant::create(ov::element::u64, ov::Shape{const_shape.size()}, const_shape), + false); + weights = std::make_shared(weights, reshape); weights = ov::get_constant_from_source(weights); ASSERT_NE(nullptr, weights); - conv = std::make_shared(param, weights, strides, pad_begin, pad_end, strides); + conv = std::make_shared(param, + weights, + strides, + pad_begin, + pad_end, + strides); } else { OPENVINO_THROW("Unsupported type"); } - auto reference_function = std::make_shared(ngraph::OutputVector{conv}, ngraph::ParameterVector{param}); + auto reference_function = std::make_shared(ov::OutputVector{conv}, ov::ParameterVector{param}); std::tie(functions_equal, std::ignore) = compare_functions(cloned_function, reference_function, true); ASSERT_TRUE(functions_equal); } else { - auto reference_function = ngraph::clone_function(*function); + auto reference_function = function->clone(); std::tie(functions_equal, std::ignore) = compare_functions(cloned_function, reference_function, true); ASSERT_TRUE(functions_equal); } } -} // namespace SubgraphTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/src/subgraph/multiply_add.cpp b/src/tests/functional/shared_test_classes/src/subgraph/multiply_add.cpp index 8b070a70026d15..dfc1dcdb5f7fd5 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/multiply_add.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/multiply_add.cpp @@ -4,37 +4,43 @@ #include "shared_test_classes/subgraph/multiply_add.hpp" -namespace SubgraphTestsDefinitions { -std::string MultiplyAddLayerTest::getTestCaseName(const testing::TestParamInfo &obj) { - std::vector inputShapes; - InferenceEngine::Precision netPrecision; +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" + +namespace ov { +namespace test { + +std::string MultiplyAddLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { + ov::Shape inputShapes; + ov::element::Type element_type; std::string targetName; - std::tie(inputShapes, netPrecision, targetName) = obj.param; + std::tie(inputShapes, element_type, targetName) = obj.param; std::ostringstream results; results << "IS=" << ov::test::utils::vec2str(inputShapes) << "_"; - results << "netPRC=" << netPrecision.name() << "_"; + results << "ET=" << element_type << "_"; results << "targetDevice=" << targetName << "_"; return results.str(); } void MultiplyAddLayerTest::SetUp() { - std::vector inputShape; - auto netPrecision = InferenceEngine::Precision::UNSPECIFIED; - std::tie(inputShape, netPrecision, targetDevice) = this->GetParam(); - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; - auto paramOuts = ngraph::helpers::convert2OutputVector( - ngraph::helpers::castOps2Nodes(params)); + ov::Shape inputShape; + ov::element::Type element_type; + std::tie(inputShape, element_type, targetDevice) = this->GetParam(); + ov::ParameterVector params{std::make_shared(element_type, ov::PartialShape(inputShape))}; + auto paramOuts = + ngraph::helpers::convert2OutputVector(ngraph::helpers::castOps2Nodes(params)); 
std::vector constShape(inputShape.size(), 1); constShape[1] = inputShape[1]; - auto const_mul = ngraph::builder::makeConstant(ngPrc, constShape, {}, true); - auto mul = std::make_shared(paramOuts[0], const_mul); - auto const_add = ngraph::builder::makeConstant(ngPrc, constShape, {}, true); - auto add = std::make_shared(mul, const_add); - ngraph::ResultVector results{std::make_shared(add)}; - function = std::make_shared(results, params, "multiplyAdd"); + auto const_mul = ngraph::builder::makeConstant(element_type, constShape, {}, true); + auto mul = std::make_shared(paramOuts[0], const_mul); + auto const_add = ngraph::builder::makeConstant(element_type, constShape, {}, true); + auto add = std::make_shared(mul, const_add); + ov::ResultVector results{std::make_shared(add)}; + function = std::make_shared(results, params, "multiplyAdd"); } -} // namespace SubgraphTestsDefinitions + +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/src/subgraph/mvn_multiply_add.cpp b/src/tests/functional/shared_test_classes/src/subgraph/mvn_multiply_add.cpp index 2371182175c711..9ff6272b9ab529 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/mvn_multiply_add.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/mvn_multiply_add.cpp @@ -4,12 +4,16 @@ #include "shared_test_classes/subgraph/mvn_multiply_add.hpp" -namespace SubgraphTestsDefinitions { +#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" -std::string MVNMultiplyAdd::getTestCaseName(const testing::TestParamInfo &obj) { - std::pair shapes; - InferenceEngine::SizeVector inputShapes, constantShapes; - InferenceEngine::Precision dataPrecision, axesPrecision; +namespace ov { +namespace test { + +std::string MVNMultiplyAdd::getTestCaseName(const testing::TestParamInfo& obj) { + std::pair shapes; + ov::Shape inputShapes, constantShapes; + ov::element::Type dataPrecision, axesPrecision; std::vector axes; bool normalizeVariance; float eps; @@ -20,8 +24,8 @@ std::string MVNMultiplyAdd::getTestCaseName(const testing::TestParamInfo shapes; - InferenceEngine::SizeVector inputShapes, constantShapes; - InferenceEngine::Precision dataPrecision, axesPrecision; + std::pair shapes; + ov::Shape inputShapes, constantShapes; + ov::element::Type dataType, axesType; std::vector axes; bool normalizeVariance; float eps; std::string epsMode; - std::tie(shapes, dataPrecision, axesPrecision, axes, normalizeVariance, eps, epsMode, targetDevice) = this->GetParam(); + std::tie(shapes, dataType, axesType, axes, normalizeVariance, eps, epsMode, targetDevice) = this->GetParam(); std::tie(inputShapes, constantShapes) = shapes; - auto dataType = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(dataPrecision); - auto axesType = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(axesPrecision); - - ov::ParameterVector param {std::make_shared(dataType, ov::Shape(inputShapes))}; - auto paramOuts = ngraph::helpers::convert2OutputVector(ngraph::helpers::castOps2Nodes(param)); - auto axesNode = ngraph::builder::makeConstant(axesType, ngraph::Shape{axes.size()}, axes); + ov::ParameterVector param{std::make_shared(dataType, ov::Shape(inputShapes))}; + auto paramOuts = + ngraph::helpers::convert2OutputVector(ngraph::helpers::castOps2Nodes(param)); + auto axesNode = ngraph::builder::makeConstant(axesType, ov::Shape{axes.size()}, axes); auto mvn = ngraph::builder::makeMVN6(paramOuts[0], axesNode, normalizeVariance, eps, epsMode); auto gamma = ngraph::builder::makeConstant(dataType, constantShapes, {}, 
true); - auto mul = std::make_shared(mvn, gamma); + auto mul = std::make_shared(mvn, gamma); auto beta = ngraph::builder::makeConstant(dataType, constantShapes, {}, true); - auto add = std::make_shared(mul, beta); + auto add = std::make_shared(mul, beta); - ngraph::ResultVector results{std::make_shared(add)}; - function = std::make_shared(results, param, "MVNMultiplyAdd"); + ov::ResultVector results{std::make_shared(add)}; + function = std::make_shared(results, param, "MVNMultiplyAdd"); } -} // namespace SubgraphTestsDefinitions + +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/src/subgraph/perm_conv_perm_concat.cpp b/src/tests/functional/shared_test_classes/src/subgraph/perm_conv_perm_concat.cpp index b488166297f618..e51e30a6caaa49 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/perm_conv_perm_concat.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/perm_conv_perm_concat.cpp @@ -4,105 +4,97 @@ #include "shared_test_classes/subgraph/perm_conv_perm_concat.hpp" -namespace SubgraphTestsDefinitions { +#include "common_test_utils/data_utils.hpp" +#include "functional_test_utils/skip_tests_config.hpp" +#include "ov_models/builders.hpp" + +namespace ov { +namespace test { + std::string PermConvPermConcat::getTestCaseName(const testing::TestParamInfo& obj) { - InferenceEngine::Precision netPrecision; + ov::element::Type element_type; std::string targetName; - std::array input_shape; - std::array kernel_shape; + ov::Shape input_shape; + ov::Shape kernel_shape; size_t output_channels; - std::map configuration; - + ov::AnyMap configuration; - std::tie(netPrecision, targetName, input_shape, kernel_shape, output_channels, configuration) = obj.param; + std::tie(element_type, targetName, input_shape, kernel_shape, output_channels, configuration) = obj.param; std::ostringstream results; results << "IS=" << ov::test::utils::vec2str(std::vector(input_shape.begin(), input_shape.end())) << "_"; results << "KS=" << ov::test::utils::vec2str(std::vector(kernel_shape.begin(), kernel_shape.end())) << "_"; results << "OC=" << output_channels << "_"; - results << "netPRC=" << netPrecision.name() << "_"; + results << "ET=" << element_type << "_"; results << "targetDevice=" << targetName; for (auto const& configItem : configuration) { - results << "_configItem=" << configItem.first << "_" << configItem.second; + results << "_configItem=" << configItem.first << "_" << configItem.second.as(); } return results.str(); } void PermConvPermConcat::SetUp() { - InferenceEngine::Precision netPrecision; - std::array input_shape; - std::array kernel_shape; + ov::element::Type element_type; + ov::Shape input_shape; + ov::Shape kernel_shape; size_t output_channels; - std::map additional_config; + ov::AnyMap additional_config; - std::tie(netPrecision, targetDevice, input_shape, kernel_shape, output_channels, additional_config) = this->GetParam(); + std::tie(element_type, targetDevice, input_shape, kernel_shape, output_channels, additional_config) = + this->GetParam(); configuration.insert(additional_config.begin(), additional_config.end()); const std::size_t input_dim = std::accumulate(input_shape.begin(), input_shape.end(), 1, std::multiplies()); - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - std::vector input_dims { 1, input_dim }; + std::vector input_dims{1, input_dim}; std::vector reshape_in_dims = std::vector(input_shape.begin(), input_shape.end()); - std::vector permute_in_order = { 0, 3, 1, 2 }; - std::vector 
permute_out_order = { 0, 2, 3, 1 }; + std::vector permute_in_order = {0, 3, 1, 2}; + std::vector permute_out_order = {0, 2, 3, 1}; - ov::ParameterVector input_parameter {std::make_shared(ngPrc, ov::Shape(input_dims))}; + ov::ParameterVector input_parameter{std::make_shared(element_type, ov::Shape(input_dims))}; - auto reshape_in_pattern = std::make_shared(ngraph::element::i64, - ngraph::Shape{4}, - reshape_in_dims); - auto reshape_in = std::make_shared(input_parameter[0], reshape_in_pattern, false); + auto reshape_in_pattern = std::make_shared(ov::element::i64, ov::Shape{4}, reshape_in_dims); + auto reshape_in = std::make_shared(input_parameter[0], reshape_in_pattern, false); - auto permute_in_params = std::make_shared(ngraph::element::i64, - ngraph::Shape{4}, - ngraph::Shape{permute_in_order}); - auto permute_in = std::make_shared(reshape_in, permute_in_params); + auto permute_in_params = + std::make_shared(ov::element::i64, ov::Shape{4}, ov::Shape{permute_in_order}); + auto permute_in = std::make_shared(reshape_in, permute_in_params); auto conv_in_shape = permute_in->get_output_shape(0); auto conv_weights_size = output_channels * (conv_in_shape[1]) * kernel_shape[0] * kernel_shape[1]; - auto conv = ngraph::builder::makeConvolution(permute_in, ngPrc, {kernel_shape[0], kernel_shape[1]}, {1, 1}, {0, 0}, {0, 0}, {1, 1}, - ngraph::op::PadType::VALID, output_channels, false, ov::test::utils::generate_float_numbers(conv_weights_size, -0.5f, 0.5f)); - - auto permute_out_params = std::make_shared(ngraph::element::i64, - ngraph::Shape{4}, - permute_out_order); - auto permute_out = std::make_shared(conv, permute_out_params); + auto conv = + ngraph::builder::makeConvolution(permute_in, + element_type, + {kernel_shape[0], kernel_shape[1]}, + {1, 1}, + {0, 0}, + {0, 0}, + {1, 1}, + ov::op::PadType::VALID, + output_channels, + false, + ov::test::utils::generate_float_numbers(conv_weights_size, -0.5f, 0.5f)); + + auto permute_out_params = std::make_shared(ov::element::i64, ov::Shape{4}, permute_out_order); + auto permute_out = std::make_shared(conv, permute_out_params); auto permute_out_shape = permute_out->get_output_shape(0); - auto concat_const = ngraph::builder::makeConstant(ngPrc, {1, 1, 1, permute_out_shape[3]}, - ov::test::utils::generate_float_numbers(permute_out_shape[3], -10, 10)); + auto concat_const = + ngraph::builder::makeConstant(element_type, + {1, 1, 1, permute_out_shape[3]}, + ov::test::utils::generate_float_numbers(permute_out_shape[3], -10, 10)); auto concat = ngraph::builder::makeConcat({permute_out, concat_const}, 2); - auto reshape_out_pattern = std::make_shared(ngraph::element::i64, - ngraph::Shape{2}, + auto reshape_out_pattern = std::make_shared( + ov::element::i64, + ov::Shape{2}, InferenceEngine::SizeVector({1, (permute_out_shape[2] + 1) * permute_out_shape[3]})); - auto reshape_out = std::make_shared(concat, reshape_out_pattern, false); + auto reshape_out = std::make_shared(concat, reshape_out_pattern, false); - function = std::make_shared(reshape_out, input_parameter, "perm_conv_perm_concat"); + function = std::make_shared(reshape_out, input_parameter, "perm_conv_perm_concat"); } -void PermConvPermConcat::Run() { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - - LoadNetwork(); - - inferRequest = executableNetwork.CreateInferRequest(); - inputs.clear(); - - for (const auto &input : cnnNetwork.getInputsInfo()) { - const auto &info = input.second; - auto tensorDesc = info->getTensorDesc(); - - auto blob = FuncTestUtils::createAndFillBlobFloat(tensorDesc, 2, -1, 100, 111); - - 
FuncTestUtils::fillInputsBySinValues(blob); - inferRequest.SetBlob(info->name(), blob); - inputs.push_back(blob); - } - inferRequest.Infer(); - - Validate(); -} -} // namespace SubgraphTestsDefinitions +} // namespace test +} // namespace ov From 31670dacb47392ab9c9a21c930fe6c2f365fa92c Mon Sep 17 00:00:00 2001 From: Andrei Gorbachev Date: Fri, 13 Oct 2023 10:36:47 +0100 Subject: [PATCH 186/257] [GPU] Refactor CTCGreedyDecoderSeqLen, CTCGreedyDecoder, CTCLoss (#20432) * CTCGreedyDecoderSeqLen, CTCGreedyDecoder * CTCLoss --------- Co-authored-by: Pavel Durandin --- .../single_layer_tests/ctc_greedy_decoder.cpp | 36 +++++++++---------- .../ctc_greedy_decoder_seq_len.cpp | 26 +++++++------- .../single_layer_tests/ctc_loss.cpp | 35 +++++++++--------- 3 files changed, 46 insertions(+), 51 deletions(-) diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/ctc_greedy_decoder.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/ctc_greedy_decoder.cpp index 2206490567e7e2..b7c2807ce37086 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/ctc_greedy_decoder.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/ctc_greedy_decoder.cpp @@ -3,35 +3,33 @@ // #include -#include "single_layer_tests/ctc_greedy_decoder.hpp" +#include "single_op_tests/ctc_greedy_decoder.hpp" #include "common_test_utils/test_constants.hpp" -using namespace LayerTestsDefinitions; -using namespace ngraph::helpers; - namespace { +using ov::test::CTCGreedyDecoderLayerTest; + // Common params -const std::vector netPrecisions = { - InferenceEngine::Precision::FP32, - InferenceEngine::Precision::FP16 +const std::vector netPrecisions = { + ov::element::f32, + ov::element::f16 }; std::vector mergeRepeated{true, false}; +std::vector> input_shapes_static = { + {{ 50, 3, 3 }}, + {{ 50, 3, 7 }}, + {{ 50, 3, 8 }}, + {{ 50, 3, 16 }}, + {{ 50, 3, 128 }}, + {{ 50, 3, 49 }}, + {{ 50, 3, 55 }}, + {{ 1, 1, 16 }}}; + INSTANTIATE_TEST_SUITE_P(smoke_CtcGreedyDecoderBasic, CTCGreedyDecoderLayerTest, ::testing::Combine(::testing::ValuesIn(netPrecisions), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(std::vector({50, 3, 3}), - std::vector({50, 3, 7}), - std::vector({50, 3, 8}), - std::vector({50, 3, 16}), - std::vector({50, 3, 128}), - std::vector({50, 3, 49}), - std::vector({50, 3, 55}), - std::vector({1, 1, 16})), + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_static)), ::testing::ValuesIn(mergeRepeated), ::testing::Values(ov::test::utils::DEVICE_GPU)), CTCGreedyDecoderLayerTest::getTestCaseName); diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/ctc_greedy_decoder_seq_len.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/ctc_greedy_decoder_seq_len.cpp index c015258d41ed24..12d318d107d342 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/ctc_greedy_decoder_seq_len.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/ctc_greedy_decoder_seq_len.cpp @@ -3,30 +3,28 @@ // #include -#include "single_layer_tests/ctc_greedy_decoder_seq_len.hpp" +#include "single_op_tests/ctc_greedy_decoder_seq_len.hpp" #include 
"common_test_utils/test_constants.hpp" -using namespace LayerTestsDefinitions; -using namespace ngraph::helpers; - namespace { +using ov::test::CTCGreedyDecoderSeqLenLayerTest; -std::vector> inputShape{{1, 1, 1}, {1, 6, 10}, {3, 3, 16}, {5, 3, 55}}; +std::vector> inputShape{{{1, 1, 1}}, {{1, 6, 10}}, {{3, 3, 16}}, {{5, 3, 55}}}; -const std::vector probPrecisions = { - InferenceEngine::Precision::FP32, - InferenceEngine::Precision::FP16 +const std::vector probPrecisions = { + ov::element::f32, + ov::element::f16 }; -const std::vector idxPrecisions = { - InferenceEngine::Precision::I32, - InferenceEngine::Precision::I64 +const std::vector idxPrecisions = { + ov::element::i32, + ov::element::i64 }; std::vector mergeRepeated{true, false}; INSTANTIATE_TEST_SUITE_P(smoke_set1, CTCGreedyDecoderSeqLenLayerTest, - ::testing::Combine(::testing::ValuesIn(inputShape), + ::testing::Combine(::testing::ValuesIn(ov::test::static_shapes_to_test_representation(inputShape)), ::testing::Values(10), ::testing::ValuesIn(probPrecisions), ::testing::ValuesIn(idxPrecisions), @@ -37,8 +35,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_set1, INSTANTIATE_TEST_SUITE_P(smoke_set2, CTCGreedyDecoderSeqLenLayerTest, - ::testing::Combine(::testing::ValuesIn(std::vector>{{2, 8, 11}, - {4, 10, 55}}), + ::testing::Combine(::testing::ValuesIn(ov::test::static_shapes_to_test_representation(std::vector>{{{2, 8, 11}}, + {{4, 10, 55}}})), ::testing::ValuesIn(std::vector{5, 100}), ::testing::ValuesIn(probPrecisions), ::testing::ValuesIn(idxPrecisions), diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/ctc_loss.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/ctc_loss.cpp index 9e71f45de30034..742b4974e6fc88 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/ctc_loss.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/ctc_loss.cpp @@ -2,22 +2,21 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "single_layer_tests/ctc_loss.hpp" +#include "single_op_tests/ctc_loss.hpp" #include -using namespace LayerTestsDefinitions; - namespace { +using ov::test::CTCLossLayerTest; -const std::vector fPrecisions = { - InferenceEngine::Precision::FP32, - InferenceEngine::Precision::FP16, +const std::vector fPrecisions = { + ov::element::f32, + ov::element::f16, }; -const std::vector iPrecisions = { - InferenceEngine::Precision::I32, - InferenceEngine::Precision::I64, +const std::vector iPrecisions = { + ov::element::i32, + ov::element::i64, }; const std::vector preprocessCollapseRepeated = {true, false}; @@ -25,7 +24,6 @@ const std::vector ctcMergeRepeated = {true, false}; const std::vector unique = {true, false}; const auto ctcLossArgsSubset1 = testing::Combine( - testing::Values(std::vector({2, 3, 3})), // logits shape testing::ValuesIn(std::vector>({{2, 3}, {3, 3}})), // logits length testing::ValuesIn( std::vector>>({{{0, 1, 0}, {1, 0, 1}}, {{0, 1, 2}, {1, 1, 1}}})), // labels @@ -38,14 +36,14 @@ const auto ctcLossArgsSubset1 = testing::Combine( INSTANTIATE_TEST_SUITE_P(smoke_CTCLoss_Set1, CTCLossLayerTest, testing::Combine(ctcLossArgsSubset1, - testing::ValuesIn(fPrecisions), - testing::ValuesIn(iPrecisions), - testing::Values(ov::test::utils::DEVICE_GPU)), + testing::Values(ov::test::static_shapes_to_test_representation({{2, 3, 3}})), // logits shape + testing::ValuesIn(fPrecisions), + testing::ValuesIn(iPrecisions), + testing::Values(ov::test::utils::DEVICE_GPU)), 
CTCLossLayerTest::getTestCaseName); const auto ctcLossArgsSubset2 = - testing::Combine(testing::Values(std::vector({3, 6, 8})), // logits shape - testing::ValuesIn(std::vector>({{6, 5, 6}, {5, 5, 5}})), // logits length + testing::Combine(testing::ValuesIn(std::vector>({{6, 5, 6}, {5, 5, 5}})), // logits length testing::ValuesIn(std::vector>>( {{{4, 1, 2, 3, 4, 5}, {5, 4, 3, 0, 1, 0}, {2, 1, 3, 1, 3, 0}}, {{2, 1, 5, 3, 2, 6}, {3, 3, 3, 3, 3, 3}, {6, 5, 6, 5, 6, 5}}})), // labels @@ -58,8 +56,9 @@ const auto ctcLossArgsSubset2 = INSTANTIATE_TEST_SUITE_P(smoke_CTCLoss_Set2, CTCLossLayerTest, testing::Combine(ctcLossArgsSubset2, - testing::ValuesIn(fPrecisions), - testing::ValuesIn(iPrecisions), - testing::Values(ov::test::utils::DEVICE_GPU)), + testing::Values(ov::test::static_shapes_to_test_representation({{3, 6, 8}})), // logits shape + testing::ValuesIn(fPrecisions), + testing::ValuesIn(iPrecisions), + testing::Values(ov::test::utils::DEVICE_GPU)), CTCLossLayerTest::getTestCaseName); } // namespace From 7bc8e0fb8f1adc274de1d52b18c235391ecc6cd2 Mon Sep 17 00:00:00 2001 From: Pawel Raasz Date: Fri, 13 Oct 2023 12:10:35 +0200 Subject: [PATCH 187/257] [core]Shape and Node util functions size optimization (#20206) * Refactor shape_size util to reduce bin size * Make `check_new_args_count` non-template function * Use as not template check_new_args_count in multi-nominal --- src/core/include/openvino/core/node.hpp | 22 +++++++++------------- src/core/include/openvino/core/shape.hpp | 22 +++++++++------------- src/core/src/node.cpp | 11 +++++++++++ src/core/src/op/multinomial.cpp | 4 +--- 4 files changed, 30 insertions(+), 29 deletions(-) diff --git a/src/core/include/openvino/core/node.hpp b/src/core/include/openvino/core/node.hpp index 860290617709a7..ac1c61ce0f18d3 100644 --- a/src/core/include/openvino/core/node.hpp +++ b/src/core/include/openvino/core/node.hpp @@ -554,21 +554,17 @@ OPENVINO_API void NodeValidationFailure::create(const CheckLocInfo& check_loc_in NODE_VALIDATION_CHECK(std::make_pair(static_cast((node)), &(input_shapes)), __VA_ARGS__) namespace ov { -template -void check_new_args_count(const Node* node, T new_args) { - NODE_VALIDATION_CHECK(node, - new_args.size() == node->input_values().size(), - "clone_with_new_inputs() expected ", - node->input_values().size(), - " argument", - (node->input_values().size() == 1 ? "" : "s"), - " but got ", - new_args.size()); -} -} // namespace ov +/** + * @brief Check new arguments size if match node inputs count. + * + * This check is required in cloning ov::Node. + * + * @param node Pointer to node. + * @param new_args Vector with new outputs to check. + */ +void OPENVINO_API check_new_args_count(const Node* const node, const OutputVector& new_args); -namespace ov { /// \brief Visits a reference to a node that has been registered with the visitor. template <> class OPENVINO_API AttributeAdapter> : public VisitorAdapter { diff --git a/src/core/include/openvino/core/shape.hpp b/src/core/include/openvino/core/shape.hpp index 392bd9c48bd9d5..a04a864a8394fb 100644 --- a/src/core/include/openvino/core/shape.hpp +++ b/src/core/include/openvino/core/shape.hpp @@ -42,19 +42,6 @@ class Shape : public std::vector { OPENVINO_API std::string to_string() const; }; -/** - * @brief Number of elements in spanned by a shape - * @ingroup ov_model_cpp_api - */ -template -size_t shape_size(const SHAPE_TYPE& shape) { - size_t size = 1; - for (auto d : shape) { - size *= d; - } - return size; -} - /** * Number of elements in a subset of dimensions of a shape. 
* Returns a product of dimensions in a range [start_dim;end_dim) @@ -72,6 +59,15 @@ size_t shape_size(ForwardIt start_dim, const ForwardIt end_dim) { std::multiplies::value_type>()); } +/** + * @brief Number of elements in spanned by a shape + * @ingroup ov_model_cpp_api + */ +template +size_t shape_size(const SHAPE_TYPE& shape) { + return shape_size(shape.begin(), shape.end()); +} + /// Row-major strides for a shape template std::vector row_major_strides(const SHAPE_TYPE& shape) { diff --git a/src/core/src/node.cpp b/src/core/src/node.cpp index ee2c454bb6a235..492f0dec1e3a04 100644 --- a/src/core/src/node.cpp +++ b/src/core/src/node.cpp @@ -844,6 +844,17 @@ bool ov::Node::visit_attributes(AttributeVisitor&) { } namespace ov { +void check_new_args_count(const Node* const node, const OutputVector& new_args) { + NODE_VALIDATION_CHECK(node, + new_args.size() == node->input_values().size(), + "clone_with_new_inputs() expected ", + node->input_values().size(), + " argument", + (node->input_values().size() == 1 ? "" : "s"), + " but got ", + new_args.size()); +} + AttributeAdapter>::AttributeAdapter(std::shared_ptr& value) : m_ref(value) {} bool AttributeAdapter>::visit_attributes(AttributeVisitor& visitor) { diff --git a/src/core/src/op/multinomial.cpp b/src/core/src/op/multinomial.cpp index 90f41369364879..d8e6a967f38b58 100644 --- a/src/core/src/op/multinomial.cpp +++ b/src/core/src/op/multinomial.cpp @@ -6,11 +6,9 @@ #include -#include "bound_evaluate.hpp" #include "itt.hpp" #include "multinomial_shape_inference.hpp" #include "openvino/core/attribute_visitor.hpp" -#include "openvino/op/constant.hpp" #include "openvino/op/util/op_types.hpp" #include "openvino/reference/multinomial.hpp" @@ -60,7 +58,7 @@ void op::v13::Multinomial::validate_and_infer_types() { std::shared_ptr op::v13::Multinomial::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v13_Multinomial_clone_with_new_inputs); - check_new_args_count(this, new_args); + check_new_args_count(this, new_args); return std::make_shared(new_args.at(0), new_args.at(1), From 2478cf7df3c8e0cc23fbdd488f931247b3ba69fe Mon Sep 17 00:00:00 2001 From: Katarzyna Mitrus Date: Fri, 13 Oct 2023 12:50:02 +0200 Subject: [PATCH 188/257] [Opset13][pyAPI] Python API Multinomial-13 (#20400) * Init Multinomial op python API * Add python tests for Multinomial op * Update num_samples input description --- .../src/openvino/runtime/opset13/__init__.py | 1 + .../src/openvino/runtime/opset13/ops.py | 41 ++++++++++ .../tests/test_graph/test_multinomial.py | 78 +++++++++++++++++++ 3 files changed, 120 insertions(+) create mode 100644 src/bindings/python/tests/test_graph/test_multinomial.py diff --git a/src/bindings/python/src/openvino/runtime/opset13/__init__.py b/src/bindings/python/src/openvino/runtime/opset13/__init__.py index 4ea991bf77b9ec..9cdb7149569ebb 100644 --- a/src/bindings/python/src/openvino/runtime/opset13/__init__.py +++ b/src/bindings/python/src/openvino/runtime/opset13/__init__.py @@ -106,6 +106,7 @@ from openvino.runtime.opset4.ops import mish from openvino.runtime.opset1.ops import mod from openvino.runtime.opset9.ops import multiclass_nms +from openvino.runtime.opset13.ops import multinomial from openvino.runtime.opset1.ops import multiply from openvino.runtime.opset6.ops import mvn from openvino.runtime.opset1.ops import negative diff --git a/src/bindings/python/src/openvino/runtime/opset13/ops.py b/src/bindings/python/src/openvino/runtime/opset13/ops.py index f864d7fccca0ea..fff95b33d234d6 100644 --- 
a/src/bindings/python/src/openvino/runtime/opset13/ops.py +++ b/src/bindings/python/src/openvino/runtime/opset13/ops.py @@ -110,6 +110,47 @@ def bitwise_xor( ) +@nameable_op +def multinomial( + probs: NodeInput, + num_samples: NodeInput, + convert_type: str, + with_replacement: bool, + log_probs: bool, + global_seed: int = 0, + op_seed: int = 0, +) -> Node: + """Return a node which generates a sequence of class indices sampled from the multinomial distribution. + + :param probs: Tensor with probabilities of floating-point type, and shape [class_size] or [batch_size, class_size]. + :param num_samples: Tensor (scalar or 1D) a single element of type i32 or i64, + specifying the number of samples to draw from the multinomial distribution. + :param convert_type: Specifies the output tensor type, possible values: 'i64', 'i32'. + :param with_replacement: Flag that specifies whether to sample with replacement. + :param log_probs: Flag that specifies whether *probs* should be treated as unnormalized log probabilities. + :param global_seed: Specifies global seed value. Required to be a positive integer or 0. + :param op_seed: Specifies operational seed value. Required to be a positive integer or 0. + + :return: The new node performing Multinomial operation. + """ + inputs = as_nodes(probs, num_samples) + + if global_seed < 0: + raise RuntimeError(f"global_seed should be positive or 0. Got: {global_seed}") + + if op_seed < 0: + raise RuntimeError(f"op_seed should be positive or 0. Got: {op_seed}") + + attributes = { + "convert_type": convert_type, + "with_replacement": with_replacement, + "log_probs": log_probs, + "global_seed": global_seed, + "op_seed": op_seed, + } + return _get_node_factory_opset13().create("Multinomial", inputs, attributes) + + @nameable_op def nms_rotated( boxes: NodeInput, diff --git a/src/bindings/python/tests/test_graph/test_multinomial.py b/src/bindings/python/tests/test_graph/test_multinomial.py new file mode 100644 index 00000000000000..a1275837cc39d8 --- /dev/null +++ b/src/bindings/python/tests/test_graph/test_multinomial.py @@ -0,0 +1,78 @@ +# -*- coding: utf-8 -*- +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import pytest + +import openvino.runtime.opset13 as ops +from openvino.runtime import PartialShape, Dimension, Type + + +@pytest.mark.parametrize( + ("probs_shape", "num_samples_shape", "convert_type", "with_replacement", "log_probs", "global_seed", "op_seed", "expected_out_shape"), + [ + ([4, 16], [], "i32", False, True, 7461, 1546, PartialShape([4, -1])), + ([8], [1], "i64", True, False, 0, 0, PartialShape([-1])), + ], +) +def test_multinomial_param_inputs(probs_shape, num_samples_shape, convert_type, with_replacement, log_probs, global_seed, op_seed, expected_out_shape): + probs = ops.parameter(probs_shape, dtype=np.float32) + num_samples = ops.parameter(num_samples_shape, dtype=np.int32) + + op = ops.multinomial(probs, num_samples, + convert_type=convert_type, + with_replacement=with_replacement, + log_probs=log_probs, + global_seed=global_seed, + op_seed=op_seed) + assert op.get_output_size() == 1 + assert op.get_type_name() == "Multinomial" + assert op.get_output_element_type(0) == Type.i32 if convert_type == "i32" else Type.i64 + assert op.get_output_partial_shape(0) == expected_out_shape + + +@pytest.mark.parametrize( + ("probs_array", "num_samples_val", "convert_type", "with_replacement", "log_probs", "global_seed", "op_seed", "expected_out_shape"), + [ + (np.array([0.7, 0.3, 0.6, 0.5]), 3, 
"i32", False, True, 111, 222, PartialShape([3])), + (np.array([[0.7, 0.3], [0.6, 0.5]]), 2, "i64", True, False, 111, 222, PartialShape([2, 2])), + ], +) +def test_multinomial_const_inputs(probs_array, num_samples_val, convert_type, with_replacement, log_probs, global_seed, op_seed, expected_out_shape): + probs = ops.constant(probs_array, dtype=np.float32) + num_samples = ops.constant(num_samples_val, dtype=np.int32) + + op = ops.multinomial(probs, num_samples, + convert_type=convert_type, + with_replacement=with_replacement, + log_probs=log_probs, + global_seed=global_seed, + op_seed=op_seed) + + assert op.get_output_size() == 1 + assert op.get_type_name() == "Multinomial" + assert op.get_output_element_type(0) == Type.i32 if convert_type == "i32" else Type.i64 + assert op.get_output_partial_shape(0) == expected_out_shape + + +@pytest.mark.parametrize( + ("probs_shape", "num_samples_shape", "convert_type", "with_replacement", "log_probs", "expected_out_shape"), + [ + ([10], [1], "i32", True, True, PartialShape([-1])), + ([2, 16], [], "i64", False, False, PartialShape([2, -1])), + ], +) +def test_multinomial_default_attrs(probs_shape, num_samples_shape, convert_type, with_replacement, log_probs, expected_out_shape): + probs = ops.parameter(probs_shape, dtype=np.float32) + num_samples = ops.parameter(num_samples_shape, dtype=np.int32) + + op = ops.multinomial(probs, num_samples, + convert_type=convert_type, + with_replacement=with_replacement, + log_probs=log_probs) + + assert op.get_output_size() == 1 + assert op.get_type_name() == "Multinomial" + assert op.get_output_element_type(0) == Type.i32 if convert_type == "i32" else Type.i64 + assert op.get_output_partial_shape(0) == expected_out_shape From a13eb5bf9d010ffef8a8d9e986e3b8c692c526ad Mon Sep 17 00:00:00 2001 From: Pawel Raasz Date: Fri, 13 Oct 2023 12:53:58 +0200 Subject: [PATCH 189/257] [core]Migrate TopK operator to new API (#20254) * Migrate TopK to new API * Refactor compare_max for TopK * Unify check of k for const and non-const input * Update src/core/include/openvino/op/util/evaluate_helpers.hpp Co-authored-by: Tomasz Jankowski * Move `get_tensors_partial_shapes` to dev API --------- Co-authored-by: Tomasz Jankowski --- src/core/dev_api/validation_util.hpp | 5 + src/core/include/openvino/op/topk.hpp | 16 +- .../openvino/op/util/evaluate_helpers.hpp | 23 - .../include/openvino/reference/topk.hpp | 99 ++-- src/core/src/op/eye.cpp | 3 +- src/core/src/op/topk.cpp | 438 +++++++----------- src/core/src/op/util/evaluate_helpers.cpp | 17 - src/core/src/validation_util.cpp | 9 + 8 files changed, 243 insertions(+), 367 deletions(-) delete mode 100644 src/core/include/openvino/op/util/evaluate_helpers.hpp diff --git a/src/core/dev_api/validation_util.hpp b/src/core/dev_api/validation_util.hpp index c214b404798a9c..e93fefd1411eb9 100644 --- a/src/core/dev_api/validation_util.hpp +++ b/src/core/dev_api/validation_util.hpp @@ -78,5 +78,10 @@ bool try_apply_auto_padding(const PartialShape& image_shape, CoordinateDiff& padding_above, CoordinateDiff& padding_below); +/// @brief Get the tensors shapes as ov::PartialShape. +/// +/// @param tensors Input tensors vector to get their shapes. +/// @return Vector of partial shapes same size as input tensor vector. 
+OPENVINO_API std::vector get_tensors_partial_shapes(const TensorVector& tensors); } // namespace util } // namespace ov diff --git a/src/core/include/openvino/op/topk.hpp b/src/core/include/openvino/op/topk.hpp index 9c2ec7a9ce1492..cfc6ccd6cc5462 100644 --- a/src/core/include/openvino/op/topk.hpp +++ b/src/core/include/openvino/op/topk.hpp @@ -36,7 +36,7 @@ class OPENVINO_API TopK : public util::TopKBase { /// the biggest element of two. /// \param sort Specifies order of output elements and/or indices /// Accepted values: none, index, value - /// \param index_element_type Specyfies type of produced indices + /// \param index_element_type Specifies type of produced indices TopK(const Output& data, const Output& k, const int64_t axis, @@ -53,9 +53,7 @@ class OPENVINO_API TopK : public util::TopKBase { std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - OPENVINO_SUPPRESS_DEPRECATED_START - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - OPENVINO_SUPPRESS_DEPRECATED_END + bool evaluate(TensorVector& outputs, const TensorVector& inputs) const override; bool has_evaluate() const override; protected: @@ -83,7 +81,7 @@ class OPENVINO_API TopK : public util::TopKBase { /// the biggest element of two. /// \param sort Specifies order of output elements and/or indices /// Accepted values: none, index, value - /// \param index_element_type Specyfies type of produced indices + /// \param index_element_type Specifies type of produced indices TopK(const Output& data, const Output& k, const int64_t axis, @@ -99,9 +97,7 @@ class OPENVINO_API TopK : public util::TopKBase { const element::Type& index_element_type = element::i32); std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - OPENVINO_SUPPRESS_DEPRECATED_START - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - OPENVINO_SUPPRESS_DEPRECATED_END + bool evaluate(TensorVector& outputs, const TensorVector& inputs) const override; bool has_evaluate() const override; }; } // namespace v3 @@ -153,9 +149,7 @@ class OPENVINO_API TopK : public util::TopKBase { bool visit_attributes(AttributeVisitor& visitor) override; std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - OPENVINO_SUPPRESS_DEPRECATED_START - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - OPENVINO_SUPPRESS_DEPRECATED_END + bool evaluate(TensorVector& outputs, const TensorVector& inputs) const override; bool has_evaluate() const override; bool get_stable() const { diff --git a/src/core/include/openvino/op/util/evaluate_helpers.hpp b/src/core/include/openvino/op/util/evaluate_helpers.hpp deleted file mode 100644 index 616528adf60d08..00000000000000 --- a/src/core/include/openvino/op/util/evaluate_helpers.hpp +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "openvino/core/partial_shape.hpp" -#include "openvino/runtime/tensor.hpp" - -namespace ov { -namespace op { -namespace util { - -/** - * @brief Get the tensors shapes as ov::PartialShape. - * - * @param tensors Input tensors vector to get its shapes. - * @return Vector of partial shapes sam size as input tensor vector. 
- */ -std::vector get_tensors_partial_shapes(const TensorVector& tensors); -} // namespace util -} // namespace op -} // namespace ov diff --git a/src/core/reference/include/openvino/reference/topk.hpp b/src/core/reference/include/openvino/reference/topk.hpp index c84fb54e9962bb..76ce901eb27f9a 100644 --- a/src/core/reference/include/openvino/reference/topk.hpp +++ b/src/core/reference/include/openvino/reference/topk.hpp @@ -8,7 +8,7 @@ #include #include -#include "openvino/op/topk.hpp" +#include "openvino/op/util/attr_types.hpp" #include "openvino/reference/utils/coordinate_index.hpp" #include "openvino/reference/utils/coordinate_transform.hpp" @@ -17,23 +17,11 @@ namespace reference { // This used to be lambda expressions but MSVC had difficulty compiling it. This way is more explicit. template inline bool compare_max(const std::tuple& a, const std::tuple& b) { -// this is intentional to be able to compare floats directly -// without using relative or absolute tolerance -#if defined(__GNUC__) -# pragma GCC diagnostic push -# pragma GCC diagnostic ignored "-Wfloat-equal" -#endif - if (std::get<0>(a) == std::get<0>(b)) { + if (std::get<0>(a) != std::get<0>(b)) { + return D ? std::get<0>(a) > std::get<0>(b) : std::get<0>(a) < std::get<0>(b); + } else { return std::get<1>(a) < std::get<1>(b); } -#if defined(__GNUC__) -# pragma GCC diagnostic pop -#endif - - if (D) - return std::get<0>(a) > std::get<0>(b); - else - return std::get<0>(a) < std::get<0>(b); } template @@ -41,63 +29,76 @@ inline bool compare_indices_ascending(const std::tuple& a, const std::tupl return std::get<1>(a) < std::get<1>(b); } -// TopK reference implementation provides stable indices output +/** + * @brief Reference implementation fo TopK operator + * + * @param arg Pointer to input data. + * @param out_indices Pointer to output indicies. + * @param out_values Pointer to output values. + * @param in_shape Input data shape. + * @param out_shape Output data (values, indicies) shape. + * @param axis Axis for search of top K elements. + * @param k Number to find of top elements. + * @param compute_max Select mode of find max or min. + * @param sort Sorting type. + */ template void topk(const T* arg, U* out_indices, T* out_values, const Shape& in_shape, const Shape& out_shape, - size_t axis, - size_t k, - bool compute_max, - op::TopKSortType sort = op::TopKSortType::NONE) { - using namespace std; + const size_t axis, + const size_t k, + const bool compute_max, + const op::TopKSortType sort = op::TopKSortType::NONE) { // Create temp vector for sorting. 
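    // Each workspace entry is a (value, original index) pair; when two values are equal,
    // the lower original index compares first, which keeps the indices output stable.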
- vector> workspace(in_shape[axis]); - vector in_strides = row_major_strides(in_shape); - vector out_strides = row_major_strides(out_shape); - auto in_axis_stride = in_strides[axis]; - auto out_axis_stride = out_strides[axis]; + std::vector> workspace(in_shape[axis]); + const auto in_strides = row_major_strides(in_shape); + const auto out_strides = row_major_strides(out_shape); + const auto in_axis_stride = in_strides[axis]; + const auto out_axis_stride = out_strides[axis]; // Iterate over elements with 0 index at "axis" dimension auto traverse_shape = in_shape; traverse_shape[axis] = 1; CoordinateTransformBasic traverse_transform(traverse_shape); - for (const Coordinate& coord : traverse_transform) { + for (const auto& coord : traverse_transform) { auto arg_index = coordinate_index(coord, in_shape); auto out_index = coordinate_index(coord, out_shape); // Fill the temp vector U i = 0; - for (tuple& entry : workspace) { - get<0>(entry) = arg[arg_index]; - get<1>(entry) = i; + for (auto& entry : workspace) { + std::get<0>(entry) = arg[arg_index]; + std::get<1>(entry) = i; arg_index += in_axis_stride; - i++; - } - // Sort the temp vector - if (compute_max) { - nth_element(workspace.begin(), workspace.begin() + k, workspace.end(), compare_max); - } else { - nth_element(workspace.begin(), workspace.begin() + k, workspace.end(), compare_max); + ++i; } - // Write temp vector to output + + const auto cmp_func = compute_max ? compare_max : compare_max; + + typename std::decay::type sort_func; switch (sort) { - case op::TopKSortType::NONE: - break; case op::TopKSortType::SORT_INDICES: - std::sort(workspace.begin(), workspace.begin() + k, compare_indices_ascending); + sort_func = compare_indices_ascending; break; case op::TopKSortType::SORT_VALUES: - if (compute_max) - std::sort(workspace.begin(), workspace.begin() + k, compare_max); - else - std::sort(workspace.begin(), workspace.begin() + k, compare_max); + sort_func = cmp_func; + break; + default: + sort_func = nullptr; + break; } - for (size_t j = 0; j < k; j++) { + + std::nth_element(workspace.begin(), workspace.begin() + k, workspace.end(), cmp_func); + if (sort_func) { + std::sort(workspace.begin(), workspace.begin() + k, sort_func); + } + + for (size_t j = 0; j < k; ++j) { const auto& entry = workspace[j]; - out_values[out_index] = get<0>(entry); - out_indices[out_index] = get<1>(entry); + out_values[out_index] = std::get<0>(entry); + out_indices[out_index] = std::get<1>(entry); out_index += out_axis_stride; } } diff --git a/src/core/src/op/eye.cpp b/src/core/src/op/eye.cpp index edf9abbb06f4c4..4f1ecca6d47ad7 100644 --- a/src/core/src/op/eye.cpp +++ b/src/core/src/op/eye.cpp @@ -8,7 +8,6 @@ #include "eye_shape_inference.hpp" #include "itt.hpp" #include "openvino/core/validation_util.hpp" -#include "openvino/op/util/evaluate_helpers.hpp" #include "openvino/reference/eye.hpp" namespace ov { @@ -107,7 +106,7 @@ bool Eye::evaluate(TensorVector& outputs, const TensorVector& inputs) const { OPENVINO_ASSERT(outputs.size() == 1); // Inputs size and shapes checked by shape_infer - const auto input_shapes = util::get_tensors_partial_shapes(inputs); + const auto input_shapes = ov::util::get_tensors_partial_shapes(inputs); const auto output_shape = shape_infer(this, input_shapes, make_tensor_accessor(inputs)).front().to_shape(); int64_t diagonal_index; diff --git a/src/core/src/op/topk.cpp b/src/core/src/op/topk.cpp index da56c6bb7494c7..a84d0490d9bebd 100644 --- a/src/core/src/op/topk.cpp +++ b/src/core/src/op/topk.cpp @@ -4,163 +4,153 @@ #include 
"openvino/op/topk.hpp" -#include -#include - +#include "element_visitor.hpp" #include "itt.hpp" -#include "openvino/core/attribute_visitor.hpp" -#include "openvino/core/axis_vector.hpp" -#include "openvino/core/dimension_tracker.hpp" -#include "openvino/core/shape.hpp" #include "openvino/core/validation_util.hpp" -#include "openvino/op/constant.hpp" -#include "openvino/op/util/op_types.hpp" #include "openvino/reference/topk.hpp" - -using namespace std; +#include "topk_shape_inference.hpp" namespace ov { -OPENVINO_SUPPRESS_DEPRECATED_START +namespace op { namespace topk { +namespace validate { namespace { -template -inline bool evaluate_execute(const ngraph::HostTensorPtr& arg0, - const ngraph::HostTensorPtr& out_indices, - const ngraph::HostTensorPtr& out_values, - const ov::Shape out_shape, - const size_t axis, - const size_t k, - const bool compute_max, - const op::v1::TopK::SortType sort) { - using T = typename element_type_traits::value_type; - using U = typename element_type_traits::value_type; - const ov::Shape in_shape = arg0->get_shape(); - out_indices->set_shape(out_shape); - out_indices->set_element_type(INDEX_ET); - - out_values->set_shape(out_shape); - out_values->set_element_type(arg0->get_element_type()); - - ov::reference::topk(arg0->get_data_ptr(), - out_indices->get_data_ptr(), - out_values->get_data_ptr(), - in_shape, - out_shape, - axis, - k, - compute_max, - sort); - return true; -} - -#define EXECUTE_EVALUATE_TOPK(a, ...) \ - case element::Type_t::a: { \ - OV_OP_SCOPE(OV_PP_CAT3(exec_topk_eval, _, a)); \ - rc = evaluate_execute(__VA_ARGS__); \ - } break - -template -bool evaluate(const ngraph::HostTensorPtr& arg, - const ngraph::HostTensorPtr& out_indices, - const ngraph::HostTensorPtr& out_values, - const ov::Shape out_shape, - const size_t axis, - const size_t k, - const bool max, - const op::v1::TopK::SortType sort, - const element::Type index_et) { - bool rc = true; - switch (index_et) { - EXECUTE_EVALUATE_TOPK(i32, arg, out_indices, out_values, out_shape, axis, k, max, sort); - EXECUTE_EVALUATE_TOPK(i64, arg, out_indices, out_values, out_shape, axis, k, max, sort); +bool data_type(const element::Type& et) { + switch (et) { + case element::f16: + case element::f32: + case element::i32: + case element::i64: + case element::u32: + case element::u64: + return true; default: - rc = false; - break; + return false; } - return rc; } -bool evaluate_topk(const ngraph::HostTensorPtr& arg, - const ngraph::HostTensorPtr& out_indices, - const ngraph::HostTensorPtr& out_values, - const ov::Shape out_shape, - const size_t axis, - const size_t k, - const bool max, - const op::v1::TopK::SortType sort, - const element::Type index_et) { - bool rc = true; - switch (arg->get_element_type()) { - OPENVINO_TYPE_CASE(evaluate_topk, i32, arg, out_indices, out_values, out_shape, axis, k, max, sort, index_et); - OPENVINO_TYPE_CASE(evaluate_topk, i64, arg, out_indices, out_values, out_shape, axis, k, max, sort, index_et); - OPENVINO_TYPE_CASE(evaluate_topk, u32, arg, out_indices, out_values, out_shape, axis, k, max, sort, index_et); - OPENVINO_TYPE_CASE(evaluate_topk, u64, arg, out_indices, out_values, out_shape, axis, k, max, sort, index_et); - OPENVINO_TYPE_CASE(evaluate_topk, f16, arg, out_indices, out_values, out_shape, axis, k, max, sort, index_et); - OPENVINO_TYPE_CASE(evaluate_topk, f32, arg, out_indices, out_values, out_shape, axis, k, max, sort, index_et); +bool k_type(const element::Type& et) { + switch (et) { + case element::i8: + case element::i16: + case element::i32: + case 
element::i64: + case element::u8: + case element::u16: + case element::u32: + case element::u64: + return true; default: - rc = false; - break; + return false; } - return rc; } -bool TopK_evaluate(const ov::op::util::TopKBase* const node, - const HostTensorVector& outputs, - const HostTensorVector& inputs) { - const auto& arg_shape = inputs[0]->get_shape(); - OPENVINO_SUPPRESS_DEPRECATED_START - const auto axis = normalize_axis(node, node->get_provided_axis(), arg_shape.size()); - OPENVINO_SUPPRESS_DEPRECATED_END - const auto compute_max = node->get_mode() == ov::op::TopKMode::MAX; - const auto sort_type = node->get_sort_type(); - - const auto input_shapes = vector{inputs[0]->get_partial_shape(), inputs[1]->get_partial_shape()}; - auto output_shape = shape_infer(node, input_shapes, ov::make_tensor_accessor(inputs)).front().to_shape(); +} // namespace +} // namespace validate + +struct Evaluate : public element::NoAction { + using element::NoAction::visit; + + template > + static result_type visit(const Tensor& in, + Tensor& out_values, + Tensor& out_indices, + const Shape& out_shape, + const size_t axis, + const bool compute_max, + const TopKSortType sort) { + using namespace ov::element; + return IfTypeOf::apply(out_indices.get_element_type(), + in.data(), + out_values.data(), + out_indices, + in.get_shape(), + out_shape, + axis, + out_shape[axis], + compute_max, + sort); + } + +private: + struct EvalByIdxType : public element::NoAction { + using element::NoAction::visit; + + template > + static result_type visit(const T* in_first, + T* out_first, + Tensor& out_indices, + const Shape& in_shape, + const Shape& out_shape, + const size_t axis, + const size_t k, + const bool compute_max, + const TopKSortType sort) { + reference::topk(in_first, + out_indices.data(), + out_first, + in_shape, + out_shape, + axis, + k, + compute_max, + sort); + return true; + } + }; +}; + +namespace { +bool evaluate(const util::TopKBase* const node, TensorVector& outputs, const TensorVector& inputs) { + auto output_shapes = shape_infer(node, ov::util::get_tensors_partial_shapes(inputs), make_tensor_accessor(inputs)); + OPENVINO_ASSERT(outputs.size() == output_shapes.size()); + auto output_shape = output_shapes.front().get_shape(); + const auto axis = ov::util::normalize(node->get_provided_axis(), output_shape.size()); if (output_shape[axis] == 0) { // the kernel can't handle K (output_shape[axis]) equal 0, use arg_shape[axis] instead. 
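        // (a K of 0 effectively means "take all elements", so the full extent of the input axis is used)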
- output_shape[axis] = arg_shape[axis]; + output_shape[axis] = inputs[0].get_shape()[axis]; } - const size_t k = output_shape[axis]; - OPENVINO_ASSERT(k <= arg_shape[axis], "'K' exceeds the dimension of top_k_axis"); - - // TopK reference implementation provides stable indices output so this parameter is not passed on - return evaluate_topk(inputs[0], - outputs[1], - outputs[0], - output_shape, - axis, - k, - compute_max, - sort_type, - node->get_index_element_type()); + for (auto& t : outputs) { + t.set_shape(output_shape); + } + + using namespace ov::element; + return IfTypeOf::apply(inputs[0].get_element_type(), + inputs[0], + outputs[0], + outputs[1], + output_shape, + axis, + (node->get_mode() == ov::op::TopKMode::MAX), + node->get_sort_type()); } } // namespace } // namespace topk // v1 version starts - -op::v1::TopK::TopK(const Output& data, - const Output& k, - const int64_t axis, - const std::string& mode, - const std::string& sort, - const element::Type& index_element_type) +namespace v1 { +TopK::TopK(const Output& data, + const Output& k, + const int64_t axis, + const std::string& mode, + const std::string& sort, + const element::Type& index_element_type) : util::TopKBase(data, k, axis, mode, sort, index_element_type) { constructor_validate_and_infer_types(); } -op::v1::TopK::TopK(const Output& data, - const Output& k, - const int64_t axis, - const Mode mode, - const SortType sort, - const element::Type& index_element_type) +TopK::TopK(const Output& data, + const Output& k, + const int64_t axis, + const Mode mode, + const SortType sort, + const element::Type& index_element_type) : util::TopKBase(data, k, axis, mode, sort, index_element_type) { constructor_validate_and_infer_types(); } -void op::v1::TopK::k_type_check(const element::Type& k_element_type) const { +void TopK::k_type_check(const element::Type& k_element_type) const { NODE_VALIDATION_CHECK( this, k_element_type == element::i8 || k_element_type == element::i32 || k_element_type == element::i64, @@ -169,156 +159,84 @@ void op::v1::TopK::k_type_check(const element::Type& k_element_type) const { ")."); } -shared_ptr op::v1::TopK::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr TopK::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v1_TopK_clone_with_new_inputs); check_new_args_count(this, new_args); - return make_shared(new_args.at(0), new_args.at(1), m_axis, m_mode, m_sort, m_index_element_type); + return std::make_shared(new_args.at(0), new_args.at(1), m_axis, m_mode, m_sort, m_index_element_type); } -bool op::v1::TopK::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { +bool TopK::evaluate(TensorVector& outputs, const TensorVector& inputs) const { OV_OP_SCOPE(v1_TopK_evaluate); - return topk::TopK_evaluate(this, outputs, inputs); + return topk::evaluate(this, outputs, inputs); } -bool op::v1::TopK::has_evaluate() const { +bool TopK::has_evaluate() const { OV_OP_SCOPE(v1_TopK_has_evaluate); - - switch (get_input_element_type(0)) { - case element::i32: - case element::i64: - case element::u32: - case element::u64: - case element::f16: - case element::f32: - break; - default: - return false; - } - - if (op::util::is_constant(input_value(1).get_node())) { - switch (get_input_element_type(1)) { - case element::i8: - case element::i32: - case element::i64: - break; - default: - return false; - } - } else { - switch (get_input_element_type(1)) { - case element::i8: - case element::i16: - case element::i32: - case element::i64: - case 
element::u8: - case element::u16: - case element::u32: - case element::u64: - break; - default: - return false; - } - } - - return true; + return topk::validate::data_type(get_input_element_type(0)) && topk::validate::k_type(get_input_element_type(1)); } +} // namespace v1 // v3 version starts -op::v3::TopK::TopK(const Output& data, - const Output& k, - const int64_t axis, - const std::string& mode, - const std::string& sort, - const element::Type& index_element_type) +namespace v3 { +TopK::TopK(const Output& data, + const Output& k, + const int64_t axis, + const std::string& mode, + const std::string& sort, + const element::Type& index_element_type) : TopK(data, k, axis, as_enum(mode), as_enum(sort), index_element_type) {} -op::v3::TopK::TopK(const Output& data, - const Output& k, - const int64_t axis, - const Mode mode, - const SortType sort, - const element::Type& index_element_type) +TopK::TopK(const Output& data, + const Output& k, + const int64_t axis, + const Mode mode, + const SortType sort, + const element::Type& index_element_type) : util::TopKBase{data, k, axis, mode, sort, index_element_type} { constructor_validate_and_infer_types(); } -shared_ptr op::v3::TopK::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr TopK::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v3_TopK_clone_with_new_inputs); check_new_args_count(this, new_args); - return make_shared(new_args.at(0), new_args.at(1), m_axis, m_mode, m_sort, m_index_element_type); + return std::make_shared(new_args.at(0), new_args.at(1), m_axis, m_mode, m_sort, m_index_element_type); } -bool op::v3::TopK::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { +bool TopK::evaluate(TensorVector& outputs, const TensorVector& inputs) const { OV_OP_SCOPE(v3_TopK_evaluate); - return topk::TopK_evaluate(this, outputs, inputs); + return topk::evaluate(this, outputs, inputs); } -bool op::v3::TopK::has_evaluate() const { +bool TopK::has_evaluate() const { OV_OP_SCOPE(v3_TopK_has_evaluate); - - switch (get_input_element_type(0)) { - case element::i32: - case element::i64: - case element::u32: - case element::u64: - case element::f16: - case element::f32: - break; - default: - return false; - } - - if (op::util::is_constant(input_value(1).get_node())) { - switch (get_input_element_type(1)) { - case element::i8: - case element::i32: - case element::i64: - break; - default: - return false; - } - } else { - switch (get_input_element_type(1)) { - case element::i8: - case element::i16: - case element::i32: - case element::i64: - case element::u8: - case element::u16: - case element::u32: - case element::u64: - break; - default: - return false; - } - } - - return true; + return topk::validate::data_type(get_input_element_type(0)) && topk::validate::k_type(get_input_element_type(1)); } +} // namespace v3 // =============== V11 =============== -ov::op::v11::TopK::TopK(const Output& data, - const Output& k, - const int64_t axis, - const std::string& mode, - const std::string& sort, - const element::Type& index_element_type, - const bool stable) +namespace v11 { +TopK::TopK(const Output& data, + const Output& k, + const int64_t axis, + const std::string& mode, + const std::string& sort, + const element::Type& index_element_type, + const bool stable) : TopK(data, k, axis, as_enum(mode), as_enum(sort), index_element_type, stable) {} -ov::op::v11::TopK::TopK(const Output& data, - const Output& k, - const int64_t axis, - const TopKMode mode, - const TopKSortType sort, - const 
element::Type& index_element_type, - const bool stable) +TopK::TopK(const Output& data, + const Output& k, + const int64_t axis, + const TopKMode mode, + const TopKSortType sort, + const element::Type& index_element_type, + const bool stable) : util::TopKBase{data, k, axis, mode, sort, index_element_type}, m_stable{stable} { constructor_validate_and_infer_types(); } -void ov::op::v11::TopK::validate_and_infer_types() { +void TopK::validate_and_infer_types() { OV_OP_SCOPE(v11_TopK_validate_and_infer_types); if (m_stable) { @@ -331,44 +249,34 @@ void ov::op::v11::TopK::validate_and_infer_types() { util::TopKBase::validate_and_infer_types(); } -bool ov::op::v11::TopK::visit_attributes(AttributeVisitor& visitor) { +bool TopK::visit_attributes(AttributeVisitor& visitor) { OV_OP_SCOPE(v11_TopK_visit_attributes); util::TopKBase::visit_attributes(visitor); visitor.on_attribute("stable", m_stable); return true; } -std::shared_ptr ov::op::v11::TopK::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr TopK::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v11_TopK_clone_with_new_inputs); check_new_args_count(this, new_args); - return make_shared(new_args.at(0), - new_args.at(1), - m_axis, - m_mode, - m_sort, - m_index_element_type, - m_stable); + return std::make_shared(new_args.at(0), + new_args.at(1), + m_axis, + m_mode, + m_sort, + m_index_element_type, + m_stable); } -bool ov::op::v11::TopK::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { +bool TopK::evaluate(TensorVector& outputs, const TensorVector& inputs) const { OV_OP_SCOPE(v11_TopK_evaluate); - return topk::TopK_evaluate(this, outputs, inputs); + return topk::evaluate(this, outputs, inputs); } -bool ov::op::v11::TopK::has_evaluate() const { +bool TopK::has_evaluate() const { OV_OP_SCOPE(v11_TopK_has_evaluate); - - switch (get_input_element_type(0)) { - case element::i32: - case element::i64: - case element::u32: - case element::u64: - case element::f16: - case element::f32: - break; - default: - return false; - } - return true; + return topk::validate::data_type(get_input_element_type(0)); } +} // namespace v11 +} // namespace op } // namespace ov diff --git a/src/core/src/op/util/evaluate_helpers.cpp b/src/core/src/op/util/evaluate_helpers.cpp index 4e21da40bfe013..cffc57e6fbd87c 100644 --- a/src/core/src/op/util/evaluate_helpers.cpp +++ b/src/core/src/op/util/evaluate_helpers.cpp @@ -4,8 +4,6 @@ #include "ngraph/op/util/evaluate_helpers.hpp" -#include "openvino/op/util/evaluate_helpers.hpp" - namespace ngraph { AxisSet get_normalized_axes_from_tensor(const HostTensorPtr tensor, const ngraph::Rank& rank, @@ -17,18 +15,3 @@ AxisSet get_normalized_axes_from_tensor(const HostTensorPtr tensor, return AxisSet{normalized_axes}; } } // namespace ngraph - -namespace ov { -namespace op { -namespace util { -std::vector get_tensors_partial_shapes(const TensorVector& tensors) { - std::vector shapes; - shapes.reserve(tensors.size()); - for (const auto& t : tensors) { - shapes.emplace_back(t.get_shape()); - } - return shapes; -} -} // namespace util -} // namespace op -} // namespace ov diff --git a/src/core/src/validation_util.cpp b/src/core/src/validation_util.cpp index 2e1db9dd6864f1..803364b289008d 100644 --- a/src/core/src/validation_util.cpp +++ b/src/core/src/validation_util.cpp @@ -1384,5 +1384,14 @@ std::shared_ptr get_constant_from_source(const Output& source) { return {}; } } + +std::vector get_tensors_partial_shapes(const TensorVector& tensors) { + std::vector 
shapes; + shapes.reserve(tensors.size()); + for (const auto& t : tensors) { + shapes.emplace_back(t.get_shape()); + } + return shapes; +} } // namespace util } // namespace ov From 44a3255add277b8d67b0ef53e8e3c63c2b09c739 Mon Sep 17 00:00:00 2001 From: Helena Kloosterman Date: Fri, 13 Oct 2023 14:42:15 +0200 Subject: [PATCH 190/257] Small fix for GPU memory allocation documentation (#20394) --- src/plugins/intel_gpu/docs/memory_allocation_gpu_plugin.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/plugins/intel_gpu/docs/memory_allocation_gpu_plugin.md b/src/plugins/intel_gpu/docs/memory_allocation_gpu_plugin.md index eb548eb386abe4..1ad9039435fb56 100644 --- a/src/plugins/intel_gpu/docs/memory_allocation_gpu_plugin.md +++ b/src/plugins/intel_gpu/docs/memory_allocation_gpu_plugin.md @@ -20,7 +20,7 @@ calls the corresponding memory object wrapper for each allocation type: [gpu_buf ## Dump memory allocation history -The memory allocation history is being managed by the `engine`, which can be dumped by setting the environment variable `OV_GPU_Verbose=1` if OpenVino is built with the cmake configuration `ENABLE_DEBUG_CAPS=ON`. +The memory allocation history is being managed by the `engine`, which can be dumped by setting the environment variable `OV_GPU_Verbose=2` if OpenVINO is built with the cmake configuration `ENABLE_DEBUG_CAPS=ON`. ```cpp ... GPU_Debug: Allocate 58982400 bytes of usm_host allocation type (current=117969612; max=117969612) From 739afa9d564139df650b8c605302c68ba22143e0 Mon Sep 17 00:00:00 2001 From: Anastasiia Pnevskaia Date: Fri, 13 Oct 2023 18:12:38 +0200 Subject: [PATCH 191/257] Guide for input/output in original FW. (#20141) * Added guide for input/output in original FW. * Apply suggestions from code review Co-authored-by: Roman Kazantsev * Removed unused import. * Apply suggestions from code review Co-authored-by: Roman Kazantsev * Text format corrections. * Header format correction. * Minor correction. * Minor corrections. * Minor corrections. * Removed unused import. * Update docs/OV_Converter_UG/prepare_model/convert_model/MO_OVC_transition.md Co-authored-by: Tatiana Savina * Update docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition.md Co-authored-by: Nico Galoppo * Examples format change. Added PyTorch example. * Example corrected. * Added PyTorch example. * Small correction. * Apply suggestions from code review Co-authored-by: Maxim Vafin * Added note. * Corrected note. --------- Co-authored-by: Roman Kazantsev Co-authored-by: Tatiana Savina Co-authored-by: Nico Galoppo Co-authored-by: Maxim Vafin --- .../mo_ovc_transition.md | 227 ++++++++++++++++++ 1 file changed, 227 insertions(+) diff --git a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition.md b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition.md index 9de12249a341f8..e45eac3b3a3c67 100644 --- a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition.md +++ b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition.md @@ -624,6 +624,233 @@ Here is the guide to transition from legacy model preprocessing to new API prepr - Not available in OVC tool. Please check Python API. +Cutting Off Parts of a Model +############################ + +Performing surgery by cutting model inputs and outputs from a model is no longer available in the new conversion API. Instead, we recommend performing the cut in the original framework. 
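+
+For example, for a TensorFlow 2 Keras model an output cut usually amounts to building a sub-model that stops at the desired layer. A minimal sketch, assuming a functional Keras model (the layer name below is a placeholder, pick one from `model.summary()`):
+
+.. code-block:: py
+   :force:
+
+   import openvino as ov
+   import tensorflow as tf
+
+   # Any functional Keras model; MobileNetV2 is used only as an illustration
+   model = tf.keras.applications.MobileNetV2(weights=None)
+
+   # Output cut: keep the graph up to an intermediate layer
+   cut_model = tf.keras.Model(inputs=model.input,
+                              outputs=model.get_layer("block_13_expand_relu").output)
+
+   # Convert and compile the cut model
+   ov_model = ov.convert_model(cut_model)
+   compiled_model = ov.compile_model(ov_model)
+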
+Below are examples of cutting TensorFlow protobuf, TensorFlow SavedModel, and ONNX models with the legacy conversion API, compared to achieving the same cut with the tools provided by the TensorFlow and ONNX frameworks.
+For PyTorch, TensorFlow 2 Keras, and PaddlePaddle, we recommend changing the original model code to perform the model cut.
+
+Note: This guide does not cover cutting a model by the input port of an operation, which the MO tool provides through the `input` and `output` options, for example, `--input 1:name_op`.
+
+``PyTorch``
+###########
+
+Model cut for PyTorch is not available in the legacy API.
+
+When you need to remove a whole module from the model, you can replace it with `Identity`. Below is an example of removing the `conv1` and `bn1` modules at the input and the `fc` module at the output of the resnet50 model.
+
+.. code-block:: py
+   :force:
+
+   import openvino as ov
+   import torch
+   import torchvision
+   from torch.nn import Identity
+
+   # Load pretrained model
+   model = torchvision.models.resnet50(weights='DEFAULT')
+
+   # input cut
+   model.conv1 = Identity()
+   model.bn1 = Identity()
+
+   # output cut
+   model.fc = Identity()
+
+   # convert and compile the model
+   ov_model = ov.convert_model(model, input=([-1,64,-1,-1], torch.float32))
+   compiled_model = ov.compile_model(ov_model)
+
+When you need to remove one or more outputs from the model, you can create a wrapper that returns only the needed outputs. Below is an example of removing the second output from the model.
+
+.. code-block:: py
+   :force:
+
+   import openvino as ov
+   import torch
+
+   # Example of model with multiple outputs
+   class Model(torch.nn.Module):
+       def __init__(self):
+           super(Model, self).__init__()
+           self.linear1 = torch.nn.Linear(100, 200)
+           self.activation1 = torch.nn.ReLU()
+           self.linear2 = torch.nn.Linear(200, 10)
+           self.activation2 = torch.nn.Sigmoid()
+
+       def forward(self, x):
+           x = self.linear1(x)
+           x = self.activation1(x)
+           y = self.linear2(x)
+           y = self.activation2(y)
+           return x, y
+
+   # New model, where some outputs are cut
+   class CutModel(torch.nn.Module):
+       def __init__(self):
+           super(CutModel, self).__init__()
+           self.model = Model()
+
+       def forward(self, x):
+
+           # get first output
+           x, _ = self.model(x)
+
+           return x
+
+   # Model with output cut
+   cut_model = CutModel()
+
+   # convert and compile the model
+   ov_model = ov.convert_model(cut_model, input=([-1,-1,-1], torch.float32))
+   compiled_model = ov.compile_model(ov_model)
+
+
+``TensorFlow protobuf format / tf.Graph / tf.GraphDef``
+#######################################################
+
+Legacy API.
+
+.. code-block:: py
+   :force:
+
+   import openvino as ov
+   import openvino.tools.mo as mo
+
+   import tensorflow as tf
+
+   def load_graph(model_path):
+       graph_def = tf.compat.v1.GraphDef()
+       with open(model_path, "rb") as f:
+           graph_def.ParseFromString(f.read())
+       with tf.compat.v1.Graph().as_default() as graph:
+           tf.graph_util.import_graph_def(graph_def, name="")
+       return graph
+
+   # Load TF model
+   graph = load_graph("/path_to_model/HugeCTR.pb")
+
+   # Convert the model with input and output cut
+   input_name = "concat"
+   output_name = "MatVec_3/Squeeze"
+   ov_model = mo.convert_model(graph, input=(input_name, [-1, -1]), output=output_name)
+
+   # Compile the model
+   compiled_model = ov.compile_model(ov_model)
+
+Model cut in original FW.
+
+..
code-block:: py + :force: + + import openvino as ov + import tensorflow as tf + + from tensorflow.python.tools.strip_unused_lib import strip_unused + + def load_graph(model_path): + graph_def = tf.compat.v1.GraphDef() + with open(model_path, "rb") as f: + graph_def.ParseFromString(f.read()) + with tf.compat.v1.Graph().as_default() as graph: + tf.graph_util.import_graph_def(graph_def, name="") + return graph + + # Load TF model + graph = load_graph("/path_to_model/HugeCTR.pb") + + # Cut the model + input_name = "concat" + output_name = "MatVec_3/Squeeze" + graph_def = graph.as_graph_def() + new_graph_def = strip_unused(graph_def, [input_name], [output_name], tf.float32.as_datatype_enum) + + # Convert and compile model + ov_model = ov.convert_model(new_graph_def, input=[-1, -1]) + cmp_model = ov.compile_model(ov_model) + + +``TensorFlow SavedModel format`` +################################ + +Model cut for SavedModel format is not available in legacy API. + +Example of model cut in original FW. + +.. code-block:: py + :force: + + import openvino as ov + import tensorflow_hub as hub + + import tensorflow as tf + from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2 + from tensorflow.python.tools.strip_unused_lib import strip_unused + + # Load TF model + model = hub.load("https://tfhub.dev/svampeatlas/vision/embedder/fungi_V2/1?tf-hub-format=compressed") + + # Convert model to GraphDef + model_func = model.signatures["default"] + frozen_func = convert_variables_to_constants_v2(model_func) + graph_def = frozen_func.graph.as_graph_def() + + # Cut the model + input_name = 'InceptionV4/InceptionV4/Conv2d_2b_3x3/Relu' + output_name = 'InceptionV4/InceptionV4/Mixed_7c/concat' + new_graph_def = strip_unused(graph_def, [input_name], [output_name], tf.float32.as_datatype_enum) + + # Convert and compile the model + ov_model = ov.convert_model(new_graph_def) + compiled_model = ov.compile_model(ov_model) + + +``ONNX`` +######## + + +Legacy API. + +.. code-block:: py + :force: + + import openvino as ov + import openvino.tools.mo as mo + + input_path = "/path_to_model/yolov8x.onnx" + + # Convert model and perform input and output cut + input_name = "/model.2/Concat_output_0" + output_name = "/model.22/Concat_3_output_0" + ov_model = mo.convert_model(input_path, input=input_name, output=output_name) + + # Compile model + ov.compile_model(ov_model) + +Model cut in original FW. + +.. 
code-block:: py + :force: + + import onnx + import openvino as ov + + input_path = "/path_to_model/yolov8x.onnx" + + # Cut the model + input_name = "/model.2/Concat_output_0" + output_name = "/model.22/Concat_3_output_0" + cut_model_path = "/path_to_model/yolov8x_cut.onnx" + onnx.utils.extract_model(input_path, cut_model_path, [input_name], [output_name]) + + # Convert model + ov_model = ov.convert_model(cut_model_path) + + # Compile model + ov.compile_model(ov_model) + + Supported Frameworks in MO vs OVC ################################# From 1bdd65bfc41831ac7f13ec3be972728f1a424e52 Mon Sep 17 00:00:00 2001 From: Karol Blaszczak Date: Fri, 13 Oct 2023 18:21:39 +0200 Subject: [PATCH 192/257] [DOCS] update selector tool (#20342) (#20454) authored-by: Alexander Suvorov --- ...ector-56fddec6.js => selector-114afa0d.js} | 35 +++++++++++-------- ...tor-12b6d7d.html => selector-68d2f71.html} | 11 +++--- .../installing-openvino-overview.md | 2 +- 3 files changed, 26 insertions(+), 22 deletions(-) rename docs/_static/selector-tool/assets/{selector-56fddec6.js => selector-114afa0d.js} (51%) rename docs/_static/selector-tool/{selector-12b6d7d.html => selector-68d2f71.html} (76%) diff --git a/docs/_static/selector-tool/assets/selector-56fddec6.js b/docs/_static/selector-tool/assets/selector-114afa0d.js similarity index 51% rename from docs/_static/selector-tool/assets/selector-56fddec6.js rename to docs/_static/selector-tool/assets/selector-114afa0d.js index 3437a6b4b55dbc..2878b10357074f 100644 --- a/docs/_static/selector-tool/assets/selector-56fddec6.js +++ b/docs/_static/selector-tool/assets/selector-114afa0d.js @@ -1,4 +1,4 @@ -var of=Object.defineProperty;var sf=(e,t,n)=>t in e?of(e,t,{enumerable:!0,configurable:!0,writable:!0,value:n}):e[t]=n;var De=(e,t,n)=>(sf(e,typeof t!="symbol"?t+"":t,n),n);function lf(e){return e&&e.__esModule&&Object.prototype.hasOwnProperty.call(e,"default")?e.default:e}var Ar={},af={get exports(){return Ar},set exports(e){Ar=e}},Ti={},D={},uf={get exports(){return D},set exports(e){D=e}},j={};/** +var af=Object.defineProperty;var uf=(e,t,n)=>t in e?af(e,t,{enumerable:!0,configurable:!0,writable:!0,value:n}):e[t]=n;var je=(e,t,n)=>(uf(e,typeof t!="symbol"?t+"":t,n),n);function cf(e){return e&&e.__esModule&&Object.prototype.hasOwnProperty.call(e,"default")?e.default:e}var Ar={},df={get exports(){return Ar},set exports(e){Ar=e}},Ti={},j={},pf={get exports(){return j},set exports(e){j=e}},D={};/** * @license React * react.production.min.js * @@ -6,7 +6,7 @@ var of=Object.defineProperty;var sf=(e,t,n)=>t in e?of(e,t,{enumerable:!0,config * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. 
- */var ro=Symbol.for("react.element"),cf=Symbol.for("react.portal"),df=Symbol.for("react.fragment"),pf=Symbol.for("react.strict_mode"),ff=Symbol.for("react.profiler"),hf=Symbol.for("react.provider"),mf=Symbol.for("react.context"),gf=Symbol.for("react.forward_ref"),vf=Symbol.for("react.suspense"),yf=Symbol.for("react.memo"),_f=Symbol.for("react.lazy"),Aa=Symbol.iterator;function kf(e){return e===null||typeof e!="object"?null:(e=Aa&&e[Aa]||e["@@iterator"],typeof e=="function"?e:null)}var Nc={isMounted:function(){return!1},enqueueForceUpdate:function(){},enqueueReplaceState:function(){},enqueueSetState:function(){}},Ec=Object.assign,xc={};function ur(e,t,n){this.props=e,this.context=t,this.refs=xc,this.updater=n||Nc}ur.prototype.isReactComponent={};ur.prototype.setState=function(e,t){if(typeof e!="object"&&typeof e!="function"&&e!=null)throw Error("setState(...): takes an object of state variables to update or a function which returns an object of state variables.");this.updater.enqueueSetState(this,e,t,"setState")};ur.prototype.forceUpdate=function(e){this.updater.enqueueForceUpdate(this,e,"forceUpdate")};function Pc(){}Pc.prototype=ur.prototype;function Vl(e,t,n){this.props=e,this.context=t,this.refs=xc,this.updater=n||Nc}var Fl=Vl.prototype=new Pc;Fl.constructor=Vl;Ec(Fl,ur.prototype);Fl.isPureReactComponent=!0;var za=Array.isArray,Cc=Object.prototype.hasOwnProperty,Dl={current:null},Rc={key:!0,ref:!0,__self:!0,__source:!0};function Tc(e,t,n){var r,o={},i=null,s=null;if(t!=null)for(r in t.ref!==void 0&&(s=t.ref),t.key!==void 0&&(i=""+t.key),t)Cc.call(t,r)&&!Rc.hasOwnProperty(r)&&(o[r]=t[r]);var l=arguments.length-2;if(l===1)o.children=n;else if(1t in e?of(e,t,{enumerable:!0,config * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. 
- */var xf=D,Pf=Symbol.for("react.element"),Cf=Symbol.for("react.fragment"),Rf=Object.prototype.hasOwnProperty,Tf=xf.__SECRET_INTERNALS_DO_NOT_USE_OR_YOU_WILL_BE_FIRED.ReactCurrentOwner,Lf={key:!0,ref:!0,__self:!0,__source:!0};function Lc(e,t,n){var r,o={},i=null,s=null;n!==void 0&&(i=""+n),t.key!==void 0&&(i=""+t.key),t.ref!==void 0&&(s=t.ref);for(r in t)Rf.call(t,r)&&!Lf.hasOwnProperty(r)&&(o[r]=t[r]);if(e&&e.defaultProps)for(r in t=e.defaultProps,t)o[r]===void 0&&(o[r]=t[r]);return{$$typeof:Pf,type:e,key:i,ref:s,props:o,_owner:Tf.current}}Ti.Fragment=Cf;Ti.jsx=Lc;Ti.jsxs=Lc;(function(e){e.exports=Ti})(af);const ni=Ar.Fragment,_=Ar.jsx,I=Ar.jsxs;document.body.style.cssText+=` + */var Rf=j,Tf=Symbol.for("react.element"),bf=Symbol.for("react.fragment"),Lf=Object.prototype.hasOwnProperty,If=Rf.__SECRET_INTERNALS_DO_NOT_USE_OR_YOU_WILL_BE_FIRED.ReactCurrentOwner,Vf={key:!0,ref:!0,__self:!0,__source:!0};function jc(e,t,n){var r,o={},i=null,s=null;n!==void 0&&(i=""+n),t.key!==void 0&&(i=""+t.key),t.ref!==void 0&&(s=t.ref);for(r in t)Lf.call(t,r)&&!Vf.hasOwnProperty(r)&&(o[r]=t[r]);if(e&&e.defaultProps)for(r in t=e.defaultProps,t)o[r]===void 0&&(o[r]=t[r]);return{$$typeof:Tf,type:e,key:i,ref:s,props:o,_owner:If.current}}Ti.Fragment=bf;Ti.jsx=jc;Ti.jsxs=jc;(function(e){e.exports=Ti})(df);const ni=Ar.Fragment,_=Ar.jsx,L=Ar.jsxs;document.body.style.cssText+=` overflow: hidden; -`;const If=()=>{const e={type:"size",height:document.body.offsetHeight};window.parent.postMessage(e)};new ResizeObserver(If).observe(document.body);function fe(e){return fe=typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?function(t){return typeof t}:function(t){return t&&typeof Symbol=="function"&&t.constructor===Symbol&&t!==Symbol.prototype?"symbol":typeof t},fe(e)}function ct(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}function bf(e,t){if(fe(e)!=="object"||e===null)return e;var n=e[Symbol.toPrimitive];if(n!==void 0){var r=n.call(e,t||"default");if(fe(r)!=="object")return r;throw new TypeError("@@toPrimitive must return a primitive value.")}return(t==="string"?String:Number)(e)}function Ic(e){var t=bf(e,"string");return fe(t)==="symbol"?t:String(t)}function $a(e,t){for(var n=0;ne.length)&&(t=e.length);for(var n=0,r=new Array(t);n1&&arguments[1]!==void 0?arguments[1]:{};ct(this,e),this.init(t,n)}return dt(e,[{key:"init",value:function(n){var r=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{};this.prefix=r.prefix||"i18next:",this.logger=n||Df,this.options=r,this.debug=r.debug}},{key:"setDebug",value:function(n){this.debug=n}},{key:"log",value:function(){for(var n=arguments.length,r=new Array(n),o=0;o1?r-1:0),i=1;i-1?l.replace(/###/g,"."):l}function o(){return!e||typeof e=="string"}for(var i=typeof t!="string"?[].concat(t):t.split(".");i.length>1;){if(o())return{};var s=r(i.shift());!e[s]&&n&&(e[s]=new n),Object.prototype.hasOwnProperty.call(e,s)?e=e[s]:e={}}return o()?{}:{obj:e,k:r(i.shift())}}function Ya(e,t,n){var r=Ul(e,t,Object),o=r.obj,i=r.k;o[i]=n}function Af(e,t,n,r){var o=Ul(e,t,Object),i=o.obj,s=o.k;i[s]=i[s]||[],r&&(i[s]=i[s].concat(n)),r||i[s].push(n)}function ri(e,t){var n=Ul(e,t),r=n.obj,o=n.k;if(r)return r[o]}function Ga(e,t,n){var r=ri(e,n);return r!==void 0?r:ri(t,n)}function Dc(e,t,n){for(var r in t)r!=="__proto__"&&r!=="constructor"&&(r in e?typeof e[r]=="string"||e[r]instanceof String||typeof t[r]=="string"||t[r]instanceof String?n&&(e[r]=t[r]):Dc(e[r],t[r],n):e[r]=t[r]);return e}function bn(e){return 
|nGraph \nIR|Converter + Data-->|OpenVINO \nIR|Converter Converter-->|Linear \nIR|Control end - Frontend[Tokenizer]-->|nGraph \nIR|Data + Frontend[Tokenizer]-->|OpenVINO \nIR|Data Control-->|Linear \nIR|Backend[Generator] end Source --> Frontend @@ -258,13 +258,13 @@ classDef daisy1 fill:#FFE17A, stroke: #FEC91B, color: #262626 class Frontend,Optimizer,Backend steel1 class Source,Executable daisy1 ``` -Instead of a source code, `Snippets` take `nGraph` model as an input. -Then the `Tokenizer` (which is essentially a `Snippets` `Frontend`) parses an input `nGraph model`, and tries to find a part of the model that could be processed by `Snippets`. -If such a part is found, `Tokenizer` converts it to an `nGraph IR` and stores inside a `Subgraph` node. -`nGraph IR` - is one of the two `IR` types used by `Snippets`, it is simply a small `nGraph model` that can contain `Snippets`-specific operations. +Instead of a source code, `Snippets` take `OpenVINO` model as an input.
+Then the `Tokenizer` (which is essentially a `Snippets` `Frontend`) parses an input `OpenVINO model`, and tries to find a part of the model that could be processed by `Snippets`. +If such a part is found, `Tokenizer` converts it to an `OpenVINO IR` and stores inside a `Subgraph` node. +`OpenVINO IR` - is one of the two `IR` types used by `Snippets`, it is simply a small `OpenVINO model` that can contain `Snippets`-specific operations. -`nGraph IR` is then passed to the `Optimizer` unit that in turn consists of three subunits. -The purpose of the first subunit is to perform data flow optimizations. The second subunit converts `nGraph IR` (data-flow-oriented representation) to `Linear IR` (control-flow-focused IR). Finally, the third subunit is dedicated to control flow optimizations. +`OpenVINO IR` is then passed to the `Optimizer` unit that in turn consists of three subunits. +The purpose of the first subunit is to perform data flow optimizations. The second subunit converts `OpenVINO IR` (data-flow-oriented representation) to `Linear IR` (control-flow-focused IR). Finally, the third subunit is dedicated to control flow optimizations. After all optimizations, the `Linear IR` is used by the `Generator` (which is `Snippets` `Backend`) to produce executable code, which we will refer to as `Kernel`. As discussed in the Introduction, the purpose of the `Kernel` is to process a part of the initial tensor, and several `Kernels` are usually executed in parallel to process the whole tensor. @@ -280,7 +280,7 @@ The `Snippets` integration into the plugin pipeline is schematically depicted be graph LR subgraph Plugin[ Plugin pipeline ] direction LR - subgraph ngraph[ Transformations on nGraph model ] + subgraph openvino[ Transformations on OpenVINO model ] direction LR common[Common \n Transformations] lpt[Low \n Precision] @@ -305,7 +305,7 @@ The `Snippets` integration into the plugin pipeline is schematically depicted be create-->execute end end - Source[nGraph \n model]-->|Main \n flow|common + Source[OpenVINO \n model]-->|Main \n flow|common convert~~~internal classDef no-bg-color fill:none,stroke-width:0px classDef steel1 fill:#B9D6E5, stroke: #86B3CA, color: #262626 @@ -315,15 +315,15 @@ class tokenize,optimize,generate steel1 class Source,Executable daisy1 class create,execute dafault_node1 ``` -As one can see from the picture, overall plugin pipeline consists of two major blocks: the first block applies transformations to `nGraph model` while the second one works with the internal plugin graph representation. Since `Snippets` is a backend-independent framework, it can't work with the plugin graph or plugin-specific `Ops` directly, so the tokenization is performed immediately before plugin-specific operations are introduced into the graph (`Conversion to Plugin opset`). -`Tokenizer` replaces parts of the `nGraph model` that can be executed by `Snippets` with `ov::op::Subgraph` nGraph nodes. -Each of the nodes stores a piece of the initial `nGraph model` that was replaced by the node. -This piece is stored as an nGraph model itself, which we refer to as `nGraph IR` to distinguish from the original `nGraph model`. +As one can see from the picture, overall plugin pipeline consists of two major blocks: the first block applies transformations to `OpenVINO model` while the second one works with the internal plugin graph representation. 
Since `Snippets` is a backend-independent framework, it can't work with the plugin graph or plugin-specific `Ops` directly, so the tokenization is performed immediately before plugin-specific operations are introduced into the graph (`Conversion to Plugin opset`). +`Tokenizer` replaces parts of the `OpenVINO model` that can be executed by `Snippets` with `ov::op::Subgraph` OpenVINO nodes. +Each of the nodes stores a piece of the initial `OpenVINO model` that was replaced by the node. +This piece is stored as an OpenVINO model itself, which we refer to as `OpenVINO IR` to distinguish from the original `OpenVINO model`. Note that sometimes the exact type of `IR` is not important in our discussion. -In such cases, we will refer to the `IR` (`nGraph` or `Linear`) as `body function`, or simply `body`. +In such cases, we will refer to the `IR` (`OpenVINO` or `Linear`) as `body function`, or simply `body`. -When the plugin finalizes all `nGraph model` transformations, the model is converted to an internal plugin graph representation. -At this point `ov::op::Subgraph` is converted to `ov::intel_cpu::node::Snippet` which still retains the `nGraph IR`. +When the plugin finalizes all `OpenVINO model` transformations, the model is converted to an internal plugin graph representation. +At this point `ov::op::Subgraph` is converted to `ov::intel_cpu::node::Snippet` which still retains the `OpenVINO IR`. This IR is then optimized and an executable `Kernel` is produced during the `CreateComputePrimitive` stage (`CreatePrimitive()` stage in CPU plugin). Finally, multiple copies of the produced kernel executed in parallel during the `Execute` stage. @@ -332,7 +332,7 @@ To summarize, `Snippets` workflow consists of three major blocks: `Tokenizer`, ` ### Tokenizer -`Tokenizer` is run on an `nGraph model` and its main purpose is to identify subgraphs that are suitable for code generation. +`Tokenizer` is run on an `OpenVINO model` and its main purpose is to identify subgraphs that are suitable for code generation. These subgraphs are then replaced with the `ov::op::Subgraph` node. This stage is called tokenization because the `Tokenizer` employs a greedy algorithm similar to the ones used for parsing input stream of characters into tokens. One of the distinctive features of this algorithm is its flexibility, so it can seamlessly handle arbitrary operations' patterns. @@ -371,8 +371,8 @@ The tokenization algorithm is depicted on the flowchart below. ``` Let us briefly describe the process: 1. If a Node is not supported by `Snippets`, then ignore it and proceed to the next one. -2. If a Node has no `Subgraph` parents, then replace it with `Subgraph` node and copy the initial Node to the `Subgraph's` body (which is in the `nGraph IR` form). -3. If a Node has a single `Subgraph` parent, then attach it to the `Subgraph`. It means copy the Node to the `Subgraph's` body, and remove it from the original `nGraph model`. Note that if the Node has more than one parent, corresponding parents' outputs will be connected with the updated `Subgraph` as shown on the diagram below. +2. If a Node has no `Subgraph` parents, then replace it with `Subgraph` node and copy the initial Node to the `Subgraph's` body (which is in the `OpenVINO IR` form). +3. If a Node has a single `Subgraph` parent, then attach it to the `Subgraph`. It means copy the Node to the `Subgraph's` body, and remove it from the original `OpenVINO model`. 
Note that if the Node has more than one parent, corresponding parents' outputs will be connected with the updated `Subgraph` as shown on the diagram below. 4. If a Node has multiple `Subgraph` parents, then they will be merged into a single `Subgraph` and the Node will be attached to it. ```mermaid graph LR @@ -409,7 +409,7 @@ If a `Constant` is not scalar, then it can't be tokenized since storing `Constan Please refer to the [collapse_subgraph.cpp](../src/pass/collapse_subgraph.cpp) to gain more insights on the tokenization process. There is however one more aspect of the tokenization process that is worth covering here. -As discussed in the **Plugin integration** section above, the `Tokenizer` is executed before the plugin converts the `nGraph model` to an internal graph representation. +As discussed in the **Plugin integration** section above, the `Tokenizer` is executed before the plugin converts the `OpenVINO model` to an internal graph representation. It means that the tokenized nodes will not be visible to the plugin (since they are hidden inside `Subrgaphs'` body functions), so they will be ignored by plugin optimization passes. In particular, the plugin won't be able to fuse the nodes using the OneDNN post-ops mechanism. This type of fusings is backend-specific, therefore can't be supported by `Snippets` directly, but it's still important from the performance perspective. @@ -424,15 +424,15 @@ Please, refer to the [snippets_mark_skipped.cpp](../../../plugins/intel_cpu/src/ As briefly discussed in the ***Architecture*** section, `Optimizer` consists of two major units: the first one performs data flow optimization, and the second one is focused on control flow. Note however that some data-flow-related passes can be performed only after the control flow optimizations, so the second unit modifies the dataflow as well. Nevertheless, we will refer to the units as `Data flow optimizer` and `Control flow optimizer` to reflect their main purpose. -Keep in mind that, as discussed above, the `Data flow optimizer` operates exclusively on the `nGraph IR`, while the `Control flow optimizer` works with the `Linear IR`. +Keep in mind that, as discussed above, the `Data flow optimizer` operates exclusively on the `OpenVINO IR`, while the `Control flow optimizer` works with the `Linear IR`. We will discuss these units in more detail below. #### Data flow optimizer Before `Data flow optimizer` can modify data flow, it needs to perform a preliminary stage called `Canonicalization`. To understand the stage's purpose we need to make a step back to the tokenization. - The `Tokenizer` saves a part of the initial `nGraph function` in `Subgraph's` body. - The problem is that the `nGraph function` has no information about data layouts that will be used by the `Subgraph's` parents during the `Execution` stage. + The `Tokenizer` saves a part of the initial `OpenVINO function` in `Subgraph's` body. + The problem is that the `OpenVINO function` has no information about data layouts that will be used by the `Subgraph's` parents during the `Execution` stage. This happens because the plugin assigns layouts on internal graph representation well after the tokenization is finished. The purpose of `Canonicalization` is to incorporate the plugin-defined input layouts into the body function. If an input's layout was changed to a blocked one, then the corresponding body input `Parameter` will be reshaped, and new shapes will be propagated through the body function. 
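To make the last point about `Canonicalization` concrete, the sketch below shows what reshaping a body `Parameter` to a blocked layout means in terms of shapes. This is a standalone illustration rather than Snippets code: the helper `to_blocked_nchw` is hypothetical, and the 8-element channel block is just an example of the blocked layouts (such as nChw8c) that the CPU plugin may assign.

```cpp
#include <cstddef>
#include <iostream>
#include <vector>

// Hypothetical helper: convert a planar NCHW shape to a channel-blocked one,
// [N, C, H, W] -> [N, ceil(C / block), H, W, block], as used by e.g. nChw8c.
std::vector<std::size_t> to_blocked_nchw(const std::vector<std::size_t>& planar, std::size_t block) {
    const std::size_t n = planar[0], c = planar[1], h = planar[2], w = planar[3];
    return {n, (c + block - 1) / block, h, w, block};
}

int main() {
    const std::vector<std::size_t> planar{1, 20, 56, 56};  // shape known at tokenization time
    const auto blocked = to_blocked_nchw(planar, 8);        // layout assigned later by the plugin
    for (std::size_t d : blocked)
        std::cout << d << ' ';                              // prints: 1 3 56 56 8
    std::cout << '\n';
    return 0;
}
```

The five-dimensional shape produced this way is what `Canonicalization` would propagate through the body function in place of the original four-dimensional one.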
@@ -485,17 +485,17 @@ The managers will be executed on different stages of the pipeline to enable more #### Control flow optimizer As follows from its name, the main objective of `Control flow optimizer` is to manage and optimize control flow of the kernel. -Since the `nGraph IR` doesn't have an explicit control flow representation, a special control-flow-oriented `IR` was developed. +Since the `OpenVINO IR` doesn't have an explicit control flow representation, a special control-flow-oriented `IR` was developed. It is called `Linear IR` (or simply `LIR`), let's discuss it first, before we consider the transformation pipeline. ##### Linear Intermediate Representation `Linear IR` is specially designed to facilitate manipulations with control flow. -It is called linear, because it is essentially a sequence of `Expressions` (an analog of nGraph `Op`) that represents control flow. +It is called linear, because it is essentially a sequence of `Expressions` (an analog of OpenVINO `Op`) that represents control flow. So if `Expression 1` is followed by `Expression 2` in `LIR` then the code for `Expression 1` will be emitted before the code for `Expression 2`. Note that this doesn't necessarily mean that the `Expression 2` uses the result of `Expression 1`, they can be completely unrelated from the data flow standpoint. The only restriction here is that all the `Expression's` inputs must be ready by the time it is executed. -This restriction is the same as in `nGraph IR`, but an important distinction here is that `LIR` allows to permute `Expressions` while this data-dependency condition is fulfilled. +This restriction is the same as in `OpenVINO IR`, but an important distinction here is that `LIR` allows to permute `Expressions` while this data-dependency condition is fulfilled. So the `LIR` preserves data dependencies, but also allows for a more control on expressions' order that represents control flow. This is a brief rationale behind the linear `IR`, now let's move to the implementation. @@ -536,13 +536,13 @@ flowchart LR class consumers no-bg ``` -`LinearIR` is our graph representation, it's an analog to an nGraph model. +`LinearIR` is our graph representation, it's an analog to an OpenVINO model. It is simply a container for `Expressions`, the order of `Expressions` represents control flow. -`LIR` also incorporates a range of useful methods to manage the `Expressions`, for example `create_expression(...)` to build `Expressions` from nGraph nodes, or `replace_input(...)` to modify data dependencies between `Expressions`. +`LIR` also incorporates a range of useful methods to manage the `Expressions`, for example `create_expression(...)` to build `Expressions` from OpenVINO nodes, or `replace_input(...)` to modify data dependencies between `Expressions`. Please refer to the implementation in [linear_ir.cpp](../src/lowered/linear_ir.cpp) for more details. `Expression` is the main building block of a `Linear IR`. -It contains a pointer to the nGraph node it was created from and a pointer to the emitter it will be mapped to (which is null until `Expression::init_emitter(...)` is called). +It contains a pointer to the OpenVINO node it was created from and a pointer to the emitter it will be mapped to (which is null until `Expression::init_emitter(...)` is called). An `Expression` can have an arbitrary number of inputs and outputs, we will refer to them simply as ports. Every port can be uniquely identified by the `ExpressionPort` class. 
The `ExpressionPort` contains a pointer to the `Expression` which port it represents, the port type (`input` or `output`) and its index (input/output number). @@ -556,7 +556,7 @@ This information will be used by the control flow optimization pipeline to deter An `Expression` internally stores two separate vectors of input and output `PortDescriptors` which could be accessed by calling `get_input_port_descriptors()` or `get_input_port_descriptor(i)` (and similar for outputs). Finally, `PortConnectors` specify how the `Expression's` ports are connected. -Note that an `Expression` output can be connected to several inputs (like with nGraph nodes), So every `PortConnector` stores one source `ExpressionPort` and a set of consumer `ExpressionPorts` that can be accessed by the `get_source()` or `get_consumers()` methods, respectively. +Note that an `Expression` output can be connected to several inputs (like with OpenVINO nodes), So every `PortConnector` stores one source `ExpressionPort` and a set of consumer `ExpressionPorts` that can be accessed by the `get_source()` or `get_consumers()` methods, respectively. Like with `PortDescriptors`, an `Expression` stores input and output `PortConnectors` in two separate vectors accessed via `get_input_port_connector(i)` (or its output twin). An example on how `PortConnectors` can be used to move between `Expressions` is given on the right side of the above picture. @@ -622,7 +622,7 @@ Please see [assign_registers.cpp](../src/lowered/pass/assign_registers.cpp) and When the `Preparation` is finished, the `Generator` constructs target-specific emitters by calling `init_emitter(target)` method for every `Expression` in the `LinearIR`, where the `target` is a `TargetMachine` instance. The `TargetMachine` is a class that provides generator with target-specific information, such as supported instruction sets, vector register size etc. -`TargetMachine` also maps the nGraph's `DiscreteTypeInfo` (stored in the `Expression`) to the emitter that actually implements the operation. +`TargetMachine` also maps the OpenVINO's `DiscreteTypeInfo` (stored in the `Expression`) to the emitter that actually implements the operation. The mapping is done using the `jitters` map defined in [target_machine.hpp](../include/snippets/target_machine.hpp). In order for this mechanism to work, every `Snippets'` code generation backend should create emitter implementations derived from the `Emitter` base class defined in [emitter.hpp](../include/snippets/emitter.hpp). The backend then should create its own target machine class (derived from the common `TargetMachine`) and populate the `jitters` map, see the [cpu_generator.cpp](../../../plugins/intel_cpu/src/emitters/x64/cpu_generator.cpp) for an implementation example. 
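Because the mapping from operation types to emitters is the key extension point for a new code generation backend, a minimal, self-contained sketch of the idea is given below. The names (`Emitter`, `TargetMachine`, `jitters`, `create`) are simplified stand-ins keyed by plain strings instead of `DiscreteTypeInfo`; the real interfaces live in the emitter.hpp, target_machine.hpp and cpu_generator.cpp files referenced above.

```cpp
#include <functional>
#include <iostream>
#include <map>
#include <memory>
#include <string>

// Stand-in for the Emitter base class: real emitters produce target-specific code.
struct Emitter {
    virtual ~Emitter() = default;
    virtual void emit() const = 0;
};

struct AddEmitter : Emitter {
    void emit() const override { std::cout << "emit vector add\n"; }
};

struct LoadEmitter : Emitter {
    void emit() const override { std::cout << "emit load from memory\n"; }
};

using EmitterFactory = std::function<std::unique_ptr<Emitter>()>;

// Stand-in for a backend's TargetMachine: it owns the dispatch table ("jitters").
struct TargetMachine {
    std::map<std::string, EmitterFactory> jitters;  // keyed by DiscreteTypeInfo in Snippets

    std::unique_ptr<Emitter> create(const std::string& type) const {
        const auto it = jitters.find(type);
        if (it == jitters.end())
            return nullptr;       // unsupported operation for this target
        return it->second();      // call the registered factory
    }
};

int main() {
    TargetMachine machine;
    machine.jitters["opset1::Add"]    = [] { return std::make_unique<AddEmitter>(); };
    machine.jitters["snippets::Load"] = [] { return std::make_unique<LoadEmitter>(); };

    // The Generator does the equivalent of this for every Expression in init_emitter(target).
    if (const auto emitter = machine.create("opset1::Add"))
        emitter->emit();
    return 0;
}
```

With this structure a backend only has to derive its own emitters and register one factory per supported operation type, while the `Generator` itself stays target-agnostic.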
diff --git a/src/common/snippets/include/snippets/itt.hpp b/src/common/snippets/include/snippets/itt.hpp index 0c594165ab5776..4a617f5a06e645 100644 --- a/src/common/snippets/include/snippets/itt.hpp +++ b/src/common/snippets/include/snippets/itt.hpp @@ -9,7 +9,7 @@ #pragma once -#include +#include namespace ov { namespace pass { @@ -26,7 +26,7 @@ OV_CC_DOMAINS(internal_op); /* * RUN_ON_FUNCTION_SCOPE macro allows to disable the run_on_function pass * MATCHER_SCOPE macro allows to disable the MatcherPass if matcher isn't applied - * INTERNAL_OP_SCOPE macro allows to disable parts of internal nGraph operations if they are not used + * INTERNAL_OP_SCOPE macro allows to disable parts of internal openvino operations if they are not used */ #if defined(SELECTIVE_BUILD_ANALYZER) diff --git a/src/common/snippets/include/snippets/op/loop.hpp b/src/common/snippets/include/snippets/op/loop.hpp index fefc1368bb4307..1fd51649fc65d1 100644 --- a/src/common/snippets/include/snippets/op/loop.hpp +++ b/src/common/snippets/include/snippets/op/loop.hpp @@ -6,7 +6,7 @@ #include "openvino/op/op.hpp" #include "snippets/emitter.hpp" -#include "ngraph/op/parameter.hpp" +#include "openvino/op/parameter.hpp" namespace ov { namespace snippets { diff --git a/src/common/snippets/include/snippets/op/powerstatic.hpp b/src/common/snippets/include/snippets/op/powerstatic.hpp index 5a1d0abb23ffb4..d76fa48e0601aa 100644 --- a/src/common/snippets/include/snippets/op/powerstatic.hpp +++ b/src/common/snippets/include/snippets/op/powerstatic.hpp @@ -5,7 +5,6 @@ #pragma once #include "openvino/op/op.hpp" -#include #include namespace ov { diff --git a/src/common/snippets/include/snippets/op/scalar.hpp b/src/common/snippets/include/snippets/op/scalar.hpp index 43ecb1aad671cc..2720ffdc062091 100644 --- a/src/common/snippets/include/snippets/op/scalar.hpp +++ b/src/common/snippets/include/snippets/op/scalar.hpp @@ -5,7 +5,7 @@ #pragma once #include "openvino/op/op.hpp" -#include "ngraph/op/constant.hpp" +#include "openvino/op/constant.hpp" namespace ov { namespace snippets { diff --git a/src/common/snippets/include/snippets/op/subgraph.hpp b/src/common/snippets/include/snippets/op/subgraph.hpp index dab2de53e56d47..a9321e957e273c 100644 --- a/src/common/snippets/include/snippets/op/subgraph.hpp +++ b/src/common/snippets/include/snippets/op/subgraph.hpp @@ -190,10 +190,10 @@ class Subgraph : public ov::op::util::SubGraphOp { std::shared_ptr m_shape_infer = nullptr; - class NgraphShapeInfer : public ShapeInferSnippetsNode { - std::shared_ptr m_ngraph_body; + class OVShapeInfer : public ShapeInferSnippetsNode { + std::shared_ptr m_ov_body; public: - explicit NgraphShapeInfer(const std::shared_ptr& body); + explicit OVShapeInfer(const std::shared_ptr& body); Result infer(const std::vector& input_shapes) override; }; }; diff --git a/src/common/snippets/include/snippets/pass/propagate_precision.hpp b/src/common/snippets/include/snippets/pass/propagate_precision.hpp index 1f5bd0cf9542bf..6f805cb1b68808 100644 --- a/src/common/snippets/include/snippets/pass/propagate_precision.hpp +++ b/src/common/snippets/include/snippets/pass/propagate_precision.hpp @@ -5,7 +5,7 @@ #pragma once #include -#include +#include "openvino/pass/pass.hpp" #include "snippets/generator.hpp" namespace ov { diff --git a/src/common/snippets/include/snippets/shape_inference/shape_inference.hpp b/src/common/snippets/include/snippets/shape_inference/shape_inference.hpp index af7d29f8e3f3c3..9066d571cbb4e6 100644 --- 
a/src/common/snippets/include/snippets/shape_inference/shape_inference.hpp +++ b/src/common/snippets/include/snippets/shape_inference/shape_inference.hpp @@ -38,7 +38,7 @@ class IShapeInferSnippets { }; /** - * Shape inference class for Subgraph node (both nGraph and Linear IRs). + * Shape inference class for Subgraph node (both openvino and Linear IRs). * It stores the result of the last shape inference, so it can be reused in optimization pipeline. * */ diff --git a/src/common/snippets/src/lowered/expression_factory.cpp b/src/common/snippets/src/lowered/expression_factory.cpp index 34651fd6dbbbd2..cd5cfe0db74c53 100644 --- a/src/common/snippets/src/lowered/expression_factory.cpp +++ b/src/common/snippets/src/lowered/expression_factory.cpp @@ -69,7 +69,7 @@ ExpressionPtr LinearIR::ExpressionFactory::create(const std::shared_ptr(new IOExpression(res, model->get_result_index(res), linear_ir.m_shape_infer_factory)); create_expression_inputs(linear_ir, expr); - // The Result node don't need output port (because of sense of the node). But each node in ngraph must have one output at least. + // The Result node don't need output port (because of sense of the node). But each node in openvino must have one output at least. // The port descriptors are automatically created in constructor. We manually clean output ports. expr->m_output_port_descriptors.clear(); expr->validate(); @@ -110,7 +110,7 @@ ExpressionPtr LinearIR::ExpressionFactory::create(const std::shared_ptr(last_input.get_expr()->get_node()), "LoopEnd expression expects LoopBegin on last input"); expr->m_input_port_descriptors[inputs.size() - 1] = last_input.get_descriptor_ptr()->clone(); init_expression_inputs(expr, inputs); - // The LoopEnd node don't need output port (because of sense of the node). But each node in ngraph must have one output at least. + // The LoopEnd node don't need output port (because of sense of the node). But each node in openvino must have one output at least. // The port descriptors are automatically created in constructor. We manually clean output ports. 
expr->m_output_port_descriptors.clear(); expr->validate(); diff --git a/src/common/snippets/src/lowered/pass/identify_buffers.cpp b/src/common/snippets/src/lowered/pass/identify_buffers.cpp index 02aabc93ead6ac..d411da67af38d6 100644 --- a/src/common/snippets/src/lowered/pass/identify_buffers.cpp +++ b/src/common/snippets/src/lowered/pass/identify_buffers.cpp @@ -36,7 +36,7 @@ std::vector IdentifyBuffers::create_adjacency_matrix(const LinearIR& linea auto get_buffer_idx = [&](const std::shared_ptr& buffer) { const auto iter = std::find(buffers.cbegin(), buffers.cend(), buffer); - NGRAPH_CHECK(iter != buffers.cend(), "Buffer wasn't find in Buffer system of Subgraph"); + OPENVINO_ASSERT(iter != buffers.cend(), "Buffer wasn't find in Buffer system of Subgraph"); return std::distance(buffers.cbegin(), iter); }; diff --git a/src/common/snippets/src/op/brgemm.cpp b/src/common/snippets/src/op/brgemm.cpp index b64a4328a83b1c..5cce5d85c13a82 100644 --- a/src/common/snippets/src/op/brgemm.cpp +++ b/src/common/snippets/src/op/brgemm.cpp @@ -127,7 +127,7 @@ ov::PartialShape Brgemm::get_planar_output_shape(const ov::PartialShape& output_ } ov::PartialShape Brgemm::get_output_partial_shape(const std::vector& input_shapes) const { - NGRAPH_CHECK(input_shapes.size() == 2, "BRGEMM expects 2 input shapes for shape inference"); + OPENVINO_ASSERT(input_shapes.size() == 2, "BRGEMM expects 2 input shapes for shape inference"); // Note: All majors checks are missed because Brgemm is transformed from MatMul with whole shape infer support diff --git a/src/common/snippets/src/op/fill.cpp b/src/common/snippets/src/op/fill.cpp index 437f594cdfc519..05f79495ae1748 100644 --- a/src/common/snippets/src/op/fill.cpp +++ b/src/common/snippets/src/op/fill.cpp @@ -32,7 +32,7 @@ std::shared_ptr Fill::clone_with_new_inputs(const OutputVector& new_args) void Fill::validate_and_infer_types() { INTERNAL_OP_SCOPE(Fill_validate_and_infer_types); const auto in_type = get_input_element_type(0); - NGRAPH_CHECK(in_type.size() == 4, "Fill operation supports only element types with 4 byte size but got:" + std::to_string(in_type.size())); + OPENVINO_ASSERT(in_type.size() == 4, "Fill operation supports only element types with 4 byte size but got:" + std::to_string(in_type.size())); set_output_type(0, get_input_element_type(0), get_input_partial_shape(0)); } diff --git a/src/common/snippets/src/op/load.cpp b/src/common/snippets/src/op/load.cpp index d1a7d0f2cb523e..868ed4294e6dab 100644 --- a/src/common/snippets/src/op/load.cpp +++ b/src/common/snippets/src/op/load.cpp @@ -40,13 +40,13 @@ std::shared_ptr Load::clone_with_new_inputs(const OutputVector& new_args) LoadReshape::LoadReshape(const Output& x, const size_t count, const size_t offset, std::vector order) : Load(x, count, offset), m_order(std::move(order)) { const auto& in_shape = x.get_partial_shape(); - NGRAPH_CHECK(in_shape.is_static(), "LoadReshape supports only static input shapes"); + OPENVINO_ASSERT(in_shape.is_static(), "LoadReshape supports only static input shapes"); const auto in_shape_size = in_shape.size(); - NGRAPH_CHECK(m_order.size() == in_shape_size, "LoadReshape got new_order of invalid size"); - NGRAPH_CHECK(*std::max_element(m_order.begin(), m_order.end()) == in_shape_size - 1 && + OPENVINO_ASSERT(m_order.size() == in_shape_size, "LoadReshape got new_order of invalid size"); + OPENVINO_ASSERT(*std::max_element(m_order.begin(), m_order.end()) == in_shape_size - 1 && *std::min_element(m_order.begin(), m_order.end()) == 0, "LoadReshape detected invalid values in 
new_order"); const std::set unique_dims(order.begin(), order.end()); - NGRAPH_CHECK(unique_dims.size() == order.size(), "LoadReshape order must not contain repeated elements"); + OPENVINO_ASSERT(unique_dims.size() == order.size(), "LoadReshape order must not contain repeated elements"); constructor_validate_and_infer_types(); } diff --git a/src/common/snippets/src/op/memory_access.cpp b/src/common/snippets/src/op/memory_access.cpp index 117c1bd14e2e7f..f98d72be7f94f5 100644 --- a/src/common/snippets/src/op/memory_access.cpp +++ b/src/common/snippets/src/op/memory_access.cpp @@ -73,25 +73,25 @@ bool MemoryAccess::is_memory_access_output_port(size_t idx) const { void MemoryAccess::set_input_port_descriptor(const PortDescriptor& desc, const size_t i) { const auto it = m_input_ports.find(i); - NGRAPH_CHECK(it != m_input_ports.end(), "Index of input port descriptor should be less than count of input ports"); + OPENVINO_ASSERT(it != m_input_ports.end(), "Index of input port descriptor should be less than count of input ports"); (*it).second = { desc.count, desc.offset, i}; } void MemoryAccess::set_output_port_descriptor(const PortDescriptor& desc, const size_t i) { const auto it = m_output_ports.find(i); - NGRAPH_CHECK(it != m_output_ports.end(), "Index of output port descriptor should be less than count of output ports"); + OPENVINO_ASSERT(it != m_output_ports.end(), "Index of output port descriptor should be less than count of output ports"); (*it).second = { desc.count, desc.offset, i}; } const MemoryAccess::PortDescriptor& MemoryAccess::get_input_port_descriptor(const size_t i) const { const auto it = m_input_ports.find(i); - NGRAPH_CHECK(it != m_input_ports.end(), "Index of input port descriptor should be less than count of input ports"); + OPENVINO_ASSERT(it != m_input_ports.end(), "Index of input port descriptor should be less than count of input ports"); return (*it).second; } const MemoryAccess::PortDescriptor& MemoryAccess::get_output_port_descriptor(const size_t i) const { const auto it = m_output_ports.find(i); - NGRAPH_CHECK(it != m_output_ports.end(), "Index of output port descriptor should be less than count of output ports"); + OPENVINO_ASSERT(it != m_output_ports.end(), "Index of output port descriptor should be less than count of output ports"); return (*it).second; } diff --git a/src/common/snippets/src/op/subgraph.cpp b/src/common/snippets/src/op/subgraph.cpp index 5de4dae47a95a4..dc13bb3e8bb716 100644 --- a/src/common/snippets/src/op/subgraph.cpp +++ b/src/common/snippets/src/op/subgraph.cpp @@ -160,7 +160,7 @@ Subgraph::Subgraph(const OutputVector& args, const std::shared_ptr& b for (size_t i = 0; i < body->get_output_size(); ++i) m_output_descriptions[0].push_back(std::make_shared(i, i)); m_transformations_allowed = false; - m_shape_infer = std::make_shared(body); + m_shape_infer = std::make_shared(body); } Subgraph::Subgraph(const NodeVector& args, const std::shared_ptr& body) @@ -292,7 +292,7 @@ auto Subgraph::wrap_node_as_subgraph(const std::shared_ptr& node) -> s } void Subgraph::fill_empty_output_names(const Output& target_output_node, const Output& replacement_output_node) { - NGRAPH_SUPPRESS_DEPRECATED_START + OPENVINO_SUPPRESS_DEPRECATED_START auto& out_tensor = target_output_node.get_tensor(); const std::string new_name = ov::op::util::get_ie_output_name(replacement_output_node); if (ov::descriptor::get_ov_tensor_legacy_name(out_tensor).empty()) { @@ -301,7 +301,7 @@ void Subgraph::fill_empty_output_names(const Output& target_output_node, c if 
(!replacement_output_node.get_names().empty()) { out_tensor.set_names(replacement_output_node.get_names()); } - NGRAPH_SUPPRESS_DEPRECATED_END + OPENVINO_SUPPRESS_DEPRECATED_END } auto Subgraph::constant_input_should_be_inside_body(const std::shared_ptr& node) -> bool { @@ -484,18 +484,18 @@ IShapeInferSnippets::Result Subgraph::shape_infer(const std::vectorinfer(input_shapes); } -Subgraph::NgraphShapeInfer::NgraphShapeInfer(const std::shared_ptr& body) : - m_ngraph_body(body) { - OPENVINO_ASSERT(m_ngraph_body, "Can't initialize shape infer with empty body"); +Subgraph::OVShapeInfer::OVShapeInfer(const std::shared_ptr& body) : + m_ov_body(body) { + OPENVINO_ASSERT(m_ov_body, "Can't initialize shape infer with empty body"); } -IShapeInferSnippets::Result Subgraph::NgraphShapeInfer::infer(const std::vector& input_shapes) { - const ParameterVector& parameters = m_ngraph_body->get_parameters(); - const ResultVector& results = m_ngraph_body->get_results(); +IShapeInferSnippets::Result Subgraph::OVShapeInfer::infer(const std::vector& input_shapes) { + const ParameterVector& parameters = m_ov_body->get_parameters(); + const ResultVector& results = m_ov_body->get_results(); OPENVINO_ASSERT(parameters.size() == input_shapes.size(), "Got invalid number of input shapes to reshape subgraph body"); for (size_t i = 0; i < parameters.size(); ++i) parameters[i]->set_partial_shape(utils::vdims_to_pshape(input_shapes[i].get())); - m_ngraph_body->validate_nodes_and_infer_types(); + m_ov_body->validate_nodes_and_infer_types(); std::vector outputDims; for (const auto& res : results) outputDims.emplace_back(utils::pshape_to_vdims(res->get_input_partial_shape(0))); @@ -702,7 +702,7 @@ snippets::Schedule Subgraph::generate(const std::vector diff --git a/src/common/snippets/src/pass/hash.cpp b/src/common/snippets/src/pass/hash.cpp index 48dd9586ae4337..2f975ef2cbccee 100644 --- a/src/common/snippets/src/pass/hash.cpp +++ b/src/common/snippets/src/pass/hash.cpp @@ -10,8 +10,6 @@ #include #include -#include "ngraph/ops.hpp" -#include "ngraph/opsets/opset.hpp" #include "openvino/core/except.hpp" #include "openvino/core/meta_data.hpp" #include "openvino/core/model.hpp" @@ -169,7 +167,7 @@ class SnippetsHasher : public ov::AttributeVisitor { m_node_type_name(node_type_name) {} void on_adapter(const std::string& name, ov::ValueAccessor& adapter) override { - if (const auto& a = ov::as_type>>(&adapter)) { + if (const auto& a = ov::as_type>>(&adapter)) { m_hash = hash_combine(hash_combine(m_hash, name), a->get()->get_info().variable_id); } else if (const auto& a = ov::as_type>>(&adapter)) { diff --git a/src/common/snippets/src/pass/propagate_precision.cpp b/src/common/snippets/src/pass/propagate_precision.cpp index 6ba1f5f3d09ad1..568db74d6a5c0a 100644 --- a/src/common/snippets/src/pass/propagate_precision.cpp +++ b/src/common/snippets/src/pass/propagate_precision.cpp @@ -32,7 +32,7 @@ bool ov::snippets::pass::PropagatePrecision::run_on_model(const std::shared_ptr< auto type_info = op->get_type_info(); std::set supported_precisions; // TODO: At the moment Softmax is decomposed on Linear IR level. 
- // When Softmax will be decomposed on NGraph level, remove it + // When Softmax will be decomposed on openvino level, remove it if (type_info.is_castable(ov::op::v1::Softmax::get_type_info_static())) { supported_precisions = {{ov::element::f32}}; } else { diff --git a/src/common/snippets/src/pass/softmax_reshape_elimination.cpp b/src/common/snippets/src/pass/softmax_reshape_elimination.cpp index 2f60f1e1155c76..36a0afb7c11325 100644 --- a/src/common/snippets/src/pass/softmax_reshape_elimination.cpp +++ b/src/common/snippets/src/pass/softmax_reshape_elimination.cpp @@ -10,7 +10,7 @@ #include "openvino/core/rt_info.hpp" #include "openvino/pass/pattern/op/wrap_type.hpp" -#include +#include "openvino/core/validation_util.hpp" ov::snippets::pass::SoftmaxReshapeElimination::SoftmaxReshapeElimination() { MATCHER_SCOPE(SoftmaxReshapeElimination); diff --git a/src/common/transformations/CMakeLists.txt b/src/common/transformations/CMakeLists.txt index 67907b0c265d5c..e7d365ca32492e 100644 --- a/src/common/transformations/CMakeLists.txt +++ b/src/common/transformations/CMakeLists.txt @@ -50,7 +50,7 @@ target_link_libraries(${TARGET_NAME} INTERFACE openvino::runtime) # even the Transformations library is supposed to be Plugin API # we still have some code compiled as transformations, but headers are -# part of ngraph core API +# part of openvino core API # so, we need to mark this library as important for ABI free ov_abi_free_target(${TARGET_NAME}_obj) diff --git a/src/common/transformations/tests/common_optimizations/fq_mul_fusion_test.cpp b/src/common/transformations/tests/common_optimizations/fq_mul_fusion_test.cpp index 61f43c937be16e..7dcf6a9c44b3c3 100644 --- a/src/common/transformations/tests/common_optimizations/fq_mul_fusion_test.cpp +++ b/src/common/transformations/tests/common_optimizations/fq_mul_fusion_test.cpp @@ -13,7 +13,6 @@ #include "common_test_utils/ov_test_utils.hpp" #include "common_test_utils/test_common.hpp" #include "functional_test_utils/plugin_cache.hpp" -#include "ie_core.hpp" #include "openvino/core/model.hpp" #include "openvino/opsets/opset4.hpp" #include "openvino/pass/manager.hpp" diff --git a/src/common/transformations/tests/common_optimizations/fq_reshape_fusion.cpp b/src/common/transformations/tests/common_optimizations/fq_reshape_fusion.cpp index 8e92a5e3f7797f..8127ad129ef34b 100644 --- a/src/common/transformations/tests/common_optimizations/fq_reshape_fusion.cpp +++ b/src/common/transformations/tests/common_optimizations/fq_reshape_fusion.cpp @@ -10,7 +10,6 @@ #include #include -#include "cnn_network_ngraph_impl.hpp" #include "common_test_utils/ov_test_utils.hpp" #include "openvino/core/model.hpp" #include "openvino/opsets/opset4.hpp" @@ -19,7 +18,6 @@ using namespace ov; using namespace testing; -using namespace InferenceEngine; namespace { @@ -32,8 +30,8 @@ struct FQReshapeFusionTestCase { bool is_negative; }; -class nGraphFQReshapeFusionTests : public ov::test::TestsCommon, - public testing::WithParamInterface> { +class FQReshapeFusionTests : public ov::test::TestsCommon, + public testing::WithParamInterface> { public: std::shared_ptr f, ref_f; @@ -115,7 +113,7 @@ class nGraphFQReshapeFusionTests : public ov::test::TestsCommon, } }; -TEST_P(nGraphFQReshapeFusionTests, ReshapeMatMul) { +TEST_P(FQReshapeFusionTests, ReshapeMatMul) { auto unh = std::make_shared(); pass::Manager manager; manager.register_pass(unh); @@ -134,7 +132,7 @@ TEST_P(nGraphFQReshapeFusionTests, ReshapeMatMul) { INSTANTIATE_TEST_SUITE_P( NGraph, - nGraphFQReshapeFusionTests, + 
FQReshapeFusionTests, testing::Values( // positive FQReshapeFusionTestCase{{1, 2, 1, 3}, diff --git a/src/common/transformations/tests/common_optimizations/mish_fusion_test.cpp b/src/common/transformations/tests/common_optimizations/mish_fusion_test.cpp index 61d236a6355628..4fa1af8088d6b2 100644 --- a/src/common/transformations/tests/common_optimizations/mish_fusion_test.cpp +++ b/src/common/transformations/tests/common_optimizations/mish_fusion_test.cpp @@ -19,7 +19,7 @@ using namespace ov; using namespace testing; -// LPT to nGraph migration: temporary disabling unexpected not reproduced fails on CI: +// LPT to openvino migration: temporary disabling unexpected not reproduced fails on CI: // https://openvino-ci.intel.com/job/private-ci/job/ie/job/build-linux-ubuntu18_i386/478/ TEST_F(TransformationTestsF, MishFusing) { { diff --git a/src/common/transformations/tests/smart_reshape/sr_mimicking_sbs.cpp b/src/common/transformations/tests/smart_reshape/sr_mimicking_sbs.cpp index 5b11259cbaf998..40f954312b7f76 100644 --- a/src/common/transformations/tests/smart_reshape/sr_mimicking_sbs.cpp +++ b/src/common/transformations/tests/smart_reshape/sr_mimicking_sbs.cpp @@ -2,7 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 // -#include #include #include "common_test_utils/ov_test_utils.hpp" @@ -20,15 +19,9 @@ TEST(SmartReshapeTests, MimickingSBS) { f = std::make_shared(NodeVector{reshape}, ParameterVector{input}); } - InferenceEngine::CNNNetwork network(f); - auto unh = std::make_shared(); init_unique_names(f, unh); - ASSERT_NO_THROW(network.setBatchSize(2)); - check_unique_names(f, unh); - - ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({12, 4})); - ASSERT_TRUE(network.getFunction()->get_parameters()[0]->get_partial_shape().compatible({2, 2, 3, 4})); + EXPECT_ANY_THROW(set_batch(f, 2)); } TEST(SmartReshapeTests, MimickingSBS_1) { @@ -40,15 +33,9 @@ TEST(SmartReshapeTests, MimickingSBS_1) { f = std::make_shared(NodeVector{reshape}, ParameterVector{input}); } - InferenceEngine::CNNNetwork network(f); - auto unh = std::make_shared(); init_unique_names(f, unh); - ASSERT_NO_THROW(network.setBatchSize(2)); - check_unique_names(f, unh); - - ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({2, 24})); - ASSERT_TRUE(network.getFunction()->get_parameters()[0]->get_partial_shape().compatible({2, 2, 3, 4})); + EXPECT_ANY_THROW(set_batch(f, 2)); } TEST(SmartReshapeTests, MimickingSBS_2) { @@ -60,13 +47,7 @@ TEST(SmartReshapeTests, MimickingSBS_2) { f = std::make_shared(NodeVector{reshape}, ParameterVector{input}); } - InferenceEngine::CNNNetwork network(f); - auto unh = std::make_shared(); init_unique_names(f, unh); - ASSERT_NO_THROW(network.setBatchSize(1)); - check_unique_names(f, unh); - - ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({6, 4})); - ASSERT_TRUE(network.getFunction()->get_parameters()[0]->get_partial_shape().compatible({1, 2, 3, 4})); + EXPECT_ANY_THROW(set_batch(f, 1)); } diff --git a/src/common/transformations/tests/smart_reshape/sr_proposal_scales.cpp b/src/common/transformations/tests/smart_reshape/sr_proposal_scales.cpp index 5e8088a9f2371d..06408dc2807d36 100644 --- a/src/common/transformations/tests/smart_reshape/sr_proposal_scales.cpp +++ b/src/common/transformations/tests/smart_reshape/sr_proposal_scales.cpp @@ -2,7 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 // -#include #include #include "common_test_utils/ov_test_utils.hpp" @@ -39,12 +38,9 @@ 
TEST(SmartReshapeTests, Proposal1Scales) { f = std::make_shared(NodeVector{proposal}, ParameterVector{input_0, input_1, input_2}); } - InferenceEngine::CNNNetwork network(f); auto unh = std::make_shared(); init_unique_names(f, unh); - ASSERT_NO_THROW(network.setBatchSize(2)); - check_unique_names(f, unh); - ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({600, 5})); + EXPECT_ANY_THROW(set_batch(f, 2)); } TEST(SmartReshapeTests, Proposal1Scales_WithConvert) { @@ -75,12 +71,9 @@ TEST(SmartReshapeTests, Proposal1Scales_WithConvert) { f = std::make_shared(NodeVector{proposal}, ParameterVector{input_0, input_1, input_2}); } - InferenceEngine::CNNNetwork network(f); auto unh = std::make_shared(); init_unique_names(f, unh); - ASSERT_NO_THROW(network.setBatchSize(2)); - check_unique_names(f, unh); - ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({600, 5})); + EXPECT_ANY_THROW(set_batch(f, 2)); } TEST(SmartReshapeTests, Proposal4Scales) { @@ -110,14 +103,9 @@ TEST(SmartReshapeTests, Proposal4Scales) { f = std::make_shared(NodeVector{proposal}, ParameterVector{input_0, input_1, input_2}); } - InferenceEngine::CNNNetwork network(f); - auto unh = std::make_shared(); init_unique_names(f, unh); - ASSERT_NO_THROW(network.setBatchSize(2)); - check_unique_names(f, unh); - - ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({600, 5})); + EXPECT_ANY_THROW(set_batch(f, 2)); } TEST(SmartReshapeTests, Proposal4Scales_WithConvert) { @@ -148,12 +136,7 @@ TEST(SmartReshapeTests, Proposal4Scales_WithConvert) { f = std::make_shared(NodeVector{proposal}, ParameterVector{input_0, input_1, input_2}); } - InferenceEngine::CNNNetwork network(f); - auto unh = std::make_shared(); init_unique_names(f, unh); - ASSERT_NO_THROW(network.setBatchSize(2)); - check_unique_names(f, unh); - - ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({600, 5})); + EXPECT_ANY_THROW(set_batch(f, 2)); } diff --git a/src/common/transformations/tests/smart_reshape/sr_reshape_1d.cpp b/src/common/transformations/tests/smart_reshape/sr_reshape_1d.cpp index bad3962e3fd080..d98cb32f258f4d 100644 --- a/src/common/transformations/tests/smart_reshape/sr_reshape_1d.cpp +++ b/src/common/transformations/tests/smart_reshape/sr_reshape_1d.cpp @@ -2,7 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 // -#include #include #include "common_test_utils/ov_test_utils.hpp" @@ -19,20 +18,16 @@ TEST(SmartReshapeTests, Reshape1d) { f = std::make_shared(NodeVector{reshape}, ParameterVector{input}); } - InferenceEngine::CNNNetwork network(f); - - ASSERT_TRUE( - network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible(PartialShape::dynamic())); - ASSERT_TRUE(network.getFunction()->get_parameters()[0]->get_partial_shape().compatible({5})); + ASSERT_TRUE(f->get_results()[0]->get_output_partial_shape(0).compatible(PartialShape::dynamic())); + ASSERT_TRUE(f->get_parameters()[0]->get_partial_shape().compatible({5})); auto unh = std::make_shared(); init_unique_names(f, unh); - ASSERT_NO_THROW(network.reshape( - InferenceEngine::ICNNNetwork::InputShapes{{f->get_parameters()[0]->get_friendly_name(), {1, 3, 300, 300}}})); + ASSERT_NO_THROW(f->reshape({{1, 3, 300, 300}})); check_unique_names(f, unh); - ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({270000})); - 
ASSERT_TRUE(network.getFunction()->get_parameters()[0]->get_partial_shape().compatible({1, 3, 300, 300})); + ASSERT_TRUE(f->get_results()[0]->get_output_partial_shape(0).compatible({270000})); + ASSERT_TRUE(f->get_parameters()[0]->get_partial_shape().compatible({1, 3, 300, 300})); } TEST(SmartReshapeTests, Reshape1d_negative) { @@ -44,19 +39,10 @@ TEST(SmartReshapeTests, Reshape1d_negative) { f = std::make_shared(NodeVector{reshape}, ParameterVector{input, pattern}); } - InferenceEngine::CNNNetwork network(f); - - ASSERT_TRUE( - network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible(PartialShape::dynamic())); - ASSERT_TRUE(network.getFunction()->get_parameters()[0]->get_partial_shape().is_dynamic()); + ASSERT_TRUE(f->get_results()[0]->get_output_partial_shape(0).compatible(PartialShape::dynamic())); + ASSERT_TRUE(f->get_parameters()[0]->get_partial_shape().is_dynamic()); auto unh = std::make_shared(); init_unique_names(f, unh); - ASSERT_NO_THROW(network.reshape( - InferenceEngine::ICNNNetwork::InputShapes{{f->get_parameters()[0]->get_friendly_name(), {1, 3, 300, 300}}})); - check_unique_names(f, unh); - - ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({270000})); - ASSERT_TRUE(network.getFunction()->get_parameters()[0]->get_partial_shape().compatible({1, 3, 300, 300})); - ASSERT_FALSE(network.getFunction()->get_parameters()[1]->get_output_target_inputs(0).empty()); + EXPECT_ANY_THROW(f->reshape({{1, 3, 300, 300}})); } diff --git a/src/common/transformations/tests/smart_reshape/sr_strided_slice_squeeze.cpp b/src/common/transformations/tests/smart_reshape/sr_strided_slice_squeeze.cpp index 002dc860dfffba..3c9053594ff68c 100644 --- a/src/common/transformations/tests/smart_reshape/sr_strided_slice_squeeze.cpp +++ b/src/common/transformations/tests/smart_reshape/sr_strided_slice_squeeze.cpp @@ -2,7 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 // -#include #include #include "common_test_utils/ov_test_utils.hpp" @@ -27,19 +26,13 @@ TEST(SmartReshapeTests, SS_Squeeze) { f = std::make_shared(NodeVector{relu}, ParameterVector{input}); } - InferenceEngine::CNNNetwork network(f); - ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({3})) - << network.getFunction()->get_results()[0]->get_output_partial_shape(0); - ASSERT_TRUE(network.getFunction()->get_parameters()[0]->get_partial_shape().compatible({1, 3})); + ASSERT_TRUE(f->get_results()[0]->get_output_partial_shape(0).compatible({3})) + << f->get_results()[0]->get_output_partial_shape(0); + ASSERT_TRUE(f->get_parameters()[0]->get_partial_shape().compatible({1, 3})); auto unh = std::make_shared(); init_unique_names(f, unh); - ASSERT_NO_THROW(network.setBatchSize(2)); - check_unique_names(f, unh); - - ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({3})) - << network.getFunction()->get_results()[0]->get_output_partial_shape(0); - ASSERT_TRUE(network.getFunction()->get_parameters()[0]->get_partial_shape().compatible({2, 3})); + EXPECT_ANY_THROW(set_batch(f, 2)); } TEST(SmartReshapeTests, SS_Squeeze_partial_begin_end_mask) { @@ -59,21 +52,19 @@ TEST(SmartReshapeTests, SS_Squeeze_partial_begin_end_mask) { f = std::make_shared(NodeVector{relu}, ParameterVector{input}); } - InferenceEngine::CNNNetwork network(f); - - ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({1, 768})) - << network.getFunction()->get_results()[0]->get_output_partial_shape(0); - 
ASSERT_TRUE(network.getFunction()->get_parameters()[0]->get_partial_shape().compatible({1, 128, 768})); + ASSERT_TRUE(f->get_results()[0]->get_output_partial_shape(0).compatible({1, 768})) + << f->get_results()[0]->get_output_partial_shape(0); + ASSERT_TRUE(f->get_parameters()[0]->get_partial_shape().compatible({1, 128, 768})); auto unh = std::make_shared(); init_unique_names(f, unh); - auto inputname = network.getFunction()->get_parameters()[0]->get_friendly_name(); - ASSERT_NO_THROW(network.reshape(InferenceEngine::ICNNNetwork::InputShapes{{inputname, {2, 128, 768}}})); + auto inputname = f->get_parameters()[0]->get_friendly_name(); + ASSERT_NO_THROW(f->reshape({{2, 128, 768}})); check_unique_names(f, unh); - ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({2, 768})) - << network.getFunction()->get_results()[0]->get_output_partial_shape(0); - ASSERT_TRUE(network.getFunction()->get_parameters()[0]->get_partial_shape().compatible({2, 128, 768})); + ASSERT_TRUE(f->get_results()[0]->get_output_partial_shape(0).compatible({2, 768})) + << f->get_results()[0]->get_output_partial_shape(0); + ASSERT_TRUE(f->get_parameters()[0]->get_partial_shape().compatible({2, 128, 768})); } TEST(SmartReshapeTests, SS_Squeeze_partial_begin_end) { @@ -95,21 +86,19 @@ TEST(SmartReshapeTests, SS_Squeeze_partial_begin_end) { f = std::make_shared(NodeVector{relu}, ParameterVector{input}); } - InferenceEngine::CNNNetwork network(f); - - ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({1, 768})) - << network.getFunction()->get_results()[0]->get_output_partial_shape(0); - ASSERT_TRUE(network.getFunction()->get_parameters()[0]->get_partial_shape().compatible({1, 1, 768})); + ASSERT_TRUE(f->get_results()[0]->get_output_partial_shape(0).compatible({1, 768})) + << f->get_results()[0]->get_output_partial_shape(0); + ASSERT_TRUE(f->get_parameters()[0]->get_partial_shape().compatible({1, 1, 768})); auto unh = std::make_shared(); init_unique_names(f, unh); - auto inputname = network.getFunction()->get_parameters()[0]->get_friendly_name(); - ASSERT_NO_THROW(network.reshape(InferenceEngine::ICNNNetwork::InputShapes{{inputname, {2, 1, 768}}})); + auto inputname = f->get_parameters()[0]->get_friendly_name(); + ASSERT_NO_THROW(f->reshape({{2, 1, 768}})); check_unique_names(f, unh); - ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({2, 768})) - << network.getFunction()->get_results()[0]->get_output_partial_shape(0); - ASSERT_TRUE(network.getFunction()->get_parameters()[0]->get_partial_shape().compatible({2, 1, 768})); + ASSERT_TRUE(f->get_results()[0]->get_output_partial_shape(0).compatible({2, 768})) + << f->get_results()[0]->get_output_partial_shape(0); + ASSERT_TRUE(f->get_parameters()[0]->get_partial_shape().compatible({2, 1, 768})); } TEST(SmartReshapeTests, SS_Squeeze_mask_use_negative) { @@ -128,15 +117,13 @@ TEST(SmartReshapeTests, SS_Squeeze_mask_use_negative) { f = std::make_shared(NodeVector{squeeze}, ParameterVector{input}); } - InferenceEngine::CNNNetwork network(f); - - ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({1, 3})) - << network.getFunction()->get_results()[0]->get_output_partial_shape(0); - ASSERT_TRUE(network.getFunction()->get_parameters()[0]->get_partial_shape().compatible({1, 3})); + ASSERT_TRUE(f->get_results()[0]->get_output_partial_shape(0).compatible({1, 3})) + << f->get_results()[0]->get_output_partial_shape(0); + 
ASSERT_TRUE(f->get_parameters()[0]->get_partial_shape().compatible({1, 3})); auto unh = std::make_shared(); init_unique_names(f, unh); - ASSERT_ANY_THROW(network.setBatchSize(2)); + ASSERT_ANY_THROW(set_batch(f, 2)); check_unique_names(f, unh); } @@ -156,15 +143,13 @@ TEST(SmartReshapeTests, SS_Squeeze_negative_stride_negative) { f = std::make_shared(NodeVector{relu}, ParameterVector{input}); } - InferenceEngine::CNNNetwork network(f); - - ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({3})) - << network.getFunction()->get_results()[0]->get_output_partial_shape(0); - ASSERT_TRUE(network.getFunction()->get_parameters()[0]->get_partial_shape().compatible({1, 3})); + ASSERT_TRUE(f->get_results()[0]->get_output_partial_shape(0).compatible({3})) + << f->get_results()[0]->get_output_partial_shape(0); + ASSERT_TRUE(f->get_parameters()[0]->get_partial_shape().compatible({1, 3})); auto unh = std::make_shared(); init_unique_names(f, unh); - ASSERT_ANY_THROW(network.setBatchSize(2)); + ASSERT_ANY_THROW(set_batch(f, 2)); check_unique_names(f, unh); } @@ -185,20 +170,13 @@ TEST(SmartReshapeTests, SS_SharedSqueezes) { f = std::make_shared(NodeVector{relu_1, relu_2}, ParameterVector{input}); } - InferenceEngine::CNNNetwork network(f); - - ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({3})) - << network.getFunction()->get_results()[0]->get_output_partial_shape(0); - ASSERT_TRUE(network.getFunction()->get_parameters()[0]->get_partial_shape().compatible({1, 3})); + ASSERT_TRUE(f->get_results()[0]->get_output_partial_shape(0).compatible({3})) + << f->get_results()[0]->get_output_partial_shape(0); + ASSERT_TRUE(f->get_parameters()[0]->get_partial_shape().compatible({1, 3})); auto unh = std::make_shared(); init_unique_names(f, unh); - ASSERT_NO_THROW(network.setBatchSize(2)); - check_unique_names(f, unh); - - ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({3})) - << network.getFunction()->get_results()[0]->get_output_partial_shape(0); - ASSERT_TRUE(network.getFunction()->get_parameters()[0]->get_partial_shape().compatible({2, 3})); + EXPECT_ANY_THROW(set_batch(f, 2)); } TEST(SmartReshapeTests, SS_SqueezeNegativeAxes) { @@ -218,20 +196,13 @@ TEST(SmartReshapeTests, SS_SqueezeNegativeAxes) { f = std::make_shared(NodeVector{relu}, ParameterVector{input}); } - InferenceEngine::CNNNetwork network(f); - - ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({3, 8, 2})) - << network.getFunction()->get_results()[0]->get_output_partial_shape(0); - ASSERT_TRUE(network.getFunction()->get_parameters()[0]->get_partial_shape().compatible({1, 3, 1, 8, 1, 2})); + ASSERT_TRUE(f->get_results()[0]->get_output_partial_shape(0).compatible({3, 8, 2})) + << f->get_results()[0]->get_output_partial_shape(0); + ASSERT_TRUE(f->get_parameters()[0]->get_partial_shape().compatible({1, 3, 1, 8, 1, 2})); auto unh = std::make_shared(); init_unique_names(f, unh); - ASSERT_NO_THROW(network.setBatchSize(2)); - check_unique_names(f, unh); - - ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({3, 8, 2})) - << network.getFunction()->get_results()[0]->get_output_partial_shape(0); - ASSERT_TRUE(network.getFunction()->get_parameters()[0]->get_partial_shape().compatible({2, 3, 1, 8, 1, 2})); + EXPECT_ANY_THROW(set_batch(f, 2)); } TEST(SmartReshapeTests, Squeeze_SSNegativeAxes) { @@ -250,18 +221,11 @@ TEST(SmartReshapeTests, 
Squeeze_SSNegativeAxes) { f = std::make_shared(NodeVector{ss}, ParameterVector{input}); } - InferenceEngine::CNNNetwork network(f); - - ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({3, 8, 2})) - << network.getFunction()->get_results()[0]->get_output_partial_shape(0); - ASSERT_TRUE(network.getFunction()->get_parameters()[0]->get_partial_shape().compatible({1, 3, 1, 8, 1, 2})); + ASSERT_TRUE(f->get_results()[0]->get_output_partial_shape(0).compatible({3, 8, 2})) + << f->get_results()[0]->get_output_partial_shape(0); + ASSERT_TRUE(f->get_parameters()[0]->get_partial_shape().compatible({1, 3, 1, 8, 1, 2})); auto unh = std::make_shared(); init_unique_names(f, unh); - ASSERT_NO_THROW(network.setBatchSize(2)); - check_unique_names(f, unh); - - ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({3, 8, 2})) - << network.getFunction()->get_results()[0]->get_output_partial_shape(0); - ASSERT_TRUE(network.getFunction()->get_parameters()[0]->get_partial_shape().compatible({2, 3, 1, 8, 1, 2})); + EXPECT_ANY_THROW(set_batch(f, 2)); } diff --git a/src/common/transformations/tests/smart_reshape/sr_sub_graph_ops.cpp b/src/common/transformations/tests/smart_reshape/sr_sub_graph_ops.cpp index 9ff6aa84ca7419..25c30db2fa4339 100644 --- a/src/common/transformations/tests/smart_reshape/sr_sub_graph_ops.cpp +++ b/src/common/transformations/tests/smart_reshape/sr_sub_graph_ops.cpp @@ -2,7 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 // -#include #include #include "common_test_utils/ov_test_utils.hpp" @@ -49,26 +48,17 @@ TEST(SmartReshapeTests, TensorIteratorStaticParameters) { f = std::make_shared(OutputVector{out0, out1, out2, out3}, ParameterVector{X, Y, M}); } - InferenceEngine::CNNNetwork network(f); - ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({})); - ASSERT_TRUE(network.getFunction()->get_results()[1]->get_output_partial_shape(0).compatible({1, 1, 1})); + ASSERT_TRUE(f->get_results()[0]->get_output_partial_shape(0).compatible({})); + ASSERT_TRUE(f->get_results()[1]->get_output_partial_shape(0).compatible({1, 1, 1})); // concat output (seq len = 1, so it means num_iter = 1) - ASSERT_TRUE(network.getFunction()->get_results()[2]->get_output_partial_shape(0).compatible({1, 1, 1})); - ASSERT_TRUE(network.getFunction()->get_results()[3]->get_output_partial_shape(0).compatible({1, 1, 1})); + ASSERT_TRUE(f->get_results()[2]->get_output_partial_shape(0).compatible({1, 1, 1})); + ASSERT_TRUE(f->get_results()[3]->get_output_partial_shape(0).compatible({1, 1, 1})); auto unh = std::make_shared(); init_unique_names(f, unh); - ASSERT_NO_THROW(network.reshape( - InferenceEngine::ICNNNetwork::InputShapes{{f->get_parameters()[0]->get_friendly_name(), {32, 1, 10}}, - {f->get_parameters()[1]->get_friendly_name(), {32, 10, 1}}, - {f->get_parameters()[2]->get_friendly_name(), {32, 1, 10}}})); - check_unique_names(f, unh); - - ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({})); - ASSERT_TRUE(network.getFunction()->get_results()[1]->get_output_partial_shape(0).compatible({32, 1, 10})); - // concat output - ASSERT_TRUE(network.getFunction()->get_results()[2]->get_output_partial_shape(0).compatible({32, 10, 10})); - ASSERT_TRUE(network.getFunction()->get_results()[3]->get_output_partial_shape(0).compatible({32, 1, 1})); + EXPECT_ANY_THROW(f->reshape({{f->get_parameters()[0]->get_friendly_name(), {32, 1, 10}}, + 
{f->get_parameters()[1]->get_friendly_name(), {32, 10, 1}}, + {f->get_parameters()[2]->get_friendly_name(), {32, 1, 10}}})); } TEST(SmartReshapeTests, TensorIteratorDynamicParameters) { @@ -109,26 +99,17 @@ TEST(SmartReshapeTests, TensorIteratorDynamicParameters) { f = std::make_shared(OutputVector{out0, out1, out2, out3}, ParameterVector{X, Y, M}); } - InferenceEngine::CNNNetwork network(f); - ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({})); - ASSERT_TRUE(network.getFunction()->get_results()[1]->get_output_partial_shape(0).compatible({1, 1, 1})); + ASSERT_TRUE(f->get_results()[0]->get_output_partial_shape(0).compatible({})); + ASSERT_TRUE(f->get_results()[1]->get_output_partial_shape(0).compatible({1, 1, 1})); // concat output (seq len = 1, so it means num_iter = 1) - ASSERT_TRUE(network.getFunction()->get_results()[2]->get_output_partial_shape(0).compatible({1, 1, 1})); - ASSERT_TRUE(network.getFunction()->get_results()[3]->get_output_partial_shape(0).compatible({1, 1, 1})); + ASSERT_TRUE(f->get_results()[2]->get_output_partial_shape(0).compatible({1, 1, 1})); + ASSERT_TRUE(f->get_results()[3]->get_output_partial_shape(0).compatible({1, 1, 1})); auto unh = std::make_shared(); init_unique_names(f, unh); - ASSERT_NO_THROW(network.reshape( - InferenceEngine::ICNNNetwork::InputShapes{{f->get_parameters()[0]->get_friendly_name(), {32, 1, 10}}, - {f->get_parameters()[1]->get_friendly_name(), {32, 10, 1}}, - {f->get_parameters()[2]->get_friendly_name(), {32, 1, 10}}})); - check_unique_names(f, unh); - - ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({})); - ASSERT_TRUE(network.getFunction()->get_results()[1]->get_output_partial_shape(0).compatible({32, 1, 10})); - // concat output - ASSERT_TRUE(network.getFunction()->get_results()[2]->get_output_partial_shape(0).compatible({32, 10, 10})); - ASSERT_TRUE(network.getFunction()->get_results()[3]->get_output_partial_shape(0).compatible({32, 1, 1})); + EXPECT_ANY_THROW(f->reshape({{f->get_parameters()[0]->get_friendly_name(), {32, 1, 10}}, + {f->get_parameters()[1]->get_friendly_name(), {32, 10, 1}}, + {f->get_parameters()[2]->get_friendly_name(), {32, 1, 10}}})); } TEST(SmartReshapeTests, LoopStaticParameters) { @@ -174,29 +155,17 @@ TEST(SmartReshapeTests, LoopStaticParameters) { f = std::make_shared(OutputVector{out0, out1, out2, out3}, ParameterVector{X, Y, M}); } - InferenceEngine::CNNNetwork network(f); - ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({})); - ASSERT_TRUE( - network.getFunction()->get_results()[1]->get_output_partial_shape(0).compatible(PartialShape::dynamic())); + ASSERT_TRUE(f->get_results()[0]->get_output_partial_shape(0).compatible({})); + ASSERT_TRUE(f->get_results()[1]->get_output_partial_shape(0).compatible(PartialShape::dynamic())); // concat output - ASSERT_TRUE( - network.getFunction()->get_results()[2]->get_output_partial_shape(0).compatible(PartialShape::dynamic())); - ASSERT_TRUE( - network.getFunction()->get_results()[3]->get_output_partial_shape(0).compatible(PartialShape::dynamic())); + ASSERT_TRUE(f->get_results()[2]->get_output_partial_shape(0).compatible(PartialShape::dynamic())); + ASSERT_TRUE(f->get_results()[3]->get_output_partial_shape(0).compatible(PartialShape::dynamic())); auto unh = std::make_shared(); init_unique_names(f, unh); - ASSERT_NO_THROW(network.reshape( - InferenceEngine::ICNNNetwork::InputShapes{{f->get_parameters()[0]->get_friendly_name(), {32, 1, 10}}, 
- {f->get_parameters()[1]->get_friendly_name(), {32, 10, 1}}, - {f->get_parameters()[2]->get_friendly_name(), {32, 1, 10}}})); - check_unique_names(f, unh); - - ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({})); - ASSERT_TRUE(network.getFunction()->get_results()[1]->get_output_partial_shape(0).compatible({32, 1, 10})); - // concat output - ASSERT_TRUE(network.getFunction()->get_results()[2]->get_output_partial_shape(0).compatible({32, 10, 10})); - ASSERT_TRUE(network.getFunction()->get_results()[3]->get_output_partial_shape(0).compatible({32, 1, 1})); + EXPECT_ANY_THROW(f->reshape({{f->get_parameters()[0]->get_friendly_name(), {32, 1, 10}}, + {f->get_parameters()[1]->get_friendly_name(), {32, 10, 1}}, + {f->get_parameters()[2]->get_friendly_name(), {32, 1, 10}}})); } TEST(SmartReshapeTests, LoopDynamicParameters) { @@ -242,29 +211,17 @@ TEST(SmartReshapeTests, LoopDynamicParameters) { f = std::make_shared(OutputVector{out0, out1, out2, out3}, ParameterVector{X, Y, M}); } - InferenceEngine::CNNNetwork network(f); - ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({})); - ASSERT_TRUE( - network.getFunction()->get_results()[1]->get_output_partial_shape(0).compatible(PartialShape::dynamic())); + ASSERT_TRUE(f->get_results()[0]->get_output_partial_shape(0).compatible({})); + ASSERT_TRUE(f->get_results()[1]->get_output_partial_shape(0).compatible(PartialShape::dynamic())); // concat output - ASSERT_TRUE( - network.getFunction()->get_results()[2]->get_output_partial_shape(0).compatible(PartialShape::dynamic())); - ASSERT_TRUE( - network.getFunction()->get_results()[3]->get_output_partial_shape(0).compatible(PartialShape::dynamic())); + ASSERT_TRUE(f->get_results()[2]->get_output_partial_shape(0).compatible(PartialShape::dynamic())); + ASSERT_TRUE(f->get_results()[3]->get_output_partial_shape(0).compatible(PartialShape::dynamic())); auto unh = std::make_shared(); init_unique_names(f, unh); - ASSERT_NO_THROW(network.reshape( - InferenceEngine::ICNNNetwork::InputShapes{{f->get_parameters()[0]->get_friendly_name(), {32, 1, 10}}, - {f->get_parameters()[1]->get_friendly_name(), {32, 10, 1}}, - {f->get_parameters()[2]->get_friendly_name(), {32, 1, 10}}})); - check_unique_names(f, unh); - - ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({})); - ASSERT_TRUE(network.getFunction()->get_results()[1]->get_output_partial_shape(0).compatible({32, 1, 10})); - // concat output - ASSERT_TRUE(network.getFunction()->get_results()[2]->get_output_partial_shape(0).compatible({32, 10, 10})); - ASSERT_TRUE(network.getFunction()->get_results()[3]->get_output_partial_shape(0).compatible({32, 1, 1})); + EXPECT_ANY_THROW(f->reshape({{f->get_parameters()[0]->get_friendly_name(), {32, 1, 10}}, + {f->get_parameters()[1]->get_friendly_name(), {32, 10, 1}}, + {f->get_parameters()[2]->get_friendly_name(), {32, 1, 10}}})); } TEST(SmartReshapeTests, LoopParentParametersUsedInBody) { @@ -314,29 +271,17 @@ TEST(SmartReshapeTests, LoopParentParametersUsedInBody) { f = std::make_shared(OutputVector{out0, out1, out2, out3}, ParameterVector{X, Y, M}); } - InferenceEngine::CNNNetwork network(f); - ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({})); - ASSERT_TRUE( - network.getFunction()->get_results()[1]->get_output_partial_shape(0).compatible(PartialShape::dynamic())); + ASSERT_TRUE(f->get_results()[0]->get_output_partial_shape(0).compatible({})); + 
ASSERT_TRUE(f->get_results()[1]->get_output_partial_shape(0).compatible(PartialShape::dynamic())); // concat output - ASSERT_TRUE( - network.getFunction()->get_results()[2]->get_output_partial_shape(0).compatible(PartialShape::dynamic())); - ASSERT_TRUE( - network.getFunction()->get_results()[3]->get_output_partial_shape(0).compatible(PartialShape::dynamic())); + ASSERT_TRUE(f->get_results()[2]->get_output_partial_shape(0).compatible(PartialShape::dynamic())); + ASSERT_TRUE(f->get_results()[3]->get_output_partial_shape(0).compatible(PartialShape::dynamic())); auto unh = std::make_shared(); init_unique_names(f, unh); - ASSERT_NO_THROW(network.reshape( - InferenceEngine::ICNNNetwork::InputShapes{{f->get_parameters()[0]->get_friendly_name(), {4, 3, 2}}, - {f->get_parameters()[1]->get_friendly_name(), {4, 3, 2}}, - {f->get_parameters()[2]->get_friendly_name(), {4, 3, 2}}})); - check_unique_names(f, unh); - - ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({})); - ASSERT_TRUE(network.getFunction()->get_results()[1]->get_output_partial_shape(0).compatible({4, 3, 2})); - // concat output - ASSERT_TRUE(network.getFunction()->get_results()[2]->get_output_partial_shape(0).compatible({4, 30, 2})); - ASSERT_TRUE(network.getFunction()->get_results()[3]->get_output_partial_shape(0).compatible({4, 3, 2})); + EXPECT_ANY_THROW(f->reshape({{f->get_parameters()[0]->get_friendly_name(), {4, 3, 2}}, + {f->get_parameters()[1]->get_friendly_name(), {4, 3, 2}}, + {f->get_parameters()[2]->get_friendly_name(), {4, 3, 2}}})); } TEST(SmartReshapeTests, TensorIteratorParentParameterUsedInBody) { @@ -381,24 +326,15 @@ TEST(SmartReshapeTests, TensorIteratorParentParameterUsedInBody) { f = std::make_shared(OutputVector{out0, out1, out2, out3}, ParameterVector{X, Y, M}); } - InferenceEngine::CNNNetwork network(f); - ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({})); - ASSERT_TRUE(network.getFunction()->get_results()[1]->get_output_partial_shape(0).compatible({1, 1, 1})); + ASSERT_TRUE(f->get_results()[0]->get_output_partial_shape(0).compatible({})); + ASSERT_TRUE(f->get_results()[1]->get_output_partial_shape(0).compatible({1, 1, 1})); // concat output (seq len = 1, so it means num_iter = 1) - ASSERT_TRUE(network.getFunction()->get_results()[2]->get_output_partial_shape(0).compatible({1, 1, 1})); - ASSERT_TRUE(network.getFunction()->get_results()[3]->get_output_partial_shape(0).compatible({1, 1, 1})); + ASSERT_TRUE(f->get_results()[2]->get_output_partial_shape(0).compatible({1, 1, 1})); + ASSERT_TRUE(f->get_results()[3]->get_output_partial_shape(0).compatible({1, 1, 1})); auto unh = std::make_shared(); init_unique_names(f, unh); - ASSERT_NO_THROW(network.reshape( - InferenceEngine::ICNNNetwork::InputShapes{{f->get_parameters()[0]->get_friendly_name(), {32, 1, 10}}, - {f->get_parameters()[1]->get_friendly_name(), {1, 1, 1}}, - {f->get_parameters()[2]->get_friendly_name(), {32, 1, 10}}})); - check_unique_names(f, unh); - - ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({})); - ASSERT_TRUE(network.getFunction()->get_results()[1]->get_output_partial_shape(0).compatible({32, 1, 10})); - // concat output - ASSERT_TRUE(network.getFunction()->get_results()[2]->get_output_partial_shape(0).compatible({32, 10, 10})); - ASSERT_TRUE(network.getFunction()->get_results()[3]->get_output_partial_shape(0).compatible({32, 1, 1})); + EXPECT_ANY_THROW(f->reshape({{f->get_parameters()[0]->get_friendly_name(), 
{32, 1, 10}}, + {f->get_parameters()[1]->get_friendly_name(), {1, 1, 1}}, + {f->get_parameters()[2]->get_friendly_name(), {32, 1, 10}}})); } diff --git a/src/common/transformations/tests/utils/primitives_priority_test.cpp b/src/common/transformations/tests/utils/primitives_priority_test.cpp index a748477e2b9137..64f6330a1da188 100644 --- a/src/common/transformations/tests/utils/primitives_priority_test.cpp +++ b/src/common/transformations/tests/utils/primitives_priority_test.cpp @@ -2,7 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 // -#include #include #include @@ -22,8 +21,6 @@ using namespace ov; using namespace testing; -using namespace InferenceEngine; -using namespace InferenceEngine::details; TEST(TransformationTests, ConvBiasFusion) { std::shared_ptr f(nullptr); @@ -46,12 +43,7 @@ TEST(TransformationTests, ConvBiasFusion) { std::unordered_map pp; - InferenceEngine::CNNNetwork network(f); - - // Set PrimitivesPriority to all Convolutions - auto model = network.getFunction(); - ASSERT_NE(nullptr, model); - for (auto& op : model->get_ops()) { + for (auto& op : f->get_ops()) { if (auto conv = std::dynamic_pointer_cast(op)) { auto& rtInfo = conv->get_rt_info(); rtInfo[ov::PrimitivesPriority::get_type_info_static()] = ov::PrimitivesPriority("test"); @@ -59,8 +51,7 @@ TEST(TransformationTests, ConvBiasFusion) { } } - auto clonedNetwork = InferenceEngine::details::cloneNetwork(network); - auto funcs = clonedNetwork.getFunction(); + auto funcs = f->clone(); for (auto& op : funcs->get_ops()) { if (auto conv = std::dynamic_pointer_cast(op)) { From e8861fb16adff7990a044a3e37decef2fee30608 Mon Sep 17 00:00:00 2001 From: Mikhail Ryzhov Date: Sat, 14 Oct 2023 11:19:36 +0200 Subject: [PATCH 198/257] extended logs (#20442) --- .github/workflows/linux.yml | 5 +++-- .github/workflows/windows.yml | 6 ++++-- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index c8fc68c1229b09..59e021e24eb153 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -1136,13 +1136,14 @@ jobs: with: name: test-results-functional-cpu path: | - ${{ env.INSTALL_TEST_DIR }}/TEST*.xml + ${{ env.INSTALL_TEST_DIR }}/temp/*.log + ${{ env.INSTALL_TEST_DIR }}/logs/*.log ${{ env.INSTALL_TEST_DIR }}/logs/failed/*.log ${{ env.INSTALL_TEST_DIR }}/logs/crashed/*.log ${{ env.INSTALL_TEST_DIR }}/logs/hanged/*.log ${{ env.INSTALL_TEST_DIR }}/logs/interapted/*.log - ${{ env.INSTALL_TEST_DIR }}/logs/disabled_tests.log ${{ env.INSTALL_TEST_DIR }}/logs/hash_table.csv + ${{ env.PARALLEL_TEST_CACHE }} if-no-files-found: 'error' TensorFlow_Hub_Models_Tests: diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index c33663c8d52bc4..98a53622a0b6da 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -301,7 +301,7 @@ jobs: # Find and install the core OV wheel $ovCoreWheelPath=Get-ChildItem -Path "${{ env.INSTALL_DIR }}\tools" -Filter openvino-*.whl | % { $_.FullName } python3 -m pip install "$ovCoreWheelPath" - + # Find and install the dev OV wheel $ovDevWheelPath=Get-ChildItem -Path "${{ env.INSTALL_DIR }}\tools" -Filter openvino_dev*.whl | % { $_.FullName } python3 -m pip install "$ovDevWheelPath[mxnet,caffe,kaldi,onnx,tensorflow2,pytorch]" @@ -695,9 +695,11 @@ jobs: name: test-results-functional-cpu path: | ${{ env.INSTALL_TEST_DIR }}/temp/*.log + ${{ env.INSTALL_TEST_DIR }}/logs/*.log ${{ env.INSTALL_TEST_DIR }}/logs/failed/*.log ${{ env.INSTALL_TEST_DIR }}/logs/crashed/*.log ${{ env.INSTALL_TEST_DIR 
}}/logs/hanged/*.log ${{ env.INSTALL_TEST_DIR }}/logs/interapted/*.log - ${{ env.INSTALL_TEST_DIR }}/logs/*.log + ${{ env.INSTALL_TEST_DIR }}/logs/hash_table.csv + ${{ env.PARALLEL_TEST_CACHE }} if-no-files-found: 'error' From 6f6017724f7d0998e58c2589489a5516a7f912e6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 15 Oct 2023 03:14:14 +0400 Subject: [PATCH 199/257] Bump pytest-timeout from 2.1.0 to 2.2.0 in /src/bindings/python (#20310) Bumps [pytest-timeout](https://github.com/pytest-dev/pytest-timeout) from 2.1.0 to 2.2.0. - [Commits](https://github.com/pytest-dev/pytest-timeout/compare/2.1.0...2.2.0) --- updated-dependencies: - dependency-name: pytest-timeout dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- src/bindings/python/constraints.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/bindings/python/constraints.txt b/src/bindings/python/constraints.txt index 9db99017681f4f..20e888bde84984 100644 --- a/src/bindings/python/constraints.txt +++ b/src/bindings/python/constraints.txt @@ -5,7 +5,7 @@ numpy>=1.16.6,<1.27 # Python bindings, frontends pytest>=5.0,<7.5 pytest-dependency==0.5.1 pytest-html==3.2.0 -pytest-timeout==2.1.0 +pytest-timeout==2.2.0 # Python bindings py>=1.9.0 From f107b7663fa17434d07b6cc249a6645a96192a68 Mon Sep 17 00:00:00 2001 From: Pawel Raasz Date: Mon, 16 Oct 2023 06:16:43 +0200 Subject: [PATCH 200/257] Migrate Split operator to new API (#20263) --- src/core/include/openvino/op/split.hpp | 5 +- .../include/openvino/reference/split.hpp | 17 ++- src/core/reference/src/op/split.cpp | 42 ++++--- src/core/src/op/split.cpp | 110 +++++++++--------- 4 files changed, 97 insertions(+), 77 deletions(-) diff --git a/src/core/include/openvino/op/split.hpp b/src/core/include/openvino/op/split.hpp index 918457c0d84a05..6137f0591cfba1 100644 --- a/src/core/include/openvino/op/split.hpp +++ b/src/core/include/openvino/op/split.hpp @@ -39,9 +39,8 @@ class OPENVINO_API Split : public Op { void set_num_splits(const size_t num_splits) { m_num_splits = num_splits; } - OPENVINO_SUPPRESS_DEPRECATED_START - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - OPENVINO_SUPPRESS_DEPRECATED_END + + bool evaluate(TensorVector& outputs, const TensorVector& inputs) const override; bool evaluate_lower(TensorVector& outputs) const override; bool evaluate_upper(TensorVector& outputs) const override; bool has_evaluate() const override; diff --git a/src/core/reference/include/openvino/reference/split.hpp b/src/core/reference/include/openvino/reference/split.hpp index dcbede1883a409..6e3564ed035bb1 100644 --- a/src/core/reference/include/openvino/reference/split.hpp +++ b/src/core/reference/include/openvino/reference/split.hpp @@ -4,17 +4,28 @@ #pragma once -#include +#include -#include "openvino/reference/slice.hpp" +#include "openvino/core/shape.hpp" namespace ov { namespace reference { + +/** + * @brief Reference implementation of Split operator. + * + * @param data Pointer to input data. + * @param data_shape Input data shape. + * @param elem_size Size of single element type. + * @param axis Axis used for split input data. 
+ * @param num_splits Number of splits + * @param out_data Pointer to output data pointers (must have size of num_splits) + */ void split(const char* data, const Shape& data_shape, size_t elem_size, int64_t axis, size_t num_splits, char** out_data); -} +} // namespace reference } // namespace ov diff --git a/src/core/reference/src/op/split.cpp b/src/core/reference/src/op/split.cpp index 6186bdd5af941d..855fc29c4a1be9 100644 --- a/src/core/reference/src/op/split.cpp +++ b/src/core/reference/src/op/split.cpp @@ -6,35 +6,43 @@ #include -#include +#include -using namespace ov; +#include "openvino/core/coordinate.hpp" +#include "openvino/reference/slice.hpp" -void reference::split(const char* data, - const Shape& data_shape, - size_t elem_size, - int64_t axis, - size_t num_splits, - char** out_data) { +namespace ov { +namespace reference { + +void split(const char* data, + const Shape& data_shape, + const size_t elem_size, + const int64_t axis, + const size_t num_splits, + char** out_data) { const size_t part_length = data_shape.at(axis) / num_splits; - Shape output_shape = data_shape; - output_shape.at(axis) = part_length; + auto output_shape = data_shape; + output_shape[axis] = part_length; - std::vector lower_bounds(data_shape.size(), 0); - std::vector upper_bounds = data_shape; - upper_bounds.at(axis) = part_length; + Coordinate lower_bounds(data_shape.size(), 0); + Coordinate upper_bounds = output_shape; + auto& lb_at_axis = lower_bounds[axis]; + auto& ub_at_axis = upper_bounds[axis]; - for (size_t i = 0; i < num_splits; ++i) { + const auto out_last = std::next(out_data, num_splits); + for (auto out_first = out_data; out_first != out_last; ++out_first) { reference::slice(data, - out_data[i], + *out_first, data_shape, lower_bounds, upper_bounds, Strides(lower_bounds.size(), 1), output_shape, elem_size); - lower_bounds.at(axis) += part_length; - upper_bounds.at(axis) += part_length; + lb_at_axis += part_length; + ub_at_axis += part_length; } } +} // namespace reference +} // namespace ov diff --git a/src/core/src/op/split.cpp b/src/core/src/op/split.cpp index dc2ac72b509db7..4c5563e892321c 100644 --- a/src/core/src/op/split.cpp +++ b/src/core/src/op/split.cpp @@ -2,42 +2,46 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "openvino/reference/split.hpp" +#include "openvino/op/split.hpp" #include -#include #include "bound_evaluate.hpp" #include "itt.hpp" -#include "ngraph/attribute_visitor.hpp" -#include "ngraph/builder/split.hpp" -#include "ngraph/op/constant.hpp" -#include "ngraph/op/split.hpp" -#include "ngraph/op/util/op_types.hpp" -#include "ngraph/runtime/host_tensor.hpp" -#include "ngraph/validation_util.hpp" - -using namespace std; -using namespace ngraph; - -op::v1::Split::Split(const Output& data, const Output& axis, const size_t num_splits) +#include "openvino/core/validation_util.hpp" +#include "openvino/reference/split.hpp" +#include "split_shape_inference.hpp" + +namespace ov { +namespace op { + +namespace v1 { +namespace validate { +namespace { +bool axis_type(const element::Type& et) { + return et.is_integral_number(); +} +} // namespace +} // namespace validate + +Split::Split(const Output& data, const Output& axis, const size_t num_splits) : Op({data, axis}), m_num_splits{num_splits} { constructor_validate_and_infer_types(); } -bool ngraph::op::v1::Split::visit_attributes(AttributeVisitor& visitor) { +bool Split::visit_attributes(AttributeVisitor& visitor) { OV_OP_SCOPE(v1_Split_visit_attributes); visitor.on_attribute("num_splits", m_num_splits); return true; } -void 
op::v1::Split::validate_and_infer_types() { +void Split::validate_and_infer_types() { OV_OP_SCOPE(v1_Split_validate_and_infer_types); const auto& axis_et = get_input_element_type(1); NODE_VALIDATION_CHECK(this, - axis_et.is_integral_number(), + validate::axis_type(axis_et), "Element type of 'axis' input must be integer. Got: ", axis_et); @@ -58,72 +62,70 @@ void op::v1::Split::validate_and_infer_types() { set_input_is_relevant_to_shape(0); } -shared_ptr op::v1::Split::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr Split::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v1_Split_clone_with_new_inputs); check_new_args_count(this, new_args); - return make_shared(new_args.at(0), new_args.at(1), m_num_splits); + return std::make_shared(new_args.at(0), new_args.at(1), m_num_splits); } -OPENVINO_SUPPRESS_DEPRECATED_START -bool op::v1::Split::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { +bool Split::evaluate(TensorVector& outputs, const TensorVector& inputs) const { OV_OP_SCOPE(v1_Split_evaluate); - OPENVINO_SUPPRESS_DEPRECATED_START - OPENVINO_ASSERT(validate_host_tensor_vector(outputs, m_num_splits) && validate_host_tensor_vector(inputs, 2)); - OPENVINO_SUPPRESS_DEPRECATED_END + OPENVINO_ASSERT(outputs.size() == m_num_splits); - if (has_evaluate()) { + const auto output_shapes = + shape_infer(this, ov::util::get_tensors_partial_shapes(inputs), make_tensor_accessor(inputs)); + const auto& axis_tensor = inputs[1]; + const auto result = validate::axis_type(axis_tensor.get_element_type()); + if (result) { const auto& data_tensor = inputs[0]; - const auto& axis_tensor = inputs[1]; - - const auto input_shapes = - std::vector{data_tensor->get_partial_shape(), axis_tensor->get_partial_shape()}; - - auto output_shapes = shape_infer(this, input_shapes, make_tensor_accessor(inputs)); auto outputs_data = std::vector(m_num_splits); - for (size_t i = 0; i < m_num_splits; ++i) { - outputs[i]->set_shape(output_shapes[i].get_shape()); - outputs_data[i] = outputs[i]->get_data_ptr(); + { + auto outputs_it = outputs.begin(); + auto outputs_data_it = outputs_data.begin(); + for (const auto& p_shape : output_shapes) { + outputs_it->set_shape(p_shape.get_shape()); + *outputs_data_it = static_cast(outputs_it->data()); + ++outputs_it, ++outputs_data_it; + } } - OPENVINO_SUPPRESS_DEPRECATED_START - auto axis = host_tensor_2_vector(axis_tensor)[0]; - axis = normalize_axis(this, axis, data_tensor->get_partial_shape().rank()); - OPENVINO_SUPPRESS_DEPRECATED_END + auto axis = get_tensor_data_as(axis_tensor).front(); + axis = ov::util::normalize(axis, data_tensor.get_shape().size()); - ov::reference::split(data_tensor->get_data_ptr(), - data_tensor->get_shape(), - data_tensor->get_element_type().size(), + ov::reference::split(static_cast(data_tensor.data()), + data_tensor.get_shape(), + data_tensor.get_element_type().size(), axis, m_num_splits, outputs_data.data()); - return true; } - return false; + + return result; } -OPENVINO_SUPPRESS_DEPRECATED_END -bool op::v1::Split::has_evaluate() const { +bool Split::has_evaluate() const { OV_OP_SCOPE(v1_Split_has_evaluate); - return get_input_element_type(1).is_integral_number(); + return validate::axis_type(get_input_element_type(1)); } -bool op::v1::Split::evaluate_lower(ov::TensorVector& output_values) const { +bool Split::evaluate_lower(ov::TensorVector& output_values) const { OV_OP_SCOPE(v1_Split_evaluate_lower); - - return input(1).get_tensor().has_and_set_bound() && 
default_lower_bound_evaluator(this, output_values); + return get_input_tensor(1).has_and_set_bound() && default_lower_bound_evaluator(this, output_values); } -bool op::v1::Split::evaluate_upper(ov::TensorVector& output_values) const { +bool Split::evaluate_upper(ov::TensorVector& output_values) const { OV_OP_SCOPE(v1_Split_evaluate_upper); - - return input(1).get_tensor().has_and_set_bound() && default_upper_bound_evaluator(this, output_values); + return get_input_tensor(1).has_and_set_bound() && default_upper_bound_evaluator(this, output_values); } -bool op::v1::Split::evaluate_label(TensorLabelVector& output_labels) const { +bool Split::evaluate_label(TensorLabelVector& output_labels) const { OPENVINO_ASSERT(output_labels.size() == get_num_splits()); OPENVINO_SUPPRESS_DEPRECATED_START - return input(1).get_tensor().has_and_set_bound() && default_label_evaluator(this, output_labels); + return get_input_tensor(1).has_and_set_bound() && default_label_evaluator(this, output_labels); OPENVINO_SUPPRESS_DEPRECATED_END } +} // namespace v1 +} // namespace op +} // namespace ov From 893517dd4a3ae6ba2e1f7e5a60a19c63a88d0b39 Mon Sep 17 00:00:00 2001 From: Pawel Raasz Date: Mon, 16 Oct 2023 06:54:38 +0200 Subject: [PATCH 201/257] Migrate LogicalAnd to new API (#20423) --- src/core/include/openvino/op/logical_and.hpp | 5 +- .../include/openvino/reference/and.hpp | 26 +++-- src/core/src/op/logical_and.cpp | 98 +++++++------------ 3 files changed, 51 insertions(+), 78 deletions(-) diff --git a/src/core/include/openvino/op/logical_and.hpp b/src/core/include/openvino/op/logical_and.hpp index 6d55f8f3585e0f..382679d16b78e1 100644 --- a/src/core/include/openvino/op/logical_and.hpp +++ b/src/core/include/openvino/op/logical_and.hpp @@ -35,10 +35,7 @@ class OPENVINO_API LogicalAnd : public util::BinaryElementwiseLogical { const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - bool visit_attributes(AttributeVisitor& visitor) override; - OPENVINO_SUPPRESS_DEPRECATED_START - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - OPENVINO_SUPPRESS_DEPRECATED_END + bool evaluate(TensorVector& outputs, const TensorVector& inputs) const override; bool has_evaluate() const override; }; } // namespace v1 diff --git a/src/core/reference/include/openvino/reference/and.hpp b/src/core/reference/include/openvino/reference/and.hpp index 326e4b59d773af..8f43b045d6398c 100644 --- a/src/core/reference/include/openvino/reference/and.hpp +++ b/src/core/reference/include/openvino/reference/and.hpp @@ -4,31 +4,37 @@ #pragma once -#include +#include +#include #include "openvino/core/shape.hpp" -#include "openvino/op/util/attr_types.hpp" #include "openvino/reference/autobroadcast_binop.hpp" namespace ov { namespace reference { -template +template void logical_and(const T* arg0, const T* arg1, T* out, size_t count) { - for (size_t i = 0; i < count; i++) { - out[i] = static_cast(arg0[i] && arg1[i]); - } + std::transform(arg0, std::next(arg0, count), arg1, out, std::logical_and()); } -template +/** + * @brief Reference implementation of binary elementwise LogicalAnd operator. + * + * @param arg0 Pointer to input 0 data. + * @param arg1 Pointer to input 1 data. + * @param out Pointer to output data. + * @param arg_shape0 Input 0 shape. + * @param arg_shape1 Input 1 shape. + * @param broadcast_spec Broadcast specification mode. 
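+ *
+ * Illustrative example (sample values chosen here, not taken from the sources):
+ * for arg0 = {1, 0, 1, 1} and arg1 = {1, 1, 0, 1} with equal shapes, the values
+ * written to out are {1, 0, 0, 1}, i.e. plain elementwise logical AND.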
+ */ +template void logical_and(const T* arg0, const T* arg1, T* out, const Shape& arg0_shape, const Shape& arg1_shape, const op::AutoBroadcastSpec& broadcast_spec) { - autobroadcast_binop(arg0, arg1, out, arg0_shape, arg1_shape, broadcast_spec, [](T x, T y) -> T { - return static_cast(x && y); - }); + autobroadcast_binop(arg0, arg1, out, arg0_shape, arg1_shape, broadcast_spec, std::logical_and()); } } // namespace reference } // namespace ov diff --git a/src/core/src/op/logical_and.cpp b/src/core/src/op/logical_and.cpp index d6f451715a564d..fe8bd612ed2d85 100644 --- a/src/core/src/op/logical_and.cpp +++ b/src/core/src/op/logical_and.cpp @@ -2,83 +2,53 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/logical_and.hpp" + #include "itt.hpp" -#include "ngraph/op/and.hpp" -#include "ngraph/runtime/host_tensor.hpp" -#include "ngraph/validation_util.hpp" #include "openvino/reference/and.hpp" +#include "utils.hpp" -using namespace std; -using namespace ngraph; - -op::v1::LogicalAnd::LogicalAnd(const Output& arg0, - const Output& arg1, - const AutoBroadcastSpec& auto_broadcast) +namespace ov { +namespace op { +namespace v1 { +LogicalAnd::LogicalAnd(const Output& arg0, const Output& arg1, const AutoBroadcastSpec& auto_broadcast) : BinaryElementwiseLogical(arg0, arg1, auto_broadcast) { constructor_validate_and_infer_types(); } -bool op::v1::LogicalAnd::visit_attributes(AttributeVisitor& visitor) { - OV_OP_SCOPE(v1_LogicalAnd_visit_attributes); - BinaryElementwiseLogical::visit_attributes(visitor); - return true; -} - -shared_ptr op::v1::LogicalAnd::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr LogicalAnd::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v1_LogicalAnd_clone_with_new_inputs); check_new_args_count(this, new_args); - return make_shared(new_args.at(0), new_args.at(1), this->get_autob()); + return std::make_shared(new_args.at(0), new_args.at(1), get_autob()); } -OPENVINO_SUPPRESS_DEPRECATED_START -namespace logand { -namespace { -template -bool evaluate(const HostTensorPtr& arg0, - const HostTensorPtr& arg1, - const HostTensorPtr& out, - const op::AutoBroadcastSpec& broadcast_spec) { - ov::reference::logical_and(arg0->get_data_ptr(), - arg1->get_data_ptr(), - out->get_data_ptr(), - arg0->get_shape(), - arg1->get_shape(), - broadcast_spec); - return true; -} - -bool evaluate_logand(const HostTensorPtr& arg0, - const HostTensorPtr& arg1, - const HostTensorPtr& out, - const op::AutoBroadcastSpec& broadcast_spec) { - bool rc = true; - out->set_broadcast(broadcast_spec, arg0, arg1); - switch (arg0->get_element_type()) { - OPENVINO_TYPE_CASE(evaluate_logand, boolean, arg0, arg1, out, broadcast_spec); - default: - rc = false; - break; - } - return rc; -} -} // namespace -} // namespace logand - -bool op::v1::LogicalAnd::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { +bool LogicalAnd::evaluate(TensorVector& outputs, const TensorVector& inputs) const { OV_OP_SCOPE(v1_LogicalAnd_evaluate); - OPENVINO_SUPPRESS_DEPRECATED_START - OPENVINO_ASSERT(validate_host_tensor_vector(outputs, 1) && validate_host_tensor_vector(inputs, 2)); - OPENVINO_SUPPRESS_DEPRECATED_END - return logand::evaluate_logand(inputs[0], inputs[1], outputs[0], get_autob()); + OPENVINO_ASSERT(outputs.size() == 1); + OPENVINO_ASSERT(inputs.size() == 2); + + const auto& shape_0 = inputs[0].get_shape(); + const auto& shape_1 = inputs[1].get_shape(); + outputs[0].set_shape(infer_broadcast_shape(this, shape_0, shape_1)); + + if 
(inputs[0].get_element_type() == element::boolean) { + using T = fundamental_type_for; + reference::logical_and(inputs[0].data(), + inputs[1].data(), + outputs[0].data(), + shape_0, + shape_1, + get_autob()); + return true; + } else { + return false; + } } -bool op::v1::LogicalAnd::has_evaluate() const { +bool LogicalAnd::has_evaluate() const { OV_OP_SCOPE(v1_LogicalAnd_has_evaluate); - switch (get_input_element_type(0)) { - case ngraph::element::boolean: - return true; - default: - break; - } - return false; + return get_input_element_type(0) == element::boolean; } +} // namespace v1 +} // namespace op +} // namespace ov From fb93638cb2489af8e00b1f8123a814540289777e Mon Sep 17 00:00:00 2001 From: Pawel Raasz Date: Mon, 16 Oct 2023 06:56:24 +0200 Subject: [PATCH 202/257] [core]Migrate LogicalOr to new API (#20421) * Migrate LogicalOr to new API * Remove leftovers --- src/core/include/openvino/op/logical_or.hpp | 4 +- .../include/openvino/reference/or.hpp | 29 +++--- src/core/src/op/logical_or.cpp | 91 +++++++------------ 3 files changed, 53 insertions(+), 71 deletions(-) diff --git a/src/core/include/openvino/op/logical_or.hpp b/src/core/include/openvino/op/logical_or.hpp index 15c00eea04baf3..1dab36217b175a 100644 --- a/src/core/include/openvino/op/logical_or.hpp +++ b/src/core/include/openvino/op/logical_or.hpp @@ -34,9 +34,7 @@ class OPENVINO_API LogicalOr : public util::BinaryElementwiseLogical { std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - OPENVINO_SUPPRESS_DEPRECATED_START - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - OPENVINO_SUPPRESS_DEPRECATED_END + bool evaluate(TensorVector& outputs, const TensorVector& inputs) const override; bool has_evaluate() const override; }; } // namespace v1 diff --git a/src/core/reference/include/openvino/reference/or.hpp b/src/core/reference/include/openvino/reference/or.hpp index 7e821de63e3c03..4b0d760ec41349 100644 --- a/src/core/reference/include/openvino/reference/or.hpp +++ b/src/core/reference/include/openvino/reference/or.hpp @@ -4,31 +4,38 @@ #pragma once -#include +#include +#include #include "openvino/core/shape.hpp" -#include "openvino/op/util/attr_types.hpp" #include "openvino/reference/autobroadcast_binop.hpp" namespace ov { namespace reference { -template -void logical_or(const T* arg0, const T* arg1, T* out, size_t count) { - for (size_t i = 0; i < count; i++) { - out[i] = static_cast(arg0[i] || arg1[i]); - } + +template +void logical_or(const T* arg0, const T* arg1, T* out, const size_t count) { + std::transform(arg0, std::next(arg0, count), arg1, out, std::logical_or()); } -template +/** + * @brief Reference implementation of binary elementwise LogicalOr operator. + * + * @param arg0 Pointer to input 0 data. + * @param arg1 Pointer to input 1 data. + * @param out Pointer to output data. + * @param arg_shape0 Input 0 shape. + * @param arg_shape1 Input 1 shape. + * @param broadcast_spec Broadcast specification mode. 
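+ *
+ * Illustrative example (sample values chosen here, not taken from the sources):
+ * with arg0_shape = {2, 2}, arg0 = {1, 0, 0, 0}, arg1_shape = {1}, arg1 = {1}
+ * and numpy broadcasting, arg1 is broadcast across arg0 and out becomes
+ * {1, 1, 1, 1}.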
+ */ +template void logical_or(const T* arg0, const T* arg1, T* out, const Shape& arg0_shape, const Shape& arg1_shape, const op::AutoBroadcastSpec& broadcast_spec) { - autobroadcast_binop(arg0, arg1, out, arg0_shape, arg1_shape, broadcast_spec, [](T x, T y) -> T { - return static_cast(x || y); - }); + autobroadcast_binop(arg0, arg1, out, arg0_shape, arg1_shape, broadcast_spec, std::logical_or()); } } // namespace reference } // namespace ov diff --git a/src/core/src/op/logical_or.cpp b/src/core/src/op/logical_or.cpp index c473e6c12e385f..403089318de314 100644 --- a/src/core/src/op/logical_or.cpp +++ b/src/core/src/op/logical_or.cpp @@ -2,77 +2,54 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/logical_or.hpp" + #include "itt.hpp" -#include "ngraph/op/or.hpp" -#include "ngraph/runtime/host_tensor.hpp" -#include "ngraph/validation_util.hpp" #include "openvino/reference/or.hpp" +#include "utils.hpp" -using namespace std; -using namespace ngraph; +namespace ov { +namespace op { +namespace v1 { -op::v1::LogicalOr::LogicalOr(const Output& arg0, - const Output& arg1, - const AutoBroadcastSpec& auto_broadcast) +LogicalOr::LogicalOr(const Output& arg0, const Output& arg1, const AutoBroadcastSpec& auto_broadcast) : BinaryElementwiseLogical(arg0, arg1, auto_broadcast) { constructor_validate_and_infer_types(); } -shared_ptr op::v1::LogicalOr::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr LogicalOr::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v1_LogicalOr_clone_with_new_inputs); check_new_args_count(this, new_args); - return make_shared(new_args.at(0), new_args.at(1), this->get_autob()); -} - -OPENVINO_SUPPRESS_DEPRECATED_START -namespace logor { -namespace { -template -bool evaluate(const HostTensorPtr& arg0, - const HostTensorPtr& arg1, - const HostTensorPtr& out, - const op::AutoBroadcastSpec& broadcast_spec) { - ov::reference::logical_or(arg0->get_data_ptr(), - arg1->get_data_ptr(), - out->get_data_ptr(), - arg0->get_shape(), - arg1->get_shape(), - broadcast_spec); - return true; -} - -bool evaluate_logor(const HostTensorPtr& arg0, - const HostTensorPtr& arg1, - const HostTensorPtr& out, - const op::AutoBroadcastSpec& broadcast_spec) { - bool rc = true; - out->set_broadcast(broadcast_spec, arg0, arg1); - switch (arg0->get_element_type()) { - OPENVINO_TYPE_CASE(evaluate_logor, boolean, arg0, arg1, out, broadcast_spec); - default: - rc = false; - break; - } - return rc; + return std::make_shared(new_args.at(0), new_args.at(1), get_autob()); } -} // namespace -} // namespace logor -bool op::v1::LogicalOr::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { +bool LogicalOr::evaluate(TensorVector& outputs, const TensorVector& inputs) const { OV_OP_SCOPE(v1_LogicalOr_evaluate); - OPENVINO_SUPPRESS_DEPRECATED_START - OPENVINO_ASSERT(validate_host_tensor_vector(outputs, 1) && validate_host_tensor_vector(inputs, 2)); - OPENVINO_SUPPRESS_DEPRECATED_END - return logor::evaluate_logor(inputs[0], inputs[1], outputs[0], get_autob()); + OPENVINO_ASSERT(outputs.size() == 1); + OPENVINO_ASSERT(inputs.size() == 2); + + const auto& shape_0 = inputs[0].get_shape(); + const auto& shape_1 = inputs[1].get_shape(); + outputs[0].set_shape(infer_broadcast_shape(this, shape_0, shape_1)); + + if (inputs[0].get_element_type() == element::boolean) { + using T = fundamental_type_for; + reference::logical_or(inputs[0].data(), + inputs[1].data(), + outputs[0].data(), + shape_0, + shape_1, + get_autob()); + return true; + } 
else { + return false; + } } -bool op::v1::LogicalOr::has_evaluate() const { +bool LogicalOr::has_evaluate() const { OV_OP_SCOPE(v1_LogicalOr_has_evaluate); - switch (get_input_element_type(0)) { - case ngraph::element::boolean: - return true; - default: - break; - } - return false; + return get_input_element_type(0) == element::boolean; } +} // namespace v1 +} // namespace op +} // namespace ov From 00618a429bfb4e1ca3cbca941dfd236338ae765f Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Mon, 16 Oct 2023 12:54:30 +0400 Subject: [PATCH 203/257] Relocatable OpenVINO Dev package improvements (#20461) --- ...DeveloperPackageConfigRelocatable.cmake.in | 4 +--- src/bindings/python/CMakeLists.txt | 21 ++++++++++--------- thirdparty/dependencies.cmake | 21 +------------------ thirdparty/pugixml | 2 +- 4 files changed, 14 insertions(+), 34 deletions(-) diff --git a/cmake/templates/OpenVINODeveloperPackageConfigRelocatable.cmake.in b/cmake/templates/OpenVINODeveloperPackageConfigRelocatable.cmake.in index ed9826e663e8d9..a4cdb93d387c58 100644 --- a/cmake/templates/OpenVINODeveloperPackageConfigRelocatable.cmake.in +++ b/cmake/templates/OpenVINODeveloperPackageConfigRelocatable.cmake.in @@ -9,9 +9,7 @@ include(CMakeFindDependencyMacro) # Variables to export in plugin's projects set(ov_options "@OV_OPTIONS@") -list(APPEND ov_options CMAKE_CXX_COMPILER_LAUNCHER CMAKE_C_COMPILER_LAUNCHER - CMAKE_CXX_LINKER_LAUNCHER CMAKE_C_LINKER_LAUNCHER - CMAKE_INSTALL_PREFIX CPACK_GENERATOR) +list(APPEND ov_options CPACK_GENERATOR) if(APPLE) list(APPEND ov_options CMAKE_OSX_ARCHITECTURES CMAKE_OSX_DEPLOYMENT_TARGET) diff --git a/src/bindings/python/CMakeLists.txt b/src/bindings/python/CMakeLists.txt index 89d21c8a7c91f4..a2e8945a807776 100644 --- a/src/bindings/python/CMakeLists.txt +++ b/src/bindings/python/CMakeLists.txt @@ -28,6 +28,8 @@ if(NOT DEFINED OpenVINO_SOURCE_DIR) if(NOT EXISTS "${OpenVINO_BINARY_DIR}/cmake_install.cmake") set(OpenVINODeveloperPackage_RELOCATABLE ON) endif() + + set(OpenVINO_SOURCE_DIR "${OpenVINOPython_SOURCE_DIR}/../../../") endif() if(NOT DEFINED OpenVINODeveloperPackage_RELOCATABLE) @@ -154,8 +156,8 @@ endfunction() set(INIT_FILES_RUNTIME "${OpenVINOPython_SOURCE_DIR}/src/openvino/__init__.py" "${OpenVINOPython_SOURCE_DIR}/src/compatibility/openvino/__init__.py" - "${OpenVINOPython_SOURCE_DIR}/../../../tools/ovc/openvino/__init__.py" - "${OpenVINOPython_SOURCE_DIR}/../../../tools/benchmark_tool/openvino/__init__.py") + "${OpenVINO_SOURCE_DIR}/tools/ovc/openvino/__init__.py" + "${OpenVINO_SOURCE_DIR}/tools/benchmark_tool/openvino/__init__.py") ov_check_init_files_alignment("${INIT_FILES_RUNTIME}") @@ -319,15 +321,15 @@ macro(ov_define_setup_py_dependencies) "${CMAKE_CURRENT_SOURCE_DIR}/wheel/setup.py" "${OpenVINOPython_SOURCE_DIR}/requirements.txt" "${OpenVINOPython_SOURCE_DIR}/wheel/readme.txt" - "${OpenVINOPython_SOURCE_DIR}/../../../LICENSE" - "${OpenVINOPython_SOURCE_DIR}/../../../licensing/onednn_third-party-programs.txt" - "${OpenVINOPython_SOURCE_DIR}/../../../licensing/runtime-third-party-programs.txt" - "${OpenVINOPython_SOURCE_DIR}/../../../licensing/tbb_third-party-programs.txt" - "${OpenVINOPython_SOURCE_DIR}/../../../docs/install_guides/pypi-openvino-rt.md") + "${OpenVINO_SOURCE_DIR}/LICENSE" + "${OpenVINO_SOURCE_DIR}/licensing/onednn_third-party-programs.txt" + "${OpenVINO_SOURCE_DIR}/licensing/runtime-third-party-programs.txt" + "${OpenVINO_SOURCE_DIR}/licensing/tbb_third-party-programs.txt" + "${OpenVINO_SOURCE_DIR}/docs/install_guides/pypi-openvino-rt.md") 
if(wheel_pre_release) list(APPEND ov_setup_py_deps - "${OpenVINOPython_SOURCE_DIR}/../../../docs/install_guides/pre-release-note.md") + "${OpenVINO_SOURCE_DIR}/docs/install_guides/pre-release-note.md") endif() endmacro() @@ -404,9 +406,8 @@ if(ENABLE_TESTS) endif() if(OpenVINODeveloperPackage_FOUND) - # TODO: understand whether it's required # provides a callback function to describe each component in repo - include("${OpenVINOPython_SOURCE_DIR}/../../../cmake/packaging/packaging.cmake") + include("${OpenVINO_SOURCE_DIR}/cmake/packaging/packaging.cmake") ov_cpack(${OV_CPACK_COMPONENTS_ALL}) endif() diff --git a/thirdparty/dependencies.cmake b/thirdparty/dependencies.cmake index 1524378287fdfa..fac4752c318250 100644 --- a/thirdparty/dependencies.cmake +++ b/thirdparty/dependencies.cmake @@ -266,26 +266,7 @@ if(NOT TARGET openvino::pugixml) function(ov_build_pugixml) function(ov_build_pugixml_static) set(BUILD_SHARED_LIBS OFF) - function(install) - cmake_parse_arguments(_install "" "EXPORT" "" ${ARGV}) - if(_install_EXPORT STREQUAL "pugixml-targets") - # does nothing! - # we need to override 'export' command to prevent cmake issue with multiple - # export sets for pugixml-target. Currently, it's installed only by OpenVINO - else() - _install(${ARGV}) - endif() - endfunction() - function(export) - cmake_parse_arguments(_export "" "EXPORT" "" ${ARGV}) - if(_export_EXPORT STREQUAL "pugixml-targets") - # does nothing! - # we need to override 'export' command to prevent cmake issue with multiple - # export sets for pugixml-target. Currently, it's installed only by OpenVINO - else() - _export(${ARGV}) - endif() - endfunction() + set(PUGIXML_INSTALL OFF CACHE BOOL "" FORCE) add_subdirectory(thirdparty/pugixml EXCLUDE_FROM_ALL) endfunction() ov_build_pugixml_static() diff --git a/thirdparty/pugixml b/thirdparty/pugixml index a0e064336317c9..2e357d19a3228c 160000 --- a/thirdparty/pugixml +++ b/thirdparty/pugixml @@ -1 +1 @@ -Subproject commit a0e064336317c9347a91224112af9933598714e9 +Subproject commit 2e357d19a3228c0a301727aac6bea6fecd982d21 From 146ca36f8ffb1d959a2d53f394fa83c90623714f Mon Sep 17 00:00:00 2001 From: Sergey Shlyapnikov Date: Mon, 16 Oct 2023 13:47:57 +0400 Subject: [PATCH 204/257] [GPU] Minor debug improvements: add suffix to shape agnostic kernels names and show batch_hash for impl_types::any (#20275) --- src/plugins/intel_gpu/src/graph/program_node.cpp | 3 ++- .../intel_gpu/src/kernel_selector/kernel_base_opencl.cpp | 4 ++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/src/plugins/intel_gpu/src/graph/program_node.cpp b/src/plugins/intel_gpu/src/graph/program_node.cpp index 62c11e2f7e8066..f72a38af85589f 100644 --- a/src/plugins/intel_gpu/src/graph/program_node.cpp +++ b/src/plugins/intel_gpu/src/graph/program_node.cpp @@ -208,7 +208,8 @@ std::unique_ptr program_node::desc_to_json() const { #endif impls.push_back(selected_impl->get_kernel_name()); - if (get_preferred_impl_type() == impl_types::ocl) { + auto preferred_impl_type = get_preferred_impl_type(); + if (preferred_impl_type != impl_types::onednn && preferred_impl_type != impl_types::cpu) { json_composite cl_dump_info; cl_dump_info.add("batch_hash", selected_impl->get_kernels_dump_info().first); cl_dump_info.add("kernel_entry", selected_impl->get_kernels_dump_info().second); diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernel_base_opencl.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernel_base_opencl.cpp index d0d052b44c4ed3..b382561afdac34 100644 --- 
a/src/plugins/intel_gpu/src/kernel_selector/kernel_base_opencl.cpp +++ b/src/plugins/intel_gpu/src/kernel_selector/kernel_base_opencl.cpp @@ -80,6 +80,10 @@ std::string KernelBaseOpenCL::GetEntryPoint(const std::string& templateName, // UniqueID = program_id + processing_index + additional weight/reorder tag kernelID += "_" + params.uniqueID + "_" + std::to_string(partID); + // Add "__sa" suffix for shape agnostic kernels + if (params.is_shape_agnostic) + kernelID += "__sa"; + return kernelID; } From f655d2177d5eda48bffad9c6fbf3b1728872062b Mon Sep 17 00:00:00 2001 From: Sergey Shlyapnikov Date: Mon, 16 Oct 2023 13:52:45 +0400 Subject: [PATCH 205/257] [GPU] Fix scatter_nd_update output paddings handling (#20447) --- .../intel_gpu/src/graph/scatter_nd_update.cpp | 3 +- .../cl_kernels/scatter_nd_update_ref.cl | 5 +- .../scatter_nd_update_kernel_ref.cpp | 7 ++- .../src/kernel_selector/tensor_type.h | 4 ++ .../test_cases/scatter_nd_update_gpu_test.cpp | 61 +++++++++++++++++++ 5 files changed, 77 insertions(+), 3 deletions(-) diff --git a/src/plugins/intel_gpu/src/graph/scatter_nd_update.cpp b/src/plugins/intel_gpu/src/graph/scatter_nd_update.cpp index 40d2b48d8edfaf..0f64b64a0bcad7 100644 --- a/src/plugins/intel_gpu/src/graph/scatter_nd_update.cpp +++ b/src/plugins/intel_gpu/src/graph/scatter_nd_update.cpp @@ -69,8 +69,9 @@ scatter_nd_update_inst::typed_primitive_inst(network& network, scatter_nd_update void scatter_nd_update_inst::on_execute() { auto input1_shape = _impl_params->input_layouts[1].get_partial_shape(); auto input2_shape = _impl_params->input_layouts[2].get_partial_shape(); + auto same_layouts = _impl_params->input_layouts[0] == _impl_params->output_layouts[0]; - if ((ov::shape_size(input1_shape.to_shape()) == 0) || (ov::shape_size(input2_shape.to_shape()) == 0)) + if (same_layouts && ((ov::shape_size(input1_shape.to_shape()) == 0) || (ov::shape_size(input2_shape.to_shape()) == 0))) reuse_input(); } diff --git a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/scatter_nd_update_ref.cl b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/scatter_nd_update_ref.cl index 4ec9b665760e34..8c48ad4d4e9979 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/scatter_nd_update_ref.cl +++ b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/scatter_nd_update_ref.cl @@ -36,8 +36,10 @@ KERNEL(scatter_nd_update_ref)(OPTIONAL_SHAPE_INFO_ARG const __global INPUT0_TYPE* data, +#ifdef IS_SECOND_ITER const __global INPUT1_TYPE* indices, const __global INPUT2_TYPE* updates, +#endif __global OUTPUT_TYPE* output #if HAS_FUSED_OPS_DECLS , FUSED_OPS_DECLS @@ -56,8 +58,9 @@ KERNEL(scatter_nd_update_ref)(OPTIONAL_SHAPE_INFO_ARG const uint f = dim2 % OUTPUT_FEATURE_NUM; const uint b = dim2 / OUTPUT_FEATURE_NUM; + const uint input_idx = GET_UPDATES_INDEX(INPUT0, ORDER); const uint output_idx = GET_OUTPUT_INDEX(ORDER); - INPUT0_TYPE val = data[output_idx]; + INPUT0_TYPE val = data[input_idx]; #if HAS_FUSED_OPS FUSED_OPS_FIRST_KERNEL; output[output_idx] = TO_OUTPUT_TYPE(FUSED_OPS_RESULT_FIRST_KERNEL); diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/scatter_update/scatter_nd_update_kernel_ref.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/scatter_update/scatter_nd_update_kernel_ref.cpp index 9fbe45f3da02a4..1680d39ca27bb6 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/scatter_update/scatter_nd_update_kernel_ref.cpp +++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/scatter_update/scatter_nd_update_kernel_ref.cpp @@ -170,6 +170,10 @@ KernelsData 
ScatterNDUpdateKernelRef::GetKernelsData(const Params& params, const kd.kernels[i].params.workGroups.global = dispatchData.gws; kd.kernels[i].params.workGroups.local = dispatchData.lws; kd.kernels[i].skip_execution = KernelData::SkipKernelExecution(prim_params); + + // Do not skip copy stage if output buffer is not empty or requires modification + if (i == 0 && prim_params.outputs[0].LogicalSize() != 0 && prim_params.outputs[0] != prim_params.inputs[0]) + kd.kernels[i].skip_execution = false; } }; @@ -178,6 +182,7 @@ KernelsData ScatterNDUpdateKernelRef::GetKernelsData(const Params& params, const for (int i = 0; i < 2; i++) { auto dispatchData = SetDefault(newParams, (i == 1)); auto entry_point = GetEntryPoint(kernelName, newParams.layerID, params, options, i); + auto inputs_number = i == 0 ? 1 : 3; if (i == 1) { size_t input0_rank = newParams.inputs[0].LogicalDims().size(); @@ -213,7 +218,7 @@ KernelsData ScatterNDUpdateKernelRef::GetKernelsData(const Params& params, const clKernelData& kernel = kd.kernels[i]; FillCLKernelData(kernel, dispatchData, params.engineInfo, kernelName, jit, entry_point, - "", false, false, 3, GetFusedPrimitiveInputsCount(params), 1, newParams.has_dynamic_tensors()); + "", false, false, inputs_number, GetFusedPrimitiveInputsCount(params), 1, newParams.has_dynamic_tensors()); } return {kd}; diff --git a/src/plugins/intel_gpu/src/kernel_selector/tensor_type.h b/src/plugins/intel_gpu/src/kernel_selector/tensor_type.h index 3d54dfabade1c0..97f087e6f2a051 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/tensor_type.h +++ b/src/plugins/intel_gpu/src/kernel_selector/tensor_type.h @@ -621,6 +621,10 @@ struct TensorBaseT : public TensorBase { return same; } + bool operator!=(const TensorBaseT& t) const { + return !(*this == t); + } + bool SameDims(const TensorBaseT& t) const { bool same = dtype == t.dtype && layout == t.layout && dims.size() == t.dims.size(); if (same) { diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/scatter_nd_update_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/scatter_nd_update_gpu_test.cpp index d905755e789a71..b3f68e0f00b349 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/scatter_nd_update_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/scatter_nd_update_gpu_test.cpp @@ -4458,6 +4458,67 @@ TEST(scatter_nd_update_gpu, dynamic) { } } + +TEST(scatter_nd_update_gpu, dynamic_padded_output) { + // Dictionary : 2x1x2x8 + // Indexes : 0x3 + // Updates : 0x8 + // Output : 2x1x2x8 + // Input values in fp32 + // + auto& engine = get_test_engine(); + + auto input1_layout = layout{ ov::PartialShape::dynamic(4), data_types::f32, format::bfyx }; + auto input2_layout = layout{ ov::PartialShape::dynamic(2), data_types::f32, format::bfyx }; + auto input3_layout = layout{ ov::PartialShape::dynamic(2), data_types::f32, format::bfyx }; + + auto input1 = engine.allocate_memory({ { 1, 1, 2, 8 }, data_types::f32, format::bfyx }); // Dictionary + auto input2 = engine.allocate_memory({ { 0, 3 }, data_types::f32, format::bfyx }); // Indexes + auto input3 = engine.allocate_memory({ { 0, 8 }, data_types::f32, format::bfyx }); // Updates + + set_values(input1, { + 0.f, 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, + 8.f, 9.f, 10.f, 11.f, 12.f, 13.f, 14.f, 15.f, + }); + + topology topology; + topology.add(input_layout("InputData", input1_layout)); + topology.add(input_layout("InputIndices", input2_layout)); + topology.add(input_layout("InputUpdates", input3_layout)); + topology.add( + scatter_nd_update("scatter_nd_update", 
input_info("InputData"), input_info("InputIndices"), input_info("InputUpdates"), 2, padding({0, 0, 1, 1})) + ); + + ExecutionConfig config = get_test_default_config(engine); + config.set_property(ov::intel_gpu::allow_new_shape_infer(true)); + network network(engine, topology, config); + + network.set_input_data("InputData", input1); + network.set_input_data("InputIndices", input2); + network.set_input_data("InputUpdates", input3); + + auto inst = network.get_primitive("scatter_nd_update"); + auto impl = inst->get_impl(); + ASSERT_TRUE(impl != nullptr); + ASSERT_TRUE(impl->is_dynamic()); + + auto outputs = network.execute(); + + auto output = outputs.at("scatter_nd_update").get_memory(); + cldnn::mem_lock output_ptr(output, get_test_stream()); + + std::vector expected_results = { + 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, + 0.f, 0.f, 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 0.f, + 0.f, 8.f, 9.f, 10.f, 11.f, 12.f, 13.f, 14.f, 15.f, 0.f, + 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, + }; + + for (size_t i = 0; i < expected_results.size(); ++i) { + ASSERT_EQ(expected_results[i], output_ptr[i]); + } +} + TEST(scatter_nd_update_gpu, dynamic_5d) { tests::random_generator rg(std::string(::testing::UnitTest::GetInstance()->current_test_info()->test_suite_name()) + std::string(::testing::UnitTest::GetInstance()->current_test_info()->name())); From 44ac9099b947af4dfd9c50067036aa558a0643f6 Mon Sep 17 00:00:00 2001 From: Tatiana Savina Date: Mon, 16 Oct 2023 11:53:50 +0200 Subject: [PATCH 206/257] [DOCS] Add openvino-dev deprecation note (#20480) * add deprecation note * address comments --- docs/install_guides/pypi-openvino-dev.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/install_guides/pypi-openvino-dev.md b/docs/install_guides/pypi-openvino-dev.md index 8d53e6488f1602..08a16318b42d92 100644 --- a/docs/install_guides/pypi-openvino-dev.md +++ b/docs/install_guides/pypi-openvino-dev.md @@ -3,6 +3,7 @@ > **NOTE**: This version is pre-release software and has not undergone full release validation or qualification. No support is offered on pre-release software and APIs/behavior are subject to change. It should NOT be incorporated into any production software/solution and instead should be used only for early testing and integration while awaiting a final release version of this software. +> **NOTE**: OpenVINO™ Development Tools package has been deprecated and will be discontinued with 2024.0 release. To learn more, refer to the [OpenVINO Legacy Features and Components page](https://docs.openvino.ai/2023.1/openvino_legacy_features.html). Intel® Distribution of OpenVINO™ toolkit is an open-source toolkit for optimizing and deploying AI inference. It can be used to develop applications and solutions based on deep learning tasks, such as: emulation of human vision, automatic speech recognition, natural language processing, recommendation systems, etc. It provides high-performance and rich deployment options, from edge to cloud. 
From a00d28aac0efda832209dbaf6794481a1c16005c Mon Sep 17 00:00:00 2001 From: Vladimir Paramuzov Date: Mon, 16 Oct 2023 14:54:32 +0400 Subject: [PATCH 207/257] [GPU] Type traits cleanup (#20455) --- .../include/intel_gpu/runtime/layout.hpp | 116 +++--------------- .../intel_gpu/runtime/shape_predictor.hpp | 2 +- .../graph_optimizer/concat_input_order.cpp | 4 + .../src/graph/impls/cpu/activation.cpp | 3 +- .../src/graph/impls/cpu/detection_output.cpp | 8 +- .../graph/impls/cpu/non_max_suppression.cpp | 32 ++--- .../src/graph/impls/cpu/proposal.cpp | 24 ++-- .../graph/impls/onednn/convolution_onednn.cpp | 4 +- .../intel_gpu/src/graph/primitive_inst.cpp | 2 +- src/plugins/intel_gpu/src/graph/prior_box.cpp | 4 +- .../src/plugin/sync_infer_request.cpp | 5 +- .../intel_gpu/src/runtime/shape_predictor.cpp | 4 +- .../module_tests/shape_predictor_test.cpp | 4 +- .../shape_infer/random_uniform_si_test.cpp | 10 +- .../tests/unit/shape_infer/range_si_test.cpp | 10 +- .../unit/test_cases/permute_gpu_test.cpp | 5 +- 16 files changed, 83 insertions(+), 154 deletions(-) diff --git a/src/plugins/intel_gpu/include/intel_gpu/runtime/layout.hpp b/src/plugins/intel_gpu/include/intel_gpu/runtime/layout.hpp index 3dad0cea4e008b..679f4c51ea6881 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/runtime/layout.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/runtime/layout.hpp @@ -13,10 +13,10 @@ #include #include #include -#include -#include -#include +#include "openvino/core/partial_shape.hpp" +#include "openvino/core/type/element_type.hpp" +#include "openvino/core/type/element_type_traits.hpp" #include "intel_gpu/graph/serialization/binary_buffer.hpp" #include "intel_gpu/graph/serialization/vector_serializer.hpp" @@ -28,33 +28,9 @@ namespace cldnn { /// @addtogroup cpp_memory Memory description and management /// @{ -constexpr size_t float_type_mask = 0x80; -constexpr size_t uint_type_mask = 0x40; -constexpr size_t bin_type_mask = 0x20; - /// @brief Possible data types could be stored in memory. using data_types = ov::element::Type_t; -/// Converts @ref data_types to C++ type. -template -struct data_type_to_type; -#ifndef DOXYGEN_SHOULD_SKIP_THIS -template <> -struct data_type_to_type { typedef uint32_t type; }; -template <> -struct data_type_to_type { typedef uint8_t type; }; -template <> -struct data_type_to_type { typedef int8_t type; }; -template <> -struct data_type_to_type { typedef int32_t type; }; -template <> -struct data_type_to_type { typedef int64_t type; }; -template <> -struct data_type_to_type { typedef ov::float16 type; }; -template <> -struct data_type_to_type { typedef float type; }; -#endif - /// Helper class to identify key properties for data_types. 
struct data_type_traits { static size_t size_of(data_types data_type) { @@ -72,52 +48,27 @@ struct data_type_traits { return et.is_quantized() && et.bitwidth() == 8; } - static size_t align_of(data_types data_type) { - switch (data_type) { - case data_types::u1: - return alignof(data_type_to_type::type); - case data_types::i8: - return alignof(data_type_to_type::type); - case data_types::u8: - return alignof(data_type_to_type::type); - case data_types::i32: - return alignof(data_type_to_type::type); - case data_types::i64: - return alignof(data_type_to_type::type); - case data_types::f16: - return alignof(data_type_to_type::type); - case data_types::f32: - return alignof(data_type_to_type::type); - default: - return size_t(1); - } - } - - static std::string name(data_types data_type) { - return ov::element::Type(data_type).get_type_name(); - } + static ov::element::Type max_type(ov::element::Type t1, ov::element::Type t2) { + if (t1 == ov::element::u1) + return t2; - static data_types max_type(data_types dt1, data_types dt2) { - if (dt1 == data_types::u1) - return dt2; + if (t2 == ov::element::u1) + return t1; - if (dt2 == data_types::u1) - return dt1; + if (t1.bitwidth() < t2.bitwidth()) + return t2; - if (size_of(dt1) < size_of(dt2)) - return dt2; + if (t1.bitwidth() > t2.bitwidth()) + return t1; - if (size_of(dt1) > size_of(dt2)) - return dt1; + if (t2.is_real()) + return t2; - if (is_floating_point(dt2)) - return dt2; - - return dt1; + return t1; } - static bool is_quantized(data_types dt) { - return is_i8_u8(dt); + static bool is_quantized(ov::element::Type t) { + return t.is_quantized(); } template @@ -132,7 +83,7 @@ struct data_type_traits { case data_types::i64: return static_cast(std::numeric_limits::max()); case data_types::f16: - return static_cast(65504); + return static_cast(std::numeric_limits::max()); case data_types::f32: return static_cast(std::numeric_limits::max()); default: @@ -152,7 +103,7 @@ struct data_type_traits { case data_types::i64: return static_cast(std::numeric_limits::lowest()); case data_types::f16: - return static_cast(-65504); + return static_cast(std::numeric_limits::lowest()); case data_types::f32: return static_cast(std::numeric_limits::lowest()); default: @@ -170,44 +121,17 @@ inline data_types element_type_to_data_type(ov::element::Type t) { switch (t) { case ov::element::Type_t::i16: case ov::element::Type_t::u16: - case ov::element::Type_t::f32: case ov::element::Type_t::f64: return cldnn::data_types::f32; - case ov::element::Type_t::f16: - return cldnn::data_types::f16; - case ov::element::Type_t::u8: - return cldnn::data_types::u8; - case ov::element::Type_t::i8: - return cldnn::data_types::i8; - case ov::element::Type_t::i32: case ov::element::Type_t::u32: case ov::element::Type_t::u64: return cldnn::data_types::i32; - case ov::element::Type_t::i64: - return cldnn::data_types::i64; case ov::element::Type_t::boolean: return cldnn::data_types::u8; - case ov::element::Type_t::u1: - return cldnn::data_types::u1; - default: - throw std::runtime_error("Can't convert " + t.get_type_name() + " element type"); + default: return t; } } -/// Helper function to get both data_types and format::type in a single, unique value. Useable in 'case' statement. 
-constexpr auto fuse(data_types dt, cldnn::format::type fmt) -> decltype(static_cast::type>(dt) | - static_cast::type>(fmt)) { - using dt_type = std::underlying_type::type; - using fmt_type = std::underlying_type::type; - using fmt_narrow_type = int16_t; - - return static_cast(fmt) <= std::numeric_limits::max() && - static_cast(dt) <= (std::numeric_limits::max() >> (sizeof(fmt_narrow_type) * 8)) - ? (static_cast(dt) << (sizeof(fmt_narrow_type) * 8)) | - (static_cast(fmt) >= 0 ? static_cast(fmt) : static_cast(-1)) - : throw std::invalid_argument("data_type and/or format values are too big to be fused into single value"); -} - /// @brief Represents data padding information. struct padding { /// @brief Filling value for padding area. diff --git a/src/plugins/intel_gpu/include/intel_gpu/runtime/shape_predictor.hpp b/src/plugins/intel_gpu/include/intel_gpu/runtime/shape_predictor.hpp index 2f2e614c29f2c6..51f09989502a13 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/runtime/shape_predictor.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/runtime/shape_predictor.hpp @@ -50,7 +50,7 @@ struct ShapePredictor { /// says if shape is successfully predicted and can be preallocated, and the second element is ov::Shape itself. std::pair predict_preallocation_shape(const std::string& id, const ov::Shape& current_shape, - size_t dt_size, + size_t dt_bitwidth, bool can_reuse_buffer); bool can_preallocate(size_t desired_buffer_size); diff --git a/src/plugins/intel_gpu/src/graph/graph_optimizer/concat_input_order.cpp b/src/plugins/intel_gpu/src/graph/graph_optimizer/concat_input_order.cpp index 1f2016e8d6706e..de6d6c62859bd9 100644 --- a/src/plugins/intel_gpu/src/graph/graph_optimizer/concat_input_order.cpp +++ b/src/plugins/intel_gpu/src/graph/graph_optimizer/concat_input_order.cpp @@ -23,6 +23,8 @@ bool can_shuffle_features(program_node& node, stream& stream) { if (node.is_type()) { auto& conv_node = node.as(); auto& wei_node = conv_node.weights(); + if (ov::element::Type(wei_node.get_output_layout().data_type).bitwidth() < 8) + return false; return conv_node.get_groups() == 1 && conv_node.get_deformable_groups() == 1 && !conv_node.get_transposed() && @@ -32,6 +34,8 @@ bool can_shuffle_features(program_node& node, stream& stream) { if (node.is_type()) { auto& fc_node = node.as(); auto& wei_node = fc_node.weights(); + if (ov::element::Type(wei_node.get_output_layout().data_type).bitwidth() < 8) + return false; return wei_node.is_type() && wei_node.is_constant() && !wei_node.is_output(); } diff --git a/src/plugins/intel_gpu/src/graph/impls/cpu/activation.cpp b/src/plugins/intel_gpu/src/graph/impls/cpu/activation.cpp index 57c0f057455ba7..7f1e7abcb9b580 100644 --- a/src/plugins/intel_gpu/src/graph/impls/cpu/activation.cpp +++ b/src/plugins/intel_gpu/src/graph/impls/cpu/activation.cpp @@ -2,6 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/core/type/element_type_traits.hpp" #include "register.hpp" #include "activation_inst.h" #include "implementation_map.hpp" @@ -108,7 +109,7 @@ struct activation_impl : public typed_primitive_impl { input_host_tensors.push_back(make_tensor(params->input_layouts[i], input_mem_ptrs[i]->lock(stream, mem_lock_type::read))); // Most of the evaluate functions expect same data type for all inputs, so we need to convert params from float - typename data_type_to_type
::type param_a = static_cast::type>(additional_params.a); + auto param_a = static_cast::value_type>(additional_params.a); auto input_dt = instance.get_input_layout().data_type; diff --git a/src/plugins/intel_gpu/src/graph/impls/cpu/detection_output.cpp b/src/plugins/intel_gpu/src/graph/impls/cpu/detection_output.cpp index 9e9cd8b1c93389..f15d143e28539c 100644 --- a/src/plugins/intel_gpu/src/graph/impls/cpu/detection_output.cpp +++ b/src/plugins/intel_gpu/src/graph/impls/cpu/detection_output.cpp @@ -839,11 +839,11 @@ struct detection_output_impl : typed_primitive_impl { std::vector>>> scoreIndexPairs; if (instance.location_memory()->get_layout().data_type == data_types::f32) { - prepare_data::type>(stream, instance, bboxes, confidences, scoreIndexPairs); - generate_detections::type>(stream, instance, num_of_images, bboxes, confidences, scoreIndexPairs); + prepare_data::value_type>(stream, instance, bboxes, confidences, scoreIndexPairs); + generate_detections::value_type>(stream, instance, num_of_images, bboxes, confidences, scoreIndexPairs); } else { - prepare_data::type>(stream, instance, bboxes, confidences, scoreIndexPairs); - generate_detections::type>(stream, instance, num_of_images, bboxes, confidences, scoreIndexPairs); + prepare_data::value_type>(stream, instance, bboxes, confidences, scoreIndexPairs); + generate_detections::value_type>(stream, instance, num_of_images, bboxes, confidences, scoreIndexPairs); } ev->set(); diff --git a/src/plugins/intel_gpu/src/graph/impls/cpu/non_max_suppression.cpp b/src/plugins/intel_gpu/src/graph/impls/cpu/non_max_suppression.cpp index 7afca0cb91c91f..cfb05c176c06ca 100644 --- a/src/plugins/intel_gpu/src/graph/impls/cpu/non_max_suppression.cpp +++ b/src/plugins/intel_gpu/src/graph/impls/cpu/non_max_suppression.cpp @@ -149,9 +149,9 @@ vector2D load_boxes(stream& stream, memory::ptr mem, bool center_p auto data_type = mem->get_layout().data_type; switch (data_type) { case cldnn::data_types::f16: - return load_boxes_impl::type>(stream, mem, center_point); + return load_boxes_impl::value_type>(stream, mem, center_point); case cldnn::data_types::f32: - return load_boxes_impl::type>(stream, mem, center_point); + return load_boxes_impl::value_type>(stream, mem, center_point); default: throw std::runtime_error("Non max suppression - unsupported boxes data type"); } @@ -186,9 +186,9 @@ vector3D load_scores(stream& stream, memory::ptr mem) { auto data_type = mem->get_layout().data_type; switch (data_type) { case cldnn::data_types::f16: - return load_scores_impl::type>(stream, mem); + return load_scores_impl::value_type>(stream, mem); case cldnn::data_types::f32: - return load_scores_impl::type>(stream, mem); + return load_scores_impl::value_type>(stream, mem); default: throw std::runtime_error("Non max suppression - unsupported scores data type"); } @@ -207,11 +207,11 @@ T load_scalar(stream& stream, memory::ptr mem) { auto data_type = mem->get_layout().data_type; switch (data_type) { case cldnn::data_types::i32: - return load_scalar_impl::type>(stream, mem); + return load_scalar_impl::value_type>(stream, mem); case cldnn::data_types::f16: - return load_scalar_impl::type>(stream, mem); + return load_scalar_impl::value_type>(stream, mem); case cldnn::data_types::f32: - return load_scalar_impl::type>(stream, mem); + return load_scalar_impl::value_type>(stream, mem); default: throw std::runtime_error("Non max suppression - unsupported data type"); } @@ -244,13 +244,13 @@ void store_result(stream& stream, memory::ptr mem, const std::vectorget_layout().data_type; 
switch (data_type) { case cldnn::data_types::i32: - store_result_impl::type>(stream, mem, result); + store_result_impl::value_type>(stream, mem, result); break; case cldnn::data_types::f16: - store_result_impl::type>(stream, mem, result); + store_result_impl::value_type>(stream, mem, result); break; case cldnn::data_types::f32: - store_result_impl::type>(stream, mem, result); + store_result_impl::value_type>(stream, mem, result); break; default: throw std::runtime_error("Non max suppression - unsupported output data type"); @@ -261,10 +261,10 @@ void store_first_output(stream& stream, memory::ptr mem, const std::vectorget_layout().data_type; switch (data_type) { case cldnn::data_types::i32: - store_result_impl::type>(stream, mem, result); + store_result_impl::value_type>(stream, mem, result); break; case cldnn::data_types::i64: - store_result_impl::type>(stream, mem, result); + store_result_impl::value_type>(stream, mem, result); break; default: throw std::runtime_error("Non max suppression - unsupported output data type"); @@ -298,10 +298,10 @@ void store_second_output(stream& stream, memory::ptr mem, const std::vectorget_layout().data_type; switch (data_type) { case cldnn::data_types::f16: - store_second_output_impl::type>(stream, mem, result); + store_second_output_impl::value_type>(stream, mem, result); break; case cldnn::data_types::f32: - store_second_output_impl::type>(stream, mem, result); + store_second_output_impl::value_type>(stream, mem, result); break; default: throw std::runtime_error("Non max suppression - unsupported second output data type"); @@ -319,10 +319,10 @@ void store_third_output(stream& stream, memory::ptr mem, const std::vectorget_layout().data_type; switch (data_type) { case cldnn::data_types::i32: - store_third_output_impl::type>(stream, mem, result); + store_third_output_impl::value_type>(stream, mem, result); break; case cldnn::data_types::i64: - store_third_output_impl::type>(stream, mem, result); + store_third_output_impl::value_type>(stream, mem, result); break; default: throw std::runtime_error("Non max suppression - unsupported third output data type"); diff --git a/src/plugins/intel_gpu/src/graph/impls/cpu/proposal.cpp b/src/plugins/intel_gpu/src/graph/impls/cpu/proposal.cpp index 461035c1defd75..2670949f8e9284 100644 --- a/src/plugins/intel_gpu/src/graph/impls/cpu/proposal.cpp +++ b/src/plugins/intel_gpu/src/graph/impls/cpu/proposal.cpp @@ -396,9 +396,9 @@ struct proposal_impl : typed_primitive_impl { auto ev = instance.get_network().get_stream().create_user_event(false); im_info_t im_info; if (instance.dep_memory(proposal_inst::image_info_index).get_layout().data_type == data_types::f16) { - read_image_info::type>(stream, instance, im_info); + read_image_info::value_type>(stream, instance, im_info); } else { - read_image_info::type>(stream, instance, im_info); + read_image_info::value_type>(stream, instance, im_info); } if (instance.dep_memory(proposal_inst::cls_scores_index).get_layout().data_type != @@ -408,26 +408,26 @@ struct proposal_impl : typed_primitive_impl { if (instance.dependencies().size() == 4) { auto proposal_probabilities = instance.dep_memory_ptr(proposal_inst::proposal_probabilities_out); if (instance.dep_memory(proposal_inst::cls_scores_index).get_layout().data_type == data_types::f16) { - mem_lock::type, mem_lock_type::read> proposal_prob_ptr{proposal_probabilities, stream}; - execute::type>(stream, instance, im_info, proposal_prob_ptr.data()); + mem_lock::value_type, mem_lock_type::read> proposal_prob_ptr{proposal_probabilities, 
stream}; + execute::value_type>(stream, instance, im_info, proposal_prob_ptr.data()); } else { - mem_lock::type, mem_lock_type::read> proposal_prob_ptr{proposal_probabilities, stream}; - execute::type>(stream, instance, im_info, proposal_prob_ptr.data()); + mem_lock::value_type, mem_lock_type::read> proposal_prob_ptr{proposal_probabilities, stream}; + execute::value_type>(stream, instance, im_info, proposal_prob_ptr.data()); } } else if (instance.outputs_memory_count() == 2) { auto proposal_probabilities = instance.output_memory_ptr(1); if (instance.dep_memory(proposal_inst::cls_scores_index).get_layout().data_type == data_types::f16) { - mem_lock::type, mem_lock_type::write> proposal_prob_ptr{proposal_probabilities, stream}; - execute::type>(stream, instance, im_info, proposal_prob_ptr.data()); + mem_lock::value_type, mem_lock_type::write> proposal_prob_ptr{proposal_probabilities, stream}; + execute::value_type>(stream, instance, im_info, proposal_prob_ptr.data()); } else { - mem_lock::type, mem_lock_type::write> proposal_prob_ptr{proposal_probabilities, stream}; - execute::type>(stream, instance, im_info, proposal_prob_ptr.data()); + mem_lock::value_type, mem_lock_type::write> proposal_prob_ptr{proposal_probabilities, stream}; + execute::value_type>(stream, instance, im_info, proposal_prob_ptr.data()); } } else { if (instance.dep_memory(proposal_inst::cls_scores_index).get_layout().data_type == data_types::f16) { - execute::type>(stream, instance, im_info); + execute::value_type>(stream, instance, im_info); } else { - execute::type>(stream, instance, im_info); + execute::value_type>(stream, instance, im_info); } } diff --git a/src/plugins/intel_gpu/src/graph/impls/onednn/convolution_onednn.cpp b/src/plugins/intel_gpu/src/graph/impls/onednn/convolution_onednn.cpp index 075929afa765fb..aa11884b2445bc 100644 --- a/src/plugins/intel_gpu/src/graph/impls/onednn/convolution_onednn.cpp +++ b/src/plugins/intel_gpu/src/graph/impls/onednn/convolution_onednn.cpp @@ -121,9 +121,9 @@ struct convolution_onednn : typed_primitive_onednn_impl { } if (a_zp_dtype == data_types::i8) { - set_activation_zero_points_attr::type>(attrs, a_zp.as(), zero_point_mask); + set_activation_zero_points_attr::value_type>(attrs, a_zp.as(), zero_point_mask); } else { // if (a_zp_dtype == data_types::u8) - set_activation_zero_points_attr::type>(attrs, a_zp.as(), zero_point_mask); + set_activation_zero_points_attr::value_type>(attrs, a_zp.as(), zero_point_mask); } } diff --git a/src/plugins/intel_gpu/src/graph/primitive_inst.cpp b/src/plugins/intel_gpu/src/graph/primitive_inst.cpp index 44b1fec8bb0963..6c1e88de349115 100644 --- a/src/plugins/intel_gpu/src/graph/primitive_inst.cpp +++ b/src/plugins/intel_gpu/src/graph/primitive_inst.cpp @@ -425,7 +425,7 @@ event::ptr primitive_inst::realloc_if_needed() { auto current_shape = actual_layout.get_shape(); auto& sp = get_network().get_shape_predictor(); - auto dt_size = data_type_traits::size_of(actual_layout.data_type); + auto dt_size = ov::element::Type(actual_layout.data_type).bitwidth(); auto prealloc_info = sp.predict_preallocation_shape(id(), current_shape, dt_size, can_reuse_buffer); if (prealloc_info.first && sp.can_preallocate(ov::shape_size(prealloc_info.second) * dt_size)) { auto new_layout = actual_layout; diff --git a/src/plugins/intel_gpu/src/graph/prior_box.cpp b/src/plugins/intel_gpu/src/graph/prior_box.cpp index 899f0db6f2ba4a..571a2c6d92c218 100644 --- a/src/plugins/intel_gpu/src/graph/prior_box.cpp +++ b/src/plugins/intel_gpu/src/graph/prior_box.cpp @@ -401,12 
+401,12 @@ void prior_box_node::calc_result() { // perform calculations if (get_output_layout().data_type == data_types::f16) - calculate_prior_box_output::type>(result, + calculate_prior_box_output::value_type>(result, get_program().get_stream(), input().get_output_layout(), *typed_desc()); else - calculate_prior_box_output::type>(result, + calculate_prior_box_output::value_type>(result, get_program().get_stream(), input().get_output_layout(), *typed_desc()); diff --git a/src/plugins/intel_gpu/src/plugin/sync_infer_request.cpp b/src/plugins/intel_gpu/src/plugin/sync_infer_request.cpp index 6e9e8bbf353803..61ac1424c7649e 100644 --- a/src/plugins/intel_gpu/src/plugin/sync_infer_request.cpp +++ b/src/plugins/intel_gpu/src/plugin/sync_infer_request.cpp @@ -167,11 +167,10 @@ bool same_host_mem(cldnn::memory::cptr memory, const uint8_t* host_ptr) { } ov::Shape predict_shape(const std::string& name, const ov::Shape current_shape, ov::element::Type element_type, cldnn::ShapePredictor& shape_predictor) { - auto et_size = cldnn::ceil_div(element_type.bitwidth(), 8); - auto prealloc_info = shape_predictor.predict_preallocation_shape(name, current_shape, et_size, false); + auto prealloc_info = shape_predictor.predict_preallocation_shape(name, current_shape, element_type.bitwidth(), false); const auto& preallocation_shape = prealloc_info.second; auto can_preallocate_buffer = prealloc_info.first && - shape_predictor.can_preallocate(ov::shape_size(preallocation_shape) * et_size); + shape_predictor.can_preallocate(cldnn::ceil_div(ov::shape_size(preallocation_shape) * element_type.bitwidth(), 8)); if (can_preallocate_buffer) { return preallocation_shape; } diff --git a/src/plugins/intel_gpu/src/runtime/shape_predictor.cpp b/src/plugins/intel_gpu/src/runtime/shape_predictor.cpp index 2d398ee89ff1de..1ff00c905bd073 100644 --- a/src/plugins/intel_gpu/src/runtime/shape_predictor.cpp +++ b/src/plugins/intel_gpu/src/runtime/shape_predictor.cpp @@ -58,7 +58,7 @@ bool ShapePredictor::can_preallocate(size_t desired_buffer_size) { std::pair ShapePredictor::predict_preallocation_shape(const std::string& id, const ov::Shape& current_shape, - size_t dt_size, + size_t dt_bitwidth, bool can_reuse_buffer) { add_shape(id, current_shape); @@ -110,7 +110,7 @@ std::pair ShapePredictor::predict_preallocation_shape(const std for (size_t i = 0; i < current_shape.size(); ++i) single_iter_shape.push_back(diffs[0][i] == 0 ? 
current_shape[i] : 1); - if (ov::shape_size(single_iter_shape) * dt_size > _max_per_iter_size) + if (ceil_div(ov::shape_size(single_iter_shape) * dt_bitwidth, 8) > _max_per_iter_size) can_use_iterations_preallocation = false; } diff --git a/src/plugins/intel_gpu/tests/unit/module_tests/shape_predictor_test.cpp b/src/plugins/intel_gpu/tests/unit/module_tests/shape_predictor_test.cpp index e6c78b4fd8513e..5e5caa91cd7d3f 100644 --- a/src/plugins/intel_gpu/tests/unit/module_tests/shape_predictor_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/module_tests/shape_predictor_test.cpp @@ -25,10 +25,10 @@ TEST_P(shape_predictor_tests, prediction) { ShapePredictor sp(&engine, p.buffers_preallocation_ratio); std::pair result; - const auto dt_size = 4; + const auto dt_bitwidth = ov::element::f32.bitwidth(); for (auto& shape : in_shapes) - result = sp.predict_preallocation_shape("dummy_name", shape, dt_size, p.can_reuse_buffer); + result = sp.predict_preallocation_shape("dummy_name", shape, dt_bitwidth, p.can_reuse_buffer); ASSERT_TRUE(result.first == !expected_predicted_shape.empty()); ASSERT_EQ(result.second, expected_predicted_shape); diff --git a/src/plugins/intel_gpu/tests/unit/shape_infer/random_uniform_si_test.cpp b/src/plugins/intel_gpu/tests/unit/shape_infer/random_uniform_si_test.cpp index 6597351a0c728c..f41cafc9e2d4f9 100644 --- a/src/plugins/intel_gpu/tests/unit/shape_infer/random_uniform_si_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/shape_infer/random_uniform_si_test.cpp @@ -67,19 +67,19 @@ TEST_P(random_uniform_si_test, shape_infer) { set_values(allocated_mem, {ov::float16(val).to_bits()}); break; case data_types::f32: - set_values(allocated_mem, {static_cast::type>(val)}); + set_values(allocated_mem, {static_cast::value_type>(val)}); break; case data_types::i32: - set_values(allocated_mem, {static_cast::type>(val)}); + set_values(allocated_mem, {static_cast::value_type>(val)}); break; case data_types::i64: - set_values(allocated_mem, {static_cast::type>(val)}); + set_values(allocated_mem, {static_cast::value_type>(val)}); break; case data_types::i8: - set_values(allocated_mem, {static_cast::type>(val)}); + set_values(allocated_mem, {static_cast::value_type>(val)}); break; case data_types::u8: - set_values(allocated_mem, {static_cast::type>(val)}); + set_values(allocated_mem, {static_cast::value_type>(val)}); break; default: break; diff --git a/src/plugins/intel_gpu/tests/unit/shape_infer/range_si_test.cpp b/src/plugins/intel_gpu/tests/unit/shape_infer/range_si_test.cpp index 2430d628aa2c42..b079017d5c12e0 100644 --- a/src/plugins/intel_gpu/tests/unit/shape_infer/range_si_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/shape_infer/range_si_test.cpp @@ -66,19 +66,19 @@ TEST_P(range_si_test, shape_infer) { set_values(prim_mem, {ov::float16(p.vals[idx]).to_bits()}); break; case data_types::f32: - set_values(prim_mem, {static_cast::type>(p.vals[idx])}); + set_values(prim_mem, {static_cast::value_type>(p.vals[idx])}); break; case data_types::i32: - set_values(prim_mem, {static_cast::type>(p.vals[idx])}); + set_values(prim_mem, {static_cast::value_type>(p.vals[idx])}); break; case data_types::i64: - set_values(prim_mem, {static_cast::type>(p.vals[idx])}); + set_values(prim_mem, {static_cast::value_type>(p.vals[idx])}); break; case data_types::i8: - set_values(prim_mem, {static_cast::type>(p.vals[idx])}); + set_values(prim_mem, {static_cast::value_type>(p.vals[idx])}); break; case data_types::u8: - set_values(prim_mem, {static_cast::type>(p.vals[idx])}); + set_values(prim_mem, 
{static_cast::value_type>(p.vals[idx])}); break; default: break; diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/permute_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/permute_gpu_test.cpp index 7be1609c64a9c9..ac22cc773f885a 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/permute_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/permute_gpu_test.cpp @@ -2,6 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/core/type/element_type_traits.hpp" #include "test_utils.h" #include "random_generator.hpp" @@ -1924,7 +1925,7 @@ void TiledPermuteTest::run_test(const std::vector& si const std::string & permute_opt, std::vector permute_order, bool is_caching_test) { // convert ov::float16 to ov::float16 - using type_ = typename data_type_to_type::type; + using type_ = typename ov::element_type_traits::value_type; using type = typename std::conditional::value, ov::float16, type_>::type; std::vector internal_sizes(sizes); @@ -2318,7 +2319,7 @@ struct TiledPerformancePermuteTest : TiledPermuteTest { auto& engine = get_test_engine(); // convert ov::float16 to ov::float16 - using type_ = typename data_type_to_type::type; + using type_ = typename ov::element_type_traits::value_type; using type = typename std::conditional::value, ov::float16, type_>::type; std::vector internal_sizes(sizes); From c24d1b4abcc565eef1f87193c364edbfcfcfe047 Mon Sep 17 00:00:00 2001 From: Irina Efode Date: Mon, 16 Oct 2023 15:19:07 +0400 Subject: [PATCH 208/257] [CONFORMANCE][CPU] Add Expected failures for CPU Opset13 OP (#20490) * [CONFORMANCE][CPU] Add Expected failures for CPU Opset13 OP * Update expected_failures_OP.csv * Update expected_failures_OP.csv --- .../skip_configs/CPU/expected_failures_OP.csv | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/tests/test_utils/functional_test_utils/layer_tests_summary/skip_configs/CPU/expected_failures_OP.csv b/src/tests/test_utils/functional_test_utils/layer_tests_summary/skip_configs/CPU/expected_failures_OP.csv index 84d3e26eb35fc2..07f091dd7a222b 100644 --- a/src/tests/test_utils/functional_test_utils/layer_tests_summary/skip_configs/CPU/expected_failures_OP.csv +++ b/src/tests/test_utils/functional_test_utils/layer_tests_summary/skip_configs/CPU/expected_failures_OP.csv @@ -1130,3 +1130,10 @@ conformance_PRelu/ReadIRTest.ImportExport/Op=PRelu.1_Type=f32_IR=20e7e74f55eb5fb conformance_RegionYolo/ReadIRTest.ImportExport/Op=RegionYolo.1_Type=f32_IR=RegionYolo-1_750_Device=CPU_Shape=static_Config=(),5.06332e-06 conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=i32_IR=28f23780d4ca0d40671caf79d5cd9223ad8f6dc2fa5ade2521f3d99586eeeb7f_Device=CPU_Shape=static_Config=(),9.72615e-07 conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_IR=c301804445f273eef62f41f02204711d9d6e571da28c76ab447d7d90983b0032_Device=CPU_Shape=dynamic_Config=(),0.000113281 +conformance/OpImplCheckTest.checkPluginImplementation/Function=BitwiseAnd_opset13_Device=CPU_Config=(),1 +conformance/OpImplCheckTest.checkPluginImplementation/Function=BitwiseOr_opset13_Device=CPU_Config=(),1 +conformance/OpImplCheckTest.checkPluginImplementation/Function=BitwiseNot_opset13_Device=CPU_Config=(),1 +conformance/OpImplCheckTest.checkPluginImplementation/Function=Multinomial_opset13_Device=CPU_Config=(),1 +conformance/OpImplCheckTest.checkPluginImplementation/Function=NMSRotated_opset13_Device=CPU_Config=(),1 +conformance/OpImplCheckTest.checkPluginImplementation/Function=LSTMSequence_opset1_Device=CPU_Config=(),1 
+conformance/OpImplCheckTest.checkPluginImplementation/Function=BitwiseXor_opset13_Device=CPU_Config=(),1 From a5b5623ece0d492b4d65a4213402f77904f13f9a Mon Sep 17 00:00:00 2001 From: Siddhant Chauhan Date: Mon, 16 Oct 2023 17:22:30 +0530 Subject: [PATCH 209/257] [TF FE][TF Hub] Support Xlogy operation (#20467) * [TF FE][TF Hub] Support Xlogy operation * fix * fix * fix * fix * Update tests/layer_tests/tensorflow_tests/test_tf_Xlogy.py * Update tests/layer_tests/tensorflow_tests/test_tf_Xlogy.py --------- Co-authored-by: Roman Kazantsev --- src/frontends/tensorflow/src/op_table.cpp | 1 + .../include/common_op_table.hpp | 1 + .../tensorflow_common/src/op/xlogy.cpp | 43 ++++++++++++++++ .../tensorflow_tests/test_tf_Xlogy.py | 49 +++++++++++++++++++ 4 files changed, 94 insertions(+) create mode 100644 src/frontends/tensorflow_common/src/op/xlogy.cpp create mode 100644 tests/layer_tests/tensorflow_tests/test_tf_Xlogy.py diff --git a/src/frontends/tensorflow/src/op_table.cpp b/src/frontends/tensorflow/src/op_table.cpp index fce3af3f0a235b..7c093d7301a640 100644 --- a/src/frontends/tensorflow/src/op_table.cpp +++ b/src/frontends/tensorflow/src/op_table.cpp @@ -286,6 +286,7 @@ const std::map get_supported_ops() { {"While", CreatorFunction(translate_while_op)}, {"Where", CreatorFunction(translate_where_op)}, {"Xdivy", CreatorFunction(translate_x_div_y_op)}, + {"Xlogy", CreatorFunction(translate_xlogy_op)}, {"ZerosLike", CreatorFunction(translate_zeros_like_op)}, // Translators for SavedModel and MetaGraph diff --git a/src/frontends/tensorflow_common/include/common_op_table.hpp b/src/frontends/tensorflow_common/include/common_op_table.hpp index 17a865acfb0e99..9c1f995f25f1e8 100644 --- a/src/frontends/tensorflow_common/include/common_op_table.hpp +++ b/src/frontends/tensorflow_common/include/common_op_table.hpp @@ -149,6 +149,7 @@ OP_CONVERTER(translate_unravel_index_op); OP_CONVERTER(translate_unsorted_segment_sum_op); OP_CONVERTER(translate_where_op); OP_CONVERTER(translate_x_div_y_op); +OP_CONVERTER(translate_xlogy_op); OP_CONVERTER(translate_zeros_like_op); // Translators for internal operations diff --git a/src/frontends/tensorflow_common/src/op/xlogy.cpp b/src/frontends/tensorflow_common/src/op/xlogy.cpp new file mode 100644 index 00000000000000..8b4f9da063a27e --- /dev/null +++ b/src/frontends/tensorflow_common/src/op/xlogy.cpp @@ -0,0 +1,43 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "common_op_table.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/convert_like.hpp" +#include "openvino/op/equal.hpp" +#include "openvino/op/log.hpp" +#include "openvino/op/multiply.hpp" +#include "openvino/op/select.hpp" + +using namespace std; +using namespace ov::opset10; + +namespace ov { +namespace frontend { +namespace tensorflow { +namespace op { +OutputVector translate_xlogy_op(const NodeContext& node) { + default_op_checks(node, 2, {"Xlogy"}); + auto x = node.get_input(0); + auto y = node.get_input(1); + + // prepare auxiliary zero constant of the same type as the input + auto zero = create_same_type_const_scalar<int32_t>(x, 0); + + // compute a mask to identify where x is equal to 0 + auto is_zero = make_shared<Equal>(x, zero); + + // compute x * log(y) elementwise + auto xlog_y = make_shared<Multiply>(x, make_shared<Log>(y)); + + // create the output tensor using Select to handle the x == 0 condition + auto result = make_shared<Select>(is_zero, zero, xlog_y); + + set_node_name(node.get_name(), result); + return result->outputs(); +} +} // namespace op +} // 
namespace tensorflow +} // namespace frontend +} // namespace ov diff --git a/tests/layer_tests/tensorflow_tests/test_tf_Xlogy.py b/tests/layer_tests/tensorflow_tests/test_tf_Xlogy.py new file mode 100644 index 00000000000000..7c80fbdad88b09 --- /dev/null +++ b/tests/layer_tests/tensorflow_tests/test_tf_Xlogy.py @@ -0,0 +1,49 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import pytest +import tensorflow as tf +from common.tf_layer_test_class import CommonTFLayerTest + + +class TestXlogy(CommonTFLayerTest): + def _prepare_input(self, inputs_info): + assert 'x' in inputs_info + assert 'y' in inputs_info + x_shape = inputs_info['x'] + y_shape = inputs_info['y'] + inputs_data = {} + # x = [-3, 3] y = [1, 2] + # generate x in way to have zeros + inputs_data['x'] = (6 * np.random.random(size=x_shape).astype(np.float32) - 3) * \ + np.random.randint(2, size=x_shape).astype(np.float32) + inputs_data['y'] = np.random.random(size=y_shape).astype(np.float32) + 1 + return inputs_data + + def create_xlogy_net(self, input_shape, input_type): + self.input_type = input_type + tf.compat.v1.reset_default_graph() + # Create the graph and model + with tf.compat.v1.Session() as sess: + x = tf.compat.v1.placeholder(input_type, input_shape, 'x') + y = tf.compat.v1.placeholder(input_type, input_shape, 'y') + tf.raw_ops.Xlogy(x=x, y=y) + tf.compat.v1.global_variables_initializer() + tf_net = sess.graph_def + + return tf_net, None + + test_data_basic = [ + dict(input_shape=[10, 20], input_type=np.float32), + dict(input_shape=[2, 3, 4], input_type=np.float32), + ] + + @pytest.mark.parametrize("params", test_data_basic) + @pytest.mark.precommit_tf_fe + @pytest.mark.nightly + def test_xlogy_basic(self, params, ie_device, precision, ir_version, temp_dir, + use_new_frontend, use_old_api): + self._test(*self.create_xlogy_net(**params), + ie_device, precision, ir_version, temp_dir=temp_dir, + use_new_frontend=use_new_frontend, use_old_api=use_old_api) From 3ced4a23e7eabab1ce1b507fb70ed7fb18dddd9f Mon Sep 17 00:00:00 2001 From: Ilya Churaev Date: Tue, 17 Oct 2023 11:36:48 +0400 Subject: [PATCH 224/257] Remove TREAT_WARNING option from the doc (#20507) --- docs/dev/cmake_options_for_custom_compilation.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/docs/dev/cmake_options_for_custom_compilation.md b/docs/dev/cmake_options_for_custom_compilation.md index 847997e067d5c3..f8cfa3255ead49 100644 --- a/docs/dev/cmake_options_for_custom_compilation.md +++ b/docs/dev/cmake_options_for_custom_compilation.md @@ -157,8 +157,6 @@ In this case OpenVINO CMake scripts take `TBBROOT` environment variable into acc * `ENABLE_CLANG_FORMAT` enables [Clang format] code style check: * `ON` is default. * Used only for ngraph component. -* `TREAT_WARNING_AS_ERROR` treats all warnings as an error: - * `OFF` is default. * `ENABLE_FASTER_BUILD` enables [precompiled headers] and [unity build] using CMake: * `OFF` is default. 
* `ENABLE_INTEGRITYCHECK` builds DLLs with [/INTEGRITYCHECK] flag: From 07831c9dce7418c7d28146cf7152bb8273dfeba1 Mon Sep 17 00:00:00 2001 From: Maciej Smyk Date: Tue, 17 Oct 2023 09:59:14 +0200 Subject: [PATCH 225/257] [DOCS] Supported formats update for Benchmark C++ Tool for master (#20449) * Update cpp_benchmark_tool.md * Update cpp_benchmark_tool.md * Update cpp_benchmark_tool.md --- .../learn_openvino/openvino_samples/cpp_benchmark_tool.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/articles_en/learn_openvino/openvino_samples/cpp_benchmark_tool.md b/docs/articles_en/learn_openvino/openvino_samples/cpp_benchmark_tool.md index 168d3b85ba898a..9a056ffc79a4cd 100644 --- a/docs/articles_en/learn_openvino/openvino_samples/cpp_benchmark_tool.md +++ b/docs/articles_en/learn_openvino/openvino_samples/cpp_benchmark_tool.md @@ -24,7 +24,7 @@ To use the C++ benchmark_app, you must first build it following the :doc:`Build If you installed OpenVINO Runtime using PyPI or Anaconda Cloud, only the :doc:`Benchmark Python Tool ` is available, and you should follow the usage instructions on that page instead. -The benchmarking application works with models in the OpenVINO IR (``model.xml`` and ``model.bin``) and ONNX (``model.onnx``) formats. Make sure to :doc:`convert your models ` if necessary. +The benchmarking application works with models in the OpenVINO IR, TensorFlow, TensorFlow Lite, PaddlePaddle, PyTorch and ONNX formats. If you need it, OpenVINO also allows you to :doc:`convert your models `. To run benchmarking with default options on a model, use the following command: From 05297a5572e4e6fb35c63117fbcc34a43db2b6be Mon Sep 17 00:00:00 2001 From: Georgy Krivoruchko Date: Tue, 17 Oct 2023 01:38:24 -0700 Subject: [PATCH 226/257] Added coverage of "0" in bounds to ReduceProd (#18583) * Check for ReduceProd + SoftMax fix * Check for ReduceProd + SoftMax fix * Fix after moving on get_constant_max_of_type * Extended tests and added coverage for other types * Code optimization --- src/core/src/bound_evaluate.cpp | 48 +++++++++++++++++++++++- src/core/src/bound_evaluate.hpp | 8 +++- src/core/src/op/reduce_prod.cpp | 27 +++++++++++-- src/core/tests/type_prop/reduce_prod.cpp | 36 ++++++++++++++++++ 4 files changed, 113 insertions(+), 6 deletions(-) diff --git a/src/core/src/bound_evaluate.cpp b/src/core/src/bound_evaluate.cpp index cf3dc5bf21e3da..e3b784b521c2af 100644 --- a/src/core/src/bound_evaluate.cpp +++ b/src/core/src/bound_evaluate.cpp @@ -479,13 +479,13 @@ bool ov::interval_bound_evaluator(const Node* node, return fully_defined; } -bool ov::tensor_is_positive(const Tensor& bound) { +bool ov::tensor_is_non_negative(const Tensor& bound) { const auto bound_constant = std::make_shared(bound.get_element_type(), bound.get_shape(), bound.data()); const auto zero_constant = op::v0::Constant::create(bound.get_element_type(), {1}, {0}); OutputVector greater(1); - bool folded = std::make_shared(bound_constant, zero_constant) + bool folded = std::make_shared(bound_constant, zero_constant) ->constant_fold(greater, {bound_constant, zero_constant}); OPENVINO_ASSERT(folded); @@ -500,6 +500,50 @@ bool ov::tensor_is_positive(const Tensor& bound) { return std::dynamic_pointer_cast(all[0].get_node_shared_ptr())->cast_vector()[0]; } +bool ov::tensor_has_max_value(const Tensor& bound) { + const auto bound_constant = + std::make_shared(bound.get_element_type(), bound.get_shape(), bound.data()); + OPENVINO_SUPPRESS_DEPRECATED_START + auto max_constant = 
ngraph::get_constant_max_of_type(bound.get_element_type()); + OPENVINO_SUPPRESS_DEPRECATED_END + OutputVector equal(1); + + bool folded = std::make_shared(bound_constant, max_constant) + ->constant_fold(equal, {bound_constant, max_constant}); + OPENVINO_ASSERT(folded); + + auto axes_vector = std::vector(equal[0].get_shape().size()); + std::iota(axes_vector.begin(), axes_vector.end(), 0); + const auto axes = op::v0::Constant::create(element::i64, {axes_vector.size()}, axes_vector); + + OutputVector all(1); + folded = std::make_shared(equal[0], axes)->constant_fold(all, {equal[0], axes}); + OPENVINO_ASSERT(folded && ov::is_type(all[0].get_node_shared_ptr())); + OPENVINO_ASSERT(all[0].get_shape() == Shape{}); + return std::dynamic_pointer_cast(all[0].get_node_shared_ptr())->cast_vector()[0]; +} + +bool ov::tensor_has_zero_value(const Tensor& bound) { + const auto bound_constant = + std::make_shared(bound.get_element_type(), bound.get_shape(), bound.data()); + const auto zero_constant = op::v0::Constant::create(bound.get_element_type(), {1}, {0}); + OutputVector equal(1); + + bool folded = std::make_shared(bound_constant, zero_constant) + ->constant_fold(equal, {bound_constant, zero_constant}); + OPENVINO_ASSERT(folded); + + auto axes_vector = std::vector(equal[0].get_shape().size()); + std::iota(axes_vector.begin(), axes_vector.end(), 0); + const auto axes = op::v0::Constant::create(element::i64, {axes_vector.size()}, axes_vector); + + OutputVector all(1); + folded = std::make_shared(equal[0], axes)->constant_fold(all, {equal[0], axes}); + OPENVINO_ASSERT(folded && ov::is_type(all[0].get_node_shared_ptr())); + OPENVINO_ASSERT(all[0].get_shape() == Shape{}); + return std::dynamic_pointer_cast(all[0].get_node_shared_ptr())->cast_vector()[0]; +} + bool ov::has_and_set_equal_bounds(const Output& source) { if (op::util::is_constant(source.get_node_shared_ptr())) return true; diff --git a/src/core/src/bound_evaluate.hpp b/src/core/src/bound_evaluate.hpp index 297f69d661f131..364a1eef3aa180 100644 --- a/src/core/src/bound_evaluate.hpp +++ b/src/core/src/bound_evaluate.hpp @@ -10,7 +10,13 @@ namespace ov { // bool could_propagate(const Output& output, std::vector& order); /// \brief Checks if all the elements of the bound Tensor are positive -bool tensor_is_positive(const Tensor& bound); +bool tensor_is_non_negative(const Tensor& bound); + +/// \brief Checks if any element of the bound Tensor has max possible value +bool tensor_has_max_value(const Tensor& bound); + +/// \brief Checks if any element of the bound Tensor has zero value +bool tensor_has_zero_value(const Tensor& bound); /// \brief Estimates upper bound for node output tensors using only upper bounds of the nodes /// inputs. 
diff --git a/src/core/src/op/reduce_prod.cpp b/src/core/src/op/reduce_prod.cpp index 9d345ae63cf301..dbd89a8ae09192 100644 --- a/src/core/src/op/reduce_prod.cpp +++ b/src/core/src/op/reduce_prod.cpp @@ -7,6 +7,7 @@ #include "bound_evaluate.hpp" #include "element_visitor.hpp" #include "itt.hpp" +#include "ngraph/validation_util.hpp" #include "openvino/core/shape_util.hpp" #include "openvino/op/util/axes_util.hpp" #include "openvino/reference/reduce_prod.hpp" @@ -19,7 +20,7 @@ bool has_positive_bounds_on_data(const Node* const op) { const auto& lb = op->get_input_tensor(0).get_lower_value(); const auto& ub = op->get_input_tensor(0).get_upper_value(); - return lb && ub && tensor_is_positive(lb) && tensor_is_positive(ub); + return lb && ub && tensor_is_non_negative(lb) && tensor_is_non_negative(ub); } } // namespace @@ -83,9 +84,29 @@ bool ReduceProd::evaluate_lower(ov::TensorVector& output_values) const { } bool ReduceProd::evaluate_upper(ov::TensorVector& output_values) const { - return reduce_prod::has_positive_bounds_on_data(this) && get_input_tensor(1).has_and_set_bound() && - default_upper_bound_evaluator(this, output_values); + if (!reduce_prod::has_positive_bounds_on_data(this) || !get_input_tensor(1).has_and_set_bound()) + return false; + // We need to cover a corner case: if an Upper Bound comes from ShapeOf and contains + // dynamic dimension (-1) - it has a value 0x7FFFFFFFFFFFFFFF, which points on + // a maximum possible value. For example, Upper Bound of shape [-1, 12] is + // [0x7FFFFFFFFFFFFFFF, 12]. + // In such case we shouldn't evaluate a real ReduceProd because it'll cause an + // overflow and returns wrong value. We should return an Upper Bound as for [-1], + // which will be evaluated as [0x7FFFFFFFFFFFFFFF] + // In case dimensions has a zero dimension - it should return 0 in any case + if (tensor_has_max_value(get_input_tensor(0).get_upper_value()) && + !tensor_has_zero_value(get_input_tensor(0).get_upper_value())) { + OPENVINO_SUPPRESS_DEPRECATED_START + auto max_constant = ngraph::get_constant_max_of_type(get_output_element_type(0)); + OPENVINO_SUPPRESS_DEPRECATED_END + OPENVINO_ASSERT(max_constant->get_byte_size() <= output_values[0].get_byte_size()); + memcpy(output_values[0].data(), max_constant->get_data_ptr(), max_constant->get_byte_size()); + return true; + } + + return default_upper_bound_evaluator(this, output_values); } + } // namespace v1 } // namespace op } // namespace ov diff --git a/src/core/tests/type_prop/reduce_prod.cpp b/src/core/tests/type_prop/reduce_prod.cpp index 20cc6699a6b61b..4def461f847a69 100644 --- a/src/core/tests/type_prop/reduce_prod.cpp +++ b/src/core/tests/type_prop/reduce_prod.cpp @@ -11,6 +11,9 @@ using Type = ::testing::Types; INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_reduce_prod, ReduceTest, Type); INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_reduce_prod_et, ReduceArithmeticTest, Type); +INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_reduce_prod_dynamic, ReduceTest, Type); +INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_reduce_prod_dynamic_zero, ReduceTest, Type); +INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_reduce_prod_scalar, ReduceTest, Type); TEST(type_prop, reduce_prod_value_propagation) { const auto param = std::make_shared(element::f32, PartialShape{{1, 8}, {2, 3}, 6}); @@ -22,3 +25,36 @@ TEST(type_prop, reduce_prod_value_propagation) { EXPECT_EQ(reshape->get_element_type(), ov::element::f32); EXPECT_EQ(reshape->get_output_partial_shape(0), (PartialShape{{12, 144}})); } + +TEST(type_prop, reduce_prod_value_propagation_dynamic) { + const auto 
param = std::make_shared(element::f32, PartialShape{-1, 12, 32, 32}); + const auto shape_of = std::make_shared(param); + const auto reduce_prod = + std::make_shared(shape_of, ov::op::v0::Constant::create(element::i64, {1}, {0}), true); + const auto reshape = std::make_shared(param, reduce_prod, false); + + EXPECT_EQ(reshape->get_element_type(), ov::element::f32); + EXPECT_EQ(reshape->get_output_partial_shape(0), (PartialShape{-1})); +} + +TEST(type_prop, reduce_prod_value_propagation_dynamic_zero) { + const auto param = std::make_shared(element::f32, PartialShape{-1, 12, 0, -1}); + const auto shape_of = std::make_shared(param); + const auto reduce_prod = + std::make_shared(shape_of, ov::op::v0::Constant::create(element::i64, {1}, {0}), true); + const auto reshape = std::make_shared(param, reduce_prod, false); + + EXPECT_EQ(reshape->get_element_type(), ov::element::f32); + EXPECT_EQ(reshape->get_output_partial_shape(0), (PartialShape{0})); +} + +TEST(type_prop, reduce_prod_value_propagation_scalar) { + const auto param = std::make_shared(element::f32, PartialShape{0}); + const auto shape_of = std::make_shared(param); + const auto reduce_prod = + std::make_shared(shape_of, ov::op::v0::Constant::create(element::i64, {1}, {0}), true); + const auto reshape = std::make_shared(param, reduce_prod, false); + + EXPECT_EQ(reshape->get_element_type(), ov::element::f32); + EXPECT_EQ(reshape->get_output_partial_shape(0), (PartialShape{0})); +} From 93a70497bd3b8ee60b65992bb219097b119e8361 Mon Sep 17 00:00:00 2001 From: Vladislav Golubev Date: Tue, 17 Oct 2023 10:47:15 +0200 Subject: [PATCH 227/257] [CPU] fp16 weights decompression works with conversion to f32 (#20427) --- .../transformation_pipeline.cpp | 8 ++- .../src/matmul_weights_decompression.cpp | 53 ++++++++++++++----- 2 files changed, 47 insertions(+), 14 deletions(-) diff --git a/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp b/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp index 002123781b84df..f87bfb4f1b055f 100644 --- a/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp +++ b/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp @@ -225,12 +225,18 @@ void Transformations::PreLpt(const std::vector& defaultPrecis return false; } // TODO: Uncomment when group decompression is supported - // else if (ov::is_type(consumer)) { + // if (ov::is_type(consumer)) { // consumer = get_single_consumer(consumer); // if (consumer != nullptr && ov::is_type(consumer)) { // return false; // } // } + if (ov::is_type(consumer)) { + consumer = get_single_consumer(consumer); + if (consumer != nullptr && ov::is_type(consumer)) { + return false; + } + } return true; }, ov::pass::MarkDequantizationSubgraph); } diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/matmul_weights_decompression.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/matmul_weights_decompression.cpp index e29ed1f4bfce94..b107b406cd833a 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/matmul_weights_decompression.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/matmul_weights_decompression.cpp @@ -14,18 +14,26 @@ using namespace ov::test; namespace SubgraphTestsDefinitions { /* - * Subtract_const(U8/NF4) + * WP - weights precision + * DP - decompression precision + * IP - input precision + * Opt - optional + * Subtract_const(WP) * / - * Weights(U8/NF4) Convert(F32) + * Weights(WP) Convert(DP) * | / - * Convert(F32) Reshape - * \ / 
Multiply_const(F32) - * Subtract(opt) / - * \ Reshape - * \ / - * Multiply + * Convert(DP) Reshape (Opt) + * \ / Multiply_const(DP) + * Subtract(Opt) / + * \ Reshape (Opt) + * \ / + * Multiply * | - * Data(F32) Transpose(opt) + * Reshape (in case of group decompression) + * | + * Convert (if IP != DP) + * | + * Data(IP) Transpose(Opt) * \ / * Matmul * | @@ -46,6 +54,7 @@ struct ShapeParams { }; using MatmulWeightsDecompressionParams = std::tuple obj) { ShapeParams shape_params; ov::test::ElementType weights_precision; + ov::test::ElementType decompression_precision; bool transpose; bool decompression_sub; bool reshape_on_decompression; @@ -69,6 +79,7 @@ class MatmulWeightsDecompression : public testing::WithParamInterface(weights_precision, transformed_weights_shape, {}, true); weights->set_friendly_name("Compressed_weights"); - auto weights_convert = std::make_shared(weights, data_precision); + auto weights_convert = std::make_shared(weights, decompression_precision); std::shared_ptr mul_parent = weights_convert; auto output_channels = *weights_shape.rbegin(); @@ -152,7 +165,7 @@ class MatmulWeightsDecompression : public testing::WithParamInterface(weights_precision, scaleshift_const_shape, {}, true); - std::shared_ptr shift_convert = std::make_shared(shift_const, data_precision); + std::shared_ptr shift_convert = std::make_shared(shift_const, decompression_precision); if (reshape_on_decompression_constant) { auto shift_reshape_const = ov::opset10::Constant::create(ov::element::i32, {scaleshift_target_shape.size()}, scaleshift_target_shape); auto shift_reshape = std::make_shared(shift_convert, shift_reshape_const, false); @@ -161,7 +174,7 @@ class MatmulWeightsDecompression : public testing::WithParamInterface(weights_convert, shift_convert); } - std::shared_ptr scale_const = ngraph::builder::makeConstant(data_precision, scaleshift_const_shape, {}, true); + std::shared_ptr scale_const = ngraph::builder::makeConstant(decompression_precision, scaleshift_const_shape, {}, true); if (reshape_on_decompression_constant) { auto scale_reshape_const = ov::opset10::Constant::create(ov::element::i32, {scaleshift_target_shape.size()}, scaleshift_target_shape); auto scale_reshape = std::make_shared(scale_const, scale_reshape_const, false); @@ -175,6 +188,9 @@ class MatmulWeightsDecompression : public testing::WithParamInterface(last_node, target_shape_node, false); } + if (decompression_precision != data_precision) { + last_node = std::make_shared(last_node, data_precision); + } if (transpose_weights) { const size_t rank = last_node->get_output_partial_shape(0).size(); std::vector order(rank); @@ -191,6 +207,7 @@ class MatmulWeightsDecompression : public testing::WithParamInterface(test_param); + const bool should_fuse = std::get<8>(test_param); const size_t expected_count = should_fuse ? 
0 : 1; CheckNumberOfNodesWithType(compiledModel, "Convert", expected_count); CheckNumberOfNodesWithType(compiledModel, "Eltwise", expected_count); @@ -304,6 +325,7 @@ bool shouldUseDecompressionKernelBasic() { } const std::vector weights_precisions = {ov::element::u8, ov::element::nf4}; +const std::vector decompression_precisions = {ov::element::f32}; const std::vector input_shapes_basic = { {{{-1, -1, -1}, {{1, 4, 16}, {10, 16, 16}}}, {16, 32}}, {{{}, {{1, 4, 16}}}, {16, 32}, 2ul}, @@ -331,6 +353,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_MatMulCompressedWeights_basic, MatmulWeightsDecompression, ::testing::Combine(::testing::ValuesIn(input_shapes_basic), ::testing::ValuesIn(weights_precisions), + ::testing::ValuesIn(decompression_precisions), ::testing::Values(true), ::testing::Values(true), ::testing::Values(true), @@ -343,6 +366,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_MatMulCompressedWeights_big, MatmulWeightsDecompression, ::testing::Combine(::testing::ValuesIn(input_shapes_big), ::testing::ValuesIn(weights_precisions), + ::testing::ValuesIn(decompression_precisions), ::testing::Values(true), ::testing::Values(true), ::testing::Values(true), @@ -364,11 +388,13 @@ const std::vector input_shapes_corner_cases_big = { const std::vector transpose_weights = {true, false}; const std::vector add_decompression_sub = {true, false}; const std::vector reshape_on_decompression = {true, false}; +const std::vector decompression_precisions_corner_cases = {ov::element::f16, ov::element::f32}; INSTANTIATE_TEST_SUITE_P(smoke_MatMulCompressedWeights_corner_cases_basic, MatmulWeightsDecompression, ::testing::Combine(::testing::ValuesIn(input_shapes_corner_cases_basic), ::testing::ValuesIn(weights_precisions), + ::testing::ValuesIn(decompression_precisions_corner_cases), ::testing::ValuesIn(transpose_weights), ::testing::ValuesIn(add_decompression_sub), ::testing::ValuesIn(reshape_on_decompression), @@ -381,6 +407,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_MatMulCompressedWeights_corner_cases_big, MatmulWeightsDecompression, ::testing::Combine(::testing::ValuesIn(input_shapes_corner_cases_big), ::testing::ValuesIn(weights_precisions), + ::testing::ValuesIn(decompression_precisions_corner_cases), ::testing::ValuesIn(transpose_weights), ::testing::ValuesIn(add_decompression_sub), ::testing::ValuesIn(reshape_on_decompression), From e1a29ae96e7af4b5b50cabd9d3a7329db2e2b5af Mon Sep 17 00:00:00 2001 From: Tomasz Jankowski Date: Tue, 17 Oct 2023 11:30:18 +0200 Subject: [PATCH 228/257] [core] Migrate MaxPool operator to new API (#20424) * Use API 2.0 in MaxPool v1 * Use API 2.0 in MaxPool v8 * Fix type selection * Fix type selection * Avoid redundant local copies --- src/core/include/openvino/op/max_pool.hpp | 11 +- src/core/src/op/max_pool.cpp | 443 ++++++++++------------ 2 files changed, 201 insertions(+), 253 deletions(-) diff --git a/src/core/include/openvino/op/max_pool.hpp b/src/core/include/openvino/op/max_pool.hpp index c1741eef6cb717..534f8b1d067397 100644 --- a/src/core/include/openvino/op/max_pool.hpp +++ b/src/core/include/openvino/op/max_pool.hpp @@ -43,13 +43,8 @@ class OPENVINO_API MaxPool : public op::util::MaxPoolBase { std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - OPENVINO_SUPPRESS_DEPRECATED_START - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - OPENVINO_SUPPRESS_DEPRECATED_END + bool evaluate(TensorVector& outputs, const TensorVector& inputs) const override; bool has_evaluate() const override; - -private: - bool 
evaluate_maxpool(const HostTensorVector& outputs, const HostTensorVector& inputs) const; }; } // namespace v1 @@ -119,10 +114,8 @@ class OPENVINO_API MaxPool : public op::util::MaxPoolBase { m_axis = axis; } + bool evaluate(TensorVector& outputs, const TensorVector& inputs) const override; bool has_evaluate() const override; - OPENVINO_SUPPRESS_DEPRECATED_START - bool evaluate(const HostTensorVector&, const HostTensorVector&) const override; - OPENVINO_SUPPRESS_DEPRECATED_END private: Strides m_dilations; diff --git a/src/core/src/op/max_pool.cpp b/src/core/src/op/max_pool.cpp index d40c13644cd3cd..df74c1e6a105b6 100644 --- a/src/core/src/op/max_pool.cpp +++ b/src/core/src/op/max_pool.cpp @@ -2,32 +2,30 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/max_pool.hpp" +#include "openvino/op/max_pool.hpp" #include "itt.hpp" #include "max_pool_shape_inference.hpp" -#include "ngraph/attribute_visitor.hpp" -#include "ngraph/op/add.hpp" -#include "ngraph/op/constant.hpp" -#include "ngraph/runtime/host_tensor.hpp" -#include "ngraph/validation_util.hpp" +#include "openvino/core/attribute_visitor.hpp" +#include "openvino/core/validation_util.hpp" #include "openvino/reference/max_pool.hpp" -using namespace std; -using namespace ngraph; +namespace ov { +namespace op { +namespace v1 { -op::v1::MaxPool::MaxPool(const Output& arg, - const Strides& strides, - const ov::Shape& pads_begin, - const ov::Shape& pads_end, - const ov::Shape& kernel, - const op::RoundingType rounding_type, - const PadType auto_pad) - : op::util::MaxPoolBase(arg, strides, pads_begin, pads_end, kernel, rounding_type, auto_pad) { +MaxPool::MaxPool(const Output& arg, + const Strides& strides, + const Shape& pads_begin, + const Shape& pads_end, + const Shape& kernel, + const RoundingType rounding_type, + const PadType auto_pad) + : util::MaxPoolBase(arg, strides, pads_begin, pads_end, kernel, rounding_type, auto_pad) { constructor_validate_and_infer_types(); } -bool ngraph::op::v1::MaxPool::visit_attributes(AttributeVisitor& visitor) { +bool MaxPool::visit_attributes(AttributeVisitor& visitor) { OV_OP_SCOPE(v1_MaxPool_visit_attributes); visitor.on_attribute("strides", m_strides); visitor.on_attribute("pads_begin", m_pads_begin); @@ -38,7 +36,7 @@ bool ngraph::op::v1::MaxPool::visit_attributes(AttributeVisitor& visitor) { return true; } -void op::v1::MaxPool::validate_and_infer_types() { +void MaxPool::validate_and_infer_types() { OV_OP_SCOPE(v1_MaxPool_validate_and_infer_types); OPENVINO_SUPPRESS_DEPRECATED_START @@ -47,219 +45,105 @@ void op::v1::MaxPool::validate_and_infer_types() { set_output_type(0, get_input_element_type(0), output_shapes.front()); } -shared_ptr op::v1::MaxPool::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr MaxPool::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v1_MaxPool_clone_with_new_inputs); check_new_args_count(this, new_args); - return make_shared(new_args.at(0), - m_strides, - m_pads_begin, - m_pads_end, - m_kernel, - m_rounding_type, - m_auto_pad); + return std::make_shared(new_args.at(0), + m_strides, + m_pads_begin, + m_pads_end, + m_kernel, + m_rounding_type, + m_auto_pad); } -OPENVINO_SUPPRESS_DEPRECATED_START namespace maxpool { -namespace { -template -inline bool evaluate(const HostTensorPtr& arg, - const HostTensorPtr& out, - const ov::Shape& out_shape, - const ov::Shape& window_shape, - const Strides& window_movement_strides, - const ov::Shape& padding_below, - const ov::Shape& padding_above) { - using T = typename 
element_type_traits::value_type; - out->set_shape(out_shape); - ov::reference::max_pool(arg->get_data_ptr(), - out->get_data_ptr(), - arg->get_shape(), - out_shape, - window_shape, - window_movement_strides, - padding_below, - padding_above); - return true; -} - -bool evaluate_maxpool(const HostTensorPtr& arg, - const HostTensorPtr& out, - const ov::Shape& out_shape, - const ov::Shape& kernel, - const Strides& strides, - const ov::Shape& pad_begin, - const ov::Shape& pad_end) { - bool rc = true; - auto arg_shape = arg->get_shape(); +struct Evaluate : element::NoAction { + using element::NoAction::visit; - switch (out->get_element_type()) { - OPENVINO_TYPE_CASE(evaluate_maxpool, i32, arg, out, out_shape, kernel, strides, pad_begin, pad_end); - OPENVINO_TYPE_CASE(evaluate_maxpool, i64, arg, out, out_shape, kernel, strides, pad_begin, pad_end); - OPENVINO_TYPE_CASE(evaluate_maxpool, u32, arg, out, out_shape, kernel, strides, pad_begin, pad_end); - OPENVINO_TYPE_CASE(evaluate_maxpool, u64, arg, out, out_shape, kernel, strides, pad_begin, pad_end); - OPENVINO_TYPE_CASE(evaluate_maxpool, f16, arg, out, out_shape, kernel, strides, pad_begin, pad_end); - OPENVINO_TYPE_CASE(evaluate_maxpool, f32, arg, out, out_shape, kernel, strides, pad_begin, pad_end); - default: - rc = false; - break; + template > + static result_type visit(const Tensor& in, + Tensor& out, + const Shape& in_shape, + const Shape& out_shape, + const Shape& kernel, + const Strides& strides, + const Shape& pads_begin, + const Shape& pads_end) { + reference::max_pool(in.data(), + out.data(), + in_shape, + out_shape, + kernel, + strides, + pads_begin, + pads_end); + return true; } - return rc; -} -} // namespace +}; } // namespace maxpool -bool op::v1::MaxPool::evaluate_maxpool(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - const auto input_shapes = std::vector{inputs[0]->get_partial_shape()}; +bool MaxPool::evaluate(TensorVector& outputs, const TensorVector& inputs) const { + OV_OP_SCOPE(v1_MaxPool_evaluate); + const auto input_shapes = std::vector{inputs[0].get_shape()}; auto pads_begin = m_pads_begin; auto pads_end = m_pads_end; - auto out_shape = shape_infer(this, input_shapes, pads_begin, pads_end).front(); + const auto output_shape = shape_infer(this, input_shapes, pads_begin, pads_end).front(); - return maxpool::evaluate_maxpool(inputs[0], - outputs[0], - out_shape.get_shape(), - get_kernel(), - get_strides(), - get_pads_begin(), - get_pads_end()); -} -bool op::v1::MaxPool::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - OV_OP_SCOPE(v1_MaxPool_evaluate); - return evaluate_maxpool(outputs, inputs); + outputs[0].set_shape(output_shape.get_shape()); + using namespace ov::element; + return IfTypeOf::apply(inputs[0].get_element_type(), + inputs[0], + outputs[0], + inputs[0].get_shape(), + outputs[0].get_shape(), + get_kernel(), + get_strides(), + get_pads_begin(), + get_pads_end()); } -bool op::v1::MaxPool::has_evaluate() const { +bool MaxPool::has_evaluate() const { OV_OP_SCOPE(v1_MaxPool_has_evaluate); switch (get_input_element_type(0)) { - case ngraph::element::i32: - case ngraph::element::i64: - case ngraph::element::u32: - case ngraph::element::u64: - case ngraph::element::f16: - case ngraph::element::f32: + case element::i32: + case element::i64: + case element::u32: + case element::u64: + case element::f16: + case element::f32: return true; default: - break; + return false; } - return false; } +} // namespace v1 +} // namespace op +} // namespace ov // 
------------------------------ V8 ------------------------------ +namespace ov { +namespace op { +namespace v8 { -namespace maxpool_v8 { -namespace { -template -inline bool evaluate(const HostTensorPtr& data, - const HostTensorPtr& values, - const HostTensorPtr& indices, - const ov::Shape& out_shape, - const ov::Shape& kernel, - const Strides& strides, - const Strides& dilations, - const ov::Shape& pads_begin, - const ov::Shape& pads_end, - const int64_t axis) { - using Values_t = typename element_type_traits::value_type; - using Indices_t = typename element_type_traits::value_type; - ov::reference::max_pool(data->get_data_ptr(), - values->get_data_ptr(), - indices->get_data_ptr(), - data->get_shape(), - out_shape, - kernel, - strides, - dilations, - pads_begin, - pads_end, - axis); - return true; -} - -bool evaluate_maxpool(const HostTensorPtr& data, - const HostTensorPtr& values, - const HostTensorPtr& indices, - const ov::Shape& out_shape, - const ov::Shape& kernel, - const Strides& strides, - const Strides& dilations, - const ov::Shape& pads_begin, - const ov::Shape& pads_end, - const int64_t axis) { -#define EVAL_MAX_POOL_8(data_et, index_et) \ - OPENVINO_2_TYPES_CASE(maxpool_v8::evaluate_maxpool, \ - data_et, \ - index_et, \ - data, \ - values, \ - indices, \ - out_shape, \ - kernel, \ - strides, \ - dilations, \ - pads_begin, \ - pads_end, \ - axis) - - bool rc = true; - switch (indices->get_element_type()) { - case element::Type_t::i32: { - switch (data->get_element_type()) { - EVAL_MAX_POOL_8(i8, i32); - EVAL_MAX_POOL_8(i32, i32); - EVAL_MAX_POOL_8(i64, i32); - EVAL_MAX_POOL_8(u8, i32); - EVAL_MAX_POOL_8(u32, i32); - EVAL_MAX_POOL_8(u64, i32); - EVAL_MAX_POOL_8(f16, i32); - EVAL_MAX_POOL_8(f32, i32); - default: - rc = false; - break; - } - } break; - case element::Type_t::i64: { - switch (data->get_element_type()) { - EVAL_MAX_POOL_8(i8, i64); - EVAL_MAX_POOL_8(i32, i64); - EVAL_MAX_POOL_8(i64, i64); - EVAL_MAX_POOL_8(u8, i64); - EVAL_MAX_POOL_8(u32, i64); - EVAL_MAX_POOL_8(u64, i64); - EVAL_MAX_POOL_8(f16, i64); - EVAL_MAX_POOL_8(f32, i64); - default: - rc = false; - break; - } - } break; - default: - rc = false; - break; - } - - return rc; -} -} // namespace -} // namespace maxpool_v8 - -op::v8::MaxPool::MaxPool(const Output& arg, - const Strides& strides, - const Strides& dilations, - const ov::Shape& pads_begin, - const ov::Shape& pads_end, - const ov::Shape& kernel, - const op::RoundingType rounding_type, - const PadType auto_pad, - const element::Type index_element_type, - const int64_t axis) - : op::util::MaxPoolBase(arg, strides, pads_begin, pads_end, kernel, rounding_type, auto_pad), +MaxPool::MaxPool(const Output& arg, + const Strides& strides, + const Strides& dilations, + const Shape& pads_begin, + const Shape& pads_end, + const Shape& kernel, + const RoundingType rounding_type, + const PadType auto_pad, + const element::Type index_element_type, + const int64_t axis) + : util::MaxPoolBase(arg, strides, pads_begin, pads_end, kernel, rounding_type, auto_pad), m_dilations{dilations}, m_index_element_type{index_element_type}, m_axis{axis} { constructor_validate_and_infer_types(); } -bool ngraph::op::v8::MaxPool::visit_attributes(AttributeVisitor& visitor) { +bool MaxPool::visit_attributes(AttributeVisitor& visitor) { OV_OP_SCOPE(v8_MaxPool_visit_attributes); visitor.on_attribute("strides", m_strides); visitor.on_attribute("dilations", m_dilations); @@ -273,13 +157,13 @@ bool ngraph::op::v8::MaxPool::visit_attributes(AttributeVisitor& visitor) { return true; } -void 
op::v8::MaxPool::validate_and_infer_types() { +void MaxPool::validate_and_infer_types() { OV_OP_SCOPE(v8_MaxPool_validate_and_infer_types); const auto input_shape = get_input_partial_shape(0); if (input_shape.rank().is_static()) { OPENVINO_SUPPRESS_DEPRECATED_START - m_axis = ngraph::normalize_axis(this, m_axis, input_shape.rank()); + m_axis = normalize_axis(this, m_axis, input_shape.rank()); OPENVINO_SUPPRESS_DEPRECATED_END } @@ -290,55 +174,126 @@ void op::v8::MaxPool::validate_and_infer_types() { set_output_type(1, m_index_element_type, output_shapes[1]); } -shared_ptr op::v8::MaxPool::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr MaxPool::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v8_MaxPool_clone_with_new_inputs); check_new_args_count(this, new_args); - return make_shared(new_args.at(0), - m_strides, - m_dilations, - m_pads_begin, - m_pads_end, - m_kernel, - m_rounding_type, - m_auto_pad, - m_index_element_type, - m_axis); + return std::make_shared(new_args.at(0), + m_strides, + m_dilations, + m_pads_begin, + m_pads_end, + m_kernel, + m_rounding_type, + m_auto_pad, + m_index_element_type, + m_axis); } -bool op::v8::MaxPool::has_evaluate() const { - OV_OP_SCOPE(v8_MaxPool_has_evaluate); - switch (get_input_element_type(0)) { - case ngraph::element::i8: - case ngraph::element::i32: - case ngraph::element::i64: - case ngraph::element::u8: - case ngraph::element::u32: - case ngraph::element::u64: - case ngraph::element::f16: - case ngraph::element::f32: - return true; - default: - break; +namespace maxpool { +struct Evaluate : element::NoAction { + using element::NoAction::visit; + + template > + static result_type visit(const Tensor& in, + Tensor& out_values, + Tensor& out_indices, + const Shape& in_shape, + const Shape& out_shape, + const Shape& kernel, + const Strides& strides, + const Strides& dilations, + const Shape& pads_begin, + const Shape& pads_end, + const int64_t axis) { + using namespace ov::element; + return IfTypeOf::apply(out_indices.get_element_type(), + in.data(), + out_values.data(), + out_indices, + in_shape, + out_shape, + kernel, + strides, + dilations, + pads_begin, + pads_end, + axis); } - return false; -} -bool op::v8::MaxPool::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { +private: + struct EvalByIdxType : public element::NoAction { + using element::NoAction::visit; + + template > + static result_type visit(const T* in_data, + T* out_values_data, + Tensor& out_indices, + const Shape& in_shape, + const Shape& out_shape, + const Shape& kernel, + const Strides& strides, + const Strides& dilations, + const Shape& pads_begin, + const Shape& pads_end, + const int64_t axis) { + reference::max_pool(in_data, + out_values_data, + out_indices.data(), + in_shape, + out_shape, + kernel, + strides, + dilations, + pads_begin, + pads_end, + axis); + return true; + } + }; +}; +} // namespace maxpool + +bool MaxPool::evaluate(TensorVector& outputs, const TensorVector& inputs) const { OV_OP_SCOPE(v8_MaxPool_evaluate); - const auto input_shapes = std::vector{inputs[0]->get_partial_shape()}; + const auto input_shapes = std::vector{inputs[0].get_shape()}; auto pads_begin = m_pads_begin; auto pads_end = m_pads_end; - auto out_shape = shape_infer(this, input_shapes, pads_begin, pads_end).front(); + const auto output_shape = shape_infer(this, input_shapes, pads_begin, pads_end).front(); - return maxpool_v8::evaluate_maxpool(inputs[0], - outputs[0], - outputs[1], - out_shape.get_shape(), - 
get_kernel(), - get_strides(), - get_dilations(), - get_pads_begin(), - get_pads_end(), - get_axis()); + outputs[0].set_shape(output_shape.get_shape()); + using namespace ov::element; + return IfTypeOf::apply(inputs[0].get_element_type(), + inputs[0], + outputs[0], + outputs[1], + inputs[0].get_shape(), + outputs[0].get_shape(), + get_kernel(), + get_strides(), + get_dilations(), + get_pads_begin(), + get_pads_end(), + get_axis()); } + +bool MaxPool::has_evaluate() const { + OV_OP_SCOPE(v8_MaxPool_has_evaluate); + switch (get_input_element_type(0)) { + case element::i8: + case element::i32: + case element::i64: + case element::u8: + case element::u32: + case element::u64: + case element::f16: + case element::f32: + return true; + default: + return false; + } +} + +} // namespace v8 +} // namespace op +} // namespace ov From a1daedc2e17d1da25897d78026b1c24f17c7acc0 Mon Sep 17 00:00:00 2001 From: Oleg Pipikin Date: Tue, 17 Oct 2023 12:43:55 +0200 Subject: [PATCH 229/257] Disable `NormalizeL2` and `NonZero` f16 tests on MacOs Arm64 (#20451) --- .../functional/shared_tests_instances/skip_tests_config.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp index 47c789928db4ea..9e9baece4c63e1 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp @@ -198,6 +198,9 @@ std::vector disabledTestPatterns() { // Issue: 122177 retVector.emplace_back(R"(.*smoke_LSTMCellCommon/LSTMCellTest.Inference.*_modelType=f16.*)"); retVector.emplace_back(R"(.*smoke_LSTMSequenceCommonZeroClip/LSTMSequenceTest.Inference.*_modelType=f16.*)"); + // Issue 122699 + retVector.emplace_back(R"(.*smoke_nonzero/NonZeroLayerTest.Inference.*inPRC=f16.*)"); + retVector.emplace_back(R"(.*NormalizeL2LayerTest.Inference.*netPRC=f16.*)"); #endif #if defined(OPENVINO_ARCH_X86) From 404f4e29b73f5b0b20d56ef687be7bc71c798d4b Mon Sep 17 00:00:00 2001 From: Katz Sasaki Date: Tue, 17 Oct 2023 22:26:37 +0900 Subject: [PATCH 230/257] fix typo of config check error message (#20517) --- src/plugins/intel_gpu/src/runtime/execution_config.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/plugins/intel_gpu/src/runtime/execution_config.cpp b/src/plugins/intel_gpu/src/runtime/execution_config.cpp index 1b4719660820fd..8bcb853a4c3090 100644 --- a/src/plugins/intel_gpu/src/runtime/execution_config.cpp +++ b/src/plugins/intel_gpu/src/runtime/execution_config.cpp @@ -87,7 +87,7 @@ void ExecutionConfig::set_property(const AnyMap& config) { for (auto& kv : config) { auto& name = kv.first; auto& val = kv.second; - OPENVINO_ASSERT(is_supported(kv.first), "[GPU] Attepmpt to set property ", name, " (", val.as(), ") which was not registered!\n"); + OPENVINO_ASSERT(is_supported(kv.first), "[GPU] Attempt to set property ", name, " (", val.as(), ") which was not registered!\n"); OPENVINO_ASSERT(property_validators.at(name)->is_valid(val), "[GPU] Invalid value for property ", name, ": ", val.as()); internal_properties[name] = val; } @@ -109,7 +109,7 @@ void ExecutionConfig::set_user_property(const AnyMap& config) { auto& name = kv.first; auto& val = kv.second; bool supported = is_supported(name) && supported_properties.at(name) == PropertyVisibility::PUBLIC; - OPENVINO_ASSERT(supported, "[GPU] Attepmpt to set user property ", name, " (", 
val.as(), ") which was not registered or internal!\n"); + OPENVINO_ASSERT(supported, "[GPU] Attempt to set user property ", name, " (", val.as(), ") which was not registered or internal!\n"); OPENVINO_ASSERT(property_validators.at(name)->is_valid(val), "[GPU] Invalid value for property ", name, ": `", val.as(), "`"); user_properties[kv.first] = kv.second; From e87d147f4cf8685e6ad1e291e1c7689243961fd7 Mon Sep 17 00:00:00 2001 From: Aleksandr Voron Date: Tue, 17 Oct 2023 16:09:44 +0200 Subject: [PATCH 231/257] [CPU] [ARM] Enable Pooling SLT tests on ARM (#18013) --- .../src/nodes/executors/acl/acl_pooling.cpp | 20 +- .../src/nodes/executors/acl/acl_pooling.hpp | 7 +- src/plugins/intel_cpu/src/nodes/pooling.cpp | 72 +- .../single_layer_tests/classes/pooling.cpp | 464 ++++++++++++ .../single_layer_tests/classes/pooling.hpp | 69 ++ .../instances/common/pooling.cpp | 181 +++++ .../instances/x64/pooling.cpp | 148 ++++ .../functional/single_layer_tests/pooling.cpp | 704 ------------------ 8 files changed, 924 insertions(+), 741 deletions(-) create mode 100644 src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/pooling.cpp create mode 100644 src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/pooling.hpp create mode 100644 src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/common/pooling.cpp create mode 100644 src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/pooling.cpp delete mode 100644 src/plugins/intel_cpu/tests/functional/single_layer_tests/pooling.cpp diff --git a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_pooling.cpp b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_pooling.cpp index 72021a5c6c5812..9cd6e3c43df7b0 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_pooling.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_pooling.cpp @@ -20,7 +20,8 @@ bool AclPoolingExecutor::isSupported(const TensorInfo& srcTensorInfo, DataLayout dataLayout, const VectorDims* indDims, PoolingLayerInfo* pool_info, - Pooling3dLayerInfo* pool3d_info) { + Pooling3dLayerInfo* pool3d_info, + bool ignoreOutShapeErrors) { unsigned int pad_left = (poolingAttrs.data_pad_begin.size() >= 2u) ? poolingAttrs.data_pad_begin[1] : poolingAttrs.data_pad_begin[0]; unsigned int pad_right = (poolingAttrs.data_pad_end.size() >= 2u) ? poolingAttrs.data_pad_end[1] : poolingAttrs.data_pad_end[0]; unsigned int pad_top = (poolingAttrs.data_pad_begin.size() >= 2u) ? poolingAttrs.data_pad_begin[0] : 0; @@ -46,7 +47,12 @@ bool AclPoolingExecutor::isSupported(const TensorInfo& srcTensorInfo, // The combination of parameters: NCHW + CEIL gives an accuracy problem in AvgPool. // One workaround is to disable the ACL executor for these parameters. // Then OneDNN will run this case in ACL backend as reorder -> NHWC -> reorder - if (dataLayout == arm_compute::DataLayout::NCHW && poolingAttrs.rounding == op::RoundingType::CEIL) return false; + if (pool_type == PoolingType::AVG && + dataLayout == arm_compute::DataLayout::NCHW && + poolingAttrs.rounding == op::RoundingType::CEIL) { + DEBUG_LOG("NCHW + CEIL gives an accuracy problem in ACL AvgPool. ACL executor will not be created."); + return false; + } DimensionRoundingType round = (poolingAttrs.rounding == op::RoundingType::CEIL) ? 
DimensionRoundingType::CEIL : DimensionRoundingType::FLOOR; @@ -82,12 +88,22 @@ bool AclPoolingExecutor::isSupported(const TensorInfo& srcTensorInfo, arm_compute::Status s = arm_compute::NEPoolingLayer::validate(&srcTensorInfo, &dstTensorInfo, *pool_info, &indTensorInfo); if (!s) { DEBUG_LOG("NEPoolingLayer validation with indices failed: ", s.error_description()); + if (ignoreOutShapeErrors && + s.error_description().find("Tensors have different shapes") != std::string::npos) { + DEBUG_LOG("Ignore shape error because the flag ignoreOutShapeErrors is set"); + return true; + } return false; } } else { arm_compute::Status s = arm_compute::NEPoolingLayer::validate(&srcTensorInfo, &dstTensorInfo, *pool_info); if (!s) { DEBUG_LOG("NEPoolingLayer validation without indices failed: ", s.error_description()); + if (ignoreOutShapeErrors && + s.error_description().find("Tensors have different shapes") != std::string::npos) { + DEBUG_LOG("Ignore shape error because the flag ignoreOutShapeErrors is set"); + return true; + } return false; } } diff --git a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_pooling.hpp b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_pooling.hpp index 44a2e999057d32..2525ccb490468a 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_pooling.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_pooling.hpp @@ -31,7 +31,8 @@ class AclPoolingExecutor : public PoolingExecutor { arm_compute::DataLayout dataLayout, const VectorDims* indDims, arm_compute::PoolingLayerInfo* pool_info, - arm_compute::Pooling3dLayerInfo* pool3d_info); + arm_compute::Pooling3dLayerInfo* pool3d_info, + bool ignoreOutShapeErrors = false); impl_desc_type getImplType() const override { return implType; @@ -79,8 +80,8 @@ class AclPoolingExecutorBuilder : public PoolingExecutorBuilder { if (dstDescs.size() == 2u && dstDescs[1]->getPrecision() != InferenceEngine::Precision::U32) { - DEBUG_LOG("AclPoolingExecutor does not support precisions:", - " dst[1]=", dstDescs[1]->getPrecision()); + DEBUG_LOG("AclPoolingExecutor supports U32 as indices precisions only. ", + "Passed indices precision: ", dstDescs[1]->getPrecision()); return false; } diff --git a/src/plugins/intel_cpu/src/nodes/pooling.cpp b/src/plugins/intel_cpu/src/nodes/pooling.cpp index 0bd6f3208c1e87..42aa97d062702b 100644 --- a/src/plugins/intel_cpu/src/nodes/pooling.cpp +++ b/src/plugins/intel_cpu/src/nodes/pooling.cpp @@ -271,14 +271,31 @@ void Pooling::getSupportedDescriptors() { const auto &childShape = getOutputShapeAtPort(0); const size_t inputRank = getInputShapeAtPort(0).getRank(); + if (isDynamicNode()) { + inShape = MemoryDescUtils::makeDummyShape(parentShape); + const auto& origDims = parentShape.getDims(); + const auto& origMaxDims = parentShape.getMaxDims(); + + auto inDims = inShape.getStaticDims(); + for (size_t i = 0; i < inDims.size() - 2; i++) { + if (origDims[i + 2] == Shape::UNDEFINED_DIM) { + inDims[i + 2] = std::min(origMaxDims[i + 2], std::max(inDims[i + 2], poolingAttrs.kernel[i])); + } + } + inShape = Shape(inDims); + } else { + inShape = parentShape; + } + #if defined(OV_CPU_WITH_ACL) // WA: we may specify any layout here (NCHW or NHWC) since both are supported by ACL - arm_compute::DataLayout dataLayout = (parentShape.getDims().size() == 5) ? arm_compute::DataLayout::NDHWC : arm_compute::DataLayout::NCHW; - arm_compute::TensorInfo srcTensorInfo = arm_compute::TensorInfo(shapeCast(parentShape.getDims()), + arm_compute::DataLayout dataLayout = (inShape.getDims().size() == 5) ? 
arm_compute::DataLayout::NDHWC : arm_compute::DataLayout::NCHW; + arm_compute::TensorInfo srcTensorInfo = arm_compute::TensorInfo(shapeCast(inShape.getDims()), 1, precisionToAclDataType(inputPrecision), dataLayout); - arm_compute::TensorInfo dstTensorInfo = arm_compute::TensorInfo(shapeCast(childShape.getDims()), + arm_compute::TensorInfo dstTensorInfo = arm_compute::TensorInfo(shapeCast(isDynamicNode() ? MemoryDescUtils::makeDummyShape(childShape).getDims() : + childShape.getDims()), 1, precisionToAclDataType(outputPrecision), dataLayout); @@ -287,16 +304,19 @@ void Pooling::getSupportedDescriptors() { useACL = AclPoolingExecutor::isSupported(srcTensorInfo, dstTensorInfo, poolingAttrs, - parentShape.getDims().size(), + inShape.getDims().size(), getOriginalOutputsNumber(), dataLayout, (getOriginalOutputsNumber() > 1) ? &getOutputShapeAtPort(1).getDims() : nullptr, &pool_info, - &pool3d_info); + &pool3d_info, + isDynamicNode()); //FIXME: 5D tensors case is not assigned to ACL because there is no way to check layout here //NEPooling3dLayer supports NDHWC only - if (parentShape.getDims().size() == 5) + if (inShape.getDims().size() == 5) { useACL = false; + DEBUG_LOG("FIXME: 5D tensors case is not assigned to ACL because there is no way to check layout in getSupportedDescriptors()"); + } #endif if (useACL) return; @@ -324,19 +344,7 @@ void Pooling::getSupportedDescriptors() { if ((inputRank < 3) || (inputRank > 5)) IE_THROW() << "Pooling layer. Unsupported mode. Only 3D, 4D and 5D blobs are supported as input."; - inShape = MemoryDescUtils::makeDummyShape(parentShape); - if (isDynamicNode()) { - const auto& origDims = parentShape.getDims(); - const auto& origMaxDims = parentShape.getMaxDims(); - auto inDims = inShape.getStaticDims(); - for (size_t i = 0; i < inDims.size() - 2; i++) { - if (origDims[i + 2] == Shape::UNDEFINED_DIM) { - inDims[i + 2] = std::min(origMaxDims[i + 2], std::max(inDims[i + 2], poolingAttrs.kernel[i])); - } - } - inShape = Shape(inDims); - } initEffectiveAttributes(inShape, MemoryDescUtils::makeDummyShape(childShape)); @@ -386,7 +394,12 @@ void Pooling::prepareParams() { } else { attr = initPrimitiveAttr(); } - + if (isDynamicNode()) { + if (poolingAttrs.auto_pad) { + poolingAttrs.data_pad_begin = shapeInference->get_pads_begin(); + poolingAttrs.data_pad_end = shapeInference->get_pads_end(); + } + } if (useACL) { auto dstMemPtr = getChildEdgeAt(0)->getMemoryPtr(); auto srcMemPtr = getParentEdgeAt(0)->getMemoryPtr(); @@ -414,10 +427,6 @@ void Pooling::prepareParams() { auto outDesc = getChildEdgesAtPort(0)[0]->getMemory().getDescWithType(); if (isDynamicNode()) { - if (poolingAttrs.auto_pad) { - poolingAttrs.data_pad_begin = shapeInference->get_pads_begin(); - poolingAttrs.data_pad_end = shapeInference->get_pads_end(); - } initEffectiveAttributes(inDesc->getShape(), outDesc->getShape()); } @@ -593,18 +602,17 @@ void Pooling::initSupportedPrimitiveDescriptors() { config.inConfs.resize(getParentEdges().size()); config.outConfs.resize(getOriginalOutputsNumber()); - config.inConfs[0].setMemDesc( - creatorsMap.at(format)->createSharedDesc(getOriginalInputPrecisionAtPort(0), getInputShapeAtPort(0))); - config.outConfs[0].setMemDesc( - creatorsMap.at(format)->createSharedDesc(getOriginalOutputPrecisionAtPort(0), getOutputShapeAtPort(0))); - std::vector srcMemoryDescs; - for (const auto& inConf : config.inConfs) { - srcMemoryDescs.push_back(inConf.getMemDesc()); + for (size_t i = 0; i < config.inConfs.size(); i++) { + config.inConfs[i].setMemDesc( + 
creatorsMap.at(format)->createSharedDesc(getOriginalInputPrecisionAtPort(i), getInputShapeAtPort(i))); + srcMemoryDescs.push_back(config.inConfs[i].getMemDesc()); } std::vector dstMemoryDescs; - for (const auto& outConf : config.outConfs) { - dstMemoryDescs.push_back(outConf.getMemDesc()); + for (size_t i = 0; i < config.outConfs.size(); i++) { + config.outConfs[i].setMemDesc( + creatorsMap.at(format)->createSharedDesc(getOriginalOutputPrecisionAtPort(i), getOutputShapeAtPort(i))); + dstMemoryDescs.push_back(config.outConfs[i].getMemDesc()); } auto factory = std::make_shared( diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/pooling.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/pooling.cpp new file mode 100644 index 00000000000000..e40771146cd0ec --- /dev/null +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/pooling.cpp @@ -0,0 +1,464 @@ +// Copyright (C) 2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "gtest/gtest.h" +#include "pooling.hpp" +#include "test_utils/cpu_test_utils.hpp" + +using namespace InferenceEngine; +using namespace CPUTestUtils; +using namespace ngraph::helpers; +using namespace ov::test; + +namespace CPULayerTestsDefinitions { + +std::string PoolingLayerCPUTest::getTestCaseName(const testing::TestParamInfo& obj) { + LayerTestsDefinitions::poolSpecificParams basicParamsSet; + InputShape inputShapes; + ElementType inPrc; + bool isInt8; + CPUSpecificParams cpuParams; + fusingSpecificParams fusingParams; + std::tie(basicParamsSet, inputShapes, inPrc, isInt8, cpuParams, fusingParams) = obj.param; + + ngraph::helpers::PoolingTypes poolType; + std::vector kernel, stride; + std::vector padBegin, padEnd; + ngraph::op::PadType padType; + ngraph::op::RoundingType roundingType; + bool excludePad; + std::tie(poolType, kernel, stride, padBegin, padEnd, roundingType, padType, excludePad) = basicParamsSet; + + std::ostringstream results; + results << "IS=("; + results << ov::test::utils::partialShape2str({inputShapes.first}) << ")_"; + results << "TS="; + for (const auto& shape : inputShapes.second) { + results << ov::test::utils::vec2str(shape) << "_"; + } + results << "Prc=" << inPrc << "_"; + switch (poolType) { + case ngraph::helpers::PoolingTypes::MAX: + results << "MaxPool_"; + break; + case ngraph::helpers::PoolingTypes::AVG: + results << "AvgPool_"; + results << "ExcludePad=" << excludePad << "_"; + break; + } + results << "K" << ov::test::utils::vec2str(kernel) << "_"; + results << "S" << ov::test::utils::vec2str(stride) << "_"; + results << "PB" << ov::test::utils::vec2str(padBegin) << "_"; + results << "PE" << ov::test::utils::vec2str(padEnd) << "_"; + results << "Rounding=" << roundingType << "_"; + results << "AutoPad=" << padType << "_"; + results << "INT8=" << isInt8 << "_"; + + results << CPUTestsBase::getTestCaseName(cpuParams); + results << CpuTestWithFusing::getTestCaseName(fusingParams); + return results.str(); +} + +void PoolingLayerCPUTest::SetUp() { + targetDevice = ov::test::utils::DEVICE_CPU; + + LayerTestsDefinitions::poolSpecificParams basicParamsSet; + InputShape inputShapes; + ElementType inPrc; + bool isInt8; + CPUSpecificParams cpuParams; + fusingSpecificParams fusingParams; + std::tie(basicParamsSet, inputShapes, inPrc, isInt8, cpuParams, fusingParams) = this->GetParam(); + + ngraph::helpers::PoolingTypes poolType; + std::vector kernel, stride; + std::vector padBegin, padEnd; + ngraph::op::PadType padType; + ngraph::op::RoundingType roundingType; 
+ bool excludePad; + std::tie(poolType, kernel, stride, padBegin, padEnd, roundingType, padType, excludePad) = basicParamsSet; + + std::tie(inFmts, outFmts, priority, selectedType) = cpuParams; + std::tie(postOpMgrPtr, fusedOps) = fusingParams; + + if (selectedType.empty()) { + selectedType = getPrimitiveType(); + } + if (isInt8) + selectedType = selectedType + "_I8"; + else + selectedType = makeSelectedTypeStr(selectedType, inPrc); + + init_input_shapes({inputShapes}); + + ov::ParameterVector params; + for (auto&& shape : inputDynamicShapes) { + params.push_back(std::make_shared(inPrc, shape)); + } + + std::shared_ptr poolInput = params[0]; + if (isInt8) { + ov::Shape newShape(poolInput->get_output_partial_shape(0).size(), 1); + poolInput = ngraph::builder::makeFakeQuantize(poolInput, inPrc, 256, newShape); + } + + std::shared_ptr pooling = ngraph::builder::makePooling(poolInput, + stride, + padBegin, + padEnd, + kernel, + roundingType, + padType, + excludePad, + poolType); + + function = makeNgraphFunction(inPrc, params, pooling, "PoolingCPU"); +} + +std::string MaxPoolingV8LayerCPUTest::getTestCaseName( + const testing::TestParamInfo& obj) { + LayerTestsDefinitions::maxPoolV8SpecificParams basicParamsSet; + InputShape inputShapes; + ElementType inPrc; + CPUSpecificParams cpuParams; + std::tie(basicParamsSet, inputShapes, inPrc, cpuParams) = obj.param; + + std::vector kernel, stride, dilation; + std::vector padBegin, padEnd; + ngraph::op::PadType padType; + ngraph::op::RoundingType roundingType; + ngraph::element::Type indexElementType; + int64_t axis; + std::tie(kernel, stride, dilation, padBegin, padEnd, indexElementType, axis, roundingType, padType) = + basicParamsSet; + + std::ostringstream results; + results << "IS=("; + results << ov::test::utils::partialShape2str({inputShapes.first}) << ")_"; + results << "TS="; + for (const auto& shape : inputShapes.second) { + results << ov::test::utils::vec2str(shape) << "_"; + } + results << "Prc=" << inPrc << "_"; + results << "MaxPool_"; + results << "K" << ov::test::utils::vec2str(kernel) << "_"; + results << "S" << ov::test::utils::vec2str(stride) << "_"; + results << "D" << ov::test::utils::vec2str(dilation) << "_"; + results << "PB" << ov::test::utils::vec2str(padBegin) << "_"; + results << "PE" << ov::test::utils::vec2str(padEnd) << "_"; + results << "Rounding=" << roundingType << "_"; + results << "AutoPad=" << padType << "_"; + + results << CPUTestsBase::getTestCaseName(cpuParams); + return results.str(); +} + +void MaxPoolingV8LayerCPUTest::SetUp() { + targetDevice = ov::test::utils::DEVICE_CPU; + + LayerTestsDefinitions::maxPoolV8SpecificParams basicParamsSet; + InputShape inputShapes; + ElementType inPrc; + CPUSpecificParams cpuParams; + std::tie(basicParamsSet, inputShapes, inPrc, cpuParams) = this->GetParam(); + + std::vector kernel, stride, dilation; + std::vector padBegin, padEnd; + ngraph::op::PadType padType; + ngraph::op::RoundingType roundingType; + ngraph::element::Type indexElementType; + int64_t axis; + std::tie(kernel, stride, dilation, padBegin, padEnd, indexElementType, axis, roundingType, padType) = + basicParamsSet; + std::tie(inFmts, outFmts, priority, selectedType) = cpuParams; + if (selectedType.empty()) { + selectedType = getPrimitiveType(); + } + selectedType = makeSelectedTypeStr(selectedType, inPrc); + + init_input_shapes({inputShapes}); + + ov::ParameterVector params; + for (auto&& shape : inputDynamicShapes) { + params.push_back(std::make_shared(inPrc, shape)); + } + std::shared_ptr pooling = 
ngraph::builder::makeMaxPoolingV8(params[0], + stride, + dilation, + padBegin, + padEnd, + kernel, + roundingType, + padType, + indexElementType, + axis); + pooling->get_rt_info() = getCPUInfo(); + ngraph::ResultVector results{std::make_shared(pooling->output(0))}; + function = std::make_shared(results, params, "MaxPooling"); +} + +TEST_P(PoolingLayerCPUTest, CompareWithRefs) { + run(); + CheckPluginRelatedResults(compiledModel, "Pooling"); +} + +TEST_P(MaxPoolingV8LayerCPUTest, CompareWithRefs) { + run(); + CheckPluginRelatedResults(compiledModel, "Pooling"); +} + +namespace Pooling { + +// The combination of parameters: NCHW + CEIL gives an accuracy problem in ACL AvgPool +const ngraph::op::RoundingType expectedAvgRoundingType() { +#if defined(OPENVINO_ARCH_ARM) || defined(OPENVINO_ARCH_ARM64) + return ngraph::op::RoundingType::FLOOR; +#else + return ngraph::op::RoundingType::CEIL; +#endif +} + +const std::vector& paramsMax3D() { + static const std::vector paramsMax3D = { + LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {2}, {2}, {0}, {0}, + ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false }, + LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {4}, {2}, {0}, {0}, + ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false }, + LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {2}, {1}, {0}, {0}, + ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false }, + }; + return paramsMax3D; +} + +const std::vector& paramsAvg3D() { + static const std::vector paramsAvg3D = { + LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {3}, {1}, {1}, {0}, + expectedAvgRoundingType(), ngraph::op::PadType::SAME_UPPER, false }, + LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {3}, {1}, {1}, {0}, + expectedAvgRoundingType(), ngraph::op::PadType::EXPLICIT, true }, + LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {4}, {4}, {2}, {2}, + expectedAvgRoundingType(), ngraph::op::PadType::EXPLICIT, true }, + }; + return paramsAvg3D; +} + +const std::vector& inpOutPrecision() { + static const std::vector inpOutPrecision = {ElementType::f32/*, ElementType::bf16*/}; + return inpOutPrecision; +} + +const std::vector& paramsMax4D() { + static const std::vector paramsMax4D = { + LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {2, 2}, {2, 2}, {0, 0}, {0, 0}, + ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_LOWER, false }, + LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {2, 2}, {2, 2}, {0, 0}, {0, 0}, + ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_UPPER, false }, + LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {4, 2}, {2, 2}, {0, 0}, {0, 0}, + ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false }, + LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {4, 2}, {2, 1}, {0, 0}, {0, 0}, + ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false }, + }; + return paramsMax4D; +} + +const std::vector& paramsMaxV84D() { + static const std::vector paramsMaxV84D = { + LayerTestsDefinitions::maxPoolV8SpecificParams{ {2, 2}, {2, 2}, {1, 1}, {0, 0}, {0, 0}, + ngraph::element::Type_t::i32, 0, + ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_LOWER }, + }; + return paramsMaxV84D; +} + +const std::vector& inputShapes3D() { + static const 
std::vector inputShapes3D = { + { {}, {{3, 4, 64}} }, + { {}, {{2, 8, 12}} }, + { {}, {{1, 16, 12}} }, + { {}, {{1, 21, 4}} }, + { {}, {{1, 32, 8}} }, + { + // dynamic + {-1, -1, -1}, + // target + { + {1, 32, 8}, + {1, 21, 4}, + {2, 8, 12} + } + }, + { + // dynamic + {{1, 5}, {4, 32}, {1, 64}}, + // target + { + {3, 4, 64}, + {1, 16, 12}, + {1, 32, 8} + } + } + }; + return inputShapes3D; +} + +const std::vector& inputShapes4D() { + static const std::vector inputShapes4D = { + { {}, {{3, 4, 64, 64}} }, + { {}, {{2, 8, 8, 12}} }, + { {}, {{1, 16, 16, 12}} }, + { {}, {{1, 21, 8, 4}} }, + { {}, {{1, 32, 8, 8}} }, + { + // dynamic + {-1, -1, -1, -1}, + // target + { + {1, 32, 8, 8}, + {1, 21, 8, 4}, + {2, 8, 8, 12}, + {1, 96, 125, 125} + } + }, + { + // dynamic + {{1, 5}, {4, 32}, {1, 64}, {1, 64}}, + // target + { + {3, 4, 64, 64}, + {1, 16, 16, 12}, + {1, 32, 8, 8} + } + }, + { + // dynamic + {{1, 10}, 16, 8, 8}, + // target + { + {1, 16, 8, 8}, + {2, 16, 8, 8}, + } + } + }; + return inputShapes4D; +} + +const std::vector& inputShapes5D() { + static const std::vector inputShapes5D = { + { {}, {{1, 4, 16, 16, 16}} }, + { {}, {{2, 8, 8, 8, 8}} }, + { {}, {{2, 16, 12, 16, 20}} }, + { {}, {{1, 19, 16, 20, 8}} }, + { {}, {{1, 32, 16, 8, 12}} }, + { + // dynamic + {-1, -1, -1, -1, -1}, + // target + { + {2, 8, 8, 8, 8}, + {1, 19, 16, 20, 8}, + {1, 4, 16, 16, 16} + } + }, + { + // dynamic + {{1, 5}, {4, 32}, {1, 64}, {1, 64}, {1, 25}}, + // target + { + {1, 4, 16, 16, 16}, + {1, 32, 16, 8, 12}, + {3, 16, 4, 8, 3} + } + } + }; + return inputShapes5D; +} + +const std::vector& paramsMaxV85D() { + static const std::vector paramsMaxV85D = { + LayerTestsDefinitions::maxPoolV8SpecificParams{ {2, 2, 2}, {1, 1, 1}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0}, + ngraph::element::Type_t::i32, 0, + ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_LOWER }, + }; + return paramsMaxV85D; +} + +const std::vector& paramsAvg4D() { + static const std::vector paramsAvg4D = { + LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2}, {2, 2}, {1, 0}, {0, 0}, + expectedAvgRoundingType(), ngraph::op::PadType::SAME_LOWER, true }, + LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2}, {2, 2}, {1, 0}, {0, 0}, + expectedAvgRoundingType(), ngraph::op::PadType::SAME_UPPER, true }, + LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2}, {2, 2}, {1, 0}, {0, 0}, + expectedAvgRoundingType(), ngraph::op::PadType::SAME_LOWER, false }, + LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2}, {2, 2}, {1, 0}, {0, 0}, + expectedAvgRoundingType(), ngraph::op::PadType::SAME_UPPER, false }, + LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2}, {2, 2}, {0, 0}, {0, 0}, + expectedAvgRoundingType(), ngraph::op::PadType::EXPLICIT, true }, + LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {4, 4}, {4, 4}, {2, 2}, {2, 2}, + expectedAvgRoundingType(), ngraph::op::PadType::EXPLICIT, true }, + }; + return paramsAvg4D; +} + +const std::vector& paramsAvg5D() { + static const std::vector paramsAvg5D = { + LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {1, 0, 0}, {0, 0, 0}, + expectedAvgRoundingType(), ngraph::op::PadType::SAME_LOWER, true }, + LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {1, 0, 0}, {0, 0, 0}, + expectedAvgRoundingType(), 
ngraph::op::PadType::SAME_UPPER, true }, + LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {1, 0, 0}, {0, 0, 0}, + expectedAvgRoundingType(), ngraph::op::PadType::SAME_LOWER, false }, + LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {1, 0, 0}, {0, 0, 0}, + expectedAvgRoundingType(), ngraph::op::PadType::SAME_UPPER, false }, + LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {0, 0, 0}, {0, 0, 0}, + expectedAvgRoundingType(), ngraph::op::PadType::EXPLICIT, true }, + LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {3, 3, 3}, {3, 3, 3}, {1, 1, 1}, {0, 0, 0}, + expectedAvgRoundingType(), ngraph::op::PadType::EXPLICIT, true }, + LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {4, 4, 4}, {2, 2, 2}, {2, 2, 2}, {2, 2, 2}, + expectedAvgRoundingType(), ngraph::op::PadType::EXPLICIT, true }, + }; + return paramsAvg5D; +} + +const std::vector& paramsMax5D() { + static const std::vector paramsMax5D = { + LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {2, 2, 2}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0}, + ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_LOWER, false }, + LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {2, 2, 2}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0}, + ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_UPPER, false }, + LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {2, 2, 2}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}, + ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false }, + LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {3, 3, 3}, {2, 2, 2}, {1, 1, 1}, {1, 1, 1}, + ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false }, + }; + return paramsMax5D; +} + +const std::vector& paramsAvg4D_Large() { + static const std::vector paramsAvg4D_Large = { + LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {65, 65}, {65, 65}, {0, 0}, {0, 0}, + ngraph::op::RoundingType::FLOOR, ngraph::op::PadType::VALID, true }, + }; + return paramsAvg4D_Large; +} + +const std::vector& inputShapes4D_Large() { + static const std::vector inputShapes4D_Large = { + { + // dynamic + {-1, -1, -1, -1}, + // target + { + {1, 16, 65, 65}, + {1, 8, 130, 130}, + {1, 16, 65, 65} + } + }, + }; + return inputShapes4D_Large; +} + + +} // namespace Pooling +} // namespace CPULayerTestsDefinitions \ No newline at end of file diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/pooling.hpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/pooling.hpp new file mode 100644 index 00000000000000..ecf12a0360de1f --- /dev/null +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/pooling.hpp @@ -0,0 +1,69 @@ +// Copyright (C) 2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "ov_models/builders.hpp" +#include "test_utils/cpu_test_utils.hpp" +#include "test_utils/fusing_test_utils.hpp" +#include "shared_test_classes/single_layer/pooling.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" + +using namespace ov::test; +using namespace CPUTestUtils; + +namespace CPULayerTestsDefinitions { + +using poolLayerCpuTestParamsSet = std::tuple; + +using maxPoolV8LayerCpuTestParamsSet = std::tuple; + +class PoolingLayerCPUTest : public 
testing::WithParamInterface, + virtual public SubgraphBaseTest, public CpuTestWithFusing { +public: + static std::string getTestCaseName(const testing::TestParamInfo& obj); + +protected: + void SetUp() override; +}; + +class MaxPoolingV8LayerCPUTest : public testing::WithParamInterface, + virtual public SubgraphBaseTest, public CPUTestsBase { +public: + static std::string getTestCaseName(const testing::TestParamInfo& obj); + +protected: + void SetUp() override; +}; + +namespace Pooling { +const std::vector& inpOutPrecision(); +const ngraph::op::RoundingType expectedAvgRoundingType(); + +const std::vector& paramsMax3D(); +const std::vector& paramsAvg3D(); +const std::vector& paramsMax4D(); + +const std::vector& paramsMaxV84D(); +const std::vector& paramsMaxV85D(); + +const std::vector& inputShapes3D(); +const std::vector& inputShapes4D(); +const std::vector& inputShapes4D_Large(); +const std::vector& inputShapes5D(); + +const std::vector& paramsAvg4D(); +const std::vector& paramsAvg4D_Large(); +const std::vector& paramsAvg5D(); +const std::vector& paramsMax5D(); +} // namespace Pooling +} // namespace CPULayerTestsDefinitions \ No newline at end of file diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/common/pooling.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/common/pooling.cpp new file mode 100644 index 00000000000000..e15408a6085b9d --- /dev/null +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/common/pooling.cpp @@ -0,0 +1,181 @@ +// Copyright (C) 2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "single_layer_tests/classes/pooling.hpp" +#include "shared_test_classes/single_layer/pooling.hpp" +#include "test_utils/cpu_test_utils.hpp" +#include "test_utils/fusing_test_utils.hpp" + +using namespace InferenceEngine; +using namespace CPUTestUtils; +using namespace ngraph::helpers; +using namespace ov::test; + +namespace CPULayerTestsDefinitions { +namespace Pooling { + +static CPUSpecificParams expectedCpuConfig() { +#if defined(OPENVINO_ARCH_ARM) || defined(OPENVINO_ARCH_ARM64) + return CPUSpecificParams{{}, {}, {"acl"}, "acl"}; +#else + return CPUSpecificParams{{}, {}, {"ref_any"}, "ref_any"}; +#endif +} +const std::vector vecCpuConfigs = {expectedCpuConfig()}; + +const std::vector paramsAvg3D_RefOnly = { + LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2}, {2}, {2}, {2}, + expectedAvgRoundingType(), ngraph::op::PadType::EXPLICIT, false }, +}; + +INSTANTIATE_TEST_SUITE_P(smoke_MaxPool_CPU_3D, PoolingLayerCPUTest, + ::testing::Combine( + ::testing::ValuesIn(paramsMax3D()), + ::testing::ValuesIn(inputShapes3D()), + ::testing::ValuesIn((inpOutPrecision())), + ::testing::Values(false), + ::testing::ValuesIn(vecCpuConfigs), + ::testing::Values(emptyFusingSpec)), + PoolingLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_AvgPool_CPU_3D, PoolingLayerCPUTest, + ::testing::Combine( + ::testing::ValuesIn(paramsAvg3D()), + ::testing::ValuesIn(inputShapes3D()), + ::testing::ValuesIn((inpOutPrecision())), + ::testing::Values(false), + ::testing::ValuesIn(vecCpuConfigs), + ::testing::Values(emptyFusingSpec)), + PoolingLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_AvgPool_CPU_3D_NotOptimized, PoolingLayerCPUTest, + ::testing::Combine( + ::testing::ValuesIn(paramsAvg3D_RefOnly), + ::testing::ValuesIn(inputShapes3D()), + ::testing::ValuesIn((inpOutPrecision())), + ::testing::Values(false), + 
::testing::Values(expectedCpuConfig()), + ::testing::Values(emptyFusingSpec)), + PoolingLayerCPUTest::getTestCaseName); + +const std::vector paramsAvg4D_RefOnly = { + LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2}, {2, 2}, {2, 2}, {2, 2}, + expectedAvgRoundingType(), ngraph::op::PadType::EXPLICIT, false }, +}; + +INSTANTIATE_TEST_SUITE_P(smoke_MaxPool_CPU_4D, PoolingLayerCPUTest, + ::testing::Combine( + ::testing::ValuesIn(paramsMax4D()), + ::testing::ValuesIn(inputShapes4D()), + ::testing::ValuesIn((inpOutPrecision())), + ::testing::Values(false), + ::testing::ValuesIn(vecCpuConfigs), + ::testing::Values(emptyFusingSpec)), + PoolingLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_MaxPoolV8_CPU_4D, MaxPoolingV8LayerCPUTest, + ::testing::Combine( + ::testing::ValuesIn(paramsMaxV84D()), + ::testing::ValuesIn(inputShapes4D()), + ::testing::ValuesIn((inpOutPrecision())), + ::testing::ValuesIn(vecCpuConfigs)), + MaxPoolingV8LayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_AvgPool_CPU_4D, PoolingLayerCPUTest, + ::testing::Combine( + ::testing::ValuesIn(paramsAvg4D()), + ::testing::ValuesIn(inputShapes4D()), + ::testing::ValuesIn((inpOutPrecision())), + ::testing::Values(false), + ::testing::ValuesIn(vecCpuConfigs), + ::testing::Values(emptyFusingSpec)), + PoolingLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_AvgPool_CPU_4D_NotOptimized, PoolingLayerCPUTest, + ::testing::Combine( + ::testing::ValuesIn(paramsAvg4D_RefOnly), + ::testing::ValuesIn(inputShapes4D()), + ::testing::ValuesIn((inpOutPrecision())), + ::testing::Values(false), + ::testing::Values(expectedCpuConfig()), + ::testing::Values(emptyFusingSpec)), + PoolingLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_AvgPool_CPU_Large, PoolingLayerCPUTest, + ::testing::Combine( + ::testing::ValuesIn(paramsAvg4D_Large()), + ::testing::ValuesIn(inputShapes4D_Large()), + ::testing::ValuesIn((inpOutPrecision())), + ::testing::Values(false), + ::testing::ValuesIn(vecCpuConfigs), + ::testing::Values(emptyFusingSpec)), + PoolingLayerCPUTest::getTestCaseName); + +const std::vector paramsMaxV85D_ref = { + LayerTestsDefinitions::maxPoolV8SpecificParams{ {2, 2, 2}, {1, 1, 1}, {2, 2, 2}, {0, 0, 0}, {0, 0, 0}, + ngraph::element::Type_t::i32, 0, + ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_UPPER }, + LayerTestsDefinitions::maxPoolV8SpecificParams{ {2, 2, 2}, {1, 1, 1}, {2, 2, 2}, {1, 1, 1}, {1, 1, 1}, + ngraph::element::Type_t::i32, 0, + ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT }, + LayerTestsDefinitions::maxPoolV8SpecificParams{ {2, 3, 4}, {2, 2, 2}, {2, 1, 1}, {1, 1, 1}, {1, 2, 2}, + ngraph::element::Type_t::i32, 0, + ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT }, +}; + +const std::vector paramsAvg5D_RefOnly = { + LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {2, 2, 2}, {2, 2, 2}, + expectedAvgRoundingType(), ngraph::op::PadType::EXPLICIT, false }, +}; + +//FIXME: 5D cases are temporarly disabled on ARM because ACL support check in Pooling::getSupportedDescriptors() can't check layout +#if defined(OPENVINO_ARCH_X86) || defined(OPENVINO_ARCH_X86_64) +INSTANTIATE_TEST_SUITE_P(smoke_MaxPool_CPU_5D, PoolingLayerCPUTest, + ::testing::Combine( + ::testing::ValuesIn(paramsMax5D()), + ::testing::ValuesIn(inputShapes5D()), + ::testing::ValuesIn((inpOutPrecision())), + ::testing::Values(false), + ::testing::ValuesIn(vecCpuConfigs), + 
::testing::Values(emptyFusingSpec)), + PoolingLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_MaxPoolV8_CPU_5D, MaxPoolingV8LayerCPUTest, + ::testing::Combine( + ::testing::ValuesIn(paramsMaxV85D()), + ::testing::ValuesIn(inputShapes5D()), + ::testing::ValuesIn((inpOutPrecision())), + ::testing::ValuesIn(vecCpuConfigs)), + MaxPoolingV8LayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_MaxPoolV8_CPU_5D_ref, MaxPoolingV8LayerCPUTest, + ::testing::Combine( + ::testing::ValuesIn(paramsMaxV85D_ref), + ::testing::ValuesIn(inputShapes5D()), + ::testing::ValuesIn((inpOutPrecision())), + ::testing::Values(expectedCpuConfig())), + MaxPoolingV8LayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_AvgPool_CPU_5D, PoolingLayerCPUTest, + ::testing::Combine( + ::testing::ValuesIn(paramsAvg5D()), + ::testing::ValuesIn(inputShapes5D()), + ::testing::ValuesIn((inpOutPrecision())), + ::testing::Values(false), + ::testing::ValuesIn(vecCpuConfigs), + ::testing::Values(emptyFusingSpec)), + PoolingLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_AvgPool_CPU_5D_NotOptimized, PoolingLayerCPUTest, + ::testing::Combine( + ::testing::ValuesIn(paramsAvg5D_RefOnly), + ::testing::ValuesIn(inputShapes5D()), + ::testing::ValuesIn((inpOutPrecision())), + ::testing::Values(false), + ::testing::Values(expectedCpuConfig()), + ::testing::Values(emptyFusingSpec)), + PoolingLayerCPUTest::getTestCaseName); +#endif +} // namespace Pooling +} // namespace CPULayerTestsDefinitions \ No newline at end of file diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/pooling.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/pooling.cpp new file mode 100644 index 00000000000000..89331ea284d49a --- /dev/null +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/pooling.cpp @@ -0,0 +1,148 @@ +// Copyright (C) 2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "single_layer_tests/classes/pooling.hpp" +#include "shared_test_classes/single_layer/pooling.hpp" +#include "test_utils/cpu_test_utils.hpp" +#include "test_utils/fusing_test_utils.hpp" +#include +#include + +using namespace InferenceEngine; +using namespace CPUTestUtils; +using namespace ngraph::helpers; +using namespace ov::test; + + +namespace CPULayerTestsDefinitions { +namespace Pooling { +namespace { + +const auto ref = CPUSpecificParams{{}, {}, {"ref_any"}, "ref_any"}; +const auto avx512 = CPUSpecificParams{{}, {}, {"jit_avx512"}, "jit_avx512"}; +const auto avx = CPUSpecificParams{{}, {}, {"jit_avx"}, "jit_avx"}; +const auto sse42 = CPUSpecificParams{{}, {}, {"jit_sse42"}, "jit_sse42"}; + +const std::vector vecCpuConfigs = {sse42, avx, avx512}; + +const std::vector paramsMaxV84D_ref = { + LayerTestsDefinitions::maxPoolV8SpecificParams{ {2, 2}, {2, 2}, {2, 2}, {0, 0}, {0, 0}, + ngraph::element::Type_t::i32, 0, + ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_UPPER }, + LayerTestsDefinitions::maxPoolV8SpecificParams{ {4, 2}, {2, 2}, {1, 2}, {0, 0}, {0, 0}, + ngraph::element::Type_t::i32, 0, + ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT }, + LayerTestsDefinitions::maxPoolV8SpecificParams{ {4, 2}, {2, 1}, {2, 2}, {0, 0}, {0, 0}, + ngraph::element::Type_t::i32, 0, + ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT }, +}; + +INSTANTIATE_TEST_SUITE_P(smoke_MaxPoolV8_CPU_4D_ref, MaxPoolingV8LayerCPUTest, + ::testing::Combine( + ::testing::ValuesIn(paramsMaxV84D_ref), + 
::testing::ValuesIn(inputShapes4D()), + ::testing::ValuesIn((inpOutPrecision())), + ::testing::Values(ref)), + MaxPoolingV8LayerCPUTest::getTestCaseName); + +const auto avx512_nhwc = CPUSpecificParams{{nhwc}, {nhwc}, {"jit_avx512"}, "jit_avx512"}; +const auto avx512_ndhwc = CPUSpecificParams{{ndhwc}, {ndhwc}, {"jit_avx512"}, "jit_avx512"}; + +const auto avx2_nhwc = CPUSpecificParams{{nhwc}, {nhwc}, {"jit_avx2"}, "jit_avx2"}; +const auto avx2_ndhwc = CPUSpecificParams{{ndhwc}, {ndhwc}, {"jit_avx2"}, "jit_avx2"}; + +const auto sse42_nhwc = CPUSpecificParams{{nhwc}, {nhwc}, {"jit_sse42"}, "jit_sse42"}; +const auto sse42_ndhwc = CPUSpecificParams{{ndhwc}, {ndhwc}, {"jit_sse42"}, "jit_sse42"}; + +const std::vector vecCpuConfigsFusing_4D = {sse42_nhwc, avx2_nhwc, avx512_nhwc}; +const std::vector vecCpuConfigsFusing_5D = {sse42_ndhwc, avx2_ndhwc, avx512_ndhwc}; + +std::vector fusingParamsSet { + emptyFusingSpec, + fusingFakeQuantizePerTensor, + fusingFakeQuantizePerChannel, +}; + +const std::vector inputShapes4D_int8 = { + { {}, {{3, 4, 64, 64}} }, + { {}, {{2, 8, 8, 12}} }, + { {}, {{1, 16, 16, 12}} }, + { {}, {{1, 21, 8, 4}} }, + { {}, {{1, 32, 8, 8}} }, + { + // dynamic + {-1, 32, -1, -1}, + // target + { + {1, 32, 8, 8}, + {1, 32, 8, 4}, + {2, 32, 8, 12}, + {1, 32, 8, 8} + } + }, + { + // dynamic + {{1, 5}, 16, {1, 64}, {1, 64}}, + // target + { + {3, 16, 32, 32}, + {1, 16, 16, 12}, + {1, 16, 8, 8}, + {3, 16, 32, 32}, + } + } +}; + +INSTANTIATE_TEST_SUITE_P(smoke_AvgPool_CPU_4D_I8, PoolingLayerCPUTest, + ::testing::Combine( + ::testing::ValuesIn(paramsAvg4D()), + ::testing::ValuesIn(inputShapes4D_int8), + ::testing::Values(ElementType::f32), + ::testing::Values(true), + ::testing::ValuesIn(filterCPUInfoForDevice(vecCpuConfigsFusing_4D)), + ::testing::ValuesIn(fusingParamsSet)), + PoolingLayerCPUTest::getTestCaseName); + +const std::vector inputShapes5D_int8 = { + { {}, {{1, 4, 16, 16, 16}} }, + { {}, {{2, 8, 8, 8, 8}} }, + { {}, {{2, 16, 12, 16, 20}} }, + { {}, {{1, 19, 16, 20, 8}} }, + { {}, {{1, 32, 16, 8, 12}} }, + { + // dynamic + {-1, 32, -1, -1, -1}, + // target + { + {2, 32, 8, 8, 8}, + {1, 32, 16, 20, 8}, + {1, 32, 16, 16, 16}, + {2, 32, 8, 8, 8} + } + }, + { + // dynamic + {{1, 5}, 16, {1, 64}, {1, 64}, {1, 25}}, + // target + { + {1, 16, 16, 16, 16}, + {1, 16, 16, 8, 12}, + {2, 16, 8, 8, 8}, + {1, 16, 16, 16, 16}, + } + } +}; + +INSTANTIATE_TEST_SUITE_P(smoke_AvgPool_CPU_5D_I8, PoolingLayerCPUTest, + ::testing::Combine( + ::testing::ValuesIn(paramsAvg5D()), + ::testing::ValuesIn(inputShapes5D_int8), + ::testing::Values(ElementType::f32), + ::testing::Values(true), + ::testing::ValuesIn(filterCPUInfoForDevice(vecCpuConfigsFusing_5D)), + ::testing::ValuesIn(fusingParamsSet)), + PoolingLayerCPUTest::getTestCaseName); +} // namespace +} // namespace Pooling +} // namespace CPULayerTestsDefinitions \ No newline at end of file diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/pooling.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/pooling.cpp deleted file mode 100644 index c6a76f7fee9fad..00000000000000 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/pooling.cpp +++ /dev/null @@ -1,704 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ov_models/builders.hpp" -#include "test_utils/cpu_test_utils.hpp" -#include "test_utils/fusing_test_utils.hpp" -#include "shared_test_classes/single_layer/pooling.hpp" -#include "shared_test_classes/base/ov_subgraph.hpp" - -using namespace 
ov::test; -using namespace CPUTestUtils; - -namespace CPULayerTestsDefinitions { - -using poolLayerCpuTestParamsSet = std::tuple; - -using maxPoolV8LayerCpuTestParamsSet = std::tuple; - -class PoolingLayerCPUTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest, public CpuTestWithFusing { -public: - static std::string getTestCaseName(const testing::TestParamInfo& obj) { - LayerTestsDefinitions::poolSpecificParams basicParamsSet; - InputShape inputShapes; - ElementType inPrc; - bool isInt8; - CPUSpecificParams cpuParams; - fusingSpecificParams fusingParams; - std::tie(basicParamsSet, inputShapes, inPrc, isInt8, cpuParams, fusingParams) = obj.param; - - ngraph::helpers::PoolingTypes poolType; - std::vector kernel, stride; - std::vector padBegin, padEnd; - ngraph::op::PadType padType; - ngraph::op::RoundingType roundingType; - bool excludePad; - std::tie(poolType, kernel, stride, padBegin, padEnd, roundingType, padType, excludePad) = basicParamsSet; - - std::ostringstream results; - results << "IS=("; - results << ov::test::utils::partialShape2str({inputShapes.first}) << ")_"; - results << "TS="; - for (const auto& shape : inputShapes.second) { - results << ov::test::utils::vec2str(shape) << "_"; - } - results << "Prc=" << inPrc << "_"; - switch (poolType) { - case ngraph::helpers::PoolingTypes::MAX: - results << "MaxPool_"; - break; - case ngraph::helpers::PoolingTypes::AVG: - results << "AvgPool_"; - results << "ExcludePad=" << excludePad << "_"; - break; - } - results << "K" << ov::test::utils::vec2str(kernel) << "_"; - results << "S" << ov::test::utils::vec2str(stride) << "_"; - results << "PB" << ov::test::utils::vec2str(padBegin) << "_"; - results << "PE" << ov::test::utils::vec2str(padEnd) << "_"; - results << "Rounding=" << roundingType << "_"; - results << "AutoPad=" << padType << "_"; - results << "INT8=" << isInt8 << "_"; - - results << CPUTestsBase::getTestCaseName(cpuParams); - results << CpuTestWithFusing::getTestCaseName(fusingParams); - return results.str(); - } - -protected: - void SetUp() override { - targetDevice = ov::test::utils::DEVICE_CPU; - - LayerTestsDefinitions::poolSpecificParams basicParamsSet; - InputShape inputShapes; - ElementType inPrc; - bool isInt8; - CPUSpecificParams cpuParams; - fusingSpecificParams fusingParams; - std::tie(basicParamsSet, inputShapes, inPrc, isInt8, cpuParams, fusingParams) = this->GetParam(); - - ngraph::helpers::PoolingTypes poolType; - std::vector kernel, stride; - std::vector padBegin, padEnd; - ngraph::op::PadType padType; - ngraph::op::RoundingType roundingType; - bool excludePad; - std::tie(poolType, kernel, stride, padBegin, padEnd, roundingType, padType, excludePad) = basicParamsSet; - - std::tie(inFmts, outFmts, priority, selectedType) = cpuParams; - std::tie(postOpMgrPtr, fusedOps) = fusingParams; - - if (selectedType.empty()) { - selectedType = getPrimitiveType(); - } - if (isInt8) - selectedType = selectedType + "_I8"; - else - selectedType = makeSelectedTypeStr(selectedType, inPrc); - - init_input_shapes({inputShapes}); - - ov::ParameterVector params; - for (auto&& shape : inputDynamicShapes) { - params.push_back(std::make_shared(inPrc, shape)); - } - std::shared_ptr poolInput = params[0]; - if (isInt8) { - ov::Shape newShape(poolInput->get_output_partial_shape(0).size(), 1); - poolInput = ngraph::builder::makeFakeQuantize(poolInput, inPrc, 256, newShape); - } - - std::shared_ptr pooling = ngraph::builder::makePooling(poolInput, - stride, - padBegin, - padEnd, - kernel, - roundingType, - padType, - 
excludePad, - poolType); - - function = makeNgraphFunction(inPrc, params, pooling, "PoolingCPU"); - } -}; - -class MaxPoolingV8LayerCPUTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest, public CPUTestsBase { -public: - static std::string getTestCaseName(const testing::TestParamInfo& obj) { - LayerTestsDefinitions::maxPoolV8SpecificParams basicParamsSet; - InputShape inputShapes; - ElementType inPrc; - CPUSpecificParams cpuParams; - std::tie(basicParamsSet, inputShapes, inPrc, cpuParams) = obj.param; - - std::vector kernel, stride, dilation; - std::vector padBegin, padEnd; - ngraph::op::PadType padType; - ngraph::op::RoundingType roundingType; - ngraph::element::Type indexElementType; - int64_t axis; - std::tie(kernel, stride, dilation, padBegin, padEnd, indexElementType, axis, roundingType, padType) = basicParamsSet; - - std::ostringstream results; - results << "IS=("; - results << ov::test::utils::partialShape2str({inputShapes.first}) << ")_"; - results << "TS="; - for (const auto& shape : inputShapes.second) { - results << ov::test::utils::vec2str(shape) << "_"; - } - results << "Prc=" << inPrc << "_"; - results << "MaxPool_"; - results << "K" << ov::test::utils::vec2str(kernel) << "_"; - results << "S" << ov::test::utils::vec2str(stride) << "_"; - results << "D" << ov::test::utils::vec2str(dilation) << "_"; - results << "PB" << ov::test::utils::vec2str(padBegin) << "_"; - results << "PE" << ov::test::utils::vec2str(padEnd) << "_"; - results << "Rounding=" << roundingType << "_"; - results << "AutoPad=" << padType << "_"; - - results << CPUTestsBase::getTestCaseName(cpuParams); - return results.str(); - } - -protected: - void SetUp() override { - targetDevice = ov::test::utils::DEVICE_CPU; - - LayerTestsDefinitions::maxPoolV8SpecificParams basicParamsSet; - InputShape inputShapes; - ElementType inPrc; - CPUSpecificParams cpuParams; - std::tie(basicParamsSet, inputShapes, inPrc, cpuParams) = this->GetParam(); - - std::vector kernel, stride, dilation; - std::vector padBegin, padEnd; - ngraph::op::PadType padType; - ngraph::op::RoundingType roundingType; - ngraph::element::Type indexElementType; - int64_t axis; - std::tie(kernel, stride, dilation, padBegin, padEnd, indexElementType, axis, roundingType, padType) = basicParamsSet; - std::tie(inFmts, outFmts, priority, selectedType) = cpuParams; - if (selectedType.empty()) { - selectedType = getPrimitiveType(); - } - selectedType = makeSelectedTypeStr(selectedType, inPrc); - - init_input_shapes({inputShapes}); - - ov::ParameterVector params; - for (auto&& shape : inputDynamicShapes) { - params.push_back(std::make_shared(inPrc, shape)); - } - std::shared_ptr pooling = ngraph::builder::makeMaxPoolingV8(params[0], stride, dilation, padBegin, padEnd, - kernel, roundingType, padType, - indexElementType, axis); - pooling->get_rt_info() = getCPUInfo(); - ngraph::ResultVector results{std::make_shared(pooling->output(0))}; - function = std::make_shared(results, params, "MaxPooling"); - } -}; - -TEST_P(PoolingLayerCPUTest, CompareWithRefs) { - run(); - CheckPluginRelatedResults(compiledModel, "Pooling"); -} - -TEST_P(MaxPoolingV8LayerCPUTest, CompareWithRefs) { - run(); - CheckPluginRelatedResults(compiledModel, "Pooling"); -} - -namespace { - -const auto avx512 = CPUSpecificParams{{}, {}, {"jit_avx512"}, "jit_avx512"}; -const auto avx = CPUSpecificParams{{}, {}, {"jit_avx"}, "jit_avx"}; -const auto sse42 = CPUSpecificParams{{}, {}, {"jit_sse42"}, "jit_sse42"}; -const auto ref = CPUSpecificParams{{}, {}, {"ref_any"}, 
"ref_any"}; - -const std::vector vecCpuConfigs = {ref, sse42, avx, avx512}; -const std::vector inpOutPrecision = {ElementType::f32/*, ElementType::bf16*/}; - -const std::vector inputShapes3D = { - { {}, {{3, 4, 64}} }, - { {}, {{2, 8, 12}} }, - { {}, {{1, 16, 12}} }, - { {}, {{1, 21, 4}} }, - { {}, {{1, 32, 8}} }, - { - // dynamic - {-1, -1, -1}, - // target - { - {1, 32, 8}, - {1, 21, 4}, - {2, 8, 12} - } - }, - { - // dynamic - {{1, 5}, {4, 32}, {1, 64}}, - // target - { - {3, 4, 64}, - {1, 16, 12}, - {1, 32, 8} - } - } -}; - -const std::vector inputShapes4D = { - { {}, {{3, 4, 64, 64}} }, - { {}, {{2, 8, 8, 12}} }, - { {}, {{1, 16, 16, 12}} }, - { {}, {{1, 21, 8, 4}} }, - { {}, {{1, 32, 8, 8}} }, - { - // dynamic - {-1, -1, -1, -1}, - // target - { - {1, 32, 8, 8}, - {1, 21, 8, 4}, - {2, 8, 8, 12}, - {1, 96, 125, 125} - } - }, - { - // dynamic - {{1, 5}, {4, 32}, {1, 64}, {1, 64}}, - // target - { - {3, 4, 64, 64}, - {1, 16, 16, 12}, - {1, 32, 8, 8} - } - }, - { - // dynamic - {{1, 10}, 16, 8, 8}, - // target - { - {1, 16, 8, 8}, - {2, 16, 8, 8}, - } - } -}; - -const std::vector inputShapes5D = { - { {}, {{1, 4, 16, 16, 16}} }, - { {}, {{2, 8, 8, 8, 8}} }, - { {}, {{2, 16, 12, 16, 20}} }, - { {}, {{1, 19, 16, 20, 8}} }, - { {}, {{1, 32, 16, 8, 12}} }, - { - // dynamic - {-1, -1, -1, -1, -1}, - // target - { - {2, 8, 8, 8, 8}, - {1, 19, 16, 20, 8}, - {1, 4, 16, 16, 16} - } - }, - { - // dynamic - {{1, 5}, {4, 32}, {1, 64}, {1, 64}, {1, 25}}, - // target - { - {1, 4, 16, 16, 16}, - {1, 32, 16, 8, 12}, - {3, 16, 4, 8, 3} - } - } -}; - -/* ============= Pooling (1D) ============= */ -const std::vector paramsMax3D = { - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {2}, {2}, {0}, {0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {4}, {2}, {0}, {0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {2}, {1}, {0}, {0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false }, -}; - -const std::vector paramsAvg3D = { - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {3}, {1}, {1}, {0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_UPPER, false }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {3}, {1}, {1}, {0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, true }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {4}, {4}, {2}, {2}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, true }, -}; - -const std::vector paramsAvg3D_RefOnly = { - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2}, {2}, {2}, {2}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false }, -}; - -INSTANTIATE_TEST_SUITE_P(smoke_MaxPool_CPU_3D, PoolingLayerCPUTest, - ::testing::Combine( - ::testing::ValuesIn(paramsMax3D), - ::testing::ValuesIn(inputShapes3D), - ::testing::ValuesIn(inpOutPrecision), - ::testing::Values(false), - ::testing::ValuesIn(filterCPUInfoForDevice(vecCpuConfigs)), - ::testing::Values(emptyFusingSpec)), - PoolingLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_AvgPool_CPU_3D, PoolingLayerCPUTest, - ::testing::Combine( - ::testing::ValuesIn(paramsAvg3D), - ::testing::ValuesIn(inputShapes3D), - ::testing::ValuesIn(inpOutPrecision), - 
::testing::Values(false), - ::testing::ValuesIn(filterCPUInfoForDevice(vecCpuConfigs)), - ::testing::Values(emptyFusingSpec)), - PoolingLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_AvgPool_CPU_3D_NotOptimized, PoolingLayerCPUTest, - ::testing::Combine( - ::testing::ValuesIn(paramsAvg3D_RefOnly), - ::testing::ValuesIn(inputShapes3D), - ::testing::ValuesIn(inpOutPrecision), - ::testing::Values(false), - ::testing::Values(ref), - ::testing::Values(emptyFusingSpec)), - PoolingLayerCPUTest::getTestCaseName); - -/* ============= Pooling (2D) ============= */ -const std::vector paramsMax4D = { - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {2, 2}, {2, 2}, {0, 0}, {0, 0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_LOWER, false }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {2, 2}, {2, 2}, {0, 0}, {0, 0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_UPPER, false }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {4, 2}, {2, 2}, {0, 0}, {0, 0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {4, 2}, {2, 1}, {0, 0}, {0, 0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false }, -}; - -const std::vector paramsMaxV84D = { - LayerTestsDefinitions::maxPoolV8SpecificParams{ {2, 2}, {2, 2}, {1, 1}, {0, 0}, {0, 0}, - ngraph::element::Type_t::i32, 0, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_LOWER }, -}; - -const std::vector paramsMaxV84D_ref = { - LayerTestsDefinitions::maxPoolV8SpecificParams{ {2, 2}, {2, 2}, {2, 2}, {0, 0}, {0, 0}, - ngraph::element::Type_t::i32, 0, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_UPPER }, - LayerTestsDefinitions::maxPoolV8SpecificParams{ {4, 2}, {2, 2}, {1, 2}, {0, 0}, {0, 0}, - ngraph::element::Type_t::i32, 0, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT }, - LayerTestsDefinitions::maxPoolV8SpecificParams{ {4, 2}, {2, 1}, {2, 2}, {0, 0}, {0, 0}, - ngraph::element::Type_t::i32, 0, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT }, -}; - -const std::vector paramsAvg4D = { - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2}, {2, 2}, {1, 0}, {0, 0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_LOWER, true }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2}, {2, 2}, {1, 0}, {0, 0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_UPPER, true }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2}, {2, 2}, {1, 0}, {0, 0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_LOWER, false }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2}, {2, 2}, {1, 0}, {0, 0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_UPPER, false }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2}, {2, 2}, {0, 0}, {0, 0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, true }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {4, 4}, {4, 4}, {2, 2}, {2, 2}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, true }, -}; - -const std::vector paramsAvg4D_RefOnly = { - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2}, {2, 2}, {2, 2}, {2, 
2}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false }, -}; - -INSTANTIATE_TEST_SUITE_P(smoke_MaxPool_CPU_4D, PoolingLayerCPUTest, - ::testing::Combine( - ::testing::ValuesIn(paramsMax4D), - ::testing::ValuesIn(inputShapes4D), - ::testing::ValuesIn(inpOutPrecision), - ::testing::Values(false), - ::testing::ValuesIn(filterCPUInfoForDevice(vecCpuConfigs)), - ::testing::Values(emptyFusingSpec)), - PoolingLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_MaxPoolV8_CPU_4D, MaxPoolingV8LayerCPUTest, - ::testing::Combine( - ::testing::ValuesIn(paramsMaxV84D), - ::testing::ValuesIn(inputShapes4D), - ::testing::ValuesIn(inpOutPrecision), - ::testing::ValuesIn(filterCPUInfoForDevice(vecCpuConfigs))), - MaxPoolingV8LayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_MaxPoolV8_CPU_4D_ref, MaxPoolingV8LayerCPUTest, - ::testing::Combine( - ::testing::ValuesIn(paramsMaxV84D_ref), - ::testing::ValuesIn(inputShapes4D), - ::testing::ValuesIn(inpOutPrecision), - ::testing::Values(ref)), - MaxPoolingV8LayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_AvgPool_CPU_4D, PoolingLayerCPUTest, - ::testing::Combine( - ::testing::ValuesIn(paramsAvg4D), - ::testing::ValuesIn(inputShapes4D), - ::testing::ValuesIn(inpOutPrecision), - ::testing::Values(false), - ::testing::ValuesIn(filterCPUInfoForDevice(vecCpuConfigs)), - ::testing::Values(emptyFusingSpec)), - PoolingLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_AvgPool_CPU_4D_NotOptimized, PoolingLayerCPUTest, - ::testing::Combine( - ::testing::ValuesIn(paramsAvg4D_RefOnly), - ::testing::ValuesIn(inputShapes4D), - ::testing::ValuesIn(inpOutPrecision), - ::testing::Values(false), - ::testing::Values(ref), - ::testing::Values(emptyFusingSpec)), - PoolingLayerCPUTest::getTestCaseName); - -const std::vector paramsAvg4D_Large = { - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {65, 65}, {65, 65}, {0, 0}, {0, 0}, - ngraph::op::RoundingType::FLOOR, ngraph::op::PadType::VALID, true }, -}; - -const std::vector inputShapes4D_Large = { - { - // dynamic - {-1, -1, -1, -1}, - // target - { - {1, 16, 65, 65}, - {1, 8, 130, 130}, - {1, 16, 65, 65} - } - }, -}; - -INSTANTIATE_TEST_SUITE_P(smoke_AvgPool_CPU_Large, PoolingLayerCPUTest, - ::testing::Combine( - ::testing::ValuesIn(paramsAvg4D_Large), - ::testing::ValuesIn(inputShapes4D_Large), - ::testing::ValuesIn(inpOutPrecision), - ::testing::Values(false), - ::testing::ValuesIn(filterCPUInfoForDevice(vecCpuConfigs)), - ::testing::Values(emptyFusingSpec)), - PoolingLayerCPUTest::getTestCaseName); - -/* ============= Pooling (3D) ============= */ -const std::vector paramsMax5D = { - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {2, 2, 2}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_LOWER, false }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {2, 2, 2}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_UPPER, false }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {2, 2, 2}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {3, 3, 3}, {2, 2, 2}, {1, 1, 1}, {1, 1, 1}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false }, -}; - -const std::vector paramsMaxV85D = { - 
LayerTestsDefinitions::maxPoolV8SpecificParams{ {2, 2, 2}, {1, 1, 1}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0}, - ngraph::element::Type_t::i32, 0, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_LOWER }, -}; - -const std::vector paramsMaxV85D_ref = { - LayerTestsDefinitions::maxPoolV8SpecificParams{ {2, 2, 2}, {1, 1, 1}, {2, 2, 2}, {0, 0, 0}, {0, 0, 0}, - ngraph::element::Type_t::i32, 0, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_UPPER }, - LayerTestsDefinitions::maxPoolV8SpecificParams{ {2, 2, 2}, {1, 1, 1}, {2, 2, 2}, {1, 1, 1}, {1, 1, 1}, - ngraph::element::Type_t::i32, 0, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT }, - LayerTestsDefinitions::maxPoolV8SpecificParams{ {2, 3, 4}, {2, 2, 2}, {2, 1, 1}, {1, 1, 1}, {1, 2, 2}, - ngraph::element::Type_t::i32, 0, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT }, -}; - -const std::vector paramsAvg5D = { - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {1, 0, 0}, {0, 0, 0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_LOWER, true }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {1, 0, 0}, {0, 0, 0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_UPPER, true }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {1, 0, 0}, {0, 0, 0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_LOWER, false }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {1, 0, 0}, {0, 0, 0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_UPPER, false }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {0, 0, 0}, {0, 0, 0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, true }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {3, 3, 3}, {3, 3, 3}, {1, 1, 1}, {0, 0, 0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, true }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {4, 4, 4}, {2, 2, 2}, {2, 2, 2}, {2, 2, 2}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, true }, -}; - -const std::vector paramsAvg5D_RefOnly = { - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {2, 2, 2}, {2, 2, 2}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false }, -}; - -INSTANTIATE_TEST_SUITE_P(smoke_MaxPool_CPU_5D, PoolingLayerCPUTest, - ::testing::Combine( - ::testing::ValuesIn(paramsMax5D), - ::testing::ValuesIn(inputShapes5D), - ::testing::ValuesIn(inpOutPrecision), - ::testing::Values(false), - ::testing::ValuesIn(filterCPUInfoForDevice(vecCpuConfigs)), - ::testing::Values(emptyFusingSpec)), - PoolingLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_MaxPoolV8_CPU_5D, MaxPoolingV8LayerCPUTest, - ::testing::Combine( - ::testing::ValuesIn(paramsMaxV85D), - ::testing::ValuesIn(inputShapes5D), - ::testing::ValuesIn(inpOutPrecision), - ::testing::ValuesIn(filterCPUInfoForDevice(vecCpuConfigs))), - MaxPoolingV8LayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_MaxPoolV8_CPU_5D_ref, MaxPoolingV8LayerCPUTest, - ::testing::Combine( - ::testing::ValuesIn(paramsMaxV85D_ref), - ::testing::ValuesIn(inputShapes5D), - ::testing::ValuesIn(inpOutPrecision), - ::testing::Values(ref)), - 
MaxPoolingV8LayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_AvgPool_CPU_5D, PoolingLayerCPUTest, - ::testing::Combine( - ::testing::ValuesIn(paramsAvg5D), - ::testing::ValuesIn(inputShapes5D), - ::testing::ValuesIn(inpOutPrecision), - ::testing::Values(false), - ::testing::ValuesIn(filterCPUInfoForDevice(vecCpuConfigs)), - ::testing::Values(emptyFusingSpec)), - PoolingLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_AvgPool_CPU_5D_NotOptimized, PoolingLayerCPUTest, - ::testing::Combine( - ::testing::ValuesIn(paramsAvg5D_RefOnly), - ::testing::ValuesIn(inputShapes5D), - ::testing::ValuesIn(inpOutPrecision), - ::testing::Values(false), - ::testing::Values(ref), - ::testing::Values(emptyFusingSpec)), - PoolingLayerCPUTest::getTestCaseName); - -/* === Fusing === */ - -const auto avx512_nhwc = CPUSpecificParams{{nhwc}, {nhwc}, {"jit_avx512"}, "jit_avx512"}; -const auto avx512_ndhwc = CPUSpecificParams{{ndhwc}, {ndhwc}, {"jit_avx512"}, "jit_avx512"}; - -const auto avx2_nhwc = CPUSpecificParams{{nhwc}, {nhwc}, {"jit_avx2"}, "jit_avx2"}; -const auto avx2_ndhwc = CPUSpecificParams{{ndhwc}, {ndhwc}, {"jit_avx2"}, "jit_avx2"}; - -const auto sse42_nhwc = CPUSpecificParams{{nhwc}, {nhwc}, {"jit_sse42"}, "jit_sse42"}; -const auto sse42_ndhwc = CPUSpecificParams{{ndhwc}, {ndhwc}, {"jit_sse42"}, "jit_sse42"}; - -const std::vector vecCpuConfigsFusing_4D = {sse42_nhwc, avx2_nhwc, avx512_nhwc}; -const std::vector vecCpuConfigsFusing_5D = {sse42_ndhwc, avx2_ndhwc, avx512_ndhwc}; - -std::vector fusingParamsSet { - emptyFusingSpec, - fusingFakeQuantizePerTensor, - fusingFakeQuantizePerChannel, -}; - -const std::vector inputShapes4D_int8 = { - { {}, {{3, 4, 64, 64}} }, - { {}, {{2, 8, 8, 12}} }, - { {}, {{1, 16, 16, 12}} }, - { {}, {{1, 21, 8, 4}} }, - { {}, {{1, 32, 8, 8}} }, - { - // dynamic - {-1, 32, -1, -1}, - // target - { - {1, 32, 8, 8}, - {1, 32, 8, 4}, - {2, 32, 8, 12}, - {1, 32, 8, 8} - } - }, - { - // dynamic - {{1, 5}, 16, {1, 64}, {1, 64}}, - // target - { - {3, 16, 32, 32}, - {1, 16, 16, 12}, - {1, 16, 8, 8}, - {3, 16, 32, 32}, - } - } -}; - -INSTANTIATE_TEST_SUITE_P(smoke_AvgPool_CPU_4D_I8, PoolingLayerCPUTest, - ::testing::Combine( - ::testing::ValuesIn(paramsAvg4D), - ::testing::ValuesIn(inputShapes4D_int8), - ::testing::Values(ElementType::f32), - ::testing::Values(true), - ::testing::ValuesIn(filterCPUInfoForDevice(vecCpuConfigsFusing_4D)), - ::testing::ValuesIn(fusingParamsSet)), - PoolingLayerCPUTest::getTestCaseName); - -const std::vector inputShapes5D_int8 = { - { {}, {{1, 4, 16, 16, 16}} }, - { {}, {{2, 8, 8, 8, 8}} }, - { {}, {{2, 16, 12, 16, 20}} }, - { {}, {{1, 19, 16, 20, 8}} }, - { {}, {{1, 32, 16, 8, 12}} }, - { - // dynamic - {-1, 32, -1, -1, -1}, - // target - { - {2, 32, 8, 8, 8}, - {1, 32, 16, 20, 8}, - {1, 32, 16, 16, 16}, - {2, 32, 8, 8, 8} - } - }, - { - // dynamic - {{1, 5}, 16, {1, 64}, {1, 64}, {1, 25}}, - // target - { - {1, 16, 16, 16, 16}, - {1, 16, 16, 8, 12}, - {2, 16, 8, 8, 8}, - {1, 16, 16, 16, 16}, - } - } -}; - -INSTANTIATE_TEST_SUITE_P(smoke_AvgPool_CPU_5D_I8, PoolingLayerCPUTest, - ::testing::Combine( - ::testing::ValuesIn(paramsAvg5D), - ::testing::ValuesIn(inputShapes5D_int8), - ::testing::Values(ElementType::f32), - ::testing::Values(true), - ::testing::ValuesIn(filterCPUInfoForDevice(vecCpuConfigsFusing_5D)), - ::testing::ValuesIn(fusingParamsSet)), - PoolingLayerCPUTest::getTestCaseName); - -} // namespace - -} // namespace CPULayerTestsDefinitions From 8fff47caf923c61a2f70f6ec5069e90a045411d7 Mon Sep 17 00:00:00 2001 
From: Vitaliy Urusovskij Date: Tue, 17 Oct 2023 18:35:19 +0400 Subject: [PATCH 232/257] `ROIPooling`, `ROIAlign` layer tests to API2.0 (#20086) * `ROIPoolingLayerTest` to API2.0 * `ROIAlignLayerTest` to API2.0 --- .../single_layer_tests/roi_align.cpp | 39 ++-- .../single_layer_tests/roi_pooling.cpp | 74 ++++--- .../include/single_op_tests/roi_align.hpp | 19 ++ .../include/single_op_tests/roi_pooling.hpp | 15 ++ .../single_op/roi_align.hpp | 54 +++++ .../single_op/roi_pooling.hpp | 34 +++ .../src/base/utils/generate_inputs.cpp | 18 +- .../src/single_op/roi_align.cpp | 203 ++++++++++++++++++ .../src/single_op/roi_pooling.cpp | 74 +++++++ 9 files changed, 472 insertions(+), 58 deletions(-) create mode 100644 src/tests/functional/plugin/shared/include/single_op_tests/roi_align.hpp create mode 100644 src/tests/functional/plugin/shared/include/single_op_tests/roi_pooling.hpp create mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/roi_align.hpp create mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/roi_pooling.hpp create mode 100644 src/tests/functional/shared_test_classes/src/single_op/roi_align.cpp create mode 100644 src/tests/functional/shared_test_classes/src/single_op/roi_pooling.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/roi_align.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/roi_align.cpp index 7009da247635e1..58eaa5174ee107 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/roi_align.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/roi_align.cpp @@ -4,30 +4,32 @@ #include -#include "single_layer_tests/roi_align.hpp" +#include "single_op_tests/roi_align.hpp" #include "common_test_utils/test_constants.hpp" -using namespace LayerTestsDefinitions; +using ov::test::ROIAlignLayerTest; +namespace { -const std::vector netPRCs = { - InferenceEngine::Precision::FP16, - InferenceEngine::Precision::FP32 +const std::vector model_types = { + ov::element::f16, + ov::element::f32 }; const auto ROIAlignCases_average = ::testing::Combine( ::testing::ValuesIn( - std::vector> { - { 3, 8, 16, 16 }, - { 2, 1, 16, 16 }, - { 2, 1, 8, 16 }}), - ::testing::Values(std::vector{ 2, 4 }), + ov::test::static_shapes_to_test_representation( + std::vector>{ + {{ 3, 8, 16, 16 }}, + {{ 2, 1, 16, 16 }}, + {{ 2, 1, 8, 16 }}})), + ::testing::Values(ov::Shape{ 2, 4 }), ::testing::Values(2), ::testing::Values(2), ::testing::ValuesIn(std::vector { 1, 0.625 }), ::testing::Values(2), ::testing::Values("avg"), - ::testing::ValuesIn(netPRCs), + ::testing::ValuesIn(model_types), ::testing::Values(ov::test::utils::DEVICE_CPU) ); @@ -35,18 +37,21 @@ INSTANTIATE_TEST_SUITE_P(smoke_TestsROIAlign_average, ROIAlignLayerTest, ROIAlig const auto ROIAlignCases_max = ::testing::Combine( ::testing::ValuesIn( - std::vector> { - { 2, 8, 20, 20 }, - { 2, 1, 20, 20 }, - { 2, 1, 10, 20 }}), - ::testing::Values(std::vector{ 2, 4 }), + ov::test::static_shapes_to_test_representation( + std::vector>{ + {{ 2, 8, 20, 20 }}, + {{ 2, 1, 20, 20 }}, + {{ 2, 1, 10, 20 }}})), + ::testing::Values(ov::Shape{ 2, 4 }), ::testing::Values(2), ::testing::Values(2), ::testing::ValuesIn(std::vector { 1, 0.625 }), ::testing::Values(2), ::testing::Values("max"), - ::testing::ValuesIn(netPRCs), + ::testing::ValuesIn(model_types), ::testing::Values(ov::test::utils::DEVICE_CPU) ); 
INSTANTIATE_TEST_SUITE_P(smoke_TestsROIAlign_max, ROIAlignLayerTest, ROIAlignCases_max, ROIAlignLayerTest::getTestCaseName); + +} // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/roi_pooling.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/roi_pooling.cpp index ff2be946948c6a..ca68bc6fa4d5ea 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/roi_pooling.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/roi_pooling.cpp @@ -4,62 +4,72 @@ #include -#include "single_layer_tests/roi_pooling.hpp" +#include "single_op_tests/roi_pooling.hpp" #include "common_test_utils/test_constants.hpp" -using namespace LayerTestsDefinitions; +using ov::test::ROIPoolingLayerTest; -const std::vector> inShapes = { - {1, 3, 8, 8}, - {3, 4, 50, 50} +namespace { + +const std::vector param_shapes = { + {{1, 3, 8, 8}}, + {{3, 4, 50, 50}} }; -const std::vector> pooledShapes_max = { - {1, 1}, - {2, 2}, - {3, 3}, - {6, 6} +const std::vector coord_shapes = { + {{1, 5}}, + {{3, 5}}, + {{5, 5}} }; -const std::vector> pooledShapes_bilinear = { - {1, 1}, - {2, 2}, - {3, 3}, - {6, 6} +auto input_shapes = [](const std::vector& in1, const std::vector& in2) { + std::vector> res; + for (const auto& sh1 : in1) + for (const auto& sh2 : in2) + res.push_back(ov::test::static_shapes_to_test_representation({sh1, sh2})); + return res; +}(param_shapes, coord_shapes); + +const std::vector pooled_shapes_max = { + {{1, 1}}, + {{2, 2}}, + {{3, 3}}, + {{6, 6}} }; -const std::vector> coordShapes = { - {1, 5}, - {3, 5}, - {5, 5} +const std::vector pooled_shapes_bilinear = { + {{1, 1}}, + {{2, 2}}, + {{3, 3}}, + {{6, 6}} }; -const std::vector netPRCs = { - InferenceEngine::Precision::FP16, - InferenceEngine::Precision::FP32 +const std::vector model_types = { + ov::element::f16, + ov::element::f32 }; const std::vector spatial_scales = {0.625f, 1.f}; const auto test_ROIPooling_max = ::testing::Combine( - ::testing::ValuesIn(inShapes), - ::testing::ValuesIn(coordShapes), - ::testing::ValuesIn(pooledShapes_max), + ::testing::ValuesIn(input_shapes), + ::testing::ValuesIn(pooled_shapes_max), ::testing::ValuesIn(spatial_scales), - ::testing::Values(ngraph::helpers::ROIPoolingTypes::ROI_MAX), - ::testing::ValuesIn(netPRCs), + ::testing::Values(ov::test::utils::ROIPoolingTypes::ROI_MAX), + ::testing::ValuesIn(model_types), ::testing::Values(ov::test::utils::DEVICE_CPU) ); const auto test_ROIPooling_bilinear = ::testing::Combine( - ::testing::ValuesIn(inShapes), - ::testing::ValuesIn(coordShapes), - ::testing::ValuesIn(pooledShapes_bilinear), + ::testing::ValuesIn(input_shapes), + ::testing::ValuesIn(pooled_shapes_bilinear), ::testing::Values(spatial_scales[1]), - ::testing::Values(ngraph::helpers::ROIPoolingTypes::ROI_BILINEAR), - ::testing::ValuesIn(netPRCs), + ::testing::Values(ov::test::utils::ROIPoolingTypes::ROI_BILINEAR), + ::testing::ValuesIn(model_types), ::testing::Values(ov::test::utils::DEVICE_CPU) ); INSTANTIATE_TEST_SUITE_P(smoke_TestsROIPooling_max, ROIPoolingLayerTest, test_ROIPooling_max, ROIPoolingLayerTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_TestsROIPooling_bilinear, ROIPoolingLayerTest, test_ROIPooling_bilinear, ROIPoolingLayerTest::getTestCaseName); + +} // namespace diff --git a/src/tests/functional/plugin/shared/include/single_op_tests/roi_align.hpp b/src/tests/functional/plugin/shared/include/single_op_tests/roi_align.hpp new file mode 100644 
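One detail left implicit in the roi_pooling.cpp instances above: the coord_shapes values {1, 5}, {3, 5} and {5, 5} follow ROIPooling's convention of a [num_rois, 5] second input, where each row carries a batch index followed by the box corners. The sketch below is a hedged reminder of that layout, not code from the patch.

// Assumed layout of one ROI row for ROIPooling's second input.
#include <vector>

struct RoiRowSketch {
    float batch_index;     // which image in the batch the ROI belongs to
    float x1, y1, x2, y2;  // box corners in input-image coordinates
};

// Flattening N such rows yields the data for a tensor of shape {N, 5},
// matching the {1, 5} / {3, 5} / {5, 5} coord shapes listed above.
std::vector<float> flatten_rois(const std::vector<RoiRowSketch>& rois) {
    std::vector<float> data;
    for (const auto& r : rois)
        data.insert(data.end(), {r.batch_index, r.x1, r.y1, r.x2, r.y2});
    return data;
}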
index 00000000000000..96e1bbeeeac639 --- /dev/null +++ b/src/tests/functional/plugin/shared/include/single_op_tests/roi_align.hpp @@ -0,0 +1,19 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "shared_test_classes/single_op/roi_align.hpp" + +namespace ov { +namespace test { +TEST_P(ROIAlignLayerTest, Inference) { + run(); +} + +TEST_P(ROIAlignV9LayerTest, Inference) { + run(); +} +} // namespace test +} // namespace ov diff --git a/src/tests/functional/plugin/shared/include/single_op_tests/roi_pooling.hpp b/src/tests/functional/plugin/shared/include/single_op_tests/roi_pooling.hpp new file mode 100644 index 00000000000000..3b89b91aa15504 --- /dev/null +++ b/src/tests/functional/plugin/shared/include/single_op_tests/roi_pooling.hpp @@ -0,0 +1,15 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "shared_test_classes/single_op/roi_pooling.hpp" + +namespace ov { +namespace test { +TEST_P(ROIPoolingLayerTest, Inference) { + run(); +} +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/roi_align.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/roi_align.hpp new file mode 100644 index 00000000000000..57257c7154a829 --- /dev/null +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/roi_align.hpp @@ -0,0 +1,54 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "shared_test_classes/base/ov_subgraph.hpp" + +namespace ov { +namespace test { +using roialignParams = std::tuple< + std::vector, // Feature map shape + ov::Shape, // Proposal coords shape + int, // Bin's row count + int, // Bin's column count + float, // Spatial scale + int, // Pooling ratio + std::string, // Pooling mode + ov::element::Type, // Model type + ov::test::TargetDevice>; // Device name + +class ROIAlignLayerTest : public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseTest { +public: + static std::string getTestCaseName(const testing::TestParamInfo& obj); + static void fillCoordTensor(std::vector& coords, int height, int width, + float spatialScale, int pooledRatio, int pooledH, int pooledW); + static void fillIdxTensor(std::vector& idx, int batchSize); + +protected: + void SetUp() override; +}; + +using roialignV9Params = std::tuple< + std::vector, // Feature map shape + ov::Shape, // Proposal coords shape + int, // Bin's row count + int, // Bin's column count + float, // Spatial scale + int, // Pooling ratio + std::string, // Pooling mode + std::string, // ROI aligned mode + ov::element::Type, // Model type + ov::test::TargetDevice>; // Device name +class ROIAlignV9LayerTest : public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseTest { +public: + static std::string getTestCaseName(const testing::TestParamInfo& obj); + +protected: + void SetUp() override; +}; +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/roi_pooling.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/roi_pooling.hpp new file mode 100644 index 00000000000000..ecd714f6ef511a --- /dev/null +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/roi_pooling.hpp @@ -0,0 +1,34 @@ +// Copyright (C) 2018-2023 Intel Corporation 
+// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include +#include + +#include "shared_test_classes/base/ov_subgraph.hpp" +#include "common_test_utils/test_enums.hpp" + +namespace ov { +namespace test { +using roiPoolingParamsTuple = std::tuple< + std::vector, // Input, coords shapes + ov::Shape, // Pooled shape {pooled_h, pooled_w} + float, // Spatial scale + ov::test::utils::ROIPoolingTypes, // ROIPooling method + ov::element::Type, // Model type + ov::test::TargetDevice>; // Device name + +class ROIPoolingLayerTest : public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseTest { +public: + static std::string getTestCaseName(const testing::TestParamInfo& obj); + +protected: + void SetUp() override; +}; +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/src/base/utils/generate_inputs.cpp b/src/tests/functional/shared_test_classes/src/base/utils/generate_inputs.cpp index 139678a602fd21..0d8909c46581e0 100644 --- a/src/tests/functional/shared_test_classes/src/base/utils/generate_inputs.cpp +++ b/src/tests/functional/shared_test_classes/src/base/utils/generate_inputs.cpp @@ -9,7 +9,7 @@ #include "common_test_utils/ov_tensor_utils.hpp" -#include "shared_test_classes/single_layer/roi_align.hpp" +#include "shared_test_classes/single_op/roi_align.hpp" #include "shared_test_classes/single_layer/psroi_pooling.hpp" #include "shared_test_classes/base/utils/generate_inputs.hpp" #include "shared_test_classes/base/utils/ranges.hpp" @@ -537,13 +537,13 @@ ov::runtime::Tensor generate(const std::shared_ptr& node, if (node->get_sampling_ratio() != 0) { const auto &inputShape = node->get_input_shape(0); std::vector blobData(node->get_shape()[0] * 4); - LayerTestsDefinitions::ROIAlignLayerTest::fillCoordTensor(blobData, - inputShape[2], - inputShape[3], - node->get_spatial_scale(), - node->get_sampling_ratio(), - node->get_pooled_h(), - node->get_pooled_w()); + ov::test::ROIAlignLayerTest::fillCoordTensor(blobData, + inputShape[2], + inputShape[3], + node->get_spatial_scale(), + node->get_sampling_ratio(), + node->get_pooled_h(), + node->get_pooled_w()); return ov::test::utils::create_tensor(ov::element::f32, targetShape, blobData); } else { return generate(std::dynamic_pointer_cast(node), port, elemType, targetShape); @@ -551,7 +551,7 @@ ov::runtime::Tensor generate(const std::shared_ptr& node, } case 2: { std::vector roiIdxVector(node->get_shape()[0]); - LayerTestsDefinitions::ROIAlignLayerTest::fillIdxTensor(roiIdxVector, node->get_shape()[0]); + ov::test::ROIAlignLayerTest::fillIdxTensor(roiIdxVector, node->get_shape()[0]); return ov::test::utils::create_tensor(elemType, targetShape, roiIdxVector); } default: diff --git a/src/tests/functional/shared_test_classes/src/single_op/roi_align.cpp b/src/tests/functional/shared_test_classes/src/single_op/roi_align.cpp new file mode 100644 index 00000000000000..d191e7dda9565f --- /dev/null +++ b/src/tests/functional/shared_test_classes/src/single_op/roi_align.cpp @@ -0,0 +1,203 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include "shared_test_classes/single_op/roi_align.hpp" + +#include "openvino/core/enum_names.hpp" + +namespace ov { +namespace test { +std::string ROIAlignLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { + std::vector input_shapes; + ov::Shape coords_shape; + int pooled_h; + int pooled_w; + float spatial_scale; + int pooling_ratio; + std::string pooling_mode; + 
ov::element::Type model_type; + std::string target_device; + std::tie(input_shapes, coords_shape, pooled_h, pooled_w, spatial_scale, + pooling_ratio, pooling_mode, model_type, target_device) = obj.param; + + std::ostringstream result; + result << "IS=("; + for (size_t i = 0lu; i < input_shapes.size(); i++) { + result << ov::test::utils::partialShape2str({input_shapes[i].first}) + << (i < input_shapes.size() - 1lu ? "_" : ""); + } + result << ")_TS="; + for (size_t i = 0lu; i < input_shapes.front().second.size(); i++) { + result << "{"; + for (size_t j = 0lu; j < input_shapes.size(); j++) { + result << ov::test::utils::vec2str(input_shapes[j].second[i]) << (j < input_shapes.size() - 1lu ? "_" : ""); + } + result << "}_"; + } + result << "coordShape=" << ov::test::utils::vec2str(coords_shape) << "_"; + result << "pooledH=" << pooled_h << "_"; + result << "pooledW=" << pooled_w << "_"; + result << "spatialScale=" << spatial_scale << "_"; + result << "poolingRatio=" << pooling_ratio << "_"; + result << "poolingMode=" << pooling_mode << "_"; + result << "modelType=" << model_type.to_string() << "_"; + result << "trgDev=" << target_device; + return result.str(); +} + +static int randInt(int low, int high) { + std::random_device rd; + std::mt19937 gen(rd()); + std::uniform_int_distribution dis(low, high); + return dis(gen); +} + +void ROIAlignLayerTest::fillCoordTensor(std::vector& coords, int height, int width, + float spatial_scale, int pooled_ratio, int pooled_h, int pooled_w) { + int min_roi_width = pooled_w; + int max_roi_width = width / pooled_ratio; + int min_roi_height = pooled_h; + int max_roi_height = height / pooled_ratio; + + for (int i = 0; i < coords.size() / 4; i++) { + int size_x = std::min(width, randInt(min_roi_width, max_roi_width)); + int size_y = std::min(height, randInt(min_roi_height, max_roi_height)); + int start_x = randInt(0, std::max(1, width - size_x - 1)); + int start_y = randInt(0, std::max(1, height - size_y - 1)); + + coords[i * 4] = start_x / spatial_scale; + coords[i * 4 + 1] = start_y / spatial_scale; + coords[i * 4 + 2] = (start_x + size_x - 1) / spatial_scale; + coords[i * 4 + 3] = (start_y + size_y - 1) / spatial_scale; + } +} +void ROIAlignLayerTest::fillIdxTensor(std::vector& idx, int batch_size) { + int batch_id = 0; + for (int i = 0; i < idx.size(); i++) { + idx[i] = batch_id; + batch_id = (batch_id + 1) % batch_size; + } +} + +void ROIAlignLayerTest::SetUp() { + std::vector input_shapes; + ov::Shape coords_shape; + int pooled_h; + int pooled_w; + float spatial_scale; + int pooling_ratio; + std::string pooling_mode; + ov::element::Type model_type; + std::tie(input_shapes, coords_shape, pooled_h, pooled_w, spatial_scale, + pooling_ratio, pooling_mode, model_type, targetDevice) = this->GetParam(); + + init_input_shapes(input_shapes); + + auto param = std::make_shared(model_type, inputDynamicShapes[0]); + std::vector proposal_vector; + std::vector roi_idx_vector; + proposal_vector.resize(coords_shape[0] * 4); + roi_idx_vector.resize(coords_shape[0]); + + fillCoordTensor(proposal_vector, inputDynamicShapes[0][2].get_length(), inputDynamicShapes[0][3].get_length(), + spatial_scale, pooling_ratio, pooled_h, pooled_w); + fillIdxTensor(roi_idx_vector, inputDynamicShapes[0][0].get_length()); + auto idx_shape = ov::Shape{coords_shape[0]}; + + auto coords = std::make_shared(model_type, coords_shape, proposal_vector.data()); + auto rois_Idx = std::make_shared(ov::element::i32, idx_shape, roi_idx_vector.data()); + auto roi_align = std::make_shared(param, + coords, + 
rois_Idx, + pooled_h, + pooled_w, + pooling_ratio, + spatial_scale, + pooling_mode); + function = std::make_shared(roi_align->outputs(), ov::ParameterVector{param}, "roi_align"); +} + +std::string ROIAlignV9LayerTest::getTestCaseName(const testing::TestParamInfo& obj) { + std::vector input_shapes; + ov::Shape coords_shape; + int pooled_h; + int pooled_w; + float spatial_scale; + int pooling_ratio; + std::string pooling_mode; + std::string roi_aligned_mode; + ov::element::Type model_type; + std::string target_device; + std::tie(input_shapes, coords_shape, pooled_h, pooled_w, spatial_scale, + pooling_ratio, pooling_mode, roi_aligned_mode, model_type, target_device) = obj.param; + + std::ostringstream result; + result << "IS=("; + for (size_t i = 0lu; i < input_shapes.size(); i++) { + result << ov::test::utils::partialShape2str({input_shapes[i].first}) + << (i < input_shapes.size() - 1lu ? "_" : ""); + } + result << ")_TS="; + for (size_t i = 0lu; i < input_shapes.front().second.size(); i++) { + result << "{"; + for (size_t j = 0lu; j < input_shapes.size(); j++) { + result << ov::test::utils::vec2str(input_shapes[j].second[i]) << (j < input_shapes.size() - 1lu ? "_" : ""); + } + result << "}_"; + } + result << "coordShape=" << ov::test::utils::vec2str(coords_shape) << "_"; + result << "pooledH=" << pooled_h << "_"; + result << "pooledW=" << pooled_w << "_"; + result << "spatialScale=" << spatial_scale << "_"; + result << "poolingRatio=" << pooling_ratio << "_"; + result << "poolingMode=" << pooling_mode << "_"; + result << "ROIMode=" << roi_aligned_mode << "_"; + result << "modelType=" << model_type.to_string() << "_"; + result << "trgDev=" << target_device; + return result.str(); +} + +void ROIAlignV9LayerTest::SetUp() { + std::vector input_shapes; + ov::Shape coords_shape; + int pooled_h; + int pooled_w; + float spatial_scale; + int pooling_ratio; + std::string pooling_mode; + std::string roi_aligned_mode; + ov::element::Type model_type; + std::tie(input_shapes, coords_shape, pooled_h, pooled_w, spatial_scale, + pooling_ratio, pooling_mode, roi_aligned_mode, model_type, targetDevice) = this->GetParam(); + + init_input_shapes(input_shapes); + + auto param = std::make_shared(model_type, inputDynamicShapes[0]); + std::vector proposal_vector; + std::vector roi_idx_vector; + proposal_vector.resize(coords_shape[0] * 4); + roi_idx_vector.resize(coords_shape[0]); + + ROIAlignLayerTest::fillCoordTensor(proposal_vector, inputDynamicShapes[0][2].get_length(), inputDynamicShapes[0][3].get_length(), + spatial_scale, pooling_ratio, pooled_h, pooled_w); + ROIAlignLayerTest::fillIdxTensor(roi_idx_vector, inputDynamicShapes[0][0].get_length()); + auto idx_shape = ov::Shape{coords_shape[0]}; + + auto coords = std::make_shared(model_type, coords_shape, proposal_vector.data()); + auto rois_Idx = std::make_shared(ov::element::i32, idx_shape, roi_idx_vector.data()); + auto roi_align = std::make_shared(param, + coords, + rois_Idx, + pooled_h, + pooled_w, + pooling_ratio, + spatial_scale, + ov::EnumNames::as_enum(pooling_mode), + ov::EnumNames::as_enum(roi_aligned_mode)); + function = std::make_shared(roi_align->outputs(), ov::ParameterVector{param}, "roi_align"); +} +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/src/single_op/roi_pooling.cpp b/src/tests/functional/shared_test_classes/src/single_op/roi_pooling.cpp new file mode 100644 index 00000000000000..f4867abe6629b7 --- /dev/null +++ b/src/tests/functional/shared_test_classes/src/single_op/roi_pooling.cpp @@ -0,0 
+1,74 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "shared_test_classes/single_op/roi_pooling.hpp" + +namespace ov { +namespace test { +std::string ROIPoolingLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { + std::vector input_shapes; + ov::Shape pool_shape; + float spatial_scale; + ov::test::utils::ROIPoolingTypes pool_method; + ov::element::Type model_type; + std::string target_device; + std::tie(input_shapes, pool_shape, spatial_scale, pool_method, model_type, target_device) = obj.param; + + std::ostringstream result; + result << "IS=("; + for (size_t i = 0lu; i < input_shapes.size(); i++) { + result << ov::test::utils::partialShape2str({input_shapes[i].first}) + << (i < input_shapes.size() - 1lu ? "_" : ""); + } + result << ")_TS="; + for (size_t i = 0lu; i < input_shapes.front().second.size(); i++) { + result << "{"; + for (size_t j = 0lu; j < input_shapes.size(); j++) { + result << ov::test::utils::vec2str(input_shapes[j].second[i]) << (j < input_shapes.size() - 1lu ? "_" : ""); + } + result << "}_"; + } + result << "PS=" << ov::test::utils::vec2str(pool_shape) << "_"; + result << "Scale=" << spatial_scale << "_"; + switch (pool_method) { + case utils::ROIPoolingTypes::ROI_MAX: + result << "Max_"; + break; + case utils::ROIPoolingTypes::ROI_BILINEAR: + result << "Bilinear_"; + break; + } + result << "modelType=" << model_type.to_string() << "_"; + result << "trgDev=" << target_device; + return result.str(); +} + +void ROIPoolingLayerTest::SetUp() { + std::vector input_shapes; + ov::Shape pool_shape; + float spatial_scale; + ov::test::utils::ROIPoolingTypes pool_method; + ov::element::Type model_type; + std::string target_device; + std::tie(input_shapes, pool_shape, spatial_scale, pool_method, model_type, targetDevice) = this->GetParam(); + + abs_threshold = 0.08f; + + init_input_shapes(input_shapes); + + auto param = std::make_shared(model_type, inputDynamicShapes[0]); + auto coord_param = std::make_shared(model_type, inputDynamicShapes[1]); + std::string pool_method_str; + if (pool_method == ov::test::utils::ROIPoolingTypes::ROI_MAX) { + pool_method_str = "max"; + } else if (pool_method == ov::test::utils::ROIPoolingTypes::ROI_BILINEAR) { + pool_method_str = "bilinear"; + } else { + FAIL() << "Incorrect type of ROIPooling operation"; + } + auto roi_pooling = std::make_shared(param, coord_param, pool_shape, spatial_scale, pool_method_str); + function = std::make_shared(roi_pooling->outputs(), ov::ParameterVector{param, coord_param}, "roi_pooling"); +} +} // namespace test +} // namespace ov From 1e7977332bdea7d1c21d3ae156286de3a32c36e9 Mon Sep 17 00:00:00 2001 From: Georgy Krivoruchko Date: Tue, 17 Oct 2023 08:30:22 -0700 Subject: [PATCH 233/257] Fixed naming and comments in ReduceProd (#20512) --- src/core/src/bound_evaluate.hpp | 2 +- src/core/src/op/reduce_prod.cpp | 14 +++++++------- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/src/core/src/bound_evaluate.hpp b/src/core/src/bound_evaluate.hpp index 364a1eef3aa180..952343a7d0076b 100644 --- a/src/core/src/bound_evaluate.hpp +++ b/src/core/src/bound_evaluate.hpp @@ -9,7 +9,7 @@ namespace ov { // bool could_propagate(const Output& output, std::vector& order); -/// \brief Checks if all the elements of the bound Tensor are positive +/// \brief Checks if all the elements of the bound Tensor are non-negative bool tensor_is_non_negative(const Tensor& bound); /// \brief Checks if any element of the bound Tensor has max possible value diff --git 
a/src/core/src/op/reduce_prod.cpp b/src/core/src/op/reduce_prod.cpp index dbd89a8ae09192..9d2c4dee4a8c51 100644 --- a/src/core/src/op/reduce_prod.cpp +++ b/src/core/src/op/reduce_prod.cpp @@ -16,7 +16,7 @@ namespace ov { namespace op { namespace reduce_prod { namespace { -bool has_positive_bounds_on_data(const Node* const op) { +bool has_non_negative_bounds_on_data(const Node* const op) { const auto& lb = op->get_input_tensor(0).get_lower_value(); const auto& ub = op->get_input_tensor(0).get_upper_value(); @@ -79,20 +79,20 @@ bool ReduceProd::has_evaluate() const { } bool ReduceProd::evaluate_lower(ov::TensorVector& output_values) const { - return reduce_prod::has_positive_bounds_on_data(this) && get_input_tensor(1).has_and_set_bound() && + return reduce_prod::has_non_negative_bounds_on_data(this) && get_input_tensor(1).has_and_set_bound() && default_lower_bound_evaluator(this, output_values); } bool ReduceProd::evaluate_upper(ov::TensorVector& output_values) const { - if (!reduce_prod::has_positive_bounds_on_data(this) || !get_input_tensor(1).has_and_set_bound()) + if (!reduce_prod::has_non_negative_bounds_on_data(this) || !get_input_tensor(1).has_and_set_bound()) return false; - // We need to cover a corner case: if an Upper Bound comes from ShapeOf and contains - // dynamic dimension (-1) - it has a value 0x7FFFFFFFFFFFFFFF, which points on + // We need to cover a case: if an Upper Bound comes from ShapeOf and contains + // dynamic dimension (-1) - it has a value max_of_type, which points on // a maximum possible value. For example, Upper Bound of shape [-1, 12] is - // [0x7FFFFFFFFFFFFFFF, 12]. + // [max_of_type, 12]. // In such case we shouldn't evaluate a real ReduceProd because it'll cause an // overflow and returns wrong value. We should return an Upper Bound as for [-1], - // which will be evaluated as [0x7FFFFFFFFFFFFFFF] + // which will be evaluated as [max_of_type] // In case dimensions has a zero dimension - it should return 0 in any case if (tensor_has_max_value(get_input_tensor(0).get_upper_value()) && !tensor_has_zero_value(get_input_tensor(0).get_upper_value())) { From a5fb3823f6096f1be80e6095931f58e26b73bcb8 Mon Sep 17 00:00:00 2001 From: Oleg Pipikin Date: Tue, 17 Oct 2023 17:35:24 +0200 Subject: [PATCH 234/257] Refactor EinsumLayerTest, EyeLayerTest (#20288) * Refactor EinsumLayerTest * Refator EyeLayerTest * Apply comments * Fix --------- Co-authored-by: Vitaliy Urusovskij --- .../single_layer_tests/einsum.cpp | 41 +++++----- .../single_layer_tests/eye.cpp | 27 ++++--- .../shared/include/single_op_tests/einsum.hpp | 15 ++++ .../shared/include/single_op_tests/eye.hpp | 14 ++++ .../shared_test_classes/single_op/einsum.hpp | 34 +++++++++ .../shared_test_classes/single_op/eye.hpp | 24 ++++++ .../src/single_op/einsum.cpp | 61 +++++++++++++++ .../shared_test_classes/src/single_op/eye.cpp | 74 +++++++++++++++++++ 8 files changed, 255 insertions(+), 35 deletions(-) rename src/plugins/intel_gpu/tests/functional/{ => shared_tests_instances}/single_layer_tests/eye.cpp (88%) create mode 100644 src/tests/functional/plugin/shared/include/single_op_tests/einsum.hpp create mode 100644 src/tests/functional/plugin/shared/include/single_op_tests/eye.hpp create mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/einsum.hpp create mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/eye.hpp create mode 100644 src/tests/functional/shared_test_classes/src/single_op/einsum.cpp create mode 100644 
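Stepping back to the ReduceProd::evaluate_upper comment rewritten a few hunks above: the reason for bailing out on max_of_type elements is easiest to see on a concrete bound. The lines below are an illustration of that reasoning, not code from the repository.

#include <cstdint>
#include <limits>

// Upper bound of shape [-1, 12] as produced by ShapeOf bound evaluation:
// the dynamic dimension is represented by the maximum value of the type.
const int64_t upper_of_dynamic_dim = std::numeric_limits<int64_t>::max();
const int64_t upper_of_static_dim = 12;
// Evaluating the real product upper_of_dynamic_dim * upper_of_static_dim would
// overflow int64_t, so evaluate_upper() reports max_of_type directly instead
// (and 0 whenever some dimension's upper bound is already 0).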
src/tests/functional/shared_test_classes/src/single_op/eye.cpp diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/einsum.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/einsum.cpp index 94555588b727bb..7303e7a396a2b2 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/einsum.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/einsum.cpp @@ -4,35 +4,34 @@ #include -#include "single_layer_tests/einsum.hpp" - -using namespace ngraph::helpers; -using namespace LayerTestsDefinitions; +#include "single_op_tests/einsum.hpp" namespace { -const std::vector precisions = { - InferenceEngine::Precision::FP32, - InferenceEngine::Precision::FP16 +using ov::test::EinsumLayerTest; + +const std::vector model_types = { + ov::element::f32, + ov::element::f16 }; -const std::vector equationsWithInput = { - { "ij->ji", {{{1, 2}}} }, // transpose 2d - { "ijk->kij", { {1, 2, 3} } }, // transpose 3d - { "ij->i", { {2, 3} } }, // reduce - { "ab,cd->abcd", { { 1, 2}, {3, 4} } }, // no reduction - { "ab,bc->ac", { {2, 3}, {3, 2} } }, // matrix multiplication - { "ab,bcd,bc->ca", { {2, 4}, {4, 3, 1}, {4, 3} } }, // multiple multiplications - { "kii->ki", { {1, 3, 3} } }, // diagonal - { "abbac,bad->ad", { {2, 3, 3, 2, 4}, {3, 2, 1} } }, // diagonal and multiplication with repeated labels - { "a...->...a", { {2, 2, 3} } }, // transpose with ellipsis - { "a...->...", { {2, 2, 3} } }, // reduce with ellipsis - { "ab...,...->ab...", { {2, 2, 3}, {1} } }, // multiply by scalar - { "a...j,j...->a...", { {1, 1, 4, 3}, {3, 4, 2, 1} } } // complex multiplication +const std::vector equationsWithInput = { + { "ij->ji", ov::test::static_shapes_to_test_representation({ {1, 2} }) }, // transpose 2d + { "ijk->kij", ov::test::static_shapes_to_test_representation({ {1, 2, 3} }) }, // transpose 3d + { "ij->i", ov::test::static_shapes_to_test_representation({ {2, 3} }) }, // reduce + { "ab,cd->abcd", ov::test::static_shapes_to_test_representation({ { 1, 2}, {3, 4} }) }, // no reduction + { "ab,bc->ac", ov::test::static_shapes_to_test_representation({ {2, 3}, {3, 2} }) }, // matrix multiplication + { "ab,bcd,bc->ca", ov::test::static_shapes_to_test_representation({ {2, 4}, {4, 3, 1}, {4, 3} }) }, // multiple multiplications + { "kii->ki", ov::test::static_shapes_to_test_representation({ {1, 3, 3} }) }, // diagonal + { "abbac,bad->ad", ov::test::static_shapes_to_test_representation({ {2, 3, 3, 2, 4}, {3, 2, 1} }) }, // diagonal and multiplication with repeated labels + { "a...->...a", ov::test::static_shapes_to_test_representation({ {2, 2, 3} }) }, // transpose with ellipsis + { "a...->...", ov::test::static_shapes_to_test_representation({ {2, 2, 3} }) }, // reduce with ellipsis + { "ab...,...->ab...", ov::test::static_shapes_to_test_representation({ {2, 2, 3}, {1} }) }, // multiply by scalar + { "a...j,j...->a...", ov::test::static_shapes_to_test_representation({ {1, 1, 4, 3}, {3, 4, 2, 1} }) } // complex multiplication }; INSTANTIATE_TEST_SUITE_P(smoke_Einsum, EinsumLayerTest, - ::testing::Combine(::testing::ValuesIn(precisions), + ::testing::Combine(::testing::ValuesIn(model_types), ::testing::ValuesIn(equationsWithInput), ::testing::Values(ov::test::utils::DEVICE_GPU)), EinsumLayerTest::getTestCaseName); diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/eye.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/eye.cpp 
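As a reading aid for the einsum equation list above, here is what one of the migrated cases computes: "ab,bc->ac" on shapes {2, 3} and {3, 2} contracts over the shared label b, i.e. an ordinary matrix multiplication with a {2, 2} result. The snippet is a hedged sketch written against the public ov::op::v7::Einsum constructor, not an excerpt from the test suite.

#include <memory>

#include "openvino/core/model.hpp"
#include "openvino/op/einsum.hpp"
#include "openvino/op/parameter.hpp"

std::shared_ptr<ov::Model> matmul_as_einsum_sketch() {
    auto a = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{2, 3});
    auto b = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{3, 2});
    // The shared label 'b' is summed over; labels 'a' and 'c' are kept -> shape {2, 2}.
    auto einsum = std::make_shared<ov::op::v7::Einsum>(ov::OutputVector{a, b}, "ab,bc->ac");
    return std::make_shared<ov::Model>(einsum->outputs(), ov::ParameterVector{a, b});
}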
similarity index 88% rename from src/plugins/intel_gpu/tests/functional/single_layer_tests/eye.cpp rename to src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/eye.cpp index a347ab6877b16d..d610fc923d8e72 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/eye.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/eye.cpp @@ -2,19 +2,18 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "shared_test_classes/single_layer/eye.hpp" - -using namespace LayerTestsDefinitions; +#include "single_op_tests/eye.hpp" namespace { +using ov::test::EyeLayerTest; -TEST_P(EyeLayerTest, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - Run(); -} - -const std::vector netPrecisions = - {ElementType::f32, ElementType::f16, ElementType::i32, ElementType::i8, ElementType::u8, ElementType::i64}; +const std::vector model_types = { + ov::element::f32, + ov::element::f16, + ov::element::i32, + ov::element::i8, + ov::element::u8, + ov::element::i64}; const std::vector> eyePars = { // rows, cols, diag_shift @@ -40,7 +39,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_Eye2D_WithNonScalar_Test, ::testing::Combine(::testing::ValuesIn(std::vector>{{{1}, {1}, {1}}}), ::testing::ValuesIn(emptyBatchShape), ::testing::ValuesIn(eyePars), - ::testing::ValuesIn(netPrecisions), + ::testing::ValuesIn(model_types), ::testing::Values(ov::test::utils::DEVICE_GPU)), EyeLayerTest::getTestCaseName); @@ -50,7 +49,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_Eye_1DBatch_Test, {{1}, {1}, {1}, {1}}}), ::testing::ValuesIn(batchShapes1D), ::testing::ValuesIn(eyePars), - ::testing::ValuesIn(netPrecisions), + ::testing::ValuesIn(model_types), ::testing::Values(ov::test::utils::DEVICE_GPU)), EyeLayerTest::getTestCaseName); @@ -60,7 +59,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_Eye_2DBatch_Test, {{1}, {1}, {1}, {2}}}), ::testing::ValuesIn(batchShapes2D), ::testing::ValuesIn(eyePars), - ::testing::ValuesIn(netPrecisions), + ::testing::ValuesIn(model_types), ::testing::Values(ov::test::utils::DEVICE_GPU)), EyeLayerTest::getTestCaseName); @@ -70,7 +69,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_Eye_3DBatch_Test, {{1}, {1}, {1}, {3}}}), ::testing::ValuesIn(batchShapes3D), ::testing::ValuesIn(eyePars), - ::testing::ValuesIn(netPrecisions), + ::testing::ValuesIn(model_types), ::testing::Values(ov::test::utils::DEVICE_GPU)), EyeLayerTest::getTestCaseName); diff --git a/src/tests/functional/plugin/shared/include/single_op_tests/einsum.hpp b/src/tests/functional/plugin/shared/include/single_op_tests/einsum.hpp new file mode 100644 index 00000000000000..375a02f2ec65f4 --- /dev/null +++ b/src/tests/functional/plugin/shared/include/single_op_tests/einsum.hpp @@ -0,0 +1,15 @@ +// Copyright (C) 2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "shared_test_classes/single_op/einsum.hpp" + +namespace ov { +namespace test { +TEST_P(EinsumLayerTest, Inference) { + run(); +} +} // namespace test +} // namespace ov diff --git a/src/tests/functional/plugin/shared/include/single_op_tests/eye.hpp b/src/tests/functional/plugin/shared/include/single_op_tests/eye.hpp new file mode 100644 index 00000000000000..ebacbba67b4844 --- /dev/null +++ b/src/tests/functional/plugin/shared/include/single_op_tests/eye.hpp @@ -0,0 +1,14 @@ +// Copyright (C) 2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#pragma once + +#include "shared_test_classes/single_op/eye.hpp" + +namespace ov { +namespace test { +TEST_P(EyeLayerTest, Inference) { + run(); +} +} // 
namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/einsum.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/einsum.hpp new file mode 100644 index 00000000000000..aeef17447e6a43 --- /dev/null +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/einsum.hpp @@ -0,0 +1,34 @@ +// Copyright (C) 2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include + +#include "shared_test_classes/base/ov_subgraph.hpp" + +namespace ov { +namespace test { +typedef std::tuple< + std::string, // Equation + std::vector // Input shapes +> EinsumEquationWithInput; + +typedef std::tuple< + ov::element::Type, // Model type + EinsumEquationWithInput, // Equation with corresponding input shapes + std::string // Device name +> EinsumLayerTestParamsSet; + +class EinsumLayerTest : public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseTest { +public: + static std::string getTestCaseName(const testing::TestParamInfo& obj); +protected: + void SetUp() override; +}; +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/eye.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/eye.hpp new file mode 100644 index 00000000000000..1563a8a37f9cda --- /dev/null +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/eye.hpp @@ -0,0 +1,24 @@ +// Copyright (C) 2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#pragma once + +#include "shared_test_classes/base/ov_subgraph.hpp" + +namespace ov { +namespace test { +using EyeLayerTestParams = std::tuple< + std::vector, // eye shape + std::vector, // output batch shape + std::vector, // eye params (rows, cols, diag_shift) + ov::element::Type, // Model type + std::string>; // Device name + +class EyeLayerTest : public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseTest { +public: + static std::string getTestCaseName(testing::TestParamInfo obj); + void SetUp() override; +}; +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/src/single_op/einsum.cpp b/src/tests/functional/shared_test_classes/src/single_op/einsum.cpp new file mode 100644 index 00000000000000..4912bf6a0afd04 --- /dev/null +++ b/src/tests/functional/shared_test_classes/src/single_op/einsum.cpp @@ -0,0 +1,61 @@ +// Copyright (C) 2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "shared_test_classes/single_op/einsum.hpp" + +namespace ov { +namespace test { + +std::string EinsumLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { + EinsumEquationWithInput equation_with_input; + ov::element::Type model_type; + std::string targetDevice; + std::tie(model_type, equation_with_input, targetDevice) = obj.param; + std::string equation; + std::vector shapes; + std::tie(equation, shapes) = equation_with_input; + + std::ostringstream result; + result << "IS=("; + for (size_t i = 0lu; i < shapes.size(); i++) { + result << ov::test::utils::partialShape2str({shapes[i].first}) << (i < shapes.size() - 1lu ? "_" : ""); + } + result << ")_TS="; + for (size_t i = 0lu; i < shapes.front().second.size(); i++) { + result << "{"; + for (size_t j = 0lu; j < shapes.size(); j++) { + result << ov::test::utils::vec2str(shapes[j].second[i]) << (j < shapes.size() - 1lu ? 
"_" : ""); + } + result << "}_"; + } + result << "PRC=" << model_type.get_type_name() << "_"; + result << "Eq=" << equation << "_"; + result << "trgDev=" << targetDevice; + return result.str(); +} + +void EinsumLayerTest::SetUp() { + EinsumEquationWithInput equation_with_input; + ov::element::Type model_type; + std::tie(model_type, equation_with_input, targetDevice) = this->GetParam(); + std::string equation; + std::vector shapes; + std::tie(equation, shapes) = equation_with_input; + init_input_shapes(shapes); + + ov::ParameterVector params; + ov::OutputVector param_outs; + for (const auto& shape : inputDynamicShapes) { + auto param = std::make_shared(model_type, shape); + params.push_back(param); + param_outs.push_back(param); + } + + auto einsum = std::make_shared(param_outs, equation); + + auto result = std::make_shared(einsum); + function = std::make_shared(result, params, "einsum"); +} +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/src/single_op/eye.cpp b/src/tests/functional/shared_test_classes/src/single_op/eye.cpp new file mode 100644 index 00000000000000..0aae5e1593ef43 --- /dev/null +++ b/src/tests/functional/shared_test_classes/src/single_op/eye.cpp @@ -0,0 +1,74 @@ +// Copyright (C) 2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#include "shared_test_classes/single_op/eye.hpp" + +#include "common_test_utils/ov_tensor_utils.hpp" +#include "openvino/pass/constant_folding.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/eye.hpp" + +namespace ov { +namespace test { +std::string EyeLayerTest::getTestCaseName(testing::TestParamInfo obj) { + std::string td; + std::vector input_shapes; + ov::element::Type model_type; + std::vector out_batch_shape; + std::vector eye_par; + std::tie(input_shapes, out_batch_shape, eye_par, model_type, td) = obj.param; + std::ostringstream result; + result << "EyeTest_"; + result << "IS=("; + for (const auto& shape : input_shapes) { + result << ov::test::utils::partialShape2str({shape}) << "_"; + } + result << ")"; + result << "rowNum=" << eye_par[0] << "_"; + result << "colNum=" << eye_par[1] << "_"; + result << "diagShift=" << eye_par[2] << "_"; + result << "batchShape=" << ov::test::utils::vec2str(out_batch_shape) << "_"; + result << model_type << "_"; + result << std::to_string(obj.index); + return result.str(); +} + +void EyeLayerTest::SetUp() { + std::vector input_shapes; + int row_num, col_num; + int shift; + std::vector out_batch_shape; + ov::element::Type model_type; + std::vector eye_par; + std::tie(input_shapes, out_batch_shape, eye_par, model_type, targetDevice) = this->GetParam(); + row_num = eye_par[0]; + col_num = eye_par[1]; + shift = eye_par[2]; + + std::shared_ptr eye_operation; + + auto rows_const = std::make_shared(ov::element::i32, input_shapes[0], &row_num); + rows_const->set_friendly_name("rows"); + auto cols_const = std::make_shared(ov::element::i32, input_shapes[1], &col_num); + cols_const->set_friendly_name("cols"); + auto diag_const = std::make_shared(ov::element::i32, input_shapes[2], &shift); + diag_const->set_friendly_name("diagInd"); + + if (!out_batch_shape.empty() && out_batch_shape[0] != 0) { + auto batch_shape_par = std::make_shared(ov::element::i32, + ov::Shape{out_batch_shape.size()}, + out_batch_shape.data()); + batch_shape_par->set_friendly_name("batchShape"); + eye_operation = std::make_shared(rows_const, cols_const, diag_const, batch_shape_par, model_type); + } else { + eye_operation = 
std::make_shared(rows_const, cols_const, diag_const, model_type); + } + + // Without this call the eye operation will be calculated by CPU and substituted by Constant operator + ov::pass::disable_constant_folding(eye_operation); + auto result = std::make_shared(eye_operation); + function = std::make_shared(result, ov::ParameterVector(), "eye"); +} +} // namespace test +} // namespace ov From b270e365900ef80ce1eef44ce0e1388dc45d2da0 Mon Sep 17 00:00:00 2001 From: Tatiana Savina Date: Tue, 17 Oct 2023 17:48:20 +0200 Subject: [PATCH 235/257] add AMX (#20523) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index adc6f9f2b965ea..c34965cd736fc2 100644 --- a/README.md +++ b/README.md @@ -70,7 +70,7 @@ The OpenVINO™ Runtime can infer models on different hardware devices. This sec CPU Intel CPU openvino_intel_cpu_plugin - Intel Xeon with Intel® Advanced Vector Extensions 2 (Intel® AVX2), Intel® Advanced Vector Extensions 512 (Intel® AVX-512), and AVX512_BF16, Intel Core Processors with Intel AVX2, Intel Atom Processors with Intel® Streaming SIMD Extensions (Intel® SSE) + Intel Xeon with Intel® Advanced Vector Extensions 2 (Intel® AVX2), Intel® Advanced Vector Extensions 512 (Intel® AVX-512), and AVX512_BF16, Intel Core Processors with Intel AVX2, Intel Atom Processors with Intel® Streaming SIMD Extensions (Intel® SSE), Intel® Advanced Matrix Extensions (Intel® AMX) ARM CPU From f78e206ecb0449758cafa30be61995f64fa6bf8c Mon Sep 17 00:00:00 2001 From: Tatiana Savina Date: Tue, 17 Oct 2023 17:49:07 +0200 Subject: [PATCH 236/257] [DOCS] Update components in readme (#20524) * update readme * add space * add space --- README.md | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index c34965cd736fc2..bfc4a722c2680d 100644 --- a/README.md +++ b/README.md @@ -33,7 +33,7 @@ OpenVINO™ is an open-source toolkit for optimizing and deploying AI inference. - Reduce resource demands and efficiently deploy on a range of Intel® platforms from edge to cloud -This open-source version includes several components: namely [Model Optimizer], [OpenVINO™ Runtime], [Post-Training Optimization Tool], as well as CPU, GPU, GNA, multi device and heterogeneous plugins to accelerate deep learning inference on Intel® CPUs and Intel® Processor Graphics. +This open-source version includes several components: namely [OpenVINO Model Converter (OVC)], [OpenVINO™ Runtime], as well as CPU, GPU, GNA, multi device and heterogeneous plugins to accelerate deep learning inference on Intel® CPUs and Intel® Processor Graphics. It supports pre-trained models from [Open Model Zoo], along with 100+ open source and public models in popular formats such as TensorFlow, ONNX, PaddlePaddle, MXNet, Caffe, Kaldi. @@ -48,8 +48,7 @@ source and public models in popular formats such as TensorFlow, ONNX, PaddlePadd * [python](./src/bindings/python) - Python API for OpenVINO™ Runtime * [Plugins](./src/plugins) - contains OpenVINO plugins which are maintained in open-source by the OpenVINO team. For more information, take a look at the [list of supported devices](#supported-hardware-matrix). * [Frontends](./src/frontends) - contains available OpenVINO frontends that allow reading models from the native framework format. 
-* [Model Optimizer] - is a cross-platform command-line tool that facilitates the transition between training and deployment environments, performs static model analysis, and adjusts deep learning models for optimal execution on end-point target devices. -* [Post-Training Optimization Tool] - is designed to accelerate the inference of deep learning models by applying special methods without model retraining or fine-tuning, for example, post-training 8-bit quantization. +* [OpenVINO Model Converter (OVC)] - is a cross-platform command-line tool that facilitates the transition between training and deployment environments, and adjusts deep learning models for optimal execution on end-point target devices. * [Samples] - applications in C, C++ and Python languages that show basic OpenVINO use cases. ## Supported Hardware matrix @@ -62,7 +61,7 @@ The OpenVINO™ Runtime can infer models on different hardware devices. This sec Device Plugin Library - ShortDescription + Short Description @@ -98,7 +97,7 @@ OpenVINO™ Toolkit also contains several plugins which simplify loading models Plugin Library - ShortDescription + Short Description @@ -196,6 +195,5 @@ Report questions, issues and suggestions, using: [Open Model Zoo]:https://github.com/openvinotoolkit/open_model_zoo [OpenVINO™ Runtime]:https://docs.openvino.ai/2023.1/openvino_docs_OV_UG_OV_Runtime_User_Guide.html -[Model Optimizer]:https://docs.openvino.ai/2023.1/openvino_docs_MO_DG_Deep_Learning_Model_Optimizer_DevGuide.html -[Post-Training Optimization Tool]:https://docs.openvino.ai/2023.1/pot_introduction.html +[OpenVINO Model Converter (OVC)]:https://docs.openvino.ai/2023.1/openvino_docs_model_processing_introduction.html#convert-a-model-in-cli-ovc [Samples]:https://github.com/openvinotoolkit/openvino/tree/master/samples From 33a97740b2d2e09e29a6ae571a8ce46b46a2e8b8 Mon Sep 17 00:00:00 2001 From: Katarzyna Mitrus Date: Tue, 17 Oct 2023 17:50:42 +0200 Subject: [PATCH 237/257] [MO][Opset13] Multinomial-13 support in MO IR Reader (#20446) * Add Multinomial-13 to MO * Add Multinomial tests for MO IR reader * Move convert_type check * Imports clean up * Update pacgage BOM file * Avoid files collision in tests --- tools/mo/automation/package_BOM.txt | 3 +- tools/mo/openvino/tools/mo/ops/multinomial.py | 69 +++++++++++ .../unit_tests/mo/utils/ir_reader/ops_test.py | 117 ++++++++++++++---- 3 files changed, 162 insertions(+), 27 deletions(-) create mode 100644 tools/mo/openvino/tools/mo/ops/multinomial.py diff --git a/tools/mo/automation/package_BOM.txt b/tools/mo/automation/package_BOM.txt index 0780ce7eba8151..b9bc64d1c8bf08 100644 --- a/tools/mo/automation/package_BOM.txt +++ b/tools/mo/automation/package_BOM.txt @@ -928,6 +928,7 @@ openvino/tools/mo/ops/MatMul.py openvino/tools/mo/ops/memory.py openvino/tools/mo/ops/memoryoffset.py openvino/tools/mo/ops/merge.py +openvino/tools/mo/ops/multinomial.py openvino/tools/mo/ops/mvn.py openvino/tools/mo/ops/mxfft.py openvino/tools/mo/ops/mxrepeat.py @@ -1106,4 +1107,4 @@ openvino/tools/mo/utils/tensorboard_util.py openvino/tools/mo/utils/type_utils.py openvino/tools/mo/utils/unsupported_ops.py openvino/tools/mo/utils/utils.py -openvino/tools/mo/utils/version.py \ No newline at end of file +openvino/tools/mo/utils/version.py diff --git a/tools/mo/openvino/tools/mo/ops/multinomial.py b/tools/mo/openvino/tools/mo/ops/multinomial.py new file mode 100644 index 00000000000000..42f4b0d3eedbb9 --- /dev/null +++ b/tools/mo/openvino/tools/mo/ops/multinomial.py @@ -0,0 +1,69 @@ +# Copyright (C) 2018-2023 Intel 
Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +from openvino.tools.mo.front.common.partial_infer.utils import dynamic_dimension +from openvino.tools.mo.front.extractor import bool_to_str +from openvino.tools.mo.graph.graph import Graph, Node + +from openvino.tools.mo.ops.op import Op + + +class Multinomial(Op): + op = 'Multinomial' + enabled = False + + def __init__(self, graph: Graph, attrs: dict): + super().__init__(graph, { + 'type': self.op, + 'op': self.op, + 'version': 'opset13', + 'infer': self.infer, + 'in_ports_count': 2, + 'out_ports_count': 1, + 'type_infer': self.type_infer, + 'with_replacement': False, + 'log_probs': False, + 'global_seed': 0, + 'op_seed': 0, + 'convert_type': np.int64, + }, attrs) + + def backend_attrs(self): + return ['convert_type', + ('with_replacement', lambda node: bool_to_str( + node, 'with_replacement')), + ('log_probs', lambda node: bool_to_str(node, 'log_probs')), + 'global_seed', + 'op_seed'] + + def supported_attrs(self): + return ['convert_type', + 'with_replacement', + 'log_probs', + 'global_seed', + 'op_seed'] + + @staticmethod + def type_infer(node: Node): + assert node.has_valid('convert_type') + if node['convert_type'] == 'i32': + node.out_port(0).set_data_type(np.int32) + else: + node.out_port(0).set_data_type(np.int64) + + @staticmethod + def infer(node: Node): + + input_shape = node.in_node(0).shape + output_shape = [] + if input_shape is not None and input_shape.size == 2: + output_shape.append(input_shape[0]) + + num_samples = node.in_port(1).data.get_value() + if num_samples is not None: + output_shape.append(num_samples) + else: + output_shape.append(dynamic_dimension) + node.out_port(0).data.set_shape(output_shape) diff --git a/tools/mo/unit_tests/mo/utils/ir_reader/ops_test.py b/tools/mo/unit_tests/mo/utils/ir_reader/ops_test.py index 62cd013ad23093..3e5b35ef62fabb 100644 --- a/tools/mo/unit_tests/mo/utils/ir_reader/ops_test.py +++ b/tools/mo/unit_tests/mo/utils/ir_reader/ops_test.py @@ -10,7 +10,7 @@ import openvino.runtime.opset12 as opset12 import openvino.runtime.opset11 as opset11 import openvino.runtime.opset10 as opset10 -from openvino.runtime import Model, serialize, Core, PartialShape, Dimension +from openvino.runtime import Model, serialize, Core, PartialShape, Dimension, Type from openvino.tools.mo.utils.ir_reader.restore_graph import restore_graph_from_ir, save_restored_graph from openvino.tools.mo.utils.logger import init_logger @@ -23,16 +23,22 @@ class TestOps(unittest.TestCase): @staticmethod def check_graph_can_save(model, name): with tempfile.TemporaryDirectory() as tmp: - model_xml = Path(tmp) / (name + '.xml') - model_bin = Path(tmp) / (name + '.bin') + tmp_path = Path(tmp) + model_xml = tmp_path / (name + '.xml') + model_bin = tmp_path / (name + '.bin') serialize(model, model_xml, model_bin) graph, _ = restore_graph_from_ir(model_xml, model_bin) - save_restored_graph(graph, tmp, {}, name) + save_restored_graph(graph, tmp, {}, name + '_restored') # restore 2 times to validate that after save graph doesn't lose attributes etc. 
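The shape rule encoded by Multinomial.infer above is compact enough to restate on its own; the following sketch mirrors it on plain Python values, with None standing in for MO's dynamic dimension (an illustration, not part of the patch).

# Hedged restatement of the inference rule from Multinomial.infer above.
def multinomial_output_shape(probs_shape, num_samples):
    """probs_shape is [batch_size, class_size]; num_samples may be unknown."""
    output_shape = []
    if probs_shape is not None and len(probs_shape) == 2:
        output_shape.append(probs_shape[0])  # batch size is preserved
    output_shape.append(num_samples if num_samples is not None else None)
    return output_shape

assert multinomial_output_shape([2, 8], 3) == [2, 3]        # static num_samples
assert multinomial_output_shape([2, 8], None) == [2, None]  # dynamic num_samples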
- graph, _ = restore_graph_from_ir(model_xml, model_bin) + restored_model_xml = tmp_path / (name + '_restored.xml') + restored_model_bin = tmp_path / (name + '_restored.bin') + graph, _ = restore_graph_from_ir( + restored_model_xml, restored_model_bin) + core = Core() + core.set_property({"ENABLE_MMAP": False}) # check that re-saved model can be read in runtime - Core().read_model(model_xml) - return graph + model = core.read_model(restored_model_xml) + return graph, model def test_topk_11(self): data_shape = [6, 12, 10, 24] @@ -43,7 +49,7 @@ def test_topk_11(self): topk = opset11.topk(data_parameter, k_val, axis, "max", "value", stable=True, name="TopK_11") model = Model(topk, [data_parameter]) - graph = TestOps.check_graph_can_save(model, 'topk_model') + graph, _ = TestOps.check_graph_can_save(model, 'topk_model') topk_node = graph.get_op_nodes(op="TopK")[0] self.assertEqual(topk_node["version"], "opset11") self.assertTrue(topk_node["stable"]) @@ -56,7 +62,7 @@ def test_interpolate_11(self): interpolate = opset11.interpolate(data_parameter, np.int32( [20, 48]), "nearest", "sizes", axes=np.int32([2, 3]), name="Interpolate_11") model = Model(interpolate, [data_parameter]) - graph = TestOps.check_graph_can_save(model, 'interpolate_model') + graph, _ = TestOps.check_graph_can_save(model, 'interpolate_model') interpolate_node = graph.get_op_nodes(op="Interpolate")[0] self.assertEqual(interpolate_node["version"], "opset11") self.assertTrue("force_precision_in_ports" in interpolate_node) @@ -69,7 +75,7 @@ def test_interpolate_11_scales(self): interpolate = opset11.interpolate(data_parameter, np.float32( [2., 2.]), "nearest", "scales", axes=np.int32([2, 3]), name="Interpolate_11") model = Model(interpolate, [data_parameter]) - graph = TestOps.check_graph_can_save(model, 'interpolate_model') + graph, _ = TestOps.check_graph_can_save(model, 'interpolate_model') interpolate_node = graph.get_op_nodes(op="Interpolate")[0] self.assertEqual(interpolate_node["version"], "opset11") self.assertTrue("force_precision_in_ports" not in interpolate_node) @@ -81,7 +87,7 @@ def test_interpolate_11_no_axes(self): interpolate = opset11.interpolate(data_parameter, np.int32( [6, 12, 20, 48]), "nearest", "sizes", name="Interpolate_11") model = Model(interpolate, [data_parameter]) - graph = TestOps.check_graph_can_save(model, 'interpolate_model') + graph, _ = TestOps.check_graph_can_save(model, 'interpolate_model') interpolate_node = graph.get_op_nodes(op="Interpolate")[0] self.assertEqual(interpolate_node["version"], "opset11") self.assertTrue("force_precision_in_ports" in interpolate_node) @@ -94,7 +100,7 @@ def test_interpolate_4(self): interpolate = opset10.interpolate(data_parameter, np.int32([20, 48]), np.float32( [2, 2]), "nearest", "sizes", axes=np.int32([2, 3]), name="Interpolate_4") model = Model(interpolate, [data_parameter]) - graph = TestOps.check_graph_can_save(model, 'interpolate4_model') + graph, _ = TestOps.check_graph_can_save(model, 'interpolate4_model') interpolate_node = graph.get_op_nodes(op="Interpolate")[0] self.assertEqual(interpolate_node["version"], "opset4") @@ -105,7 +111,7 @@ def test_unique(self): unique = opset10.unique(data_parameter, axis=np.int32( [2]), sorted=True, name="Unique_10") model = Model(unique, [data_parameter]) - graph = TestOps.check_graph_can_save(model, 'unique_model') + graph, _ = TestOps.check_graph_can_save(model, 'unique_model') unique_node = graph.get_op_nodes(op="Unique")[0] self.assertEqual(unique_node["version"], "opset10") 
self.assertListEqual(unique_node.out_port( @@ -118,7 +124,7 @@ def test_is_finite(self): data_shape, name="Data", dtype=np.float32) is_finite = opset10.is_finite(data_parameter, name="Is_finite_10") model = Model(is_finite, [data_parameter]) - graph = TestOps.check_graph_can_save(model, 'is_finite_model') + graph, _ = TestOps.check_graph_can_save(model, 'is_finite_model') is_finite_node = graph.get_op_nodes(op="IsFinite")[0] self.assertEqual(is_finite_node["version"], "opset10") @@ -128,7 +134,7 @@ def test_is_inf(self): data_shape, name="Data", dtype=np.float32) is_inf = opset10.is_inf(data_parameter, name="Is_inf_10") model = Model(is_inf, [data_parameter]) - graph = TestOps.check_graph_can_save(model, 'is_inf_model') + graph, _ = TestOps.check_graph_can_save(model, 'is_inf_model') is_inf_node = graph.get_op_nodes(op="IsInf")[0] self.assertEqual(is_inf_node["version"], "opset10") @@ -138,7 +144,7 @@ def test_is_nan(self): data_shape, name="Data", dtype=np.float32) is_nan = opset10.is_nan(data_parameter, name="Is_nan_10") model = Model(is_nan, [data_parameter]) - graph = TestOps.check_graph_can_save(model, 'is_nan_model') + graph, _ = TestOps.check_graph_can_save(model, 'is_nan_model') is_nan_node = graph.get_op_nodes(op="IsNaN")[0] self.assertEqual(is_nan_node["version"], "opset10") @@ -177,7 +183,7 @@ def test_if(self): out2 = if_node.set_output(then_body_res_2, else_body_res_2) model = Model([out1, out2], [parameter_x, parameter_y]) - graph = TestOps.check_graph_can_save(model, 'if_model') + graph, _ = TestOps.check_graph_can_save(model, 'if_model') if_node = graph.get_op_nodes(op="If")[0] self.assertEqual(if_node["version"], "opset8") _, layer_info, _ = if_node['IE'][0] @@ -192,7 +198,7 @@ def test_strided_slice_no_begin_end_mask(self): strided_slice = opset11.strided_slice(data_parameter, np.int32([1, 2, 3, 4]), np.int32( [3, 6, 9, 12]), np.int32([1, 1, 1, 1]), begin_mask=[], end_mask=[], name="StridedSlice_10") model = Model(strided_slice, [data_parameter]) - graph = TestOps.check_graph_can_save(model, 'strided_slice_model') + graph, _ = TestOps.check_graph_can_save(model, 'strided_slice_model') strided_slice_node = graph.get_op_nodes(op="StridedSlice")[0] self.assertEqual(strided_slice_node["version"], "opset1") @@ -206,7 +212,7 @@ def test_scatter_dynamic_shape(self): mul = opset11.multiply(scatter, np.int64([1, 2])) reshape = opset11.reshape(data_parameter, mul, True) model = Model(reshape, [data_parameter]) - graph = TestOps.check_graph_can_save(model, 'scatter_dynamic_model') + graph, _ = TestOps.check_graph_can_save(model, 'scatter_dynamic_model') scatter_update_node = graph.get_op_nodes(op="ScatterUpdate")[0] self.assertListEqual(scatter_update_node.out_port(0).data.get_value().tolist(), [0, None]) @@ -214,7 +220,7 @@ def test_pad_12(self): data_parameter = opset12.parameter([6, 12, 10, 24], name="Data", dtype=np.float32) pad = opset12.pad(data_parameter, np.int64([0, 0, -1, -2]), np.int64([0, 0, -3, -4]), "constant") model = Model(pad, [data_parameter]) - graph = TestOps.check_graph_can_save(model, 'pad_model') + graph, _ = TestOps.check_graph_can_save(model, 'pad_model') pad_node = graph.get_op_nodes(op="Pad")[0] self.assertEqual(pad_node["version"], "opset12") self.assertListEqual(pad_node.in_port(1).data.get_value().tolist(), [0, 0, -1, -2]) @@ -225,7 +231,7 @@ def test_scatter_elements_update_12(self): data_parameter = opset12.parameter([10], name="Data", dtype=np.float32) scatter = opset12.scatter_elements_update(data_parameter, np.int32([5, 0, 7, 5]), np.float32([5., 
6., 1.5, -5.]), np.int32(0), "sum", False) model = Model(scatter, [data_parameter]) - graph = TestOps.check_graph_can_save(model, 'scatter_model') + graph, _ = TestOps.check_graph_can_save(model, 'scatter_model') scatter_node = graph.get_op_nodes(op="ScatterElementsUpdate")[0] self.assertListEqual(scatter_node.out_port(0).data.get_shape().tolist(), [10]) self.assertEqual(scatter_node["version"], "opset12") @@ -240,7 +246,7 @@ def test_group_norm_12(self): epsilon = 1e-6 node = opset12.group_normalization(data_parameter, scale, bias, num_groups, epsilon) model = Model(node, [data_parameter]) - graph = TestOps.check_graph_can_save(model, 'group_norm_model') + graph, _ = TestOps.check_graph_can_save(model, 'group_norm_model') gn_node = graph.get_op_nodes(op="GroupNormalization")[0] self.assertListEqual(gn_node.out_port(0).data.get_shape().tolist(), [1, 3, 3, 3]) self.assertEqual(gn_node["version"], "opset12") @@ -253,7 +259,7 @@ def test_bitwise_and_13(self): op = opset13.bitwise_and(a, b) model = Model(op, [a, b]) - graph = TestOps.check_graph_can_save(model, "bitwise_and_model") + graph, _ = TestOps.check_graph_can_save(model, "bitwise_and_model") op_node = graph.get_op_nodes(op="BitwiseAnd")[0] self.assertListEqual(op_node.out_port(0).data.get_shape().tolist(), [4, 2]) self.assertEqual(op_node["version"], "opset13") @@ -265,7 +271,7 @@ def test_bitwise_or_13(self): op = opset13.bitwise_or(a, b) model = Model(op, [a, b]) - graph = TestOps.check_graph_can_save(model, "bitwise_or_model") + graph, _ = TestOps.check_graph_can_save(model, "bitwise_or_model") op_node = graph.get_op_nodes(op="BitwiseOr")[0] self.assertListEqual(op_node.out_port(0).data.get_shape().tolist(), [4, 2]) self.assertEqual(op_node["version"], "opset13") @@ -277,7 +283,7 @@ def test_bitwise_xor_13(self): op = opset13.bitwise_xor(a, b) model = Model(op, [a, b]) - graph = TestOps.check_graph_can_save(model, "bitwise_xor_model") + graph, _ = TestOps.check_graph_can_save(model, "bitwise_xor_model") op_node = graph.get_op_nodes(op="BitwiseXor")[0] self.assertListEqual(op_node.out_port(0).data.get_shape().tolist(), [4, 2]) self.assertEqual(op_node["version"], "opset13") @@ -288,7 +294,66 @@ def test_bitwise_not_13(self): op = opset13.bitwise_not(a) model = Model(op, [a]) - graph = TestOps.check_graph_can_save(model, "bitwise_not_model") + graph, _ = TestOps.check_graph_can_save(model, "bitwise_not_model") op_node = graph.get_op_nodes(op="BitwiseNot")[0] self.assertListEqual(op_node.out_port(0).data.get_shape().tolist(), [4, 2]) self.assertEqual(op_node["version"], "opset13") + + def test_multinomial_13_param_inputs(self): + data_shape = [2, 8] + probs = opset13.parameter( + data_shape, name="probs", dtype=np.float32) + num_samples = opset13.parameter( + [1], name="num_samples", dtype=np.int32) + + op = opset13.multinomial(probs, num_samples, + convert_type="i32", + with_replacement=True, + log_probs=True, + global_seed=456, + op_seed=213) + + model = Model(op, [probs, num_samples]) + graph, loaded_model = TestOps.check_graph_can_save( + model, 'multinomial_param_model') + graph_node = graph.get_op_nodes(op="Multinomial")[0] + + self.assertEqual(graph_node["version"], "opset13") + self.assertListEqual(graph_node.out_port( + 0).data.get_shape().tolist(), [2, None]) + self.assertEqual(graph_node["convert_type"], "i32") + self.assertTrue(graph_node["with_replacement"]) + self.assertTrue(graph_node["log_probs"]) + self.assertEqual(graph_node["global_seed"], 456) + self.assertEqual(graph_node["op_seed"], 213) + 
self.assertEqual(loaded_model.get_output_element_type(0), Type.i32) + self.assertEqual(loaded_model.get_output_partial_shape( + 0), PartialShape([2, -1])) + + def test_multinomial_13_const_inputs(self): + probs = opset13.constant( + [[0.4, 0.5, 0.1], [0.3, 0.2, 0.5]], name="probs", dtype=np.float32) + num_samples = opset13.constant( + [3], name="num_samples", dtype=np.int64) + + op = opset13.multinomial(probs, num_samples, + convert_type="i64", + with_replacement=False, + log_probs=False) + + model = Model(op, []) + graph, loaded_model = TestOps.check_graph_can_save( + model, 'multinomial_const_model') + graph_node = graph.get_op_nodes(op="Multinomial")[0] + + self.assertEqual(graph_node["version"], "opset13") + self.assertListEqual(graph_node.out_port( + 0).data.get_shape().tolist(), [2, 3]) + self.assertEqual(graph_node["convert_type"], "i64") + self.assertFalse(graph_node["with_replacement"]) + self.assertFalse(graph_node["log_probs"]) + self.assertEqual(graph_node["global_seed"], 0) + self.assertEqual(graph_node["op_seed"], 0) + self.assertEqual(loaded_model.get_output_element_type(0), Type.i64) + self.assertEqual(loaded_model.get_output_partial_shape( + 0), PartialShape([2, 3])) From 7515b042e8662b6f1e3c7d91909c3b5c53466d46 Mon Sep 17 00:00:00 2001 From: Pawel Raasz Date: Tue, 17 Oct 2023 19:17:49 +0200 Subject: [PATCH 238/257] [core]Migrate squeeze operator to new API (#20282) * Migrate Squeeze to new API * Remove visit_attributes as same as base class --- src/core/include/openvino/op/squeeze.hpp | 5 +- src/core/src/bound_evaluate.cpp | 2 +- src/core/src/op/squeeze.cpp | 150 ++++++++++------------- 3 files changed, 65 insertions(+), 92 deletions(-) diff --git a/src/core/include/openvino/op/squeeze.hpp b/src/core/include/openvino/op/squeeze.hpp index 28f098be406bf7..e66cfb3d27667a 100644 --- a/src/core/include/openvino/op/squeeze.hpp +++ b/src/core/include/openvino/op/squeeze.hpp @@ -20,11 +20,8 @@ class OPENVINO_API Squeeze : public Op { Squeeze(const Output& data, const Output& axes); Squeeze(const Output& data); - bool visit_attributes(AttributeVisitor& visitor) override; void validate_and_infer_types() override; - OPENVINO_SUPPRESS_DEPRECATED_START - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - OPENVINO_SUPPRESS_DEPRECATED_END + bool evaluate(TensorVector& outputs, const TensorVector& inputs) const override; bool has_evaluate() const override; bool evaluate_lower(TensorVector& outputs) const override; bool evaluate_upper(TensorVector& outputs) const override; diff --git a/src/core/src/bound_evaluate.cpp b/src/core/src/bound_evaluate.cpp index e3b784b521c2af..1b1093b871c657 100644 --- a/src/core/src/bound_evaluate.cpp +++ b/src/core/src/bound_evaluate.cpp @@ -553,7 +553,7 @@ bool ov::has_and_set_equal_bounds(const Output& source) { } bool ov::have_node_inputs_bounds_set(const Node* const node, const size_t first_idx, const size_t last_idx) { - bool have_bound_set = last_idx <= node->get_input_size(); + bool have_bound_set = last_idx < node->get_input_size(); for (size_t i = first_idx; have_bound_set && (i <= last_idx); ++i) { have_bound_set = node->get_input_tensor(i).has_and_set_bound(); } diff --git a/src/core/src/op/squeeze.cpp b/src/core/src/op/squeeze.cpp index 50bf9af02d00a5..5e47edae2c1cc9 100644 --- a/src/core/src/op/squeeze.cpp +++ b/src/core/src/op/squeeze.cpp @@ -2,34 +2,39 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/squeeze.hpp" +#include "openvino/op/squeeze.hpp" -#include -#include -#include 
-#include +#include #include "bound_evaluate.hpp" #include "itt.hpp" -#include "ngraph/op/constant.hpp" -#include "ngraph/validation_util.hpp" -#include "openvino/reference/copy.hpp" +#include "openvino/core/validation_util.hpp" +#include "openvino/op/constant.hpp" #include "squeeze_shape_inference.hpp" -using namespace std; -using namespace ngraph; +namespace ov { +namespace op { +namespace v0 { +namespace validate { +namespace { -op::Squeeze::Squeeze() : Op() {} +bool axes_has_and_set_bound(const Node& op) { + return (op.get_input_size() < 2) || op.get_input_tensor(1).has_and_set_bound(); +} +} // namespace +} // namespace validate + +Squeeze::Squeeze() : Op() {} -op::Squeeze::Squeeze(const Output& data, const Output& axes) : Op({data, axes}) { +Squeeze::Squeeze(const Output& data, const Output& axes) : Op({data, axes}) { constructor_validate_and_infer_types(); } -op::Squeeze::Squeeze(const Output& data) : Op({data}) { +Squeeze::Squeeze(const Output& data) : Op({data}) { constructor_validate_and_infer_types(); } -void op::Squeeze::validate_and_infer_types() { +void Squeeze::validate_and_infer_types() { OV_OP_SCOPE(v0_Squeeze_validate_and_infer_types); OPENVINO_SUPPRESS_DEPRECATED_START @@ -40,115 +45,86 @@ void op::Squeeze::validate_and_infer_types() { set_output_type(0, get_input_element_type(0), output_shapes[0]); } -bool ngraph::op::v0::Squeeze::visit_attributes(AttributeVisitor& visitor) { - OV_OP_SCOPE(v0_Squeeze_visit_attributes); - return true; -} - -shared_ptr op::Squeeze::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr Squeeze::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v0_Squeeze_clone_with_new_inputs); check_new_args_count(this, new_args); - if (new_args.size() == 1) { - return make_shared(new_args.at(0)); - } else if (new_args.size() == 2) { - return make_shared(new_args.at(0), new_args.at(1)); - } else { + + switch (new_args.size()) { + case 1: + return std::make_shared(new_args[0]); + case 2: + return std::make_shared(new_args[0], new_args[1]); + default: OPENVINO_THROW("Incorrect number of new arguments"); } } -OPENVINO_SUPPRESS_DEPRECATED_START -bool op::v0::Squeeze::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { +bool Squeeze::evaluate(TensorVector& outputs, const TensorVector& inputs) const { OV_OP_SCOPE(v0_Squeeze_evaluate); - OPENVINO_SUPPRESS_DEPRECATED_START - OPENVINO_ASSERT(validate_host_tensor_vector(inputs, inputs.size())); - OPENVINO_ASSERT(validate_host_tensor_vector(outputs, 1)); - OPENVINO_SUPPRESS_DEPRECATED_END - - if (has_evaluate()) { - auto input_shapes = std::vector{inputs[0]->get_partial_shape()}; - - if (inputs.size() == 2) { - input_shapes.push_back(inputs[1]->get_partial_shape()); - } + OPENVINO_ASSERT(outputs.size() == 1); - auto output_shapes = shape_infer(this, input_shapes, make_tensor_accessor(inputs)); + const auto output_shapes = + shape_infer(this, ov::util::get_tensors_partial_shapes(inputs), make_tensor_accessor(inputs)); + outputs[0].set_shape(output_shapes.front().get_shape()); - auto out_shape = output_shapes[0].get_shape(); - outputs[0]->set_shape(out_shape); - - ov::reference::copy(inputs[0]->get_data_ptr(), - outputs[0]->get_data_ptr(), - shape_size(out_shape) * outputs[0]->get_element_type().size()); - - return true; - } - return false; + std::memcpy(outputs[0].data(), inputs[0].data(), outputs[0].get_byte_size()); + return true; } -OPENVINO_SUPPRESS_DEPRECATED_END -bool op::v0::Squeeze::has_evaluate() const { +bool Squeeze::has_evaluate() 
const { OV_OP_SCOPE(v0_Squeeze_has_evaluate); - - if (get_input_size() == 2) { - switch (get_input_element_type(1)) { - case ngraph::element::i8: - case ngraph::element::i16: - case ngraph::element::i32: - case ngraph::element::i64: - case ngraph::element::u8: - case ngraph::element::u16: - case ngraph::element::u32: - case ngraph::element::u64: + const auto validate_axes_type = [](const element::Type& et) -> bool { + switch (et) { + case element::i8: + case element::i16: + case element::i32: + case element::i64: + case element::u8: + case element::u16: + case element::u32: + case element::u64: return true; default: - break; + return false; } - return false; - } else if (get_input_size() == 1) { - return true; - } else { - return false; - } + }; + + return (get_input_size() < 2) || validate_axes_type(get_input_element_type(1)); } -bool op::v0::Squeeze::evaluate_lower(ov::TensorVector& output_values) const { +bool Squeeze::evaluate_lower(TensorVector& output_values) const { OV_OP_SCOPE(v0_Squeeze_evaluate_lower); - if (inputs().size() > 1 && !input_value(1).get_tensor().has_and_set_bound()) - return false; - return default_lower_bound_evaluator(this, output_values); + return validate::axes_has_and_set_bound(*this) && default_lower_bound_evaluator(this, output_values); } -bool op::v0::Squeeze::evaluate_upper(ov::TensorVector& output_values) const { +bool Squeeze::evaluate_upper(TensorVector& output_values) const { OV_OP_SCOPE(v0_Squeeze_evaluate_upper); - if (inputs().size() > 1 && !input_value(1).get_tensor().has_and_set_bound()) - return false; - return default_upper_bound_evaluator(this, output_values); + return validate::axes_has_and_set_bound(*this) && default_upper_bound_evaluator(this, output_values); } -bool op::v0::Squeeze::evaluate_label(TensorLabelVector& output_labels) const { - if (get_input_size() > 1 && !get_input_tensor(1).has_and_set_bound()) - return false; +bool Squeeze::evaluate_label(TensorLabelVector& output_labels) const { OPENVINO_SUPPRESS_DEPRECATED_START - return default_label_evaluator(this, output_labels); + return validate::axes_has_and_set_bound(*this) && default_label_evaluator(this, output_labels); OPENVINO_SUPPRESS_DEPRECATED_END } -bool op::v0::Squeeze::constant_fold(OutputVector& output_values, const OutputVector& inputs_values) { +bool Squeeze::constant_fold(OutputVector& output_values, const OutputVector& inputs_values) { OV_OP_SCOPE(v0_Squeeze_constant_fold); if (get_output_partial_shape(0).is_dynamic() || is_const_fold_disabled()) { return false; } - const auto& shape = get_output_shape(0); - - if (auto data_const = std::dynamic_pointer_cast(inputs_values[0].get_node_shared_ptr())) { - output_values[0] = std::make_shared(*data_const, shape); + if (auto data_const = std::dynamic_pointer_cast(inputs_values[0].get_node_shared_ptr())) { + const auto& shape = get_output_shape(0); + output_values[0] = std::make_shared(*data_const, shape); return true; } return false; } -bool op::v0::Squeeze::is_dynamic() const { +bool Squeeze::is_dynamic() const { return get_output_partial_shape(0).is_dynamic(); } +} // namespace v0 +} // namespace op +} // namespace ov From f3b61d77e3994bb810e23a74da9c4497174a4995 Mon Sep 17 00:00:00 2001 From: Maxim Vafin Date: Tue, 17 Oct 2023 21:01:50 +0200 Subject: [PATCH 239/257] [GHA] Filter timm scope for model hub tests (#20525) * Filter timm scope * Apply suggestions from code review --- .../model_hub_tests/torch_tests/test_timm.py | 22 ++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git 
a/tests/model_hub_tests/torch_tests/test_timm.py b/tests/model_hub_tests/torch_tests/test_timm.py index d08f6a8c4a9a40..d6dd438df5c6c6 100644 --- a/tests/model_hub_tests/torch_tests/test_timm.py +++ b/tests/model_hub_tests/torch_tests/test_timm.py @@ -10,9 +10,28 @@ from openvino import convert_model +def filter_timm(timm_list: list) -> list: + unique_models = set() + filtered_list = [] + ignore_set = {"base", "mini", "small", "xxtiny", "xtiny", "tiny", "lite", "nano", "pico", "medium", "big", + "large", "xlarge", "xxlarge", "huge", "gigantic", "giant", "enormous", "xs", "xxs", "s", "m", "l", "xl"} + for name in timm_list: + # first: remove datasets + name_parts = name.split(".") + _name = "_".join(name.split(".")[:-1]) if len(name_parts) > 1 else name + # second: remove sizes + name_set = set([n for n in _name.split("_") if not n.isnumeric()]) + name_set = name_set.difference(ignore_set) + name_join = "_".join(name_set) + if name_join not in unique_models: + unique_models.add(name_join) + filtered_list.append(name) + return filtered_list + + def get_all_models() -> list: m_list = timm.list_pretrained() - return m_list + return filter_timm(m_list) # To make tests reproducible we seed the random generator @@ -61,6 +80,7 @@ def teardown_method(self): def test_convert_model_precommit(self, name, ie_device): self.run(name, None, ie_device) + @pytest.mark.nightly @pytest.mark.parametrize("name", get_all_models()) def test_convert_model_all_models(self, name, ie_device): self.run(name, None, ie_device) From f723f901001ec421de81f5fe1ecdd12e320fd080 Mon Sep 17 00:00:00 2001 From: "Wang, Yang" Date: Wed, 18 Oct 2023 10:36:20 +0800 Subject: [PATCH 240/257] fixing the issue of importing property from openvino. (#20531) --- docs/snippets/ov_auto.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/snippets/ov_auto.py b/docs/snippets/ov_auto.py index 84a3d89e957e31..8c2721a355d1b3 100644 --- a/docs/snippets/ov_auto.py +++ b/docs/snippets/ov_auto.py @@ -8,7 +8,7 @@ import openvino.properties.device as device import openvino.properties.hint as hints import openvino.properties.streams as streams -import properties.enable_profiling as enable_profiling +import openvino.properties.enable_profiling as enable_profiling #! 
[py_ov_property_import_header] import openvino.properties.log as log From 3b2ad48d79e70fcb13366ebfc7f7291b7e78003e Mon Sep 17 00:00:00 2001 From: Nesterov Alexander Date: Wed, 18 Oct 2023 07:04:39 +0200 Subject: [PATCH 241/257] [ARM CPU] Common fixes for fp16 (#20504) --- .../src/nodes/ctc_greedy_decoder.cpp | 4 ++-- .../src/nodes/ctc_greedy_decoder_seq_len.cpp | 2 +- .../src/nodes/non_max_suppression.cpp | 2 +- src/plugins/intel_cpu/src/nodes/non_zero.cpp | 3 ++- src/plugins/intel_cpu/src/nodes/normalize.cpp | 11 ++++++--- src/plugins/intel_cpu/src/nodes/rnn.cpp | 7 +++++- .../behavior/ov_plugin/properties.cpp | 11 +++++---- .../infer_request_dynamic.cpp | 5 ++-- .../ov_infer_request/iteration_chaining.cpp | 5 ++-- .../skip_tests_config.cpp | 24 +++++++++---------- 10 files changed, 45 insertions(+), 29 deletions(-) diff --git a/src/plugins/intel_cpu/src/nodes/ctc_greedy_decoder.cpp b/src/plugins/intel_cpu/src/nodes/ctc_greedy_decoder.cpp index 494fb6176dd65e..0b2ee382e2a2a9 100644 --- a/src/plugins/intel_cpu/src/nodes/ctc_greedy_decoder.cpp +++ b/src/plugins/intel_cpu/src/nodes/ctc_greedy_decoder.cpp @@ -56,11 +56,11 @@ void CTCGreedyDecoder::initSupportedPrimitiveDescriptors() { return; Precision inDataPrecision = getOriginalInputPrecisionAtPort(DATA_INDEX); - if (inDataPrecision != Precision::FP32 && inDataPrecision != Precision::BF16) + if (!one_of(inDataPrecision, Precision::FP32, Precision::BF16, Precision::FP16)) IE_THROW() << errorPrefix << "has unsupported 'data' input precision: " << inDataPrecision; Precision seqLenPrecision = getOriginalInputPrecisionAtPort(SEQUENCE_LENGTH_INDEX); - if (seqLenPrecision != Precision::FP32 && seqLenPrecision != Precision::BF16) + if (!one_of(inDataPrecision, Precision::FP32, Precision::BF16, Precision::FP16)) IE_THROW() << errorPrefix << "has unsupported 'sequence_length' input precision: " << seqLenPrecision; addSupportedPrimDesc({{LayoutType::ncsp, Precision::FP32}, diff --git a/src/plugins/intel_cpu/src/nodes/ctc_greedy_decoder_seq_len.cpp b/src/plugins/intel_cpu/src/nodes/ctc_greedy_decoder_seq_len.cpp index be695d85b8c6f2..8e6e0617cd75f2 100644 --- a/src/plugins/intel_cpu/src/nodes/ctc_greedy_decoder_seq_len.cpp +++ b/src/plugins/intel_cpu/src/nodes/ctc_greedy_decoder_seq_len.cpp @@ -55,7 +55,7 @@ void CTCGreedyDecoderSeqLen::initSupportedPrimitiveDescriptors() { return; Precision inDataPrecision = getOriginalInputPrecisionAtPort(DATA_INDEX); - if (inDataPrecision != Precision::FP32 && inDataPrecision != Precision::BF16) + if (!one_of(inDataPrecision, Precision::FP32, Precision::BF16, Precision::FP16)) IE_THROW() << errorPrefix << "has unsupported 'data' input precision: " << inDataPrecision; Precision seqLenPrecision = getOriginalInputPrecisionAtPort(SEQUENCE_LENGTH_INDEX); diff --git a/src/plugins/intel_cpu/src/nodes/non_max_suppression.cpp b/src/plugins/intel_cpu/src/nodes/non_max_suppression.cpp index bfd1d8fa982901..d2a46ac97da017 100644 --- a/src/plugins/intel_cpu/src/nodes/non_max_suppression.cpp +++ b/src/plugins/intel_cpu/src/nodes/non_max_suppression.cpp @@ -626,7 +626,7 @@ void NonMaxSuppression::initSupportedPrimitiveDescriptors() { if (!supportedPrimitiveDescriptors.empty()) return; - const std::vector supportedFloatPrecision = {Precision::FP32, Precision::BF16}; + const std::vector supportedFloatPrecision = {Precision::FP32, Precision::BF16, Precision::FP16}; const std::vector supportedIntOutputPrecision = {Precision::I32, Precision::I64}; checkPrecision(getOriginalInputPrecisionAtPort(NMS_BOXES), supportedFloatPrecision, 
"boxes", inType); diff --git a/src/plugins/intel_cpu/src/nodes/non_zero.cpp b/src/plugins/intel_cpu/src/nodes/non_zero.cpp index 4571eaa9e8c998..be2a8d894fc7f8 100644 --- a/src/plugins/intel_cpu/src/nodes/non_zero.cpp +++ b/src/plugins/intel_cpu/src/nodes/non_zero.cpp @@ -57,7 +57,7 @@ void NonZero::initSupportedPrimitiveDescriptors() { return; const auto &inPrc = getOriginalInputPrecisionAtPort(0); - if (!one_of(inPrc, Precision::FP32, Precision::BF16, Precision::I32, Precision::U32, Precision::I8, Precision::U8)) { + if (!one_of(inPrc, Precision::FP32, Precision::BF16, Precision::FP16, Precision::I32, Precision::U32, Precision::I8, Precision::U8)) { IE_THROW() << "Can't create primitive descriptor for NonZero layer with name: " << getName() << " doesn't support " << inPrc.name() << " precision on 0 port"; } @@ -123,6 +123,7 @@ void NonZero::execute(dnnl::stream strm) { OV_SWITCH(intel_cpu, NonZeroExecute, ctx, inputPrec, OV_CASE(Precision::FP32, float), OV_CASE(Precision::BF16, bfloat16_t), + OV_CASE(Precision::FP16, float16), OV_CASE(Precision::I32, int), OV_CASE(Precision::U32, uint32_t), OV_CASE(Precision::I8, int8_t), diff --git a/src/plugins/intel_cpu/src/nodes/normalize.cpp b/src/plugins/intel_cpu/src/nodes/normalize.cpp index c4fc60a9d9e855..12f3ecf397764a 100644 --- a/src/plugins/intel_cpu/src/nodes/normalize.cpp +++ b/src/plugins/intel_cpu/src/nodes/normalize.cpp @@ -796,10 +796,14 @@ void NormalizeL2::initSupportedPrimitiveDescriptors() { inputPrecision = outputPrecision = Precision::BF16; } - if (!one_of(inputPrecision, Precision::FP32, Precision::BF16, Precision::I8, Precision::U8)) { + if (one_of(Precision::FP16, inputPrecision, outputPrecision) && mayiuse(cpu::x64::sse41)) { + inputPrecision = outputPrecision = Precision::FP32; + } + + if (!one_of(inputPrecision, Precision::FP32, Precision::BF16, Precision::FP16, Precision::I8, Precision::U8)) { THROW_ERROR << "has unsupported input precision: " << inputPrecision; } - if (!one_of(outputPrecision, Precision::FP32, Precision::BF16, Precision::I8, Precision::U8)) { + if (!one_of(outputPrecision, Precision::FP32, Precision::BF16, Precision::FP16, Precision::I8, Precision::U8)) { THROW_ERROR << "has unsupported output precision: " << outputPrecision; } @@ -1483,7 +1487,8 @@ std::shared_ptr NormalizeL2::NormalizeL2Execut OV_CASE2(Precision::U8, Precision::FP32, uint8_t, float), OV_CASE2(Precision::I8, Precision::FP32, int8_t, float), OV_CASE2(Precision::FP32, Precision::FP32, float, float), - OV_CASE2(Precision::BF16, Precision::BF16, bfloat16_t, bfloat16_t)); + OV_CASE2(Precision::BF16, Precision::BF16, bfloat16_t, bfloat16_t), + OV_CASE2(Precision::FP16, Precision::FP16, float16_t, float16_t)); return ctx.executor; } diff --git a/src/plugins/intel_cpu/src/nodes/rnn.cpp b/src/plugins/intel_cpu/src/nodes/rnn.cpp index 9992f0f392b893..f453b7a5a51e0b 100644 --- a/src/plugins/intel_cpu/src/nodes/rnn.cpp +++ b/src/plugins/intel_cpu/src/nodes/rnn.cpp @@ -133,6 +133,7 @@ inline bool haveAttention(const dnnl::algorithm& alg) { const std::map RNN::weightsByinputDataType { // layer data type weights data type {memory::data_type::f32, memory::data_type::f32}, + {memory::data_type::f16, memory::data_type::f16}, {memory::data_type::bf16, memory::data_type::bf16}, {memory::data_type::u8, memory::data_type::s8}, {memory::data_type::s8, memory::data_type::s8}, @@ -505,6 +506,10 @@ void RNN::configurePortDataTypes() { if (one_of(memory::data_type::bf16, inDataTypes[xIdx], inDataTypes[hIdx])) inDataTypes[xIdx] = outDataTypes[yIdx] = 
outDataTypes[hoIdx] = inDataTypes[hIdx] = memory::data_type::bf16; // required by oneDNN. + if (one_of(memory::data_type::f16, inDataTypes[xIdx], inDataTypes[hIdx])) + // onednn doesn't have fp16 instance + inDataTypes[xIdx] = outDataTypes[yIdx] = outDataTypes[hoIdx] = inDataTypes[hIdx] = memory::data_type::f32; // required by oneDNN. + if (outDataTypes[yIdx] == memory::data_type::bf16 && one_of(inDataTypes[xIdx], memory::data_type::s8, memory::data_type::u8)) outDataTypes[yIdx] = memory::data_type::f32; // oneDNN does not support bf16 output precision for quantized rnn primitive yet } @@ -882,7 +887,7 @@ void RNN::copyWeightsData() { } const auto& dataType = inDataTypes[xIdx]; - if (dataType == memory::data_type::bf16) { + if (one_of(dataType, memory::data_type::bf16, memory::data_type::f16)) { fillWeights(gate_map, wIdx, rIdx); } else if (dataType == memory::data_type::f32) { // WA To avoid different weights layer and iter formats in FP32 case diff --git a/src/plugins/intel_cpu/tests/functional/behavior/ov_plugin/properties.cpp b/src/plugins/intel_cpu/tests/functional/behavior/ov_plugin/properties.cpp index 0e959ab865a6ed..10c0a244fcca31 100644 --- a/src/plugins/intel_cpu/tests/functional/behavior/ov_plugin/properties.cpp +++ b/src/plugins/intel_cpu/tests/functional/behavior/ov_plugin/properties.cpp @@ -173,13 +173,18 @@ TEST_F(OVClassConfigTestCPU, smoke_PluginSetConfigAffinityCore) { ASSERT_EQ(false, value); } +#if defined(OV_CPU_ARM_ENABLE_FP16) + const auto expected_precision_for_performance_mode = ov::element::f16; +#else + const auto expected_precision_for_performance_mode = InferenceEngine::with_cpu_x86_bfloat16() ? ov::element::bf16 : ov::element::f32; +#endif + TEST_F(OVClassConfigTestCPU, smoke_PluginSetConfigHintInferencePrecision) { ov::Core ie; auto value = ov::element::f32; - const auto precision = InferenceEngine::with_cpu_x86_bfloat16() ? ov::element::bf16 : ov::element::f32; ASSERT_NO_THROW(value = ie.get_property("CPU", ov::hint::inference_precision)); - ASSERT_EQ(precision, value); + ASSERT_EQ(expected_precision_for_performance_mode, value); const auto forcedPrecision = ov::element::f32; @@ -210,8 +215,6 @@ TEST_F(OVClassConfigTestCPU, smoke_PluginSetConfigEnableProfiling) { ASSERT_EQ(enableProfiling, value); } -const auto expected_precision_for_performance_mode = InferenceEngine::with_cpu_x86_bfloat16() ? ov::element::bf16 : ov::element::f32; - const auto bf16_if_can_be_emulated = InferenceEngine::with_cpu_x86_avx512_core() ? 
ov::element::bf16 : ov::element::f32; using ExpectedModeAndType = std::pair; diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp index fa66f4a2c7801d..9cbe69255db0e0 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp @@ -11,11 +11,12 @@ using namespace ov::test::behavior; namespace { const std::vector configs = { - {} + {{ov::hint::inference_precision.name(), ov::element::f32}} }; const std::vector HeteroConfigs = { - {ov::device::priorities(ov::test::utils::DEVICE_CPU)} + {{ov::hint::inference_precision.name(), ov::element::f32}, + {ov::device::priorities(ov::test::utils::DEVICE_CPU)}}, }; std::shared_ptr getFunction1() { diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/iteration_chaining.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/iteration_chaining.cpp index 2c8678165426b3..567c877be8e8ab 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/iteration_chaining.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/iteration_chaining.cpp @@ -10,11 +10,12 @@ using namespace ov::test::behavior; namespace { const std::vector configs = { - {} + {{ov::hint::inference_precision.name(), ov::element::f32}} }; const std::vector HeteroConfigs = { - {ov::device::priorities(ov::test::utils::DEVICE_CPU)} + {{ov::hint::inference_precision.name(), ov::element::f32}, + {ov::device::priorities(ov::test::utils::DEVICE_CPU)}}, }; INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVIterationChaining, diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp index 9e9baece4c63e1..9faf421c26a0f9 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp @@ -190,18 +190,6 @@ std::vector disabledTestPatterns() { // Issue: 122094 R"(smoke_Interpolate_Basic_Down_Sample_Tail/InterpolateLayerTest.Inference.*(asymmetric|align_corners).*f16.*)", }; -#if defined(__APPLE__) && defined(OPENVINO_ARCH_ARM64) - // Issue: 120950 - retVector.emplace_back(R"(.*smoke_TensorIteratorCommon/TensorIteratorTest.Inference.*_modelType=f16_targetDevice=CPU.*)"); - retVector.emplace_back(R"(.*smoke_CtcGreedyDecoderBasic/CTCGreedyDecoderLayerTest.Inference.*netPRC=f16.*trgDev=CPU.*)"); - retVector.emplace_back(R"(.*CTCGreedyDecoderSeqLenLayerTest.Inference.*dataPRC=f16.*trgDev=CPU.*)"); - // Issue: 122177 - retVector.emplace_back(R"(.*smoke_LSTMCellCommon/LSTMCellTest.Inference.*_modelType=f16.*)"); - retVector.emplace_back(R"(.*smoke_LSTMSequenceCommonZeroClip/LSTMSequenceTest.Inference.*_modelType=f16.*)"); - // Issue 122699 - retVector.emplace_back(R"(.*smoke_nonzero/NonZeroLayerTest.Inference.*inPRC=f16.*)"); - retVector.emplace_back(R"(.*NormalizeL2LayerTest.Inference.*netPRC=f16.*)"); -#endif #if defined(OPENVINO_ARCH_X86) retVector.emplace_back(R"(.*DetectionOutputLayerTest.*)"); @@ -230,6 +218,18 @@ std::vector 
disabledTestPatterns() { retVector.emplace_back(R"(smoke_NegativeQuantizedMatMulMultiplyFusion.*)"); // int8 specific retVector.emplace_back(R"(smoke_Quantized.*)"); + +#if defined(OV_CPU_ARM_ENABLE_FP16) + // Issue: 123019 + retVector.emplace_back(R"(smoke_AvgPool_ExplicitPad_CeilRounding.*modelType=f16.*)"); + retVector.emplace_back(R"(smoke_AvgPool_ExplicitPad_FloorRounding_5Dinput/PoolingLayerTest.*modelType=f16.*)"); + retVector.emplace_back(R"(smoke_AvgPool_SameUpperPad_FloorRounding_5Dinput/PoolingLayerTest.*modelType=f16.*)"); + retVector.emplace_back(R"(smoke_AvgPool_SameLowerPad_CeilRounding_5Dinput/PoolingLayerTest.*modelType=f16.*)"); + retVector.emplace_back(R"(smoke_CompareWithRefs_Mvn.*INFERENCE_PRECISION_HINT=f16.*)"); + retVector.emplace_back(R"(smoke_staticShapes4D.*INFERENCE_PRECISION_HINT=f16.*)"); + retVector.emplace_back(R"(smoke_dynamicShapes4D.*INFERENCE_PRECISION_HINT=f16.*)"); +#endif + #endif #if defined(OPENVINO_ARCH_ARM) From a30e25c725b9aa38a2c5e51dfc52874a103e6bab Mon Sep 17 00:00:00 2001 From: Siddhant Chauhan Date: Wed, 18 Oct 2023 11:19:33 +0530 Subject: [PATCH 242/257] [TF FE][TF Hub] Support BatchMatMulV3 operation (#20528) * [TF FE][TF Hub] Support BatchMatMulV3 operation * Update src/frontends/tensorflow_common/src/op/matmul.cpp * Update src/frontends/tensorflow_common/src/op/matmul.cpp --------- Co-authored-by: Roman Kazantsev --- src/frontends/tensorflow/src/op_table.cpp | 1 + .../include/common_op_table.hpp | 1 + .../tensorflow_common/src/op/matmul.cpp | 20 +++++++++++++++++++ .../tensorflow_tests/test_tf_MatMul.py | 6 +++--- 4 files changed, 25 insertions(+), 3 deletions(-) diff --git a/src/frontends/tensorflow/src/op_table.cpp b/src/frontends/tensorflow/src/op_table.cpp index f763cca9afc980..bc1a657faf54fb 100644 --- a/src/frontends/tensorflow/src/op_table.cpp +++ b/src/frontends/tensorflow/src/op_table.cpp @@ -130,6 +130,7 @@ const std::map get_supported_ops() { {"AvgPool3D", CreatorFunction(translate_avg_pool_op)}, {"BatchMatMul", CreatorFunction(translate_batch_mat_mul_op)}, {"BatchMatMulV2", CreatorFunction(translate_batch_mat_mul_op)}, + {"BatchMatMulV3", CreatorFunction(translate_batch_mat_mul_with_type_op)}, {"BatchToSpaceND", CreatorFunction(translate_batch_to_space_nd_op)}, {"BroadcastArgs", CreatorFunction(translate_broadcast_args_op)}, {"BroadcastTo", CreatorFunction(translate_broadcast_to_op)}, diff --git a/src/frontends/tensorflow_common/include/common_op_table.hpp b/src/frontends/tensorflow_common/include/common_op_table.hpp index ff4e920f61de07..54f1dff243efd1 100644 --- a/src/frontends/tensorflow_common/include/common_op_table.hpp +++ b/src/frontends/tensorflow_common/include/common_op_table.hpp @@ -38,6 +38,7 @@ OP_CONVERTER(translate_arg_max_op); OP_CONVERTER(translate_arg_min_op); OP_CONVERTER(translate_avg_pool_op); OP_CONVERTER(translate_batch_mat_mul_op); +OP_CONVERTER(translate_batch_mat_mul_with_type_op); OP_CONVERTER(translate_batch_to_space_nd_op); OP_CONVERTER(translate_bias_add_op); OP_CONVERTER(translate_broadcast_args_op); diff --git a/src/frontends/tensorflow_common/src/op/matmul.cpp b/src/frontends/tensorflow_common/src/op/matmul.cpp index dd3aba71ddf5cd..21a0591d109b69 100644 --- a/src/frontends/tensorflow_common/src/op/matmul.cpp +++ b/src/frontends/tensorflow_common/src/op/matmul.cpp @@ -35,6 +35,26 @@ OutputVector translate_batch_mat_mul_op(const NodeContext& node) { set_node_name(node.get_name(), result); return result->outputs(); } + +OutputVector translate_batch_mat_mul_with_type_op(const NodeContext& node) { 
+ auto x = node.get_input(0); + auto y = node.get_input(1); + + auto input_type = x.get_element_type(); + + auto adj_x = node.get_attribute("adj_x", false); + auto adj_y = node.get_attribute("adj_y", false); + auto t_out = node.get_attribute("Tout", input_type); + + auto result = make_shared(x, y, adj_x, adj_y)->output(0); + + if (t_out != input_type) { + result = make_shared(result, t_out); + } + + set_node_name(node.get_name(), result.get_node_shared_ptr()); + return {result}; +} } // namespace op } // namespace tensorflow } // namespace frontend diff --git a/tests/layer_tests/tensorflow_tests/test_tf_MatMul.py b/tests/layer_tests/tensorflow_tests/test_tf_MatMul.py index 460afb662851e7..2a93291af28230 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_MatMul.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_MatMul.py @@ -36,7 +36,7 @@ def create_net_with_matmul_op(self, x_shape, y_shape, x_bool, y_bool, op_type, i elif op_type == 'BatchMatMulV3': op_type_to_tf[op_type](x=tf_x, y=tf_y, Tout=tf.float32, adj_x=x_bool, adj_y=y_bool, name='Operation') else: - raise RuntimeError("Undknown operation") + raise RuntimeError("Unknown operation") tf.compat.v1.global_variables_initializer() tf_net = sess.graph_def @@ -53,7 +53,7 @@ def create_net_with_matmul_op(self, x_shape, y_shape, x_bool, y_bool, op_type, i @pytest.mark.parametrize("params", test_data_precommit) @pytest.mark.parametrize("op_type", ['BatchMatMul', 'BatchMatMulV2', - #'BatchMatMulV3', #Isn't supported + 'BatchMatMulV3', 'MatMul', ]) @pytest.mark.precommit_tf_fe @@ -72,7 +72,7 @@ def test_matmul_op_precommit(self, params, ie_device, precision, ir_version, tem @pytest.mark.parametrize("params", test_data) @pytest.mark.parametrize("op_type", ['BatchMatMul', 'BatchMatMulV2', - #'BatchMatMulV3', #Isn't supported + 'BatchMatMulV3', 'MatMul', ]) @pytest.mark.parametrize("x_bool", [ From c8b64f6a95c32b8b6795c9aea6a34e28027ceac6 Mon Sep 17 00:00:00 2001 From: Andrei Gorbachev Date: Wed, 18 Oct 2023 07:02:18 +0100 Subject: [PATCH 243/257] [GPU] Refactor BroadcastLayer, Bucketize, Concat (#20369) * BroadcastLayer * Bucketize * Concat * fix incorrect fill_random_unique_sequence fo f16 * Update src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/broadcast.cpp Co-authored-by: Roman Lyamin * fix after review --------- Co-authored-by: Roman Lyamin --- .../single_layer_tests/broadcast.cpp | 153 +++++++++++------- .../single_layer_tests/bucketize.cpp | 114 ++++++------- .../single_layer_tests/concat.cpp | 19 +-- .../include/common_test_utils/data_utils.hpp | 4 +- 4 files changed, 148 insertions(+), 142 deletions(-) diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/broadcast.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/broadcast.cpp index 84234e94bbd347..3387375a0c07c1 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/broadcast.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/broadcast.cpp @@ -4,25 +4,24 @@ #include -#include "single_layer_tests/broadcast.hpp" +#include "single_op_tests/broadcast.hpp" #include "common_test_utils/test_constants.hpp" -using namespace LayerTestsDefinitions; - namespace { - -const std::vector inputPrecisions = { - InferenceEngine::Precision::FP32, - InferenceEngine::Precision::FP16, - InferenceEngine::Precision::I32, - InferenceEngine::Precision::I8, - InferenceEngine::Precision::U8 +using 
ov::test::BroadcastLayerTest; +using ov::test::BroadcastParamsTuple; +const std::vector inputPrecisions = { + ov::element::f32, + ov::element::f16, + ov::element::i32, + ov::element::i8, + ov::element::u8 }; -const std::vector inputTPrecisions = { - InferenceEngine::Precision::FP16, - InferenceEngine::Precision::I16, - InferenceEngine::Precision::BOOL +const std::vector inputTPrecisions = { + ov::element::f16, + ov::element::i16, + ov::element::boolean }; // NUMPY MODE ////////////////////////////////////////// @@ -31,12 +30,16 @@ std::vector> targetShapesNumpy0D = { {}, }; +std::vector> input_shapes_0d_static = { + {{}} +}; + INSTANTIATE_TEST_CASE_P(smoke_TestNumpyBroadcast0D, BroadcastLayerTest, ::testing::Combine(::testing::ValuesIn(targetShapesNumpy0D), - ::testing::Values(ngraph::AxisSet{}), // not used in numpy mode - ::testing::Values(ngraph::op::BroadcastType::NUMPY), - ::testing::Values(std::vector{}), + ::testing::Values(ov::AxisSet{}), // not used in numpy mode + ::testing::Values(ov::op::BroadcastType::NUMPY), + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_0d_static)), ::testing::ValuesIn(inputPrecisions), ::testing::Values(ov::test::utils::DEVICE_GPU)), BroadcastLayerTest::getTestCaseName); @@ -53,12 +56,16 @@ std::vector> targetShapesNumpy1D = { {1, 4, 4}, }; +std::vector> input_shapes_1d_static = { + {{1}} +}; + INSTANTIATE_TEST_CASE_P(smoke_TestNumpyBroadcast1D, BroadcastLayerTest, ::testing::Combine(::testing::ValuesIn(targetShapesNumpy1D), - ::testing::Values(ngraph::AxisSet{}), // not used in numpy mode - ::testing::Values(ngraph::op::BroadcastType::NUMPY), - ::testing::Values(std::vector{1}), + ::testing::Values(ov::AxisSet{}), // not used in numpy mode + ::testing::Values(ov::op::BroadcastType::NUMPY), + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_1d_static)), ::testing::ValuesIn(inputPrecisions), ::testing::Values(ov::test::utils::DEVICE_GPU)), BroadcastLayerTest::getTestCaseName); @@ -66,9 +73,9 @@ INSTANTIATE_TEST_CASE_P(smoke_TestNumpyBroadcast1D, INSTANTIATE_TEST_CASE_P(smoke_PrecTransformation, BroadcastLayerTest, ::testing::Combine( ::testing::Values(targetShapesNumpy1D[0]), - ::testing::Values(ngraph::AxisSet{}), //not used in numpy mode - ::testing::Values(ngraph::op::BroadcastType::NUMPY), - ::testing::Values(std::vector{1}), + ::testing::Values(ov::AxisSet{}), //not used in numpy mode + ::testing::Values(ov::op::BroadcastType::NUMPY), + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_1d_static)), ::testing::ValuesIn(inputTPrecisions), ::testing::Values(ov::test::utils::DEVICE_GPU)), BroadcastLayerTest::getTestCaseName); @@ -81,12 +88,16 @@ std::vector> targetShapesNumpy2D = { {2, 2, 3, 6}, }; +std::vector> input_shapes_2d_static = { + {{3, 1}} +}; + INSTANTIATE_TEST_CASE_P(smoke_TestNumpyBroadcast2D, BroadcastLayerTest, ::testing::Combine(::testing::ValuesIn(targetShapesNumpy2D), ::testing::Values(ngraph::AxisSet{}), // not used in numpy mode ::testing::Values(ngraph::op::BroadcastType::NUMPY), - ::testing::Values(std::vector{3, 1}), + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_2d_static)), ::testing::ValuesIn(inputPrecisions), ::testing::Values(ov::test::utils::DEVICE_GPU)), BroadcastLayerTest::getTestCaseName); @@ -99,42 +110,63 @@ std::vector> targetShapesNumpy3D = { {2, 1, 1, 4, 4}, }; +std::vector> input_shapes_3d_static = { + {{1, 4, 1}} +}; + + INSTANTIATE_TEST_CASE_P(smoke_TestNumpyBroadcast3D, 
BroadcastLayerTest, ::testing::Combine(::testing::ValuesIn(targetShapesNumpy3D), - ::testing::Values(ngraph::AxisSet{}), // not used in numpy mode - ::testing::Values(ngraph::op::BroadcastType::NUMPY), - ::testing::Values(std::vector{1, 4, 1}), + ::testing::Values(ov::AxisSet{}), // not used in numpy mode + ::testing::Values(ov::op::BroadcastType::NUMPY), + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_3d_static)), ::testing::ValuesIn(inputPrecisions), ::testing::Values(ov::test::utils::DEVICE_GPU)), BroadcastLayerTest::getTestCaseName); +std::vector> targetShapesNumpy6D = { + {1, 2, 3, 4, 5, 6}, +}; + +std::vector> input_shapes_6d_static = { + {{1, 2, 1, 4, 1, 6}} +}; + INSTANTIATE_TEST_CASE_P(smoke_TestNumpyBroadcast6D, BroadcastLayerTest, - ::testing::Combine(::testing::Values(std::vector{1, 2, 3, 4, 5, 6}), - ::testing::Values(ngraph::AxisSet{}), // not used in numpy mode - ::testing::Values(ngraph::op::BroadcastType::NUMPY), - ::testing::Values(std::vector{1, 2, 1, 4, 1, 6}), + ::testing::Combine(::testing::ValuesIn(targetShapesNumpy6D), + ::testing::Values(ov::AxisSet{}), // not used in numpy mode + ::testing::Values(ov::op::BroadcastType::NUMPY), + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_6d_static)), ::testing::ValuesIn(inputPrecisions), ::testing::Values(ov::test::utils::DEVICE_GPU)), BroadcastLayerTest::getTestCaseName); +std::vector> targetShapesNumpy5D = { + {1, 2, 3, 4, 5}, +}; + +std::vector> input_shapes_5d_static = { + {{1, 2, 1, 4, 1}} +}; + INSTANTIATE_TEST_CASE_P(smoke_TestNumpyBroadcast5D, BroadcastLayerTest, - ::testing::Combine(::testing::Values(std::vector{1, 2, 3, 4, 5}), - ::testing::Values(ngraph::AxisSet{}), // not used in numpy mode - ::testing::Values(ngraph::op::BroadcastType::NUMPY), - ::testing::Values(std::vector{1, 2, 1, 4, 1}), + ::testing::Combine(::testing::ValuesIn(targetShapesNumpy5D), + ::testing::Values(ov::AxisSet{}), // not used in numpy mode + ::testing::Values(ov::op::BroadcastType::NUMPY), + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_5d_static)), ::testing::ValuesIn(inputPrecisions), ::testing::Values(ov::test::utils::DEVICE_GPU)), BroadcastLayerTest::getTestCaseName); // END NUMPY MODE ////////////////////////////////////// // BIDIRECTIONAL MODE ////////////////////////////////// -std::vector> inShapesBidi = { - {4, 1}, - {1, 4, 1}, - {4, 1, 1} +std::vector> inShapesBidi = { + {{4, 1}}, + {{1, 4, 1}}, + {{4, 1, 1}} }; std::vector> targetShapesBidi = { @@ -146,25 +178,25 @@ std::vector> targetShapesBidi = { INSTANTIATE_TEST_CASE_P(smoke_TestBidirectionalBroadcast, BroadcastLayerTest, ::testing::Combine(::testing::ValuesIn(targetShapesBidi), - ::testing::Values(ngraph::AxisSet{}), // not used in bidirectional mode - ::testing::Values(ngraph::op::BroadcastType::BIDIRECTIONAL), - ::testing::ValuesIn(inShapesBidi), + ::testing::Values(ov::AxisSet{}), // not used in bidirectional mode + ::testing::Values(ov::op::BroadcastType::BIDIRECTIONAL), + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(inShapesBidi)), ::testing::ValuesIn(inputPrecisions), ::testing::Values(ov::test::utils::DEVICE_GPU)), BroadcastLayerTest::getTestCaseName); // EXPLICIT MODE /////////////////////////////////////// // 1D -std::vector> inShapesExplicit1D = { {4} }; +std::vector> inShapesExplicit1D = { {{4}} }; std::vector> targetShapesExplicit1D = { {4, 2, 4}, {4, 2, 4, 1} }; -std::vector axes1D = { {0}, {2} }; +std::vector axes1D = { {0}, {2} }; 
INSTANTIATE_TEST_CASE_P(smoke_TestExplicitBroadcast1D, BroadcastLayerTest, ::testing::Combine(::testing::ValuesIn(targetShapesExplicit1D), ::testing::ValuesIn(axes1D), - ::testing::Values(ngraph::op::BroadcastType::EXPLICIT), - ::testing::ValuesIn(inShapesExplicit1D), + ::testing::Values(ov::op::BroadcastType::EXPLICIT), + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(inShapesExplicit1D)), ::testing::ValuesIn(inputPrecisions), ::testing::Values(ov::test::utils::DEVICE_GPU)), BroadcastLayerTest::getTestCaseName); @@ -172,18 +204,18 @@ INSTANTIATE_TEST_CASE_P(smoke_TestExplicitBroadcast1D, INSTANTIATE_TEST_SUITE_P(smoke_TestBidirectionalBroadcast3, BroadcastLayerTest, ::testing::Combine(::testing::Values(targetShapesBidi[2]), - ::testing::Values(ngraph::AxisSet{}), // not used in bidirectional mode - ::testing::Values(ngraph::op::BroadcastType::BIDIRECTIONAL), - ::testing::Values(inShapesBidi[2]), + ::testing::Values(ov::AxisSet{}), // not used in bidirectional mode + ::testing::Values(ov::op::BroadcastType::BIDIRECTIONAL), + ::testing::Values(ov::test::static_shapes_to_test_representation(inShapesBidi[2])), ::testing::ValuesIn(inputPrecisions), ::testing::Values(ov::test::utils::DEVICE_GPU)), BroadcastLayerTest::getTestCaseName); // EXPLICIT MODE -std::vector> inShapesExplicit = { - {3, 1}, - {2, 4} +std::vector> inShapesExplicit = { + {{3, 1}}, + {{2, 4}} }; std::vector> targetShapesExplicit = { @@ -192,34 +224,33 @@ std::vector> targetShapesExplicit = { }; // 2D -std::vector> inShapesExplicit2D = { {2, 4} }; +std::vector> inShapesExplicit2D = { {{2, 4}} }; std::vector> targetShapesExplicit2D = { {2, 2, 4}, {2, 2, 4, 1}}; -std::vector axes2D = { {1, 2}, {0, 2} }; +std::vector axes2D = { {1, 2}, {0, 2} }; INSTANTIATE_TEST_CASE_P(smoke_TestExplicitBroadcast2D, BroadcastLayerTest, ::testing::Combine(::testing::ValuesIn(targetShapesExplicit2D), ::testing::ValuesIn(axes2D), - ::testing::Values(ngraph::op::BroadcastType::EXPLICIT), - ::testing::ValuesIn(inShapesExplicit2D), + ::testing::Values(ov::op::BroadcastType::EXPLICIT), + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(inShapesExplicit2D)), ::testing::ValuesIn(inputPrecisions), ::testing::Values(ov::test::utils::DEVICE_GPU)), BroadcastLayerTest::getTestCaseName); // 3D -std::vector> inShapesExplicit3D = { {2, 2, 2} }; +std::vector> inShapesExplicit3D = { {{2, 2, 2}} }; std::vector> targetShapesExplicit3D = { {2, 2, 2, 2} }; -std::vector axes3D = { {0, 1, 2}, {0, 1, 3}, {0, 2, 3}, {1, 2, 3} }; +std::vector axes3D = { {0, 1, 2}, {0, 1, 3}, {0, 2, 3}, {1, 2, 3} }; INSTANTIATE_TEST_CASE_P(smoke_TestExplicitBroadcast3D, BroadcastLayerTest, ::testing::Combine(::testing::ValuesIn(targetShapesExplicit3D), ::testing::ValuesIn(axes3D), - ::testing::Values(ngraph::op::BroadcastType::EXPLICIT), - ::testing::ValuesIn(inShapesExplicit3D), + ::testing::Values(ov::op::BroadcastType::EXPLICIT), + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(inShapesExplicit3D)), ::testing::ValuesIn(inputPrecisions), ::testing::Values(ov::test::utils::DEVICE_GPU)), BroadcastLayerTest::getTestCaseName); // END EXPLICIT MODE /////////////////////////////////// - } // namespace diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/bucketize.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/bucketize.cpp index b658cfc840fe06..89da385392745f 100644 --- 
a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/bucketize.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/bucketize.cpp @@ -2,31 +2,37 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "single_layer_tests/bucketize.hpp" +#include "single_op_tests/bucketize.hpp" #include -using namespace LayerTestsDefinitions; +using ov::test::BucketizeLayerTest; namespace { -const std::vector> data_shapes = { - // No reason to test other ranks as logic is the same - {40, 22, 13, 9}, // 4D - {6, 7, 3, 2, 8}, // 5D - {6, 7, 3, 2, 8, 5}, // 6D -}; - -const std::vector> buckets_shapes = { - {5}, - {100}, +const std::vector> input_shapes_static = { + {{40, 22, 13, 9}, {5}}, + {{6, 7, 3, 2, 8}, {5}}, + {{6, 7, 3, 2, 8, 5}, {5}}, + {{40, 22, 13, 9}, {100}}, + {{6, 7, 3, 2, 8}, {100}}, + {{6, 7, 3, 2, 8, 5}, {100}}, }; const std::vector with_right_bound = {true, false}; -const std::vector out_precision = { - InferenceEngine::Precision::I32, - InferenceEngine::Precision::I64, +const std::vector out_precision = { + ov::element::i32, + ov::element::i64 +}; + +const std::vector in_buckets_precision = { + ov::element::f16, + ov::element::f32, + ov::element::i32, + ov::element::i64, + ov::element::i8, + ov::element::u8 }; // We won't test FP32 and FP16 together as it won't make sense for now @@ -34,94 +40,68 @@ const std::vector out_precision = { INSTANTIATE_TEST_SUITE_P(smoke_Bucketize_input_fp16, BucketizeLayerTest, - testing::Combine(testing::ValuesIn(data_shapes), - testing::ValuesIn(buckets_shapes), + testing::Combine(testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_static)), testing::ValuesIn(with_right_bound), - testing::Values(InferenceEngine::Precision::FP16), - testing::Values(InferenceEngine::Precision::FP16, - InferenceEngine::Precision::I32, - InferenceEngine::Precision::I64, - InferenceEngine::Precision::I8, - InferenceEngine::Precision::U8), + testing::Values(ov::element::f16), + testing::Values(ov::element::f16, + ov::element::i32, + ov::element::i64, + ov::element::i8, + ov::element::u8), testing::ValuesIn(out_precision), testing::Values(ov::test::utils::DEVICE_GPU)), BucketizeLayerTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_Bucketize_input_fp32, BucketizeLayerTest, - testing::Combine(testing::ValuesIn(data_shapes), - testing::ValuesIn(buckets_shapes), + testing::Combine(testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_static)), testing::ValuesIn(with_right_bound), - testing::Values(InferenceEngine::Precision::FP32), - testing::Values(InferenceEngine::Precision::FP32, - InferenceEngine::Precision::I32, - InferenceEngine::Precision::I64, - InferenceEngine::Precision::I8, - InferenceEngine::Precision::U8), + testing::Values(ov::element::f32), + testing::Values(ov::element::f32, + ov::element::i32, + ov::element::i64, + ov::element::i8, + ov::element::u8), testing::ValuesIn(out_precision), testing::Values(ov::test::utils::DEVICE_GPU)), BucketizeLayerTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_Bucketize_input_i32, BucketizeLayerTest, - testing::Combine(testing::ValuesIn(data_shapes), - testing::ValuesIn(buckets_shapes), + testing::Combine(testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_static)), testing::ValuesIn(with_right_bound), - testing::Values(InferenceEngine::Precision::I32), - testing::Values(InferenceEngine::Precision::FP16, - InferenceEngine::Precision::FP32, - InferenceEngine::Precision::I32, - 
InferenceEngine::Precision::I64, - InferenceEngine::Precision::I8, - InferenceEngine::Precision::U8), + testing::Values(ov::element::i32), + testing::ValuesIn(in_buckets_precision), testing::ValuesIn(out_precision), testing::Values(ov::test::utils::DEVICE_GPU)), BucketizeLayerTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_Bucketize_input_i64, BucketizeLayerTest, - testing::Combine(testing::ValuesIn(data_shapes), - testing::ValuesIn(buckets_shapes), + testing::Combine(testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_static)), testing::ValuesIn(with_right_bound), - testing::Values(InferenceEngine::Precision::I64), - testing::Values(InferenceEngine::Precision::FP16, - InferenceEngine::Precision::FP32, - InferenceEngine::Precision::I32, - InferenceEngine::Precision::I64, - InferenceEngine::Precision::I8, - InferenceEngine::Precision::U8), + testing::Values(ov::element::i64), + testing::ValuesIn(in_buckets_precision), testing::ValuesIn(out_precision), testing::Values(ov::test::utils::DEVICE_GPU)), BucketizeLayerTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_Bucketize_input_i8, BucketizeLayerTest, - testing::Combine(testing::ValuesIn(data_shapes), - testing::ValuesIn(buckets_shapes), + testing::Combine(testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_static)), testing::ValuesIn(with_right_bound), - testing::Values(InferenceEngine::Precision::I8), - testing::Values(InferenceEngine::Precision::FP16, - InferenceEngine::Precision::FP32, - InferenceEngine::Precision::I32, - InferenceEngine::Precision::I64, - InferenceEngine::Precision::I8, - InferenceEngine::Precision::U8), + testing::Values(ov::element::i8), + testing::ValuesIn(in_buckets_precision), testing::ValuesIn(out_precision), testing::Values(ov::test::utils::DEVICE_GPU)), BucketizeLayerTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_Bucketize_input_u8, BucketizeLayerTest, - testing::Combine(testing::ValuesIn(data_shapes), - testing::ValuesIn(buckets_shapes), + testing::Combine(testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_static)), testing::ValuesIn(with_right_bound), - testing::Values(InferenceEngine::Precision::U8), - testing::Values(InferenceEngine::Precision::FP16, - InferenceEngine::Precision::FP32, - InferenceEngine::Precision::I32, - InferenceEngine::Precision::I64, - InferenceEngine::Precision::I8, - InferenceEngine::Precision::U8), + testing::Values(ov::element::u8), + testing::ValuesIn(in_buckets_precision), testing::ValuesIn(out_precision), testing::Values(ov::test::utils::DEVICE_GPU)), BucketizeLayerTest::getTestCaseName); diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/concat.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/concat.cpp index 0d0aa1fe704b77..a6dfee6b36d641 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/concat.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/concat.cpp @@ -4,35 +4,30 @@ #include -#include "single_layer_tests/concat.hpp" +#include "single_op_tests/concat.hpp" #include "common_test_utils/test_constants.hpp" -using namespace LayerTestsDefinitions; - namespace { +using ov::test::ConcatLayerTest; std::vector axes = {-3, -2, -1, 0, 1, 2, 3}; -std::vector>> inShapes = { +std::vector> inShapes = { {{10, 10, 10, 10}}, {{10, 10, 10, 10}, {10, 10, 10, 10}}, {{10, 10, 10, 10}, {10, 10, 10, 10}, {10, 10, 10, 10}}, {{10, 10, 10, 
10}, {10, 10, 10, 10}, {10, 10, 10, 10}, {10, 10, 10, 10}}, {{10, 10, 10, 10}, {10, 10, 10, 10}, {10, 10, 10, 10}, {10, 10, 10, 10}, {10, 10, 10, 10}} }; -std::vector netPrecisions = {InferenceEngine::Precision::FP32, - InferenceEngine::Precision::FP16, - InferenceEngine::Precision::I64}; +std::vector netPrecisions = {ov::element::f32, + ov::element::f16, + ov::element::i64}; INSTANTIATE_TEST_SUITE_P(smoke_NoReshape, ConcatLayerTest, ::testing::Combine( ::testing::ValuesIn(axes), - ::testing::ValuesIn(inShapes), + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(inShapes)), ::testing::ValuesIn(netPrecisions), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(InferenceEngine::Layout::ANY), ::testing::Values(ov::test::utils::DEVICE_GPU)), ConcatLayerTest::getTestCaseName); } // namespace diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/data_utils.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/data_utils.hpp index 4d2e5006eeb72d..3b876a530cdf33 100644 --- a/src/tests/test_utils/common_test_utils/include/common_test_utils/data_utils.hpp +++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/data_utils.hpp @@ -232,9 +232,9 @@ void inline fill_random_unique_sequence(T* rawBlobDataPtr, auto value = static_cast(dist(generator)); value /= static_cast(k); if (std::is_same::value) { - elems.insert(static_cast(ov::float16(value).to_bits())); + elems.insert(static_cast(ov::float16(value))); } else if (std::is_same::value) { - elems.insert(static_cast(ov::bfloat16(value).to_bits())); + elems.insert(static_cast(ov::bfloat16(value))); } else { elems.insert(static_cast(value)); } From 30a038c776581e945e1ad2185d5d62c5a7334413 Mon Sep 17 00:00:00 2001 From: Sungeun Kim Date: Wed, 18 Oct 2023 16:28:22 +0900 Subject: [PATCH 244/257] [GPU] Gather needs to keep the original input/output rank (#20042) * Gather needs to keep the original input/output rank - because the parameters as indices, batch_dims and axis depend on the rank. - add input_rank to gather primitive. * don't query on set_preferred_formats pass -when the force_implementations is set. -when forcing_impl is not onednn. 
--- .../include/intel_gpu/primitives/gather.hpp | 7 +++ .../graph/graph_optimizer/reorder_inputs.cpp | 21 ++----- .../select_preferred_formats.cpp | 12 +++- .../src/graph/include/layout_optimizer.h | 2 + .../intel_gpu/src/graph/layout_optimizer.cpp | 42 +++++++++++++ .../src/graph/program_dump_graph.cpp | 20 +++++++ .../intel_gpu/src/plugin/ops/gather.cpp | 1 + .../tests/unit/fusions/gather_fusion_test.cpp | 6 +- .../primitive_comparison_test.cpp | 10 ++-- .../passes/add_required_reorders_test.cpp | 4 +- .../passes/mark_shape_of_subgraphs_test.cpp | 18 +++--- .../passes/prepare_buffer_fusing_test.cpp | 6 +- .../remove_redundant_reorders_tests.cpp | 4 +- .../tests/unit/passes/reorder_inputs_test.cpp | 8 +-- .../tests/unit/shape_infer/gather_si_test.cpp | 2 +- .../test_cases/canonicalization_gpu_test.cpp | 6 +- .../unit/test_cases/eltwise_gpu_test.cpp | 2 +- .../tests/unit/test_cases/gather_gpu_test.cpp | 59 ++++++++++--------- .../unit/test_cases/hash_key_gpu_test.cpp | 2 +- 19 files changed, 153 insertions(+), 79 deletions(-) diff --git a/src/plugins/intel_gpu/include/intel_gpu/primitives/gather.hpp b/src/plugins/intel_gpu/include/intel_gpu/primitives/gather.hpp index 2a7dad7fe4774d..cbc64a0e143ec2 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/primitives/gather.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/primitives/gather.hpp @@ -21,6 +21,7 @@ struct gather : public primitive_base { /// @param dict Input dictionary primitive id. /// @param idx Input indexes primitive id. /// @param axis Gathering axis. + /// @param input_rank Input rank. /// @param output_shape Output shape. /// @param batch_dim Batch_dim /// @param support_neg_ind Support negative indexes @@ -28,18 +29,22 @@ struct gather : public primitive_base { const input_info& dict, const input_info& idx, const int64_t axis, + const int64_t input_rank, const ov::Shape& output_shape, const int64_t batch_dim = 0, const bool support_neg_ind = false, const padding& output_padding = padding()) : primitive_base(id, {dict, idx}, {output_padding}) , axis(axis) + , input_rank(input_rank) , output_shape(output_shape) , batch_dim(batch_dim) , support_neg_ind(support_neg_ind) {} /// @brief Gathering axis int64_t axis = 0; + /// @brief Gather input rank + int64_t input_rank; /// @brief Gather output shape ov::Shape output_shape; /// @brief Gathering batch_dim @@ -69,6 +74,7 @@ struct gather : public primitive_base { void save(BinaryOutputBuffer& ob) const override { primitive_base::save(ob); ob << axis; + ob << input_rank; ob << output_shape; ob << batch_dim; ob << support_neg_ind; @@ -77,6 +83,7 @@ struct gather : public primitive_base { void load(BinaryInputBuffer& ib) override { primitive_base::load(ib); ib >> axis; + ib >> input_rank; ib >> output_shape; ib >> batch_dim; ib >> support_neg_ind; diff --git a/src/plugins/intel_gpu/src/graph/graph_optimizer/reorder_inputs.cpp b/src/plugins/intel_gpu/src/graph/graph_optimizer/reorder_inputs.cpp index 094e645a69e05f..769134e440b848 100644 --- a/src/plugins/intel_gpu/src/graph/graph_optimizer/reorder_inputs.cpp +++ b/src/plugins/intel_gpu/src/graph/graph_optimizer/reorder_inputs.cpp @@ -121,15 +121,10 @@ struct travel_direction_wrapper { static format get_target_output_format(layout_optimizer& lo, const std::map& fmt_map, program_node *node, program_node *next) { auto user_idx = node->get_user_index(*next); - bool allow_new_shape_infer = node->get_program().get_config().get_property(ov::intel_gpu::allow_new_shape_infer); // 1. 
Check selected preferred_output_format - if (lo.get_optimization_attributes().use_onednn_impls || allow_new_shape_infer) { - // If onednn is not used, need to ignore get_preferred_output_fmt result as it is from onednn - auto ret = node->get_preferred_output_fmt(user_idx); - - if (ret != format::any) - return ret; - } + auto ret = node->get_preferred_output_fmt(user_idx); + if (ret != format::any) + return ret; // 2. Check fmt if (fmt_map.count(node) > 0) @@ -142,14 +137,10 @@ static format get_target_output_format(layout_optimizer& lo, const std::map& fmt_map, program_node *node, program_node *prev) { auto dep_idx = node->get_dependency_index(*prev); - bool allow_new_shape_infer = node->get_program().get_config().get_property(ov::intel_gpu::allow_new_shape_infer); // 1. Check selected preferred_input_format - if (lo.get_optimization_attributes().use_onednn_impls || allow_new_shape_infer) { - // If onednn is not used, need to ignore get_preferred_input_fmt result as it is from onednn - auto ret = node->get_preferred_input_fmt(dep_idx); - if (ret != format::any) - return ret; - } + auto ret = node->get_preferred_input_fmt(dep_idx); + if (ret != format::any) + return ret; // 2. Check fmt if (fmt_map.count(node) > 0) diff --git a/src/plugins/intel_gpu/src/graph/graph_optimizer/select_preferred_formats.cpp b/src/plugins/intel_gpu/src/graph/graph_optimizer/select_preferred_formats.cpp index 05dacd336a43f5..8b2b3a118a501f 100644 --- a/src/plugins/intel_gpu/src/graph/graph_optimizer/select_preferred_formats.cpp +++ b/src/plugins/intel_gpu/src/graph/graph_optimizer/select_preferred_formats.cpp @@ -32,11 +32,21 @@ void select_preferred_formats::run(program& p) { return; #ifdef ENABLE_ONEDNN_FOR_GPU + auto forcing_map = _lo.get_implementation_forcing(); + engine.create_onednn_engine(p.get_config()); for (auto n : p.get_processing_order()) { - if (n->is_input() || !_lo.are_data_types_suitable_for_onednn(*n)) { + if (n->is_input() || !layout_optimizer::is_node_suitable_for_onednn(*n)) { continue; } + + // skip to set preferred_formats if forcing_impl is not onednn. + if (std::find_if(forcing_map.begin(), forcing_map.end(), + [&n](std::map>::value_type const& it) { + return (it.first == n->id() && it.second.second != impl_types::onednn); + }) != forcing_map.end()) + continue; + // Onednn primitive descriptor creation may fail, for example, due to asymmetric weight. 
try { if (n->is_type()) { diff --git a/src/plugins/intel_gpu/src/graph/include/layout_optimizer.h b/src/plugins/intel_gpu/src/graph/include/layout_optimizer.h index fd048838c704ea..e9ed4ec959ae1c 100644 --- a/src/plugins/intel_gpu/src/graph/include/layout_optimizer.h +++ b/src/plugins/intel_gpu/src/graph/include/layout_optimizer.h @@ -169,6 +169,7 @@ class layout_optimizer { impl_types get_preferred_impl_type(program_node& node, format preferred_format); impl_types get_forced_impl_type_by_config(program_node& node); + static bool is_node_suitable_for_onednn(program_node& node); static bool are_data_types_suitable_for_onednn(program_node& node); bool are_layouts_suitable_for_onednn(program_node& node); static bool onednn_check_data_types_for_pooling(data_types in_dt, data_types out_dt); @@ -188,6 +189,7 @@ class layout_optimizer { optimization_attributes get_optimization_attributes() { return _optimization_attributes; } void set_implementation_forcing(const ov::intel_gpu::ImplForcingMap& map); + const std::map> get_implementation_forcing() const; void update_formats_map(const convolution_node& node); bool is_format_optimized(const convolution_node& node, const format& format, bool use_weak_restrictions = false); diff --git a/src/plugins/intel_gpu/src/graph/layout_optimizer.cpp b/src/plugins/intel_gpu/src/graph/layout_optimizer.cpp index fabb1e53329293..f5f6c1ac16d82a 100644 --- a/src/plugins/intel_gpu/src/graph/layout_optimizer.cpp +++ b/src/plugins/intel_gpu/src/graph/layout_optimizer.cpp @@ -32,6 +32,7 @@ #include "region_yolo_inst.h" #include "prior_box_inst.h" #include "scatter_nd_update_inst.h" +#include "gather_inst.h" #include "to_string_utils.h" #include #include @@ -849,6 +850,18 @@ static bool is_node_for_onednn(reduce_node const& node, format preferred_format) return true; } +static bool is_node_for_onednn(convolution_node const& node) { + if (!layout_optimizer::are_data_types_suitable_for_onednn((program_node&)node)) + return false; + + auto input_layout = node.get_input_layout(0); + auto output_layout = node.get_output_layout(0); + if (input_layout.is_dynamic() || output_layout.is_dynamic()) + return false; + + return true; +} + static bool is_node_for_onednn(deconvolution_node const& node) { auto prim = node.get_primitive(); auto input_layout = node.get_input_layout(0); @@ -871,6 +884,9 @@ static bool is_node_for_onednn(deconvolution_node const& node) { static bool is_node_for_onednn(fully_connected_node const& node) { + if (!layout_optimizer::are_data_types_suitable_for_onednn((program_node&)node)) + return false; + auto fc_prim = node.get_primitive(); // onednn impl doesn't support compressed weights for now if (fc_prim->compressed_weights) @@ -891,6 +907,10 @@ static bool is_node_for_onednn(fully_connected_node const& node) { return true; } +static bool is_node_for_onednn(gemm_node const& node) { + return layout_optimizer::are_data_types_suitable_for_onednn((program_node&)node); +} + // This function is needed to avoid performance regressions for the convolutions with byxf layout // Previously some topologies had scale operations which prevented byxf usage // Now instead of scale we have eltwise + fused_ops which might enable byxf convolution in unexpected cases @@ -1242,6 +1262,20 @@ format layout_optimizer::get_expected_format(quantize_node const& node) { return expected; } +bool layout_optimizer::is_node_suitable_for_onednn(program_node& node) { + if (node.is_type()) { + return is_node_for_onednn(node.as()); + } else if (node.is_type()) { + return 
is_node_for_onednn(node.as()); + } else if (node.is_type()) { + return is_node_for_onednn(node.as()); + } else if (node.is_type()) { + return is_node_for_onednn(node.as()); + } + + return false; +} + bool layout_optimizer::are_data_types_suitable_for_onednn(program_node& node) { auto in_dt = node.get_input_layout(0).data_type; auto out_dt = node.get_output_layout(false).data_type; @@ -1770,6 +1804,10 @@ format layout_optimizer::get_preferred_format(program_node& node) { node.set_preferred_input_fmt(0, format::bfyx); } } + } else if (node.is_type()) { + // Gather needs the original input/output rank because + // the parameters as indices, batch_dims and axis depend on the rank. + node.set_preferred_input_fmt(0, format::get_default_format(node.as().get_primitive()->input_rank)); } if (allow_new_shape_infer && node.get_preferred_input_fmt() != format::any) { @@ -2089,6 +2127,10 @@ void layout_optimizer::set_implementation_forcing(const ov::intel_gpu::ImplForci } } +const std::map> layout_optimizer::get_implementation_forcing() const { + return _forcing_map; +} + const std::vector> layout_optimizer::optimized_formats = { {format::b_fs_yx_fsv16, true}, {format::b_fs_yx_fsv16, false}, diff --git a/src/plugins/intel_gpu/src/graph/program_dump_graph.cpp b/src/plugins/intel_gpu/src/graph/program_dump_graph.cpp index 684057ee4726f8..f8aa9fb1c08c60 100644 --- a/src/plugins/intel_gpu/src/graph/program_dump_graph.cpp +++ b/src/plugins/intel_gpu/src/graph/program_dump_graph.cpp @@ -188,6 +188,25 @@ void dump_graph_init(std::ofstream& graph, return out; }; + const auto dump_mem_preferred_info = [](const program_node* ptr) { + std::string out = ""; + auto input_fmts = ptr->get_preferred_input_fmts(); + if (!input_fmts.empty()) { + out += "preferred_in_fmt"; + for (auto& fmt : input_fmts) { + out += ":" + fmt_to_str(fmt); + } + } + auto output_fmts = ptr->get_preferred_output_fmts(); + if (!output_fmts.empty()) { + out += "\npreferred_out_fmt"; + for (auto& fmt : output_fmts) { + out += ":" + fmt_to_str(fmt); + } + } + + return out; + }; graph << "digraph cldnn_program {\n"; for (auto& node : program.get_processing_order()) { @@ -220,6 +239,7 @@ void dump_graph_init(std::ofstream& graph, } } graph << "\n" + dump_mem_info(node); + graph << "\n" + dump_mem_preferred_info(node); graph << "\""; #ifdef __clang__ #pragma clang diagnostic pop diff --git a/src/plugins/intel_gpu/src/plugin/ops/gather.cpp b/src/plugins/intel_gpu/src/plugin/ops/gather.cpp index 883ebaba1a6dc2..7d941375d5ae14 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/gather.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/gather.cpp @@ -119,6 +119,7 @@ void CreateGatherOpBase(ProgramBuilder& p, const std::shared_ptr& op, const i reordered_inputs[0], reordered_inputs[1], axis, + input_rank, out_shape, batch_dim, support_neg_ind); diff --git a/src/plugins/intel_gpu/tests/unit/fusions/gather_fusion_test.cpp b/src/plugins/intel_gpu/tests/unit/fusions/gather_fusion_test.cpp index 39022f9f7b870d..c1af88852dde09 100644 --- a/src/plugins/intel_gpu/tests/unit/fusions/gather_fusion_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/fusions/gather_fusion_test.cpp @@ -129,7 +129,7 @@ TEST_P(gather_quantize, basic) { data("in_hi", get_mem(get_per_channel_layout(p), 1, max_random)), data("out_lo", get_mem(get_single_element_layout(p), -127)), data("out_hi", get_mem(get_single_element_layout(p), 127)), - gather("gather_prim", input_info("input"), input_info("gather_indices"), p.axis, p.out_shape), + gather("gather_prim", input_info("input"), 
input_info("gather_indices"), p.axis, p.dictionary_shape.size(), p.out_shape), quantize("quantize", input_info("gather_prim"), input_info("in_lo"), input_info("in_hi"), input_info("out_lo"), input_info("out_hi"), 255, data_types::i8), reorder("reorder_bfyx", input_info("quantize"), p.default_format, data_types::f32) @@ -172,7 +172,7 @@ TEST_P(gather_eltwise_activation, basic) { input_layout("input", get_input_layout(p)), data("gather_indices", get_mem(get_indices_layout(p), 0, static_cast(get_axis_dim(p) - 1))), data("eltwise_data", get_mem(get_per_channel_layout(p), -10, 10)), - gather("gather_prim", input_info("input"), input_info("gather_indices"), p.axis, p.out_shape), + gather("gather_prim", input_info("input"), input_info("gather_indices"), p.axis, p.dictionary_shape.size(), p.out_shape), activation("activation", input_info("gather_prim"), activation_func::abs), eltwise("eltwise", { input_info("activation"), input_info("eltwise_data") }, eltwise_mode::prod), reorder("reorder_bfyx", input_info("eltwise"), p.default_format, data_types::f32) @@ -220,7 +220,7 @@ TEST_P(gather_eltwise_activation_dynamic, basic) { input_layout("input", get_input_layout(p, true)), input_layout("gather_indices", layout{ ov::PartialShape::dynamic(p.indices_shape.size()), p.data_type, format::bfyx }), input_layout("eltwise_data", get_per_channel_layout(p, true)), - gather("gather_prim", input_info("input"), input_info("gather_indices"), p.axis, p.out_shape), + gather("gather_prim", input_info("input"), input_info("gather_indices"), p.axis, p.dictionary_shape.size(), p.out_shape), activation("activation", input_info("gather_prim"), activation_func::abs), eltwise("eltwise", { input_info("activation"), input_info("eltwise_data") }, eltwise_mode::prod), reorder("reorder_bfyx", input_info("eltwise"), p.default_format, data_types::f32) diff --git a/src/plugins/intel_gpu/tests/unit/module_tests/primitive_comparison_test.cpp b/src/plugins/intel_gpu/tests/unit/module_tests/primitive_comparison_test.cpp index 11d769f322be93..7fafb55beeccb8 100644 --- a/src/plugins/intel_gpu/tests/unit/module_tests/primitive_comparison_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/module_tests/primitive_comparison_test.cpp @@ -89,11 +89,11 @@ TEST(primitive_comparison, fully_connected) { } TEST(primitive_comparison, gather) { - auto gather_prim = gather("gather", input_info("input0"), input_info("input1"), 2, {1, 3, 224, 224}, 1, true); - auto gather_prim_eq = gather("gather_eq", input_info("input0_eq"), input_info("input1_eq"), 2, {1, 3, 224, 224}, 1, true); - auto gather_prim_axis = gather("gather", input_info("input0"), input_info("input1"), 3, {1, 3, 224, 224}, 1, true); - auto gather_prim_batch_dim = gather("gather", input_info("input0"), input_info("input1"), 2, {1, 3, 224, 224}, 2, true); - auto gather_prim_support_neg_ind = gather("gather", input_info("input0"), input_info("input1"), 2, {1, 3, 224, 224}, 1, false); + auto gather_prim = gather("gather", input_info("input0"), input_info("input1"), 2, {}, {1, 3, 224, 224}, 1, true); + auto gather_prim_eq = gather("gather_eq", input_info("input0_eq"), input_info("input1_eq"), 2, {}, {1, 3, 224, 224}, 1, true); + auto gather_prim_axis = gather("gather", input_info("input0"), input_info("input1"), 3, {}, {1, 3, 224, 224}, 1, true); + auto gather_prim_batch_dim = gather("gather", input_info("input0"), input_info("input1"), 2, {}, {1, 3, 224, 224}, 2, true); + auto gather_prim_support_neg_ind = gather("gather", input_info("input0"), input_info("input1"), 2, {}, {1, 3, 224, 224}, 1, 
false); ASSERT_EQ(gather_prim, gather_prim_eq); ASSERT_NE(gather_prim, gather_prim_axis); diff --git a/src/plugins/intel_gpu/tests/unit/passes/add_required_reorders_test.cpp b/src/plugins/intel_gpu/tests/unit/passes/add_required_reorders_test.cpp index 4ac5f414c70664..629769d086d9d1 100644 --- a/src/plugins/intel_gpu/tests/unit/passes/add_required_reorders_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/passes/add_required_reorders_test.cpp @@ -41,11 +41,11 @@ TEST(add_required_reorders, input_reorder_inside_shape_of_subgraph) { topology.add(data("data_0", data_0)); topology.add(data("data_1", data_1)); topology.add(shape_of("shape_of", input_info("input"), 4, data_types::i32)); - topology.add(gather("gather0", input_info("shape_of"), input_info("data_0"), 0, {}, 0, true)); + topology.add(gather("gather0", input_info("shape_of"), input_info("data_0"), 0, {}, {}, 0, true)); topology.add(eltwise("eltwise0", {input_info("gather0"), input_info("data_1")}, eltwise_mode::prod, data_types::f32)); topology.add(reshape("reshape0", input_info("eltwise0"), false, {}, ov::PartialShape{1}, reshape::reshape_mode::unsqueeze)); - topology.add(gather("gather1", input_info("shape_of"), input_info("data_0"), 0, {}, 0, true)); + topology.add(gather("gather1", input_info("shape_of"), input_info("data_0"), 0, {}, {}, 0, true)); topology.add(eltwise("eltwise1", {input_info("gather1"), input_info("data_1")}, eltwise_mode::prod, data_types::f32)); topology.add(reshape("reshape1", input_info("eltwise1"), false, {}, ov::PartialShape{1}, reshape::reshape_mode::unsqueeze)); diff --git a/src/plugins/intel_gpu/tests/unit/passes/mark_shape_of_subgraphs_test.cpp b/src/plugins/intel_gpu/tests/unit/passes/mark_shape_of_subgraphs_test.cpp index 4fe7598e8af3d8..6b66075c6db26d 100644 --- a/src/plugins/intel_gpu/tests/unit/passes/mark_shape_of_subgraphs_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/passes/mark_shape_of_subgraphs_test.cpp @@ -62,7 +62,7 @@ TEST(mark_shape_of_subgraphs, simple_chain) { topology.add(data("data_0", data_0)); topology.add(data("data_1", data_1)); topology.add(shape_of("shape_of", input_info("input"), data_types::i64)); - topology.add(gather("gather", input_info("shape_of"), input_info("data_0"), 0, {})); + topology.add(gather("gather", input_info("shape_of"), input_info("data_0"), 0, 0, {})); topology.add(eltwise("eltwise", input_info("gather"), input_info("data_1"), eltwise_mode::sum)); topology.add(concatenation("concat", {input_info("eltwise"), input_info("data_1")}, 0)); topology.add(broadcast("broadcast", input_info("input"), input_info("concat"), {}, ov::op::BroadcastType::BIDIRECTIONAL)); @@ -103,7 +103,7 @@ TEST(mark_shape_of_subgraphs, simple_chain_w_reshape_inside_subgraph) { topology.add(data("data_0", data_0)); topology.add(data("data_1", data_1)); topology.add(shape_of("shape_of", input_info("input"), data_types::i64)); - topology.add(gather("gather", input_info("shape_of"), input_info("data_0"), 0, {1})); + topology.add(gather("gather", input_info("shape_of"), input_info("data_0"), 0, 1, {1})); topology.add(reshape("reshape", input_info("gather"), input_info("data_1"), false, ov::PartialShape{2})); topology.add(broadcast("broadcast", input_info("input"), input_info("reshape"), {}, ov::op::BroadcastType::BIDIRECTIONAL)); @@ -129,8 +129,8 @@ TEST(mark_shape_of_subgraphs, parallel_shape_of_subgraphs) { topology.add(data("data_0", data_0)); topology.add(shape_of("shape_of_0", input_info("input"), data_types::i64)); topology.add(shape_of("shape_of_1", input_info("input"), 
data_types::i64)); - topology.add(gather("gather_0", input_info("shape_of_0"), input_info("data_0"), 0, {})); - topology.add(gather("gather_1", input_info("shape_of_1"), input_info("data_0"), 0, {})); + topology.add(gather("gather_0", input_info("shape_of_0"), input_info("data_0"), 0, 0, {})); + topology.add(gather("gather_1", input_info("shape_of_1"), input_info("data_0"), 0, 0, {})); topology.add(eltwise("eltwise", input_info("gather_0"), input_info("gather_1"), eltwise_mode::sum)); topology.add(reshape("reshape", input_info("input"), input_info("eltwise"), false, ov::PartialShape())); @@ -160,9 +160,9 @@ TEST(mark_shape_of_subgraphs, parallel_shape_of_subgraphs_cascade) { topology.add(data("data_1", data_1)); topology.add(data("data_2", data_2)); topology.add(shape_of("shape_of_0", input_info("input"), data_types::i64)); - topology.add(gather("gather_0", input_info("shape_of_0"), input_info("data_0"), 0, {1})); + topology.add(gather("gather_0", input_info("shape_of_0"), input_info("data_0"), 0, 1, {1})); topology.add(shape_of("shape_of_1", input_info("input"), data_types::i64)); - topology.add(gather("gather_1", input_info("shape_of_1"), input_info("data_0"), 0, {1})); + topology.add(gather("gather_1", input_info("shape_of_1"), input_info("data_0"), 0, 1, {1})); topology.add(scatter_update("scatter_update_0", input_info("gather_0"), input_info("data_0"), input_info("data_0"), 0)); topology.add(scatter_update("scatter_update_1", input_info("gather_1"), input_info("data_0"), input_info("data_0"), 0)); topology.add(strided_slice("strided_slice_1", @@ -171,7 +171,7 @@ TEST(mark_shape_of_subgraphs, parallel_shape_of_subgraphs_cascade) { input_info("scatter_update_1"), input_info("data_0"), {}, {}, {}, {}, {}, {})); topology.add(shape_of("shape_of_2", input_info("input"), data_types::i64)); - topology.add(gather("gather_2", input_info("shape_of_2"), input_info("data_0"), 0, {})); + topology.add(gather("gather_2", input_info("shape_of_2"), input_info("data_0"), 0, 0, {})); topology.add(scatter_update("scatter_update_2", input_info("gather_2"), input_info("data_0"), input_info("data_0"), 0)); topology.add(strided_slice("strided_slice_2", input_info("data_1"), @@ -207,7 +207,7 @@ TEST(mark_shape_of_subgraphs, simple_chain_w_inserted_reorder) { topology.add(input_layout("input", input_layout_dynamic)); topology.add(data("data_0", data_0)); topology.add(shape_of("shape_of", input_info("input"), data_types::i64)); - topology.add(gather("gather", input_info("shape_of"), input_info("data_0"), 0, {1})); + topology.add(gather("gather", input_info("shape_of"), input_info("data_0"), 0, 1, {1})); topology.add(reshape("reshape", input_info("gather"), true, {}, {})); topology.add(reorder("reorder", input_info("reshape"), format::bfyx, data_types::f16)); topology.add(eltwise("eltwise", input_info("reorder"), input_info("data_0"), eltwise_mode::prod)); @@ -237,7 +237,7 @@ TEST(mark_shape_of_subgraphs, concat_with_empty_tensor_inputs) { topology.add(input_layout("input_empty", input_layout_empty)); topology.add(data("data_0", data_0)); topology.add(shape_of("shape_of_01", input_info("input"), data_types::i64)); - topology.add(gather("gather01", input_info("shape_of_01"), input_info("data_0"), 0, {1})); + topology.add(gather("gather01", input_info("shape_of_01"), input_info("data_0"), 0, 1, {1})); topology.add(shape_of("shape_of_02", input_info("input_empty"), data_types::i64)); topology.add(shape_of("shape_of_03", input_info("input_empty"), data_types::i64)); topology.add(concatenation("concat", 
{input_info("gather01"), input_info("shape_of_02"), input_info("shape_of_03")}, 0)); diff --git a/src/plugins/intel_gpu/tests/unit/passes/prepare_buffer_fusing_test.cpp b/src/plugins/intel_gpu/tests/unit/passes/prepare_buffer_fusing_test.cpp index 1866ddb6c19870..9d7aef3e2b68bf 100644 --- a/src/plugins/intel_gpu/tests/unit/passes/prepare_buffer_fusing_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/passes/prepare_buffer_fusing_test.cpp @@ -633,13 +633,13 @@ TEST(prepare_buffer_fusing, skip_in_place_concat_inside_shape_of_subgraph) { topology.add(data("data_1", data_1)); topology.add(data("data_2", data_2)); topology.add(shape_of("shape_of", input_info("input"), 4, data_types::i32)); - topology.add(gather("gather0", input_info("shape_of"), input_info("data_0"), 0, {}, 0, true)); + topology.add(gather("gather0", input_info("shape_of"), input_info("data_0"), 0, 0, {}, 0, true)); topology.add(reorder("reorder0", input_info("gather0"), format::any, data_types::f32, std::vector(), reorder_mean_mode::subtract, padding(), true)); topology.add(eltwise("eltwise0", input_info("reorder0"), input_info("data_1"), eltwise_mode::prod, broadcast_spec)); topology.add(reshape("reshape0", input_info("eltwise0"), false, {}, ov::PartialShape{1}, reshape::reshape_mode::unsqueeze)); - topology.add(gather("gather1", input_info("shape_of"), input_info("data_0"), 0, {}, 0, true)); + topology.add(gather("gather1", input_info("shape_of"), input_info("data_0"), 0, 0, {}, 0, true)); topology.add(reorder("reorder1", input_info("gather1"), format::any, data_types::f32, std::vector(), reorder_mean_mode::subtract, padding(), true)); topology.add(eltwise("eltwise1", input_info("reorder1"), input_info("data_1"), eltwise_mode::prod, broadcast_spec)); @@ -693,7 +693,7 @@ TEST(prepare_buffer_fusing, test_implicit_crop_and_outerpadding) { topology.add(input_layout("Input", in_input->get_layout())); topology.add(input_layout("Input_idx_1", input_idx1->get_layout())); topology.add(reorder("reorder_input", input_info("Input"), format::bfzyx, data_types::f32)); - topology.add(gather("gather1", input_info("reorder_input"), input_info("Input_idx_1"), axis, ov::Shape{1, 6, 2, 2, 2})); + topology.add(gather("gather1", input_info("reorder_input"), input_info("Input_idx_1"), axis, 5, ov::Shape{1, 6, 2, 2, 2})); topology.add(reorder("gather1_reorder", input_info("gather1"), reorder_layout)); topology.add(reshape("reshape1", input_info("gather1_reorder"), tensor(6, 2, 2, 2))); topology.add(crop("crop", input_info("reorder_input"), tensor{1, 6, 2, 2, 2}, tensor(1, 0, 0, 0, 0))); diff --git a/src/plugins/intel_gpu/tests/unit/passes/remove_redundant_reorders_tests.cpp b/src/plugins/intel_gpu/tests/unit/passes/remove_redundant_reorders_tests.cpp index 1d5bdd88209668..ee454a74e96635 100644 --- a/src/plugins/intel_gpu/tests/unit/passes/remove_redundant_reorders_tests.cpp +++ b/src/plugins/intel_gpu/tests/unit/passes/remove_redundant_reorders_tests.cpp @@ -346,13 +346,13 @@ TEST(remove_redundant_reorders, not_to_fuse_concat_with_reorder_inside_shape_of_ topology.add(data("data_1", data_1)); topology.add(data("data_2", data_2)); topology.add(shape_of("shape_of", input_info("input"), 4, data_types::i32)); - topology.add(gather("gather0", input_info("shape_of"), input_info("data_0"), 0, {}, 0, true)); + topology.add(gather("gather0", input_info("shape_of"), input_info("data_0"), 0, {}, {}, 0, true)); topology.add(reorder("reorder0", input_info("gather0"), format::any, data_types::f32, std::vector(), reorder_mean_mode::subtract, padding(), true)); 
topology.add(eltwise("eltwise0", input_info("reorder0"), input_info("data_1"), eltwise_mode::prod, broadcast_spec)); topology.add(reshape("reshape0", input_info("eltwise0"), false, {}, ov::PartialShape{1}, reshape::reshape_mode::unsqueeze)); - topology.add(gather("gather1", input_info("shape_of"), input_info("data_0"), 0, {}, 0, true)); + topology.add(gather("gather1", input_info("shape_of"), input_info("data_0"), 0, {}, {}, 0, true)); topology.add(reorder("reorder1", input_info("gather1"), format::any, data_types::f32, std::vector(), reorder_mean_mode::subtract, padding(), true)); topology.add(eltwise("eltwise1", input_info("reorder1"), input_info("data_1"), eltwise_mode::prod, broadcast_spec)); diff --git a/src/plugins/intel_gpu/tests/unit/passes/reorder_inputs_test.cpp b/src/plugins/intel_gpu/tests/unit/passes/reorder_inputs_test.cpp index a6efbbc98a5de3..4406605784a22f 100644 --- a/src/plugins/intel_gpu/tests/unit/passes/reorder_inputs_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/passes/reorder_inputs_test.cpp @@ -136,8 +136,8 @@ TEST(reorder_inputs, mixed_ranks_gather) { ov::CoordinateDiff{0, 0}, false)); topology.add(border("pad", { input_info("conv") }, 0, ov::CoordinateDiff{0, 0, 1, 1}, ov::CoordinateDiff{0, 0, 1, 1})); - topology.add(gather("gather1", input_info("pad"), input_info("data1"), 2, { 1, 2, 3, 128, 57 }, 0, false)); - topology.add(gather("gather2", input_info("gather1"), input_info("data2"), 4, { 1, 2, 3, 128, 3, 55 }, 0, false)); + topology.add(gather("gather1", input_info("pad"), input_info("data1"), 2, 4, { 1, 2, 3, 128, 57 }, 0, false)); + topology.add(gather("gather2", input_info("gather1"), input_info("data2"), 4, 5, { 1, 2, 3, 128, 3, 55 }, 0, false)); topology.add(permute("permute", input_info("gather2"), {0, 1, 2, 4, 3, 5})); ExecutionConfig config = get_test_default_config(engine); @@ -155,10 +155,10 @@ TEST(reorder_inputs, mixed_ranks_gather) { auto& gather1_node = prog_impl->get_node("gather1"); auto& gather2_node = prog_impl->get_node("gather2"); - ASSERT_EQ(gather1_node.get_input_layouts()[0].format, format::bfzyx); + ASSERT_EQ(gather1_node.get_input_layouts()[0].format, format::bfyx); ASSERT_EQ(gather1_node.get_output_layout().format, format::bfzyx); - ASSERT_EQ(gather2_node.get_input_layouts()[0].format, format::bfwzyx); + ASSERT_EQ(gather2_node.get_input_layouts()[0].format, format::bfzyx); ASSERT_EQ(gather2_node.get_output_layout().format, format::bfwzyx); } diff --git a/src/plugins/intel_gpu/tests/unit/shape_infer/gather_si_test.cpp b/src/plugins/intel_gpu/tests/unit/shape_infer/gather_si_test.cpp index aedcfb9d4dce5c..a1852c0c4561a5 100644 --- a/src/plugins/intel_gpu/tests/unit/shape_infer/gather_si_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/shape_infer/gather_si_test.cpp @@ -37,7 +37,7 @@ TEST_P(gather_test, shape_infer) { auto input0_layout_prim = std::make_shared("input0", p.in0_layout); auto input1_layout_prim = std::make_shared("input1", p.in1_layout); - auto gather_prim = std::make_shared("output", input_info("input0"), input_info("input1"), p.axis, ov::Shape{}, p.batch_dim); + auto gather_prim = std::make_shared("output", input_info("input0"), input_info("input1"), p.axis, 0, ov::Shape{}, p.batch_dim); cldnn::program prog(engine); diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/canonicalization_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/canonicalization_gpu_test.cpp index 400152887e6ce9..933161185c78a8 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/canonicalization_gpu_test.cpp +++ 
b/src/plugins/intel_gpu/tests/unit/test_cases/canonicalization_gpu_test.cpp @@ -220,7 +220,7 @@ TEST(canonicalization, gather) { topology.add(input_layout("data", data_layout)); topology.add(input_layout("indices", indices_layout)); topology.add(gather("gather", input_info("data"), input_info("indices"), params.second.axis, - ov::Shape{}, params.second.batch_dim, params.second.support_neg_ind)); + 0, ov::Shape{}, params.second.batch_dim, params.second.support_neg_ind)); canonicalization_test(topology, "gather", std::get<1>(params.first), std::get<2>(params.first)); } @@ -254,9 +254,9 @@ TEST(canonicalization, fusing_gather_eltwise) { topology.add(input_layout("indices_second", indices_layout_second)); topology.add(input_layout("data", input_mul_layout)); topology.add(gather("gather_first", input_info("input"), input_info("indices_first"), shapes.second.axis, - shapes.second.out_shape, shapes.second.batch_dim, shapes.second.support_neg_ind)); + shapes.second.data_shape.rank().get_length(), shapes.second.out_shape, shapes.second.batch_dim, shapes.second.support_neg_ind)); topology.add(gather("gather_second", input_info("input"), input_info("indices_second"), shapes.second.axis, - shapes.second.out_shape, shapes.second.batch_dim, shapes.second.support_neg_ind)); + shapes.second.data_shape.rank().get_length(), shapes.second.out_shape, shapes.second.batch_dim, shapes.second.support_neg_ind)); topology.add(eltwise("mul", {input_info("gather_first"), input_info("data")}, eltwise_mode::prod)); topology.add(eltwise("add", {input_info("gather_second"), input_info("mul")}, eltwise_mode::sum)); topology.add(reorder("out_reorder", input_info("add"), format::bfyx, data_types::f32)); diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/eltwise_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/eltwise_gpu_test.cpp index c1309b720daf16..5da00e8fb739f3 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/eltwise_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/eltwise_gpu_test.cpp @@ -2482,7 +2482,7 @@ TEST(eltwise_gpu_int, div_gather_fusing) { topology.add(input_layout("InputDictionary", input1->get_layout())); topology.add(input_layout("InputText", input2->get_layout())); topology.add(input_layout("Input3", input3->get_layout())); - topology.add(gather("gather", input_info("InputDictionary"), input_info("InputText"), 0, ov::Shape{2, 2, 2, 2})); + topology.add(gather("gather", input_info("InputDictionary"), input_info("InputText"), 0, 4, ov::Shape{2, 2, 2, 2})); topology.add(reorder("gather_reorder", input_info("gather"), { data_types::i32, format::bfyx, { 2, 2, 2, 2 } })); topology.add(eltwise("eltwise", { input_info("gather_reorder"), input_info("Input3") }, diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/gather_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/gather_gpu_test.cpp index bbef9e78912d5b..75465b89ae51ff 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/gather_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/gather_gpu_test.cpp @@ -88,6 +88,7 @@ class gather8_test : public ::testing::TestWithParam { input_info("reorder0"), input_info("reorder1"), axis, + shape_in[0].size(), ov::Shape(shape_out.begin(), shape_out.end()), batch_dim, true)); @@ -108,7 +109,7 @@ class gather8_test : public ::testing::TestWithParam { planar_topo.add(input_layout("input0", input0->get_layout())); planar_topo.add(input_layout("input1", input1->get_layout())); planar_topo.add( - gather("gather", input_info("input0"), input_info("input1"), axis, 
ov::Shape(shape_out.begin(), shape_out.end()), batch_dim, true)); + gather("gather", input_info("input0"), input_info("input1"), axis, shape_in[0].size(), ov::Shape(shape_out.begin(), shape_out.end()), batch_dim, true)); network planar_network(engine, planar_topo, get_test_default_config(engine)); planar_network.set_input_data("input0", input0); planar_network.set_input_data("input1", input1); @@ -408,7 +409,7 @@ TEST(gather8_gpu_fp16, d323_axisY_bdim_m1) { topology.add(input_layout("InputDictionary", input1->get_layout())); topology.add(input_layout("InputText", input2->get_layout())); topology.add( - gather("gather", input_info("InputDictionary"), input_info("InputText"), axis, ov::Shape{3, 2, 3, 3, 2}, batch_dim, negative_indexes) + gather("gather", input_info("InputDictionary"), input_info("InputText"), axis, 5, ov::Shape{3, 2, 3, 3, 2}, batch_dim, negative_indexes) ); network network(engine, topology, get_test_default_config(engine)); @@ -515,7 +516,7 @@ TEST(gather7_gpu_fp16, d222_axisX_bdim_m1) { topology.add(input_layout("InputDictionary", input1->get_layout())); topology.add(input_layout("InputText", input2->get_layout())); topology.add( - gather("gather", input_info("InputDictionary"), input_info("InputText"), axis, ov::Shape{2, 2, 2, 2, 2, 2}, batch_dim) + gather("gather", input_info("InputDictionary"), input_info("InputText"), axis, 6, ov::Shape{2, 2, 2, 2, 2, 2}, batch_dim) ); network network(engine, topology, get_test_default_config(engine)); @@ -626,7 +627,7 @@ TEST(gather7_gpu_fp16, d323_axisY_bdim_m1) { topology.add(input_layout("InputDictionary", input1->get_layout())); topology.add(input_layout("InputText", input2->get_layout())); topology.add( - gather("gather", input_info("InputDictionary"), input_info("InputText"), axis, ov::Shape{3, 2, 3, 3, 2}, batch_dim) + gather("gather", input_info("InputDictionary"), input_info("InputText"), axis, 5, ov::Shape{3, 2, 3, 3, 2}, batch_dim) ); network network(engine, topology, get_test_default_config(engine)); @@ -730,7 +731,7 @@ TEST(gather7_gpu_fp16, d44_axisY_bdim1) { topology.add(input_layout("InputDictionary", input1->get_layout())); topology.add(input_layout("InputText", input2->get_layout())); topology.add( - gather("gather", input_info("InputDictionary"), input_info("InputText"), axis, ov::Shape{4, 3, 4, 1, 1, 1}, batch_dim) + gather("gather", input_info("InputDictionary"), input_info("InputText"), axis, 4, ov::Shape{4, 3, 4, 1, 1, 1}, batch_dim) ); network network(engine, topology, get_test_default_config(engine)); @@ -805,7 +806,7 @@ TEST(gather7_gpu_fp16, d32_axisF_bdim_m1) { topology.add(input_layout("InputDictionary", input1->get_layout())); topology.add(input_layout("InputText", input2->get_layout())); topology.add( - gather("gather", input_info("InputDictionary"), input_info("InputText"), axis, ov::Shape{3, 2, 1, 1}, batch_dim) + gather("gather", input_info("InputDictionary"), input_info("InputText"), axis, 4, ov::Shape{3, 2, 1, 1}, batch_dim) ); network network(engine, topology, get_test_default_config(engine)); @@ -868,7 +869,7 @@ TEST(gather7_gpu_fp16, d32_axisF_bdim1) { topology.add(input_layout("InputDictionary", input1->get_layout())); topology.add(input_layout("InputText", input2->get_layout())); topology.add( - gather("gather", input_info("InputDictionary"), input_info("InputText"), axis, ov::Shape{3, 2, 1, 1}, batch_dim) + gather("gather", input_info("InputDictionary"), input_info("InputText"), axis, 4, ov::Shape{3, 2, 1, 1}, batch_dim) ); network network(engine, topology, get_test_default_config(engine)); @@ 
-930,7 +931,7 @@ TEST(gather7_gpu_fp16, d32_axisF_bdim0) { topology.add(input_layout("InputDictionary", input1->get_layout())); topology.add(input_layout("InputText", input2->get_layout())); topology.add( - gather("gather", input_info("InputDictionary"), input_info("InputText"), axis, ov::Shape{3, 3, 2, 1}, batch_dim) + gather("gather", input_info("InputDictionary"), input_info("InputText"), axis, 4, ov::Shape{3, 3, 2, 1}, batch_dim) ); network network(engine, topology, get_test_default_config(engine)); @@ -998,7 +999,7 @@ TEST(gather_gpu_fp16, d14_axisB) { topology.add(input_layout("InputDictionary", input1->get_layout())); topology.add(input_layout("InputText", input2->get_layout())); topology.add( - gather("gather", input_info("InputDictionary"), input_info("InputText"), axis, ov::Shape{1, 4, 2, 1}) + gather("gather", input_info("InputDictionary"), input_info("InputText"), axis, 4, ov::Shape{1, 4, 2, 1}) ); network network(engine, topology, get_test_default_config(engine)); @@ -1060,7 +1061,7 @@ TEST(gather_gpu_fp16, d222_axisB) { topology.add(input_layout("InputDictionary", input1->get_layout())); topology.add(input_layout("InputText", input2->get_layout())); topology.add( - gather("gather", input_info("InputDictionary"), input_info("InputText"), axis, ov::Shape{2, 2, 2, 2}) + gather("gather", input_info("InputDictionary"), input_info("InputText"), axis, 4, ov::Shape{2, 2, 2, 2}) ); network network(engine, topology, get_test_default_config(engine)); @@ -1121,7 +1122,7 @@ TEST(gather_gpu_fp16, d22_axisY) { topology.add(input_layout("InputDictionary", input1->get_layout())); topology.add(input_layout("InputText", input2->get_layout())); topology.add( - gather("gather", input_info("InputDictionary"), input_info("InputText"), axis, ov::Shape{2, 2, 2, 2}) + gather("gather", input_info("InputDictionary"), input_info("InputText"), axis, 4, ov::Shape{2, 2, 2, 2}) ); network network(engine, topology, get_test_default_config(engine)); @@ -1182,7 +1183,7 @@ TEST(gather_gpu_fp16, d22_axisF) { topology.add(input_layout("InputDictionary", input1->get_layout())); topology.add(input_layout("InputText", input2->get_layout())); topology.add( - gather("gather", input_info("InputDictionary"), input_info("InputText"), axis, ov::Shape{2, 2, 2, 2}) + gather("gather", input_info("InputDictionary"), input_info("InputText"), axis, 4, ov::Shape{2, 2, 2, 2}) ); network network(engine, topology, get_test_default_config(engine)); @@ -1240,7 +1241,7 @@ TEST(gather_gpu_fp32, d14_axisB) { topology.add(input_layout("InputDictionary", input1->get_layout())); topology.add(input_layout("InputText", input2->get_layout())); topology.add( - gather("gather", input_info("InputDictionary"), input_info("InputText"), axis, ov::Shape{1, 4, 2, 1}) + gather("gather", input_info("InputDictionary"), input_info("InputText"), axis, 4, ov::Shape{1, 4, 2, 1}) ); network network(engine, topology, get_test_default_config(engine)); @@ -1301,7 +1302,7 @@ TEST(gather_gpu_fp32, d222_axisB) { topology.add(input_layout("InputDictionary", input1->get_layout())); topology.add(input_layout("InputText", input2->get_layout())); topology.add( - gather("gather", input_info("InputDictionary"), input_info("InputText"), axis, ov::Shape{2, 2, 2, 2}) + gather("gather", input_info("InputDictionary"), input_info("InputText"), axis, 4, ov::Shape{2, 2, 2, 2}) ); network network(engine, topology, get_test_default_config(engine)); @@ -1362,7 +1363,7 @@ TEST(gather_gpu_fp32, d22_axisY) { topology.add(input_layout("InputDictionary", input1->get_layout())); 
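// Why the new rank argument matters: layout_optimizer now derives the preferred
// planar input format for gather from this rank via
// format::get_default_format(input_rank). Assuming the usual cldnn
// rank-to-format mapping (consistent with the updated reorder_inputs_test
// expectations), that is roughly:
//
//     format::get_default_format(4);  // bfyx
//     format::get_default_format(5);  // bfzyx
//     format::get_default_format(6);  // bfwzyx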
topology.add(input_layout("InputText", input2->get_layout())); topology.add( - gather("gather", input_info("InputDictionary"), input_info("InputText"), axis, ov::Shape{2, 2, 2, 2}) + gather("gather", input_info("InputDictionary"), input_info("InputText"), axis, 4, ov::Shape{2, 2, 2, 2}) ); network network(engine, topology, get_test_default_config(engine)); @@ -1423,7 +1424,7 @@ TEST(gather_gpu_fp32, d22_axisF) { topology.add(input_layout("InputDictionary", input1->get_layout())); topology.add(input_layout("InputText", input2->get_layout())); topology.add( - gather("gather", input_info("InputDictionary"), input_info("InputText"), axis, ov::Shape{2, 2, 2, 2}) + gather("gather", input_info("InputDictionary"), input_info("InputText"), axis, 4, ov::Shape{2, 2, 2, 2}) ); network network(engine, topology, get_test_default_config(engine)); @@ -1484,7 +1485,7 @@ TEST(gather_gpu_int32, d22_axisF) { topology.add(input_layout("InputDictionary", input1->get_layout())); topology.add(input_layout("InputText", input2->get_layout())); topology.add( - gather("gather", input_info("InputDictionary"), input_info("InputText"), axis, ov::Shape{2, 2, 2, 2}) + gather("gather", input_info("InputDictionary"), input_info("InputText"), axis, 4, ov::Shape{2, 2, 2, 2}) ); network network(engine, topology, get_test_default_config(engine)); @@ -1542,7 +1543,7 @@ TEST(gather_gpu_int32, d14_axisB) { topology.add(input_layout("InputDictionary", input1->get_layout())); topology.add(input_layout("InputText", input2->get_layout())); topology.add( - gather("gather", input_info("InputDictionary"), input_info("InputText"), axis, ov::Shape{1, 4, 2, 1}) + gather("gather", input_info("InputDictionary"), input_info("InputText"), axis, 4, ov::Shape{1, 4, 2, 1}) ); network network(engine, topology, get_test_default_config(engine)); @@ -1603,7 +1604,7 @@ TEST(gather_gpu_int32, d222_axisB) { topology.add(input_layout("InputDictionary", input1->get_layout())); topology.add(input_layout("InputText", input2->get_layout())); topology.add( - gather("gather", input_info("InputDictionary"), input_info("InputText"), axis, ov::Shape{2, 2, 2, 2}) + gather("gather", input_info("InputDictionary"), input_info("InputText"), axis, 4, ov::Shape{2, 2, 2, 2}) ); network network(engine, topology, get_test_default_config(engine)); @@ -1664,7 +1665,7 @@ TEST(gather_gpu_int32, d22_axisY) { topology.add(input_layout("InputDictionary", input1->get_layout())); topology.add(input_layout("InputText", input2->get_layout())); topology.add( - gather("gather", input_info("InputDictionary"), input_info("InputText"), axis, ov::Shape{2, 2, 2, 2}) + gather("gather", input_info("InputDictionary"), input_info("InputText"), axis, 4, ov::Shape{2, 2, 2, 2}) ); network network(engine, topology, get_test_default_config(engine)); @@ -1728,7 +1729,7 @@ TEST(gather_gpu_fp32, d41_axisB) { topology.add(input_layout("InputDictionary", input1->get_layout())); topology.add(input_layout("InputText", input2->get_layout())); topology.add( - gather("gather", input_info("InputDictionary"), input_info("InputText"), axis, ov::Shape{4, 1, 2, 3}) + gather("gather", input_info("InputDictionary"), input_info("InputText"), axis, 4, ov::Shape{4, 1, 2, 3}) ); network network(engine, topology, get_test_default_config(engine)); @@ -1791,7 +1792,7 @@ TEST(gather_gpu_fp32, d41_axisF) { topology.add(input_layout("InputDictionary", input1->get_layout())); topology.add(input_layout("InputText", input2->get_layout())); topology.add( - gather("gather", input_info("InputDictionary"), input_info("InputText"), 
axis, ov::Shape{2, 4, 1, 2}) + gather("gather", input_info("InputDictionary"), input_info("InputText"), axis, 4, ov::Shape{2, 4, 1, 2}) ); network network(engine, topology, get_test_default_config(engine)); @@ -1850,7 +1851,7 @@ TEST(gather_gpu_fp32, d2_axisX) { topology.add(input_layout("InputDictionary", input1->get_layout())); topology.add(input_layout("InputText", input2->get_layout())); topology.add( - gather("gather", input_info("InputDictionary"), input_info("InputText"), axis, ov::Shape{2, 2, 1, 2}) + gather("gather", input_info("InputDictionary"), input_info("InputText"), axis, 4, ov::Shape{2, 2, 1, 2}) ); network network(engine, topology, get_test_default_config(engine)); @@ -1900,7 +1901,7 @@ TEST(gather_gpu_fp32, 322_axisF) { topology.add(input_layout("InputDictionary", input1->get_layout())); topology.add(input_layout("InputText", input2->get_layout())); topology.add( - gather("gather", input_info("InputDictionary"), input_info("InputText"), axis, ov::Shape{3, 2, 2, 1}) + gather("gather", input_info("InputDictionary"), input_info("InputText"), axis, 4, ov::Shape{3, 2, 2, 1}) ); network network(engine, topology, get_test_default_config(engine)); @@ -1940,7 +1941,7 @@ TEST(gather_gpu_fp32, dynamic_322_axisF) { topology topology; topology.add(input_layout("input1", in1_layout)); topology.add(input_layout("input2", in2_layout)); - topology.add(gather("gather", input_info("input1"), input_info("input2"), axis, ov::Shape{})); + topology.add(gather("gather", input_info("input1"), input_info("input2"), axis, 0, ov::Shape{})); ExecutionConfig config = get_test_default_config(engine); config.set_property(ov::intel_gpu::allow_new_shape_infer(true)); @@ -1983,7 +1984,7 @@ TEST(gather_gpu_fp32, indice_out_of_bound) { topology topology; topology.add(input_layout("input1", in1_layout)); topology.add(input_layout("input2", in2_layout)); - topology.add(gather("gather", input_info("input1"), input_info("input2"), axis, ov::Shape{}, 0, true)); + topology.add(gather("gather", input_info("input1"), input_info("input2"), axis, 0, ov::Shape{}, 0, true)); ExecutionConfig config = get_test_default_config(engine); config.set_property(ov::intel_gpu::allow_new_shape_infer(true)); @@ -2021,7 +2022,7 @@ TEST(gather_cpu_impl_fp32, dynamic_322_axisF) { topology topology; topology.add(input_layout("input1", in1_layout)); topology.add(input_layout("input2", in2_layout)); - topology.add(gather("gather", input_info("input1"), input_info("input2"), axis, ov::Shape{})); + topology.add(gather("gather", input_info("input1"), input_info("input2"), axis, 0, ov::Shape{})); auto config = get_test_default_config(engine); config.set_property(ov::intel_gpu::allow_new_shape_infer(true)); @@ -2071,7 +2072,7 @@ void test_gather_gpu_u8_322_axisF(bool is_caching_test) { topology.add(input_layout("InputDictionary", input1->get_layout())); topology.add(input_layout("InputText", input2->get_layout())); topology.add( - gather("gather", input_info("InputDictionary"), input_info("InputText"), axis, ov::Shape{3, 2, 2, 1})); + gather("gather", input_info("InputDictionary"), input_info("InputText"), axis, 4, ov::Shape{3, 2, 2, 1})); cldnn::network::ptr network = get_network(engine, topology, get_test_default_config(engine), get_test_stream_ptr(), is_caching_test); @@ -2121,7 +2122,7 @@ TEST(gather_single_axis, simple_Baxis) { topology.add(input_layout("InputDictionary", input1->get_layout())); topology.add(input_layout("InputText", input2->get_layout())); topology.add( - gather("gather", input_info("InputDictionary"), 
input_info("InputText"), axis, ov::Shape{1, 2, 2, 1}) + gather("gather", input_info("InputDictionary"), input_info("InputText"), axis, 4, ov::Shape{1, 2, 2, 1}) ); topology.add(reorder("reorder", input_info("gather"), format::bfyx, data_types::i8)); diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/hash_key_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/hash_key_gpu_test.cpp index a9c1e1262f3aff..68cfc54237737b 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/hash_key_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/hash_key_gpu_test.cpp @@ -94,7 +94,7 @@ class check_hash_value: public ::testing::Test { topology.add(input_layout("InputDictionary", input1->get_layout())); topology.add(input_layout("InputText", input2->get_layout())); topology.add( - gather(key_prim_id, input_info("InputDictionary"), input_info("InputText"), axis, ov::Shape{3, 2, 3, 3, 2}, batch_dim, negative_indexes) + gather(key_prim_id, input_info("InputDictionary"), input_info("InputText"), axis, 5, ov::Shape{3, 2, 3, 3, 2}, batch_dim, negative_indexes) ); cldnn::network::ptr net = get_network(engine, topology, get_test_default_config(engine), get_test_stream_ptr(), is_caching_test); From 5f336a68701685dc051aa51f655af973c1d12445 Mon Sep 17 00:00:00 2001 From: hyunback kim Date: Wed, 18 Oct 2023 16:54:09 +0900 Subject: [PATCH 245/257] [GPU] Support o_is_yx_isv2 (#20521) 216-attention-center model in notebooks needs o_is_yx_isv2 with onednn3.3 Signed-off-by: hyunback --- src/plugins/intel_gpu/include/intel_gpu/runtime/format.hpp | 1 + .../intel_gpu/src/graph/impls/ocl/kernel_selector_helper.cpp | 4 ++++ .../src/kernel_selector/cl_kernels/reorder_weights.cl | 4 ++++ .../intel_gpu/src/kernel_selector/kernel_selector_common.cpp | 1 + src/plugins/intel_gpu/src/kernel_selector/tensor_type.cpp | 5 +++++ src/plugins/intel_gpu/src/kernel_selector/tensor_type.h | 1 + src/plugins/intel_gpu/src/runtime/format.cpp | 1 + 7 files changed, 17 insertions(+) diff --git a/src/plugins/intel_gpu/include/intel_gpu/runtime/format.hpp b/src/plugins/intel_gpu/include/intel_gpu/runtime/format.hpp index 9e38a8b99c7b5e..ea3e2aec0274d0 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/runtime/format.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/runtime/format.hpp @@ -139,6 +139,7 @@ struct format { oyix, oxiy, os_iyx_osv16, ///< format used only for convolution weights + o_is_yx_isv2, ///< format used only for convolution weights o_is_yx_isv4, ///< format used only for convolution weights o_is_yx_isv16, ///< format used only for convolution weights o_is_zyx_isv16, ///< format used only for convolution weights diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/kernel_selector_helper.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/kernel_selector_helper.cpp index 3392a7e42b2363..f0872d3702970e 100644 --- a/src/plugins/intel_gpu/src/graph/impls/ocl/kernel_selector_helper.cpp +++ b/src/plugins/intel_gpu/src/graph/impls/ocl/kernel_selector_helper.cpp @@ -422,6 +422,8 @@ kernel_selector::weights_layout to_weights_layout(format f, bool is_grouped) { return kernel_selector::weights_layout::yxio; case format::os_yxi_osv16: return kernel_selector::weights_layout::os_yxi_osv16; + case format::o_is_yx_isv2: + return kernel_selector::weights_layout::o_is_yx_isv2; case format::o_is_yx_isv4: return kernel_selector::weights_layout::o_is_yx_isv4; case format::o_is_yx_isv16: @@ -741,6 +743,8 @@ cldnn::format::type from_weights_layout(kernel_selector::weights_layout l) { return cldnn::format::yxio; case 
kernel_selector::weights_layout::os_yxi_osv16: return cldnn::format::os_yxi_osv16; + case kernel_selector::weights_layout::o_is_yx_isv2: + return cldnn::format::o_is_yx_isv2; case kernel_selector::weights_layout::o_is_yx_isv4: return cldnn::format::o_is_yx_isv4; case kernel_selector::weights_layout::o_is_yx_isv16: diff --git a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/reorder_weights.cl b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/reorder_weights.cl index 9003f23ad1ec8d..052c6721a88141 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/reorder_weights.cl +++ b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/reorder_weights.cl @@ -280,6 +280,8 @@ inline uint FUNC(get_input_index)(uint g, uint o, uint i, uint z, uint y, uint x return GET_FILTER_OS_IYX_OSV_INDEX(INPUT0, o, i, y, x, 32); #elif defined INPUT0_LAYOUT_OS_IYX_OSV32__AI32 return GET_FILTER_OS_IYX_OSV_INDEX(INPUT0, o, i, y, x, 32); +#elif defined INPUT0_LAYOUT_O_IS_YX_ISV2 + return GET_FILTER_O_IS_ZYX_ISV16_INDEX(INPUT0, o, i, 0, y, x, 2); #elif defined INPUT0_LAYOUT_O_IS_YX_ISV4 return GET_FILTER_O_IS_ZYX_ISV16_INDEX(INPUT0, o, i, 0, y, x, 4); #elif defined INPUT0_LAYOUT_O_IS_YX_ISV16 @@ -501,6 +503,8 @@ inline uint FUNC(get_output_index)(uint g, uint o, uint i, uint z, uint y, uint return GET_FILTER_OS_IYX_OSV_INDEX(OUTPUT, o, i, y, x, 32); #elif defined OUTPUT_LAYOUT_OS_IYX_OSV64 return GET_FILTER_OS_IYX_OSV_INDEX(OUTPUT, o, i, y, x, 64); +#elif defined OUTPUT_LAYOUT_O_IS_YX_ISV2 + return GET_FILTER_O_IS_ZYX_ISV16_INDEX(OUTPUT, o, i, 0, y, x, 2); #elif defined OUTPUT_LAYOUT_O_IS_YX_ISV4 return GET_FILTER_O_IS_ZYX_ISV16_INDEX(OUTPUT, o, i, 0, y, x, 4); #elif defined OUTPUT_LAYOUT_O_IS_YX_ISV16 diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernel_selector_common.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernel_selector_common.cpp index 6e192c92bfb808..8c6d2af2fd8f69 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/kernel_selector_common.cpp +++ b/src/plugins/intel_gpu/src/kernel_selector/kernel_selector_common.cpp @@ -316,6 +316,7 @@ std::string toString(WeightsLayout layout) { case WeightsLayout::os_is_zyx_osv16_isv16: return "OS_IS_ZYX_OSV16_ISV16"; case WeightsLayout::os_is_zyx_osv32_isv16: return "OS_IS_ZYX_OSV32_ISV16"; case WeightsLayout::os_is_zyx_osv64_isv16: return "OS_IS_ZYX_OSV64_ISV16"; + case WeightsLayout::o_is_yx_isv2: return "O_IS_YX_ISV2"; case WeightsLayout::o_is_yx_isv4: return "O_IS_YX_ISV4"; case WeightsLayout::o_is_yx_isv16: return "O_IS_YX_ISV16"; case WeightsLayout::o_is_zyx_isv16: return "O_IS_ZYX_ISV16"; diff --git a/src/plugins/intel_gpu/src/kernel_selector/tensor_type.cpp b/src/plugins/intel_gpu/src/kernel_selector/tensor_type.cpp index b352059d850dea..3fcd03bdece0db 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/tensor_type.cpp +++ b/src/plugins/intel_gpu/src/kernel_selector/tensor_type.cpp @@ -86,6 +86,7 @@ WeightsTensor::WeightsChannelArray WeightsTensor::weightsChannelArray {{ { WeightsLayout::os_iyx_osv32__ai32, { 0, 1, -1, 2, 3, -1 } }, { WeightsLayout::os_iyx_osv64, { 0, 1, -1, 2, 3, -1 } }, { WeightsLayout::os_iyx_osv16_rotate_180, { 0, 1, -1, 2, 3, -1 } }, + { WeightsLayout::o_is_yx_isv2, { 0, 1, -1, 2, 3, -1 } }, { WeightsLayout::o_is_yx_isv4, { 0, 1, -1, 2, 3, -1 } }, { WeightsLayout::o_is_yx_isv16, { 0, 1, -1, 2, 3, -1 } }, { WeightsLayout::o_is_zyx_isv16, { 0, 1, 2, 3, 4, -1 } }, @@ -617,6 +618,10 @@ NDims WeightsTensor::GetSimpleDims(const std::vector& d, WeightsLayout l // TODO: It's not the right pitches. 
it's here in order to calculate physical size switch (l) { + case o_is_yx_isv2: + assert(newDims.size() == 4); + newDims[2] = RoundUp(newDims[2], 2); + break; case o_is_yx_isv4: assert(newDims.size() == 4); newDims[2] = RoundUp(newDims[2], 4); diff --git a/src/plugins/intel_gpu/src/kernel_selector/tensor_type.h b/src/plugins/intel_gpu/src/kernel_selector/tensor_type.h index 97f087e6f2a051..205b3198a7a103 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/tensor_type.h +++ b/src/plugins/intel_gpu/src/kernel_selector/tensor_type.h @@ -94,6 +94,7 @@ enum WeightsLayout { oxiy, iyxo, yxio, + o_is_yx_isv2, o_is_yx_isv4, o_is_yx_isv16, o_is_zyx_isv16, diff --git a/src/plugins/intel_gpu/src/runtime/format.cpp b/src/plugins/intel_gpu/src/runtime/format.cpp index bd31583493ea71..095bda14bbf97d 100644 --- a/src/plugins/intel_gpu/src/runtime/format.cpp +++ b/src/plugins/intel_gpu/src/runtime/format.cpp @@ -85,6 +85,7 @@ static const std::map format_traits_map { FMT_TRAITS(oizyx, 1, 1, 3, 0, {0, 1, 2, 3, 4}, "oizyx", "oixyz", {}), FMT_TRAITS(iozyx, 1, 1, 3, 0, {1, 0, 2, 3, 4}, "iozyx", "oixyz", {}), FMT_TRAITS(os_is_yx_isv16_osv16, 1, 1, 2, 0, {0, 1, 2, 3}, "oiyx", "oixy", {{1, 16}, {0, 16}}), + FMT_TRAITS(o_is_yx_isv2, 1, 1, 2, 0, {0, 1, 2, 3}, "oiyx", "oixy", {{1, 2}}), FMT_TRAITS(o_is_yx_isv4, 1, 1, 2, 0, {0, 1, 2, 3}, "oiyx", "oixy", {{1, 4}}), FMT_TRAITS(o_is_yx_isv16, 1, 1, 2, 0, {0, 1, 2, 3}, "oiyx", "oixy", {{1, 16}}), FMT_TRAITS(o_is_zyx_isv16, 1, 1, 3, 0, {0, 1, 2, 3, 4}, "oizyx", "oixyz", {{1, 16}}), From 4eab5b4635184ceebed1a119537afa9a16f32b54 Mon Sep 17 00:00:00 2001 From: Nikolay Shchegolev Date: Wed, 18 Oct 2023 12:50:46 +0400 Subject: [PATCH 246/257] [CPU] RandomUniform-8 implementation. (#20171) --- src/core/src/op/random_uniform.cpp | 2 + src/core/tests/copy.cpp | 4 +- .../onnx/frontend/src/op/random_uniform.cpp | 1 + src/plugins/intel_cpu/src/cpu_types.cpp | 2 + src/plugins/intel_cpu/src/cpu_types.h | 1 + src/plugins/intel_cpu/src/node.cpp | 4 +- src/plugins/intel_cpu/src/node.h | 4 +- .../intel_cpu/src/nodes/grid_sample.cpp | 24 +- .../intel_cpu/src/nodes/grid_sample.hpp | 2 +- .../src/nodes/kernels/x64/grid_sample.cpp | 6 +- .../src/nodes/kernels/x64/grid_sample.hpp | 14 +- .../src/nodes/kernels/x64/jit_kernel_base.cpp | 337 +++++++--- .../src/nodes/kernels/x64/jit_kernel_base.hpp | 106 ++- .../src/nodes/kernels/x64/random_uniform.cpp | 635 ++++++++++++++++++ .../src/nodes/kernels/x64/random_uniform.hpp | 99 +++ .../intel_cpu/src/nodes/random_uniform.cpp | 532 +++++++++++++++ .../intel_cpu/src/nodes/random_uniform.hpp | 120 ++++ src/plugins/intel_cpu/src/nodes/reference.cpp | 53 +- src/plugins/intel_cpu/src/nodes/reference.h | 4 +- src/plugins/intel_cpu/src/nodes_factory.cpp | 2 + .../shape_inference/custom/random_uniform.cpp | 47 ++ .../shape_inference/custom/random_uniform.hpp | 37 + .../skip_tests_config.cpp | 2 + .../classes/random_uniform.cpp | 265 ++++++++ .../classes/random_uniform.hpp | 53 ++ .../instances/common/random_uniform.cpp | 68 ++ .../instances/x64/random_uniform.cpp | 46 ++ .../functional/test_utils/cpu_test_utils.hpp | 1 + .../common_test_utils/common_utils.hpp | 4 + 29 files changed, 2318 insertions(+), 157 deletions(-) create mode 100644 src/plugins/intel_cpu/src/nodes/kernels/x64/random_uniform.cpp create mode 100644 src/plugins/intel_cpu/src/nodes/kernels/x64/random_uniform.hpp create mode 100644 src/plugins/intel_cpu/src/nodes/random_uniform.cpp create mode 100644 src/plugins/intel_cpu/src/nodes/random_uniform.hpp create mode 100644 
src/plugins/intel_cpu/src/shape_inference/custom/random_uniform.cpp create mode 100644 src/plugins/intel_cpu/src/shape_inference/custom/random_uniform.hpp create mode 100644 src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/random_uniform.cpp create mode 100644 src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/random_uniform.hpp create mode 100644 src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/common/random_uniform.cpp create mode 100644 src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/random_uniform.cpp diff --git a/src/core/src/op/random_uniform.cpp b/src/core/src/op/random_uniform.cpp index 296b115979c8f7..b07044960f44c9 100644 --- a/src/core/src/op/random_uniform.cpp +++ b/src/core/src/op/random_uniform.cpp @@ -92,6 +92,8 @@ bool RandomUniform::evaluate(TensorVector& outputs, const TensorVector& inputs) const auto& t_out = get_out_type(); OPENVINO_ASSERT(validate::out_et(t_out), "Unsupported type of RandomUniform: " + t_out.get_type_name()); + outputs[0].set_shape(out_shape); + auto state = ov::reference::random_uniform(out_dims.data(), static_cast(inputs[1].data()), static_cast(inputs[2].data()), diff --git a/src/core/tests/copy.cpp b/src/core/tests/copy.cpp index d7b2b4256f4aa3..f902d7485a1932 100644 --- a/src/core/tests/copy.cpp +++ b/src/core/tests/copy.cpp @@ -447,12 +447,12 @@ TEST(copy, random_uniform) { const auto min_val_param = make_shared(element::f32, Shape{1}); const auto max_val_param = make_shared(element::f32, Shape{1}); - auto out_shape = make_shared(element::i64, Shape{3}, std::vector{1, 2, 3}); + auto out_shape = make_shared(element::i64, Shape{3}, shape); auto ru = std::make_shared(out_shape, min_val_param, max_val_param, element::f32, 150, 10); // Call `evaluate` to update m_state - auto outputs = ov::TensorVector{{element::i64, out_shape->get_shape(), shape.data()}}; + auto outputs = ov::TensorVector{{element::i64, {1lu, 2lu, 3lu}}}; ru->evaluate(outputs, ov::TensorVector{{element::i64, out_shape->get_shape(), shape.data()}, {element::f32, min_val_param->get_shape(), &min}, diff --git a/src/frontends/onnx/frontend/src/op/random_uniform.cpp b/src/frontends/onnx/frontend/src/op/random_uniform.cpp index 6215dcc491c07d..a26ed672a0cc15 100644 --- a/src/frontends/onnx/frontend/src/op/random_uniform.cpp +++ b/src/frontends/onnx/frontend/src/op/random_uniform.cpp @@ -29,6 +29,7 @@ OutputVector random_uniform(const Node& node) { const auto target_type = common::get_ngraph_element_type(dtype); const uint64_t global_seed = 0; + // TODO: This multiplication leads to a mismatch in accuracy. 
Issue: 123003 const auto seed_uint64 = static_cast(seed * 1000); return {std::make_shared(target_shape_const, diff --git a/src/plugins/intel_cpu/src/cpu_types.cpp b/src/plugins/intel_cpu/src/cpu_types.cpp index 03fbe1a9923b7a..6f5a84701b184d 100644 --- a/src/plugins/intel_cpu/src/cpu_types.cpp +++ b/src/plugins/intel_cpu/src/cpu_types.cpp @@ -123,6 +123,7 @@ static const TypeToNameMap& get_type_to_name_tbl() { { "ScatterElementsUpdate", Type::ScatterElementsUpdate}, { "ScatterNDUpdate", Type::ScatterNDUpdate}, { "Interpolate", Type::Interpolate}, + { "RandomUniform", Type::RandomUniform}, { "ReduceL1", Type::Reduce}, { "ReduceL2", Type::Reduce}, { "ReduceLogicalAnd", Type::Reduce}, @@ -317,6 +318,7 @@ std::string NameFromType(const Type type) { CASE(PriorBox); CASE(PriorBoxClustered) CASE(MHA); + CASE(RandomUniform); CASE(Unique); CASE(Ngram); CASE(Unknown); diff --git a/src/plugins/intel_cpu/src/cpu_types.h b/src/plugins/intel_cpu/src/cpu_types.h index 403ed62d482f8b..9afbe2d7485ddd 100644 --- a/src/plugins/intel_cpu/src/cpu_types.h +++ b/src/plugins/intel_cpu/src/cpu_types.h @@ -110,6 +110,7 @@ enum class Type { PriorBoxClustered, Interaction, MHA, + RandomUniform, Unique, Ngram }; diff --git a/src/plugins/intel_cpu/src/node.cpp b/src/plugins/intel_cpu/src/node.cpp index e8fe6b89a00afc..ab02ae44dd6ce2 100644 --- a/src/plugins/intel_cpu/src/node.cpp +++ b/src/plugins/intel_cpu/src/node.cpp @@ -95,8 +95,6 @@ Node::Node(const std::shared_ptr& op, typeStr(op->get_type_name()), type(TypeFromName(op->get_type_name())), profiling(op->get_friendly_name()) { - const std::string errorPrefix = "Ngraph operation " + std::string(op->get_type_name()) + " with name " + op->get_friendly_name(); - for (size_t i = 0; i < op->get_input_size(); i++) { const auto &shape = op->get_input_partial_shape(i); if (shape.rank().is_dynamic()) { @@ -480,6 +478,8 @@ std::string Node::getPrimitiveDescriptorType() const { SEARCH_TYPE(_dw); SEARCH_TYPE(_1x1); +#undef SEARCH_TYPE + if (type == impl_desc_type::unknown) str_type = "unknown"; else if (str_type.empty()) diff --git a/src/plugins/intel_cpu/src/node.h b/src/plugins/intel_cpu/src/node.h index 5becbfa9863f70..864c08a95b04c6 100644 --- a/src/plugins/intel_cpu/src/node.h +++ b/src/plugins/intel_cpu/src/node.h @@ -41,6 +41,8 @@ #include "nodes/executors/mvn_list.hpp" #include "nodes/executors/executor.hpp" +#define THROW_CPU_NODE_ERR(...) OPENVINO_THROW(getTypeStr(), " node with name '", getName(), "' ", __VA_ARGS__) + namespace ov { namespace intel_cpu { @@ -353,7 +355,7 @@ class Node { inplace = InPlaceType::Unknown; } - std::string getPrimitiveDescriptorType() const; + virtual std::string getPrimitiveDescriptorType() const; PerfCount &PerfCounter() { return perfCounter; } diff --git a/src/plugins/intel_cpu/src/nodes/grid_sample.cpp b/src/plugins/intel_cpu/src/nodes/grid_sample.cpp index 798b04078352bf..6868e907fa7ae8 100644 --- a/src/plugins/intel_cpu/src/nodes/grid_sample.cpp +++ b/src/plugins/intel_cpu/src/nodes/grid_sample.cpp @@ -10,9 +10,11 @@ #include using namespace InferenceEngine; -using namespace dnnl::impl::cpu; using namespace ov::intel_cpu; using namespace ov::intel_cpu::node; +#if defined(OPENVINO_ARCH_X86_64) +using namespace dnnl::impl::cpu; +#endif // OPENVINO_ARCH_X86_64 #define THROW_ERROR IE_THROW() << getTypeStr() << " node with name '" << getName() << "' " @@ -23,10 +25,14 @@ bool GridSample::isSupportedOperation(const std::shared_ptr& op, errorMessage = "Not supported GridSample operation version. 
CPU plug-in supports only 9th version."; return false; } +#if defined(OPENVINO_ARCH_X86_64) if (!x64::mayiuse(x64::sse41)) { errorMessage = "Not supported CPU instructions set."; return false; } +#else + return false; +#endif // OPENVINO_ARCH_X86_64 } catch (...) { return false; } @@ -34,6 +40,8 @@ bool GridSample::isSupportedOperation(const std::shared_ptr& op, return true; } +#if defined(OPENVINO_ARCH_X86_64) + GridSample::GridSample(const std::shared_ptr& op, const GraphContext::CPtr context) : Node(op, context, NgraphShapeInferFactory(op, PortMask(1))) { std::string errorMessage; @@ -110,7 +118,7 @@ void GridSample::initSupportedPrimitiveDescriptors() { } void GridSample::createPrimitive() { - GridSampleKernelConfParams jcp; + kernel::GridSampleKernelConfParams jcp; jcp.inDataPrc = dataPrecision; jcp.gridPrc = gridPrecision; @@ -133,15 +141,13 @@ void GridSample::createPrimitive() { jcp.cannelNum = jcp.dynamicChannel ? 1lu : srcDataDims[1]; } -#if defined(OPENVINO_ARCH_X86_64) if (x64::mayiuse(x64::avx512_core)) { - jitKernel.reset(new GridSampleKernel(jcp)); + jitKernel.reset(new kernel::GridSampleKernel(jcp)); } else if (x64::mayiuse(x64::avx2)) { - jitKernel.reset(new GridSampleKernel(jcp)); + jitKernel.reset(new kernel::GridSampleKernel(jcp)); } else if (x64::mayiuse(x64::sse41)) { - jitKernel.reset(new GridSampleKernel(jcp)); + jitKernel.reset(new kernel::GridSampleKernel(jcp)); } -#endif // OPENVINO_ARCH_X86_64 if (!jitKernel) { THROW_ERROR << " could not create JIT kernel."; } @@ -268,7 +274,7 @@ void GridSample::execute(dnnl::stream strm) { auto threadBody = [&](const int ithr, const int nthr) { const auto& p = execParamsPerThread[ithr]; - auto arg = GridSamplesKernelExecArgs(); + auto arg = kernel::GridSamplesKernelExecArgs(); if (p.workAmount == 0lu) { return; } @@ -311,3 +317,5 @@ void GridSample::executeDynamicImpl(dnnl::stream strm) { bool GridSample::created() const { return getType() == Type::GridSample; } + +#endif // OPENVINO_ARCH_X86_64 diff --git a/src/plugins/intel_cpu/src/nodes/grid_sample.hpp b/src/plugins/intel_cpu/src/nodes/grid_sample.hpp index 89a1a409764615..78b5f9d66710ca 100644 --- a/src/plugins/intel_cpu/src/nodes/grid_sample.hpp +++ b/src/plugins/intel_cpu/src/nodes/grid_sample.hpp @@ -72,7 +72,7 @@ class GridSample : public Node { static constexpr size_t IN_DATA = 0; static constexpr size_t IN_GRID = 1; - std::shared_ptr jitKernel; + std::shared_ptr jitKernel; }; } // namespace node diff --git a/src/plugins/intel_cpu/src/nodes/kernels/x64/grid_sample.cpp b/src/plugins/intel_cpu/src/nodes/kernels/x64/grid_sample.cpp index 7501dd606427ce..89e658a7d6a6fc 100644 --- a/src/plugins/intel_cpu/src/nodes/kernels/x64/grid_sample.cpp +++ b/src/plugins/intel_cpu/src/nodes/kernels/x64/grid_sample.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2022 Intel Corporation +// Copyright (C) 2018-2023 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // @@ -8,12 +8,13 @@ using namespace dnnl::impl::cpu; namespace ov { namespace intel_cpu { +namespace kernel { #define GET_OFF(field) offsetof(GridSamplesKernelExecArgs, field) template GridSampleKernel::GridSampleKernel(const GridSampleKernelConfParams& jcp) : - GridSampleKernelBase(jit_name(), jcp) { + GridSampleKernelBase(jit_name(), jcp, isa) { vlen = x64::cpu_isa_traits::vlen; dataTypeSize = jcp.inDataPrc.size(); gridTypeSize = jcp.gridPrc.size(); @@ -2085,5 +2086,6 @@ template class GridSampleKernel; template class GridSampleKernel; template class GridSampleKernel; +} // namespace kernel } // namespace intel_cpu } // 
namespace ov diff --git a/src/plugins/intel_cpu/src/nodes/kernels/x64/grid_sample.hpp b/src/plugins/intel_cpu/src/nodes/kernels/x64/grid_sample.hpp index c24100259cd5bb..295c715fb8146b 100644 --- a/src/plugins/intel_cpu/src/nodes/kernels/x64/grid_sample.hpp +++ b/src/plugins/intel_cpu/src/nodes/kernels/x64/grid_sample.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2022 Intel Corporation +// Copyright (C) 2018-2023 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // @@ -14,6 +14,12 @@ namespace intel_cpu { enum class GridSampleInterpolationMode { BILINEAR, BICUBIC, NEAREST }; enum class GridSamplePaddingMode { ZEROS, BORDER, REFLECTION }; +namespace kernel { + +class GridSampleKernelBase; + +#if defined(OPENVINO_ARCH_X86_64) + struct GridSampleKernelConfParams { bool dynamicShapes = false; bool dynamicBatch = false; @@ -66,7 +72,8 @@ class GridSampleKernelBase: public JitKernelBase { assert(ker_); ker_(args); } - explicit GridSampleKernelBase(const char* name, const GridSampleKernelConfParams& jcp) : JitKernelBase(name), ker_(nullptr), jcp(jcp) {} + explicit GridSampleKernelBase(const char* name, const GridSampleKernelConfParams& jcp, dnnl::impl::cpu::x64::cpu_isa_t isa) + : JitKernelBase(name, isa), ker_(nullptr), jcp(jcp) {} virtual void create_ker() = 0; uint64_t getVecLen() { @@ -173,5 +180,8 @@ class GridSampleKernel : public GridSampleKernelBase { void hwShiftPs2dq(const Vmm& vDst, const Vmm& vHCoord, const Vmm& vWCoord, const Vmm& vWidth); }; +#endif // OPENVINO_ARCH_X86_64 + +} // namespace kernel } // namespace intel_cpu } // namespace ov diff --git a/src/plugins/intel_cpu/src/nodes/kernels/x64/jit_kernel_base.cpp b/src/plugins/intel_cpu/src/nodes/kernels/x64/jit_kernel_base.cpp index 6afbecf143f27b..bc0daaf6e33e2a 100644 --- a/src/plugins/intel_cpu/src/nodes/kernels/x64/jit_kernel_base.cpp +++ b/src/plugins/intel_cpu/src/nodes/kernels/x64/jit_kernel_base.cpp @@ -1,172 +1,243 @@ -// Copyright (C) 2022 Intel Corporation +// Copyright (C) 2018-2023 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // #include "jit_kernel_base.hpp" -using namespace ov; -using namespace intel_cpu; using namespace dnnl::impl::cpu; +namespace ov { +namespace intel_cpu { +namespace kernel { -void JitKernelBase::uni_vfmsub132ps(const Xbyak::Xmm& vDst, - const Xbyak::Xmm& vSrc, +JitKernelBase::JitKernelBase(const char* name, x64::cpu_isa_t isa) + : x64::jit_generator(name, nullptr, x64::MAX_CODE_SIZE, true, isa), m_isa(isa) { + vlen = x64::isa_max_vlen(isa); +} + +void JitKernelBase::uni_vfmsub132ps(const Xbyak::Xmm& v_dst, + const Xbyak::Xmm& v_src, const Xbyak::Operand& op) { if (isValidIsa(x64::avx2)) { - vfmsub132ps(vDst, vSrc, op); + vfmsub132ps(v_dst, v_src, op); } else if (isValidIsa(x64::avx)) { - assert(vDst.getIdx() != vSrc.getIdx()); - vmulps(vDst, vDst, op); - vsubps(vDst, vDst, vSrc); + assert(v_dst.getIdx() != v_src.getIdx()); + vmulps(v_dst, v_dst, op); + vsubps(v_dst, v_dst, v_src); } else { - assert(vDst.getIdx() != vSrc.getIdx()); - mulps(vDst, op); - subps(vDst, vSrc); + assert(v_dst.getIdx() != v_src.getIdx()); + mulps(v_dst, op); + subps(v_dst, v_src); } } -void JitKernelBase::uni_vfnmadd132ps(const Xbyak::Xmm& vDst, - const Xbyak::Xmm& vSrc, +void JitKernelBase::uni_vfnmadd132ps(const Xbyak::Xmm& v_dst, + const Xbyak::Xmm& v_src, const Xbyak::Operand& op) { if (isValidIsa(x64::avx2)) { - vfnmadd132ps(vDst, vSrc, op); + vfnmadd132ps(v_dst, v_src, op); } else if (isValidIsa(x64::avx)) { - assert(vDst.getIdx() != vSrc.getIdx()); - vmulps(vDst, vDst, op); - vsubps(vDst, vSrc, vDst); 
+ assert(v_dst.getIdx() != v_src.getIdx()); + vmulps(v_dst, v_dst, op); + vsubps(v_dst, v_src, v_dst); } else { - assert(vDst.getIdx() != vSrc.getIdx()); - mulps(vDst, op); - subps(vSrc, vDst); - movups(vDst, vSrc); + assert(v_dst.getIdx() != v_src.getIdx()); + mulps(v_dst, op); + subps(v_src, v_dst); + movups(v_dst, v_src); } } -void JitKernelBase::uni_vfmsub231ps(const Xbyak::Xmm& vDst, - const Xbyak::Xmm& vSrc, +void JitKernelBase::uni_vfmsub231ps(const Xbyak::Xmm& v_dst, + const Xbyak::Xmm& v_src, const Xbyak::Operand& op) { if (isValidIsa(x64::avx2)) { - vfmsub231ps(vDst, vSrc, op); + vfmsub231ps(v_dst, v_src, op); } else if (isValidIsa(x64::avx)) { - assert(!vDst.isEqualIfNotInherited(op)); - vmulps(vSrc, vSrc, op); - vsubps(vDst, vSrc, vDst); + assert(!v_dst.isEqualIfNotInherited(op)); + vmulps(v_src, v_src, op); + vsubps(v_dst, v_src, v_dst); } else { - assert(!vDst.isEqualIfNotInherited(op)); - mulps(vSrc, op); - subps(vSrc, vDst); - movups(vDst, vSrc); + assert(!v_dst.isEqualIfNotInherited(op)); + mulps(v_src, op); + subps(v_src, v_dst); + movups(v_dst, v_src); } } -void JitKernelBase::uni_vpaddd(const Xbyak::Ymm& vDst, - const Xbyak::Ymm& vSrc, +void JitKernelBase::uni_vpaddd(const Xbyak::Ymm& v_dst, + const Xbyak::Ymm& v_src, const Xbyak::Operand& op) { if (isValidIsa(x64::avx2)) { - vpaddd(vDst, vSrc, op); + vpaddd(v_dst, v_src, op); } else if (isValidIsa(x64::avx)) { - Xbyak::Xmm xmmDst(vDst.getIdx()); - vmovups(vDst, vSrc); + Xbyak::Xmm xmmDst(v_dst.getIdx()); + vmovups(v_dst, v_src); if (op.isYMM()) { Xbyak::Ymm ymmOp(op.getIdx()); Xbyak::Xmm xmmOp(op.getIdx()); paddd(xmmDst, xmmOp); - vperm2f128(vDst, vDst, vDst, 0x1); + vperm2f128(v_dst, v_dst, v_dst, 0x1); vperm2f128(ymmOp, ymmOp, ymmOp, 0x1); paddd(xmmDst, xmmOp); - vperm2f128(vDst, vDst, vDst, 0x1); + vperm2f128(v_dst, v_dst, v_dst, 0x1); vperm2f128(ymmOp, ymmOp, ymmOp, 0x1); } else if (op.isMEM()) { const int vlen = x64::cpu_isa_traits::vlen; paddd(xmmDst, op.getAddress()); - vperm2f128(vDst, vDst, vDst, 0x1); + vperm2f128(v_dst, v_dst, v_dst, 0x1); paddd(xmmDst, ptr[op.getAddress().getRegExp() + vlen]); - vperm2f128(vDst, vDst, vDst, 0x1); + vperm2f128(v_dst, v_dst, v_dst, 0x1); } else { IE_THROW() << "Not supported operand type."; } } else if (isValidIsa(x64::sse41)) { - assert(vDst.getIdx() != vSrc.getIdx()); - paddd(vDst, op); + assert(v_dst.getIdx() != v_src.getIdx()); + paddd(v_dst, op); } else { IE_THROW() << "Not defined behavior for instruction 'vpaddd' in current instructions set."; } } -void JitKernelBase::uni_vpsubd(const Xbyak::Ymm& vDst, - const Xbyak::Ymm& vSrc, +void JitKernelBase::uni_vpaddq(const Xbyak::Xmm& v_dst, + const Xbyak::Xmm& v_src, + const Xbyak::Operand& op) { + if (isValidIsa(x64::avx2)) { + vpaddq(v_dst, v_src, op); + } else { + if (v_dst.getIdx() != v_src.getIdx()) { + movups(v_dst, v_src); + } + paddq(v_dst, op); + } +} + +void JitKernelBase::uni_vpsubd(const Xbyak::Ymm& v_dst, + const Xbyak::Ymm& v_src, const Xbyak::Operand& op) { if (isValidIsa(x64::avx2)) { - vpsubd(vDst, vSrc, op); + vpsubd(v_dst, v_src, op); } else if (isValidIsa(x64::avx)) { - Xbyak::Xmm xmmDst(vDst.getIdx()); - vmovups(vDst, vSrc); + Xbyak::Xmm xmmDst(v_dst.getIdx()); + vmovups(v_dst, v_src); if (op.isYMM()) { Xbyak::Ymm ymmOp(op.getIdx()); Xbyak::Xmm xmmOp(op.getIdx()); psubd(xmmDst, xmmOp); - vperm2f128(vDst, vDst, vDst, 0x1); + vperm2f128(v_dst, v_dst, v_dst, 0x1); vperm2f128(ymmOp, ymmOp, ymmOp, 0x1); psubd(xmmDst, xmmOp); - vperm2f128(vDst, vDst, vDst, 0x1); + vperm2f128(v_dst, v_dst, v_dst, 0x1); 
vperm2f128(ymmOp, ymmOp, ymmOp, 0x1); } else if (op.isMEM()) { const int vlen = x64::cpu_isa_traits::vlen; psubd(xmmDst, op.getAddress()); - vperm2f128(vDst, vDst, vDst, 0x1); + vperm2f128(v_dst, v_dst, v_dst, 0x1); psubd(xmmDst, ptr[op.getAddress().getRegExp() + vlen]); - vperm2f128(vDst, vDst, vDst, 0x1); + vperm2f128(v_dst, v_dst, v_dst, 0x1); } else { IE_THROW() << "Not supported operand type."; } } else if (isValidIsa(x64::sse41)) { - assert(vDst.getIdx() != vSrc.getIdx()); - psubd(vDst, op); + assert(v_dst.getIdx() != v_src.getIdx()); + psubd(v_dst, op); } else { IE_THROW() << "Not defined behavior for instruction 'vpsubd' in current instructions set."; } } -void JitKernelBase::uni_vdivps(const Xbyak::Xmm& vDst, +void JitKernelBase::uni_vsubpd(const Xbyak::Xmm& v_dst, + const Xbyak::Xmm& v_src, + const Xbyak::Operand& op) { + if (isValidIsa(x64::avx)) { + vsubpd(v_dst, v_src, op); + } else { + if (v_dst.getIdx() != v_src.getIdx()) { + movups(v_dst, v_src); + } + subpd(v_dst, op); + } +} + +void JitKernelBase::uni_vmulpd(const Xbyak::Xmm& v_dst, + const Xbyak::Xmm& v_src, + const Xbyak::Operand& op) { + if (isValidIsa(x64::avx)) { + vmulpd(v_dst, v_src, op); + } else { + if (v_dst.getIdx() != v_src.getIdx()) { + movups(v_dst, v_src); + } + mulpd(v_dst, op); + } +} + +void JitKernelBase::uni_vpmuludq(const Xbyak::Xmm& v_dst, + const Xbyak::Xmm& v_src, + const Xbyak::Operand& op) { + if (isValidIsa(x64::avx2)) { + vpmuludq(v_dst, v_src, op); + } else { + if (v_dst.getIdx() != v_src.getIdx()) { + movups(v_dst, v_src); + } + pmuludq(v_dst, op); + } +} + +void JitKernelBase::uni_vdivps(const Xbyak::Xmm& v_dst, const Xbyak::Operand& op1, const Xbyak::Operand& op2) { if (isValidIsa(x64::avx)) { - vdivps(vDst, op1, op2); + vdivps(v_dst, op1, op2); + } else { + if (!v_dst.isEqualIfNotInherited(op1)) { + movups(v_dst, op1); + } + divps(v_dst, op2); + } +} + +void JitKernelBase::uni_vdivpd(const Xbyak::Xmm& v_dst, + const Xbyak::Xmm& v_src, + const Xbyak::Operand& op) { + if (isValidIsa(x64::avx)) { + vdivpd(v_dst, v_src, op); } else { - if (!vDst.isEqualIfNotInherited(op1)) { - movups(vDst, op1); + if (v_dst.getIdx() != v_src.getIdx()) { + movups(v_dst, v_src); } - divps(vDst, op2); + divpd(v_dst, op); } } -void JitKernelBase::uni_vandps(const Xbyak::Xmm& vDst, +void JitKernelBase::uni_vandps(const Xbyak::Xmm& v_dst, const Xbyak::Xmm& vSrs, const Xbyak::Operand &op) { if (isValidIsa(x64::avx)) { - vandps(vDst, vSrs, op); + vandps(v_dst, vSrs, op); } else { - if (!vDst.isEqualIfNotInherited(vSrs)) { - movups(vDst, vSrs); + if (!v_dst.isEqualIfNotInherited(vSrs)) { + movups(v_dst, vSrs); } - andps(vDst, op); + andps(v_dst, op); } } -void JitKernelBase::uni_vandnps(const Xbyak::Xmm& vDst, +void JitKernelBase::uni_vandnps(const Xbyak::Xmm& v_dst, const Xbyak::Xmm& vSrs, const Xbyak::Operand &op) { if (isValidIsa(x64::avx)) { - vandnps(vDst, vSrs, op); + vandnps(v_dst, vSrs, op); } else { - if (!vDst.isEqualIfNotInherited(vSrs)) { - movups(vDst, vSrs); + if (!v_dst.isEqualIfNotInherited(vSrs)) { + movups(v_dst, vSrs); } - andnps(vDst, op); + andnps(v_dst, op); } } -void JitKernelBase::gatherdd(const Xbyak::Xmm& vDst, +void JitKernelBase::gatherdd(const Xbyak::Xmm& v_dst, const Xbyak::Reg64& rSrcPtr, const Xbyak::Xmm& vSrcShift, const Xbyak::Opmask& kReadMask, @@ -178,28 +249,28 @@ void JitKernelBase::gatherdd(const Xbyak::Xmm& vDst, if (!useMask) kxnord(kReadMask, kReadMask, kReadMask); if (zeroFill) - uni_vpxor(vDst, vDst, vDst); + uni_vpxor(v_dst, v_dst, v_dst); - vpgatherdd(vDst | kReadMask, 
ptr[rSrcPtr + vSrcShift]); + vpgatherdd(v_dst | kReadMask, ptr[rSrcPtr + vSrcShift]); } -void JitKernelBase::gatherdd(const Xbyak::Xmm& vDst, +void JitKernelBase::gatherdd(const Xbyak::Xmm& v_dst, const Xbyak::Reg64& rSrcPtr, const Xbyak::Xmm& vSrcShift, const Xbyak::Xmm& vReadMask, const bool useMask, const bool zeroFill) { - if (vDst.getIdx() == vSrcShift.getIdx() || vDst.getIdx() == vReadMask.getIdx() || vSrcShift.getIdx() == vReadMask.getIdx()) { + if (v_dst.getIdx() == vSrcShift.getIdx() || v_dst.getIdx() == vReadMask.getIdx() || vSrcShift.getIdx() == vReadMask.getIdx()) { IE_THROW() << "Any pair of the index, mask, or destination registers cannot be the same."; } if (zeroFill) - pxor(vDst, vDst); // Don't use vpxor. It zeros the rest of the YMM register. + pxor(v_dst, v_dst); // Don't use vpxor. It zeros the rest of the YMM register. if (isValidIsa(x64::avx2)) { if (!useMask) uni_vpcmpeqd(vReadMask, vReadMask, vReadMask); - vpgatherdd(vDst, ptr[rSrcPtr + vSrcShift], vReadMask); + vpgatherdd(v_dst, ptr[rSrcPtr + vSrcShift], vReadMask); } else { auto rAux = getReg64(); Xbyak::Reg32 r32Aux = Xbyak::Reg32(rAux.getIdx()); @@ -213,7 +284,7 @@ void JitKernelBase::gatherdd(const Xbyak::Xmm& vDst, je(lLoopNext, T_NEAR); } uni_vpextrd(r32Aux, vSrcShift, i); - pinsrd(vDst, ptr[rSrcPtr + rAux], i); + pinsrd(v_dst, ptr[rSrcPtr + rAux], i); if (useMask) L(lLoopNext); @@ -221,30 +292,30 @@ void JitKernelBase::gatherdd(const Xbyak::Xmm& vDst, } } -void JitKernelBase::gatherdd(const Xbyak::Ymm& vDst, +void JitKernelBase::gatherdd(const Xbyak::Ymm& v_dst, const Xbyak::Reg64& rSrcPtr, const Xbyak::Ymm& vSrcShift, const Xbyak::Ymm& vReadMask, const bool useMask, const bool zeroFill) { - if (vDst.getIdx() == vSrcShift.getIdx() || vDst.getIdx() == vReadMask.getIdx() || vSrcShift.getIdx() == vReadMask.getIdx()) { + if (v_dst.getIdx() == vSrcShift.getIdx() || v_dst.getIdx() == vReadMask.getIdx() || vSrcShift.getIdx() == vReadMask.getIdx()) { IE_THROW() << "Any pair of the index, mask, or destination registers cannot be the same."; } if (isValidIsa(x64::avx2)) { if (!useMask) uni_vpcmpeqd(vReadMask, vReadMask, vReadMask); if (zeroFill) - uni_vpxor(vDst, vDst, vDst); + uni_vpxor(v_dst, v_dst, v_dst); - vpgatherdd(vDst, ptr[rSrcPtr + vSrcShift], vReadMask); + vpgatherdd(v_dst, ptr[rSrcPtr + vSrcShift], vReadMask); } else { - Xbyak::Xmm xmmDst = Xbyak::Xmm(vDst.getIdx()), + Xbyak::Xmm xmmDst = Xbyak::Xmm(v_dst.getIdx()), xmmSrcShft = Xbyak::Xmm(vSrcShift.getIdx()), xmmReadMask = Xbyak::Xmm(vReadMask.getIdx()); for (uint8_t i = 0; i < 2; i++) { gatherdd(xmmDst, rSrcPtr, xmmSrcShft, xmmReadMask, useMask, zeroFill); - vperm2f128(vDst, vDst, vDst, 0x1); + vperm2f128(v_dst, v_dst, v_dst, 0x1); vperm2f128(vSrcShift, vSrcShift, vSrcShift, 0x1); if (useMask) vperm2f128(vReadMask, vReadMask, vReadMask, 0x1); @@ -252,6 +323,15 @@ void JitKernelBase::gatherdd(const Xbyak::Ymm& vDst, } } +void JitKernelBase::uni_vpbroadcastq(const Xbyak::Xmm &x, const Xbyak::Operand &op) { + if (isValidIsa(x64::avx2)) { + vpbroadcastq(x, op); + } else { + movsd(x, op); + shufpd(x, x, 0x0); + } +} + void JitKernelBase::uni_vpbroadcastd(const Xbyak::Xmm &x, const Xbyak::Operand &op) { if (isValidIsa(x64::avx2)) { vpbroadcastd(x, op); @@ -285,6 +365,57 @@ void JitKernelBase::uni_vpbroadcastd(const Xbyak::Ymm &x, const Xbyak::Operand & } } +void JitKernelBase::uni_vroundpd(const Xbyak::Xmm& v_dst, const Xbyak::Operand& op, const uint8_t imm) { + if (isValidIsa(x64::avx512_core)) { + vrndscalepd(v_dst, op, imm & 0x3); + } else if 
(isValidIsa(x64::avx)) { + vroundpd(v_dst, op, imm); + } else { + roundpd(v_dst, op, imm); + } +} + +void JitKernelBase::uni_vcvtdq2pd(const Xbyak::Xmm& v_dst, + const Xbyak::Operand& op) { + if (isValidIsa(x64::avx)) { + vcvtdq2pd(v_dst, op); + } else { + cvtdq2pd(v_dst, op); + } +} + +void JitKernelBase::uni_vcvtpd2dq(const Xbyak::Xmm& v_dst, + const Xbyak::Operand& op) { + if (isValidIsa(x64::avx)) { + vcvtpd2dq(v_dst, op); + } else { + cvtpd2dq(v_dst, op); + } +} + +void JitKernelBase::uni_vpmovzxdq(const Xbyak::Xmm& v_dst, + const Xbyak::Operand& op) { + if (isValidIsa(x64::avx2)) { + vpmovzxdq(v_dst, op); + } else { + pmovzxdq(v_dst, op); + } +} + +void JitKernelBase::uni_vshufpd(const Xbyak::Xmm& v_dst, + const Xbyak::Xmm& v_src, + const Xbyak::Operand& op, + uint8_t imm) { + if (isValidIsa(x64::avx)) { + vshufpd(v_dst, v_src, op, imm); + } else { + if (v_dst.getIdx() != v_src.getIdx()) { + movups(v_dst, v_src); + } + shufpd(v_dst, op, imm); + } +} + void JitKernelBase::fillRestWorkMask(const Xbyak::Opmask& dstMask, const Xbyak::Reg64& rWorkRest) { auto rOnes = getReg64(); @@ -362,7 +493,7 @@ void JitKernelBase::fillRestWorkMask(const Xbyak::Ymm& ymmDstMask, L(lEnd); } -void JitKernelBase::load(const Xbyak::Xmm& vDst, +void JitKernelBase::load(const Xbyak::Xmm& v_dst, const Xbyak::Address& srcAddr, const Xbyak::Reg64& rLoadNum, const size_t typeSize, @@ -373,7 +504,7 @@ void JitKernelBase::load(const Xbyak::Xmm& vDst, const uint8_t elPerVec = x64::cpu_isa_traits::vlen / typeSize; Xbyak::Label lEnd; if (zeroFilling) - pxor(vDst, vDst); + pxor(v_dst, v_dst); for (uint8_t i = 0; i < elPerVec; i++) { cmp(rLoadNum, i); @@ -381,18 +512,18 @@ void JitKernelBase::load(const Xbyak::Xmm& vDst, const size_t offset = i * typeSize; if (typeSize == 1) - pinsrb(vDst, ptr[srcAddr.getRegExp() + offset], i); + pinsrb(v_dst, ptr[srcAddr.getRegExp() + offset], i); else if (typeSize == 2) - pinsrw(vDst, ptr[srcAddr.getRegExp() + offset], i); + pinsrw(v_dst, ptr[srcAddr.getRegExp() + offset], i); else if (typeSize == 4) - pinsrd(vDst, ptr[srcAddr.getRegExp() + offset], i); + pinsrd(v_dst, ptr[srcAddr.getRegExp() + offset], i); else if (typeSize == 8) - pinsrq(vDst, ptr[srcAddr.getRegExp() + offset], i); + pinsrq(v_dst, ptr[srcAddr.getRegExp() + offset], i); } L(lEnd); } -void JitKernelBase::load(const Xbyak::Ymm& vDst, +void JitKernelBase::load(const Xbyak::Ymm& v_dst, const Xbyak::Address& srcAddr, const Xbyak::Reg64& rLoadNum, const size_t typeSize, @@ -403,8 +534,8 @@ void JitKernelBase::load(const Xbyak::Ymm& vDst, const size_t elPerXmm = x64::cpu_isa_traits::vlen / typeSize; Xbyak::Label lEnd; if (zeroFilling) - uni_vpxor(vDst, vDst, vDst); - Xbyak::Xmm xmmDst(vDst.getIdx()); + uni_vpxor(v_dst, v_dst, v_dst); + Xbyak::Xmm xmmDst(v_dst.getIdx()); for (size_t i = 0lu; i < 2lu; i++) { Xbyak::Label lPerm; @@ -427,13 +558,13 @@ void JitKernelBase::load(const Xbyak::Ymm& vDst, } L(lPerm); - vperm2f128(vDst, vDst, vDst, 0x1); + vperm2f128(v_dst, v_dst, v_dst, 0x1); } L(lEnd); } void JitKernelBase::store(const Xbyak::Address& dstAddr, - const Xbyak::Xmm& vSrc, + const Xbyak::Xmm& v_src, const Xbyak::Reg64& rToStoreNum, const size_t typeSize) { if (!one_of(typeSize, 1u, 2u, 4u, 8u)) { @@ -448,27 +579,27 @@ void JitKernelBase::store(const Xbyak::Address& dstAddr, const size_t offset = i * typeSize; if (typeSize == 1) { - uni_vpextrb(ptr[dstAddr.getRegExp() + offset], vSrc, i); + uni_vpextrb(ptr[dstAddr.getRegExp() + offset], v_src, i); } else if (typeSize == 2) { - uni_vpextrw(ptr[dstAddr.getRegExp() + 
offset], vSrc, i); + uni_vpextrw(ptr[dstAddr.getRegExp() + offset], v_src, i); } else if (typeSize == 4) { - uni_vpextrd(ptr[dstAddr.getRegExp() + offset], vSrc, i); + uni_vpextrd(ptr[dstAddr.getRegExp() + offset], v_src, i); } else if (typeSize == 8) { - uni_vpextrq(ptr[dstAddr.getRegExp() + offset], vSrc, i); + uni_vpextrq(ptr[dstAddr.getRegExp() + offset], v_src, i); } } L(lEnd); } void JitKernelBase::store(const Xbyak::Address& dstAddr, - const Xbyak::Ymm& vSrc, + const Xbyak::Ymm& v_src, const Xbyak::Reg64& rToStoreNum, const size_t typeSize) { if (!one_of(typeSize, 1u, 2u, 4u, 8u)) { IE_THROW() << "Could not store data with type size " << typeSize; } Xbyak::Label lEnd; - Xbyak::Xmm xmmSrc(vSrc.getIdx()); + Xbyak::Xmm xmmSrc(v_src.getIdx()); const size_t elPerXmm = x64::cpu_isa_traits::vlen / typeSize; for (int i = 0; i < 2; i++) { @@ -493,7 +624,7 @@ void JitKernelBase::store(const Xbyak::Address& dstAddr, } L(lPerm); - vperm2f128(vSrc, vSrc, vSrc, 0x1); + vperm2f128(v_src, v_src, v_src, 0x1); } L(lEnd); } @@ -575,3 +706,7 @@ void JitKernelBase::memMovDD(const Xbyak::Reg64& rDst, } L(lEnd); } + +} // namespace kernel +} // namespace intel_cpu +} // namespace ov diff --git a/src/plugins/intel_cpu/src/nodes/kernels/x64/jit_kernel_base.hpp b/src/plugins/intel_cpu/src/nodes/kernels/x64/jit_kernel_base.hpp index e39efde753bbbc..f17eb9a02d8771 100644 --- a/src/plugins/intel_cpu/src/nodes/kernels/x64/jit_kernel_base.hpp +++ b/src/plugins/intel_cpu/src/nodes/kernels/x64/jit_kernel_base.hpp @@ -1,14 +1,23 @@ -// Copyright (C) 2022 Intel Corporation +// Copyright (C) 2018-2023 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // #pragma once +#include "openvino/core/visibility.hpp" + +#if defined(OPENVINO_ARCH_X86_64) #include "cpu/x64/jit_generator.hpp" #include "registers_pool.hpp" +#endif // OPENVINO_ARCH_X86_64 namespace ov { namespace intel_cpu { +namespace kernel { + +class JitKernelBase; + +#if defined(OPENVINO_ARCH_X86_64) #define getReg64() RegistersPool::Reg(registersPool) #define getReg32() RegistersPool::Reg(registersPool) @@ -17,7 +26,11 @@ namespace intel_cpu { class JitKernelBase: public dnnl::impl::cpu::x64::jit_generator { public: - JitKernelBase(const char* name) : dnnl::impl::cpu::x64::jit_generator(name) {} + JitKernelBase(const char* name, dnnl::impl::cpu::x64::cpu_isa_t max_cpu_isa); + + dnnl::impl::cpu::x64::cpu_isa_t getIsa() { return m_isa; } + + size_t getVectorLen() { return vlen; } void uni_vfmsub132ps(const Xbyak::Xmm& vDst, const Xbyak::Xmm& vSrc, const Xbyak::Operand& op); @@ -31,14 +44,24 @@ class JitKernelBase: public dnnl::impl::cpu::x64::jit_generator { void uni_vpaddd(const Xbyak::Ymm& vDst, const Xbyak::Ymm& vSrc, const Xbyak::Operand& op); + void uni_vpaddq(const Xbyak::Xmm& vDst, const Xbyak::Xmm& vSrc, const Xbyak::Operand& op); + void uni_vpsubd(const Xbyak::Xmm& vDst, const Xbyak::Xmm& vSrc, const Xbyak::Operand& op) { jit_generator::uni_vpsubd(vDst, vSrc, op); } void uni_vpsubd(const Xbyak::Ymm& vDst, const Xbyak::Ymm& vSrc, const Xbyak::Operand& op); + void uni_vsubpd(const Xbyak::Xmm& v_dst, const Xbyak::Xmm& v_src, const Xbyak::Operand& op); + + void uni_vmulpd(const Xbyak::Xmm& v_dst, const Xbyak::Xmm& v_src, const Xbyak::Operand& op); + + void uni_vpmuludq(const Xbyak::Xmm& v_dst, const Xbyak::Xmm& op_1, const Xbyak::Operand& op_2); + void uni_vdivps(const Xbyak::Xmm& vDst, const Xbyak::Operand& op1, const Xbyak::Operand& op2); + void uni_vdivpd(const Xbyak::Xmm& v_dst, const Xbyak::Xmm& v_src, const Xbyak::Operand& op2); + void 
uni_vandps(const Xbyak::Xmm& vDst, const Xbyak::Xmm& vSrs, const Xbyak::Operand &op); void uni_vandnps(const Xbyak::Xmm& vDst, const Xbyak::Xmm& vSrs, const Xbyak::Operand &op); @@ -63,6 +86,18 @@ class JitKernelBase: public dnnl::impl::cpu::x64::jit_generator { void uni_vpbroadcastd(const Xbyak::Ymm &x, const Xbyak::Operand &op); + void uni_vpbroadcastq(const Xbyak::Xmm &x, const Xbyak::Operand &op); + + void uni_vroundpd(const Xbyak::Xmm& v_dst, const Xbyak::Operand& op, const uint8_t imm); + + void uni_vcvtdq2pd(const Xbyak::Xmm& v_dst, const Xbyak::Operand& op); + + void uni_vcvtpd2dq(const Xbyak::Xmm& v_dst, const Xbyak::Operand& op); + + void uni_vpmovzxdq(const Xbyak::Xmm& v_dst, const Xbyak::Operand& op); + + void uni_vshufpd(const Xbyak::Xmm& v_dst, const Xbyak::Xmm& v_srs, const Xbyak::Operand& op, uint8_t imm); + void gatherdd(const Xbyak::Xmm& vDst, const Xbyak::Reg64& rSrcPtr, const Xbyak::Xmm& vSrcShift, @@ -140,7 +175,9 @@ class JitKernelBase: public dnnl::impl::cpu::x64::jit_generator { return dnnl::impl::cpu::x64::mayiuse(isa); } + const dnnl::impl::cpu::x64::cpu_isa_t m_isa; RegistersPool::Ptr registersPool; + size_t vlen; enum { // Comparison predicate operand (immediate byte) for single-precision floating-point values. @@ -155,5 +192,70 @@ class JitKernelBase: public dnnl::impl::cpu::x64::jit_generator { }; }; +template +class JitKernel : public JitKernelBase { +public: + using KernelFunc = void (*)(const CallArgs *); + + explicit JitKernel(const char* name, const CompileParams& jcp, dnnl::impl::cpu::x64::cpu_isa_t max_cpu_isa) + : JitKernelBase{name, max_cpu_isa}, m_jcp{jcp}, m_func{nullptr} {} + + ~JitKernel() override = default; + + dnnl::impl::status_t create_kernel() override { + const dnnl::impl::status_t code = jit_generator::create_kernel(); + if (code != dnnl::impl::status::success) { + OPENVINO_THROW("Could not create kernel. Error code: ", std::to_string(code), ". ", + "Xbyak error code: ", Xbyak::ConvertErrorToString(Xbyak::GetError())); + } + m_func = (decltype(m_func))jit_ker(); + return code; + } + + void operator()(const CallArgs* args) const { + assert(m_func); + m_func(args); + } + + void operator()(const CallArgs& args) const { + this->operator()(&args); + } + + template class KernelT> + static std::shared_ptr> createInstance(const CompileParams& jcp) { + std::shared_ptr> res; + + try { +#define IF_ISA_CASE(ISA) \ + if (dnnl::impl::cpu::x64::mayiuse(ISA)) \ + res.reset(new KernelT(jcp)); \ + else + + IF_ISA_CASE(dnnl::impl::cpu::x64::avx512_core) + IF_ISA_CASE(dnnl::impl::cpu::x64::avx2) + IF_ISA_CASE(dnnl::impl::cpu::x64::sse41); + +#undef IF_ISA_CASE + + if (res) { + res->create_kernel(); + } + } catch (...) 
{ + return nullptr; + } + + return res; + } + +protected: + CompileParams m_jcp; + +private: + KernelFunc m_func; +}; + +#endif // OPENVINO_ARCH_X86_64 + +} // namespace kernel } // namespace intel_cpu } // namespace ov diff --git a/src/plugins/intel_cpu/src/nodes/kernels/x64/random_uniform.cpp b/src/plugins/intel_cpu/src/nodes/kernels/x64/random_uniform.cpp new file mode 100644 index 00000000000000..301c2f7e08ff69 --- /dev/null +++ b/src/plugins/intel_cpu/src/nodes/kernels/x64/random_uniform.cpp @@ -0,0 +1,635 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "random_uniform.hpp" + +using namespace dnnl::impl::cpu; + +namespace ov { +namespace intel_cpu { +namespace kernel { + +#define GET_OFF(field) offsetof(RandomUniformCallArgs, field) + +template +RandomUniform::RandomUniform(const RandomUniformCompileParams& jcp) : + JitKernel(jit_name(), jcp, isa) { +} + +template +void RandomUniform::generate() { + this->preamble(); + registersPool = RegistersPool::create(isa, {rax, rcx, rsp, rdi, k0}); + + r64_dst = getReg64(); + r64_work_amount = getReg64(); + + mov(r64_work_amount, ptr[r64_params + GET_OFF(work_amount)]); + mov(r64_dst, ptr[r64_params + GET_OFF(dst_ptr)]); + + initVectors(); + process(); + + registersPool.reset(); + this->postamble(); +} + +template <> +void RandomUniform::initVectors() { + const auto r64_aux = getReg64(); + const auto r32_aux = Xbyak::Reg32(r64_aux.getIdx()); + const auto r16_aux = Xbyak::Reg16(r64_aux.getIdx()); + + v_max_mul_n_64 = getVmm(); + v_max_mul_c_64 = getVmm(); + v_add_low_k = getVmm(); + v_add_up_k = getVmm(); + v_n_inc = getVmm(); + v_range = getVmm(); + v_min = getVmm(); + v_key_64 = getVmm(); + v_counter_64 = getVmm(); + v_n_64 = getVmm(); + v_res_perm = getVmm(); + + if (m_jcp.out_data_type.is_real()) { + v_convert_0 = getVmm(); + v_convert_1 = getVmm(); + } + + // Initialize constants. 
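    // [Editorial aside, illustrative only; not part of the patch.] The broadcasts below load the
    // Philox-4x32-10 parameters (the two statistic-maximizing multipliers and the two crush-resistance
    // key increments) plus, for floating-point outputs, a pair of bit masks: 0x3f800000 is the IEEE-754
    // encoding of 1.0f and 0x007fffff selects the 23 mantissa bits. The convert() step later combines
    // them roughly like this scalar sketch (variable names here are hypothetical):
    //
    //     uint32_t bits = 0x3f800000u | (rnd & 0x007fffffu);  // uniform value in [1.0f, 2.0f)
    //     float val;
    //     std::memcpy(&val, &bits, sizeof(val));               // bit-cast; needs <cstring>
    //     out = (val - 1.0f) * range + min;                    // uniform value in [min, min + range)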
+#define BROADCAST_R(F, V, R, C) \ + mov(R, C); \ + F(V, R); +#define BROADCAST_P(F, V, R, C) \ + mov(R, ptr[r64_params + GET_OFF(C)]); \ + F(V, ptr[R]); + + BROADCAST_R(vpbroadcastq, v_max_mul_n_64, r64_aux, STATISTIC_MAXIMIZING_MULTIPLIER_N) + BROADCAST_R(vpbroadcastq, v_max_mul_c_64, r64_aux, STATISTIC_MAXIMIZING_MULTIPLIER_COUNTER) + BROADCAST_R(vpbroadcastd, v_add_low_k, r32_aux, CRUSH_RESISTANCE_CONST_LOWER_VALUE) + BROADCAST_R(vpbroadcastd, v_add_up_k, r32_aux, CRUSH_RESISTANCE_CONST_UPPER_VALUE) + BROADCAST_R(vpbroadcastq, v_n_inc, r64_aux, 0x00000008) + + if (m_jcp.out_data_type == element::f32) { + BROADCAST_R(vpbroadcastd, v_convert_0, r32_aux, 0x3f800000) + BROADCAST_R(vpbroadcastd, v_convert_1, r32_aux, 0x007fffff) + BROADCAST_P(vpbroadcastd, v_range, r64_aux, range_ptr) + BROADCAST_P(vpbroadcastd, v_min, r64_aux, min_ptr) + } else if (m_jcp.out_data_type == element::f16 && x64::mayiuse(x64::avx512_core_fp16)) { + BROADCAST_R(vpbroadcastw, v_convert_0, r16_aux, 0x3c00) + BROADCAST_R(vpbroadcastw, v_convert_1, r16_aux, 0x03ff) + BROADCAST_P(vpbroadcastw, v_range, r64_aux, range_ptr) + BROADCAST_P(vpbroadcastw, v_min, r64_aux, min_ptr) + } else if (m_jcp.out_data_type == element::bf16 && x64::mayiuse(x64::avx512_core_bf16)) { + v_convert_2 = getVmm(); + const auto ymm_min = Xbyak::Ymm(v_min.getIdx()); + const auto ymm_range = Xbyak::Ymm(v_range.getIdx()); + + BROADCAST_R(vpbroadcastw, v_convert_0, r16_aux, 0x3f80) + BROADCAST_R(vpbroadcastw, v_convert_1, r16_aux, 0x007f) + BROADCAST_R(vpbroadcastd, v_convert_2, r32_aux, 0x3f800000) + + BROADCAST_P(vpbroadcastw, v_range, r64_aux, range_ptr) + vpmovzxwd(v_range, ymm_range); + uni_vpslld(v_range, v_range, 16); + + BROADCAST_P(vpbroadcastw, v_min, r64_aux, min_ptr) + vpmovzxwd(v_min, ymm_min); + uni_vpslld(v_min, v_min, 16); + } else if (m_jcp.out_data_type == element::i32) { + const auto ymm_range = Xbyak::Ymm(v_range.getIdx()); + + BROADCAST_P(vpbroadcastd, v_range, r64_aux, range_ptr) + BROADCAST_P(vpbroadcastd, v_min, r64_aux, min_ptr) + + uni_vcvtdq2pd(v_range, ymm_range); + } else { + OPENVINO_THROW("RandomUniform kernel does not support precision ", m_jcp.out_data_type, " for ", x64::get_isa_info()); + } + + // Initialize inputs. + BROADCAST_P(vpbroadcastq, v_key_64, r64_aux, key_ptr) + BROADCAST_P(vpbroadcastq, v_counter_64, r64_aux, counter_ptr) + BROADCAST_P(vpbroadcastq, v_n_64, r64_aux, n_ptr) + + if (m_jcp.out_data_type.size() <= 4) { + static const uint64_t n_inc_arr[8] = { 0, 1, 2, 3, 4, 5, 6, 7 }; + mov(r64_aux, reinterpret_cast(n_inc_arr)); + } else { + static const uint64_t n_inc_arr[8] = { 0, 1, 2, 3, 4, 5, 6, 7 }; // TODO: i64 + mov(r64_aux, reinterpret_cast(n_inc_arr)); + } + uni_vpaddq(v_n_64, v_n_64, ptr[r64_aux]); + + // Initialize auxiliary vectors. 
+ static const uint32_t res_perm_mask[16] = { 0b00000000, 0b00010000, 0b00001000, 0b00011000, 0b00000010, 0b00010010, 0b00001010, 0b00011010, + 0b00000100, 0b00010100, 0b00001100, 0b00011100, 0b00000110, 0b00010110, 0b00001110, 0b00011110 }; + mov(r64_aux, reinterpret_cast(res_perm_mask)); + uni_vmovups(v_res_perm, ptr[r64_aux]); + + if (m_jcp.out_data_type == element::f16 && x64::mayiuse(x64::avx512_core_fp16)) { + v_perm_16 = getVmm(); + static const uint16_t perm_16[32] = { 0b00000000, 0b00000010, 0b00000100, 0b00000110, 0b00001000, 0b00001010, 0b00001100, 0b00001110, + 0b00010000, 0b00010010, 0b00010100, 0b00010110, 0b00011000, 0b00011010, 0b00011100, 0b00011110, + 0b00100000, 0b00100010, 0b00100100, 0b00100110, 0b00101000, 0b00101010, 0b00101100, 0b00101110, + 0b00110000, 0b00110010, 0b00110100, 0b00110110, 0b00111000, 0b00111010, 0b00111100, 0b00111110 }; + mov(r64_aux, reinterpret_cast(perm_16)); + uni_vmovups(v_perm_16, ptr[r64_aux]); + } + +#undef BROADCAST_R +#undef BROADCAST_P +} + +template // Works for AVX2, SSE41 +void RandomUniform::initVectors() { + const auto r64_aux = getReg64(); + + v_max_mul_n_64 = getVmm(); + v_max_mul_c_64 = getVmm(); + v_add_low_k = getVmm(); + v_add_up_k = getVmm(); + v_range = getVmm(); + v_key_64 = getVmm(); + v_counter_64 = getVmm(); + v_n_64 = getVmm(); + + r64_n_inc = getReg64(); + r64_min = getReg64(); + +#define INIT_ARR(A, V, R, T) \ + static const T A[8] = { V, V, V, V, V, V, V, V }; \ + if (isa == x64::avx2) { \ + mov(R, reinterpret_cast(A)); \ + } else { \ + static const T* A##_aligned = A + (reinterpret_cast(A) % 16) / sizeof(T); \ + mov(R, reinterpret_cast(A##_aligned)); \ + } + + // Initialize constants. + INIT_ARR(max_mul_n_64, STATISTIC_MAXIMIZING_MULTIPLIER_N, r64_aux, uint64_t); + uni_vmovups(v_max_mul_n_64, ptr[r64_aux]); + + INIT_ARR(max_mul_c_64, STATISTIC_MAXIMIZING_MULTIPLIER_COUNTER, r64_aux, uint64_t); + uni_vmovups(v_max_mul_c_64, ptr[r64_aux]); + + INIT_ARR(add_low_k, CRUSH_RESISTANCE_CONST_LOWER_VALUE, r64_aux, uint32_t); + uni_vmovups(v_add_low_k, ptr[r64_aux]); + + INIT_ARR(add_up_k, CRUSH_RESISTANCE_CONST_UPPER_VALUE, r64_aux, uint32_t); + uni_vmovups(v_add_up_k, ptr[r64_aux]); + + INIT_ARR(n_inc_step, isa == x64::avx2 ? 
4 : 2, r64_n_inc, uint64_t); + + if (m_jcp.out_data_type == element::f32) { + r64_convert_0 = getReg64(); + r64_convert_1 = getReg64(); + + INIT_ARR(convert_0, 0x3f800000, r64_convert_0, uint32_t); + INIT_ARR(convert_1, 0x007fffff, r64_convert_1, uint32_t); + + mov(r64_aux, ptr[r64_params + GET_OFF(range_ptr)]); + uni_vpbroadcastd(v_range, ptr[r64_aux]); + + auto v_aux = getVmm(); + mov(r64_aux, ptr[r64_params + GET_OFF(min_ptr)]); + uni_vpbroadcastd(v_aux, ptr[r64_aux]); + static uint32_t min_arr[8]; + mov(r64_min, reinterpret_cast(min_arr)); + uni_vmovups(ptr[r64_min], v_aux); + } else if (m_jcp.out_data_type == element::i32) { + r64_f64_pow_52 = getReg64(); + const auto v_aux = getVmm(); + const auto xmm_range = Xbyak::Xmm(v_range.getIdx()); + + INIT_ARR(f64_pow_52, 0x4330000000000000, r64_f64_pow_52, uint64_t); + + mov(r64_aux, ptr[r64_params + GET_OFF(range_ptr)]); + uni_vpbroadcastd(v_range, ptr[r64_aux]); + + mov(r64_aux, ptr[r64_params + GET_OFF(min_ptr)]); + uni_vpbroadcastd(v_aux, ptr[r64_aux]); + static uint32_t min_arr[8]; + mov(r64_min, reinterpret_cast(min_arr)); + uni_vmovups(ptr[r64_min], v_aux); + + uni_vcvtdq2pd(v_range, xmm_range); + } else { + OPENVINO_THROW("RandomUniform kernel does not support precision ", m_jcp.out_data_type, " for ", x64::get_isa_info()); + } + + // Initialize inputs. + mov(r64_aux, ptr[r64_params + GET_OFF(key_ptr)]); + uni_vpbroadcastq(v_key_64, ptr[r64_aux]); + + mov(r64_aux, ptr[r64_params + GET_OFF(counter_ptr)]); + uni_vpbroadcastq(v_counter_64, ptr[r64_aux]); + + mov(r64_aux, ptr[r64_params + GET_OFF(n_ptr)]); + uni_vpbroadcastq(v_n_64, ptr[r64_aux]); + + if (m_jcp.out_data_type.size() <= 4) { + if (isa == x64::avx2) { + static const uint64_t n_inc_arr[4] = { 0, 1, 2, 3 }; + mov(r64_aux, reinterpret_cast(n_inc_arr)); + } else { + static uint64_t n_inc_arr[4]; + static uint64_t* n_inc_arr_aligned = n_inc_arr + (reinterpret_cast(n_inc_arr) % 16) / sizeof(uint64_t); + n_inc_arr_aligned[0] = 0; + n_inc_arr_aligned[1] = 1; + mov(r64_aux, reinterpret_cast(n_inc_arr_aligned)); + } + } else { + static const uint64_t n_inc_arr[4] = { 0, 1, 2, 3 }; // TODO: i64 + mov(r64_aux, reinterpret_cast(n_inc_arr)); + } + + uni_vpaddq(v_n_64, v_n_64, ptr[r64_aux]); + +#undef INIT_ARR +} + +template +void RandomUniform::process() { + auto v_dst_0 = getVmm(); + auto v_dst_1 = getVmm(); + std::vector v_res{ v_dst_0, v_dst_1 }; + + auto step = vlen; + if (one_of(m_jcp.out_data_type.size(), 2lu, 4lu)) { + step = vlen * 2 / sizeof(uint32_t); + } else if (m_jcp.out_data_type.size() == 8) { + step = vlen / sizeof(uint32_t); + } + + Xbyak::Label l_loop, l_tail; + L(l_loop); { + cmp(r64_work_amount, step); + jl(l_tail, T_NEAR); + + runPhilox(v_res, v_key_64, v_counter_64, v_n_64); + convert(v_res, v_res); + + uni_vmovups(ptr[r64_dst], v_dst_0); + add(r64_dst, vlen); + if (one_of(m_jcp.out_data_type.size(), 4lu, 8lu)) { + uni_vmovups(ptr[r64_dst], v_dst_1); + add(r64_dst, vlen); + } + + if (isa == x64::avx512_core) { + uni_vpaddd(v_n_64, v_n_64, v_n_inc); + } else { + uni_vpaddd(v_n_64, v_n_64, ptr[r64_n_inc]); + } + + sub(r64_work_amount, step); + jmp(l_loop, T_NEAR); + } + + L(l_tail); + tail(v_res); +} + +template +void RandomUniform::calculateRound(const Vmm& vmm_k_0, const Vmm& vmm_k_1, const Vmm& vmm_c_0, const Vmm& vmm_c_1, + const Vmm& vmm_n_0, const Vmm& vmm_n_1, const Vmm& vmm_aux_0, const Vmm& vmm_aux_1) { + uni_vpmuludq(vmm_aux_0, vmm_n_0, v_max_mul_n_64); // {p0,p1,p0,p1} = {n0,_,n0,_} * {m0,_,m0,_} + uni_vpmuludq(vmm_aux_1, vmm_c_0, v_max_mul_c_64); // 
{r0,r1,r0,r1} = {c0,_,c0,_} * {m0,_,m0,_} + + uni_vpshufd(vmm_c_0, vmm_aux_0, 0b10110001); // {p1,p0,p1,p0} = shuf {p0,p1,p0,p1} + uni_vxorps(vmm_c_0, vmm_c_0, vmm_c_1); // {c0,_,c0,_} = {p1,_,p1,_} ^ {c1,_,c1,_} + uni_vxorps(vmm_c_0, vmm_c_0, vmm_k_1); // {c0,_,c0,_} = {c0,_,c0,_} ^ {k1,_,k1,_} + + uni_vpshufd(vmm_n_0, vmm_aux_1, 0b10110001); // {r1,r0,r1,r0} = shuf {r0,r1,r0,r1} + uni_vxorps(vmm_n_0, vmm_n_0, vmm_n_1); // {n0,_,n0,_} = {r1,_,r1,_} ^ {n1,_,n1,_} + uni_vxorps(vmm_n_0, vmm_n_0, vmm_k_0); // {n0,_,n0,_} = {n0,_,n0,_} ^ {k0,_,k0,_} +} + +template +void RandomUniform::runPhilox(const std::vector& vmm_dst, const Vmm& vmm_key, const Vmm& vmm_counter, const Vmm& vmm_n) { + auto vmm_k_0 = getVmm(); + auto vmm_k_1 = getVmm(); + auto vmm_n_0 = getVmm(); + auto vmm_n_1 = vmm_dst[0]; + auto vmm_c_0 = getVmm(); + auto vmm_c_1 = getVmm(); + auto vmm_aux_0 = getVmm(); + auto vmm_aux_1 = vmm_dst[1]; + + uni_vmovups(vmm_k_0, vmm_key); // {k0,k1,k0,k1} -> {k0,_,k0,_} + uni_vpshufd(vmm_k_1, vmm_key, 0b10110001); // {k0,k1,k0,k1} -> {k1,_,k1,_} + + uni_vpmuludq(vmm_aux_0, vmm_n, v_max_mul_n_64); // {p0,p1,p0,p1} = {n0,_,n0,_} * {m0,_,m0,_} + uni_vpmuludq(vmm_aux_1, vmm_counter, v_max_mul_c_64); // {r0,r1,r0,r1} = {c0,_,c0,_} * {m0,_,m0,_} + + uni_vxorps(vmm_c_0, vmm_aux_0, vmm_counter); // {_,c0,_,c0} = {_,p1,_,p1} ^ {_,c1,_,c1} + uni_vxorps(vmm_c_0, vmm_c_0, vmm_key); // {_,c0,_,c0} = {_,c0,_,c0} ^ {_,k1,_,k1} + uni_vpshufd(vmm_c_0, vmm_c_0, 0b10110001); // {_,c0,_,c0} -> {c0,_,c0,_} + + uni_vxorps(vmm_n_0, vmm_aux_1, vmm_n); // {_,n0,_,n0} = {_,r1,_,r1} ^ {_,n1,_,n1} + uni_vpshufd(vmm_n_0, vmm_n_0, 0b10110001); // {_,n0,_,n0} -> {n0,_,n0,_} + uni_vxorps(vmm_n_0, vmm_n_0, vmm_key); // {n0,_,n0,_} = {n0,_,n0,_} ^ {k0,_,k0,_} + + for (size_t i = 0lu; i < ROUNDS_NUMBER - 1; i++) { + raiseKey(vmm_k_0, vmm_k_1); + + std::swap(vmm_c_1, vmm_aux_0); + std::swap(vmm_n_1, vmm_aux_1); + calculateRound(vmm_k_0, vmm_k_1, vmm_c_0, vmm_c_1, vmm_n_0, vmm_n_1, vmm_aux_0, vmm_aux_1); + } + std::swap(vmm_c_1, vmm_aux_0); + std::swap(vmm_n_1, vmm_aux_1); + + if (isa == x64::avx512_core) { + vpermt2d(vmm_n_0, v_res_perm, vmm_n_1); // {n0,n1,n0,n1} = perm {n0,_,n0,_} {n1,_,n1,_} + vpermt2d(vmm_c_0, v_res_perm, vmm_c_1); // {c0,c1,c0,c1} = perm {c0,_,c0,_} {c1,_,c1,_} + vshufpd(vmm_dst[0], vmm_n_0, vmm_c_0, 0b00000000); // {n0,n1,c0,c1} = shuf {n0,n1,n0,n1} {c0,c1,c0,c1} + vshufpd(vmm_dst[1], vmm_n_0, vmm_c_0, 0b11111111); // {n0,n1,c0,c1} = shuf {n0,n1,n0,n1} {c0,c1,c0,c1} + } else if (isa == x64::avx2) { + auto ymm_dst_0 = Xbyak::Ymm(vmm_dst[0].getIdx()); + auto ymm_dst_1 = Xbyak::Ymm(vmm_dst[1].getIdx()); + auto ymm_c_0 = Xbyak::Ymm(vmm_c_0.getIdx()); + + uni_vshufps(vmm_n_0, vmm_n_0, vmm_n_1, 0b10001000); // {n0,n0,n1,n1} = shuf {n0,_,n0,_} {n1,_,n1,_} + uni_vshufps(vmm_c_0, vmm_c_0, vmm_c_1, 0b10001000); // {c0,c0,c1,c1} = shuf {c0,_,c0,_} {c1,_,c1,_} + uni_vshufps(ymm_dst_1, vmm_n_0, vmm_c_0, 0b10001000); // {n0,n1,c0,c1} = shuf {n0,n0,n1,n1} {c0,c0,c1,c1} + uni_vshufps(vmm_c_0, vmm_n_0, vmm_c_0, 0b11011101); // {n0,n1,c0,c1} = shuf {n0,n0,n1,n1} {c0,c0,c1,c1} + vperm2f128(ymm_dst_0, ymm_dst_1, ymm_c_0, 0b00100000); + vperm2f128(ymm_dst_1, ymm_dst_1, ymm_c_0, 0b00110001); + } else { + uni_vshufps(vmm_n_0, vmm_n_0, vmm_n_1, 0b10001000); + uni_vshufps(vmm_c_0, vmm_c_0, vmm_c_1, 0b10001000); + uni_vshufps(vmm_dst[0], vmm_n_0, vmm_c_0, 0b10001000); + uni_vshufps(vmm_dst[1], vmm_n_0, vmm_c_0, 0b11011101); + } +} + +template +void RandomUniform::raiseKey(const Vmm& vmm_k_0, const Vmm& vmm_k_1) { + 
uni_vpaddd(vmm_k_0, vmm_k_0, v_add_low_k); // {k0,_,k0,_} + {l0,_,l0,_} + uni_vpaddd(vmm_k_1, vmm_k_1, v_add_up_k); // {k1,_,k1,_} + {u0,_,u0,_} +} + +template <> +void RandomUniform::convert(const std::vector& v_dst, const std::vector& v_src) { + if (m_jcp.out_data_type.size() == 4) { + for (size_t i = 0lu; i < v_src.size(); i++) { + const auto& vmm_src = v_src[i]; + const auto& vmm_dst = v_dst[i]; + + if (m_jcp.out_data_type == element::f32) { + uni_vandps(vmm_dst, vmm_src, v_convert_1); + uni_vorps(vmm_dst, vmm_dst, v_convert_0); + uni_vsubps(vmm_dst, vmm_dst, v_convert_0); + vfmadd132ps(vmm_dst, v_min, v_range); + } else if (m_jcp.out_data_type == element::i32) { + // x % (max - min) + min + const auto v_aux_0 = getVmm(); + const auto v_aux_1 = getVmm(); + const auto ymm_src = Xbyak::Ymm(vmm_src.getIdx()); + const auto ymm_dst = Xbyak::Ymm(vmm_dst.getIdx()); + const auto ymm_aux_1 = Xbyak::Ymm(v_aux_1.getIdx()); + + // Divide in the f64 due to the f32 loses accuracy here. + vcvtudq2pd(v_aux_0, ymm_src); + uni_vdivpd(v_aux_1, v_aux_0, v_range); + uni_vroundpd(v_aux_1, v_aux_1, 3); + vfnmadd132pd(v_aux_1, v_aux_0, v_range); + + vextractf64x4(ymm_dst, vmm_src, 1); + vcvtudq2pd(v_aux_0, ymm_dst); + uni_vcvtpd2dq(ymm_dst, v_aux_1); + uni_vdivpd(v_aux_1, v_aux_0, v_range); + uni_vroundpd(v_aux_1, v_aux_1, 3); + vfnmadd132pd(v_aux_1, v_aux_0, v_range); + uni_vcvtpd2dq(ymm_aux_1, v_aux_1); + vshuff64x2(vmm_dst, vmm_dst, v_aux_1, 0b01000100); + + uni_vpaddd(vmm_dst, vmm_dst, v_min); + } else { + OPENVINO_THROW("RandomUniform kernel does not support precision ", m_jcp.out_data_type, " for ", x64::get_isa_info()); + } + } + } else if (m_jcp.out_data_type.size() == 2) { + if (m_jcp.out_data_type == element::f16 && x64::mayiuse(x64::avx512_core_fp16)) { + const auto& vmm_dst = v_dst[0]; + + if (v_src[0].getIdx() != vmm_dst.getIdx()) { + uni_vmovups(vmm_dst, v_src[0]); + } + vpermt2w(vmm_dst, v_perm_16, v_src[1]); + + uni_vandps(vmm_dst, vmm_dst, v_convert_1); + uni_vorps(vmm_dst, vmm_dst, v_convert_0); + vsubph(vmm_dst, vmm_dst, v_convert_0); + vfmadd132ph(vmm_dst, v_min, v_range); + } else if (m_jcp.out_data_type == element::bf16 && x64::mayiuse(x64::avx512_core_bf16)) { + for (size_t i = 0lu; i < v_src.size(); i++) { + const auto& vmm_dst = v_dst[i]; + + uni_vandps(vmm_dst, v_src[i], v_convert_1); + uni_vorps(vmm_dst, vmm_dst, v_convert_0); + uni_vpslld(vmm_dst, vmm_dst, 16); + + uni_vsubps(vmm_dst, vmm_dst, v_convert_2); + vfmadd132ps(vmm_dst, v_min, v_range); + } + + vcvtne2ps2bf16(v_dst[0], v_dst[1], v_dst[0]); + } else { + OPENVINO_THROW("RandomUniform kernel does not support precision ", m_jcp.out_data_type, " for ", x64::get_isa_info()); + } + } else if (m_jcp.out_data_type.size() == 8) { + if (m_jcp.out_data_type == element::i64) { + // TODO: in scope of i64 enabling. 
+ } + OPENVINO_THROW("RandomUniform kernel does not support precision ", m_jcp.out_data_type, " for ", x64::get_isa_info()); + } else { + OPENVINO_THROW("RandomUniform kernel does not support precision ", m_jcp.out_data_type, " for ", x64::get_isa_info()); + } +} + +template // Works for AVX2, SSE41 +void RandomUniform::convert(const std::vector& v_dst, const std::vector& v_src) { + if (m_jcp.out_data_type.size() == 4) { + for (size_t i = 0lu; i < v_src.size(); i++) { + auto vmm_src = v_src[i]; + auto vmm_dst = v_dst[i]; + + if (m_jcp.out_data_type == element::f32) { + uni_vandps(vmm_dst, vmm_src, ptr[r64_convert_1]); + uni_vorps(vmm_dst, vmm_dst, ptr[r64_convert_0]); + uni_vsubps(vmm_dst, vmm_dst, ptr[r64_convert_0]); + if (isa == x64::avx2) { + vfmadd213ps(vmm_dst, v_range, ptr[r64_min]); + } else { + uni_vmulps(vmm_dst, vmm_dst, v_range); + uni_vaddps(vmm_dst, vmm_dst, ptr[r64_min]); + } + } else if (m_jcp.out_data_type == element::i32) { + // x % (max - min) + min + const auto v_aux_0 = getVmm(); + const auto v_aux_1 = getVmm(); + const auto xmm_dst = Xbyak::Xmm(vmm_dst.getIdx()); + const auto ymm_dst = Xbyak::Ymm(vmm_dst.getIdx()); + const auto xmm_aux_1 = Xbyak::Xmm(v_aux_1.getIdx()); + + // Convert u32->f64. TODO: move to convert emitter after i64 enabling. + uni_vpmovzxdq(v_aux_0, xmm_dst); + uni_vorps(v_aux_0, v_aux_0, ptr[r64_f64_pow_52]); + uni_vsubpd(v_aux_0, v_aux_0, ptr[r64_f64_pow_52]); + + // Divide in the f64 due to the f32 loses accuracy here. + uni_vdivpd(v_aux_1, v_aux_0, v_range); + uni_vroundpd(v_aux_1, v_aux_1, 3); + if (isa == x64::avx2) { + vfnmadd132pd(v_aux_1, v_aux_0, v_range); + } else { + uni_vmulpd(v_aux_1, v_aux_1, v_range); + uni_vsubpd(v_aux_0, v_aux_0, v_aux_1); + uni_vmovups(v_aux_1, v_aux_0); + } + + if (isa == x64::avx2) { + vperm2f128(ymm_dst, ymm_dst, ymm_dst, 0b00000001); + } else { + uni_vshufpd(vmm_dst, vmm_dst, vmm_dst, 0b00000001); + } + // Convert u32->f64. TODO: move to convert emitter after i64 enabling. + uni_vpmovzxdq(v_aux_0, xmm_dst); + uni_vorps(v_aux_0, v_aux_0, ptr[r64_f64_pow_52]); + uni_vsubpd(v_aux_0, v_aux_0, ptr[r64_f64_pow_52]); + + uni_vcvtpd2dq(xmm_dst, v_aux_1); + uni_vdivpd(v_aux_1, v_aux_0, v_range); + uni_vroundpd(v_aux_1, v_aux_1, 3); + if (isa == x64::avx2) { + vfnmadd132pd(v_aux_1, v_aux_0, v_range); + } else { + uni_vmulpd(v_aux_1, v_aux_1, v_range); + uni_vsubpd(v_aux_0, v_aux_0, v_aux_1); + uni_vmovups(v_aux_1, v_aux_0); + } + uni_vcvtpd2dq(xmm_aux_1, v_aux_1); + if (isa == x64::avx2) { + vperm2f128(ymm_dst, ymm_dst, v_aux_1, 0b00100000); + } else { + uni_vshufpd(vmm_dst, vmm_dst, v_aux_1, 0b00000000); + } + + uni_vpaddd(vmm_dst, vmm_dst, ptr[r64_min]); + } else { + OPENVINO_THROW("RandomUniform kernel does not support precision ", m_jcp.out_data_type, " for ", x64::get_isa_info()); + } + } + } else if (m_jcp.out_data_type.size() == 8) { + if (m_jcp.out_data_type == element::i64) { + // TODO: in scope of i64 enabling. 
+ } + OPENVINO_THROW("RandomUniform kernel does not support precision ", m_jcp.out_data_type, " for ", x64::get_isa_info()); + } else { + OPENVINO_THROW("RandomUniform kernel does not support precision ", m_jcp.out_data_type, " for ", x64::get_isa_info()); + } +} + +template <> +void RandomUniform::tail(const std::vector& vmm_dst) { + Xbyak::Label l_end; + const auto k_rest_mask = getMask(); + + cmp(r64_work_amount, 0); + jle(l_end, T_NEAR); + + runPhilox(vmm_dst, v_key_64, v_counter_64, v_n_64); + convert(vmm_dst, vmm_dst); + + if (m_jcp.out_data_type.size() == 4) { + Xbyak::Label l_0; + const auto step = vlen / sizeof(uint32_t); + + cmp(r64_work_amount, step); + jl(l_0, T_NEAR); + + uni_vmovups(ptr[r64_dst], vmm_dst[0]); + add(r64_dst, vlen); + sub(r64_work_amount, step); + fillRestWorkMask(k_rest_mask, r64_work_amount); + uni_vmovups(ptr[r64_dst] | k_rest_mask, vmm_dst[1]); + jmp(l_end, T_NEAR); + + L(l_0); + fillRestWorkMask(k_rest_mask, r64_work_amount); + uni_vmovups(ptr[r64_dst] | k_rest_mask, vmm_dst[0]); + } else if (m_jcp.out_data_type.size() == 2) { + fillRestWorkMask(k_rest_mask, r64_work_amount); + vmovdqu16(ptr[r64_dst] | k_rest_mask, vmm_dst[0]); + } + + L(l_end); +} + +template <> +void RandomUniform::tail(const std::vector& vmm_dst) { + Xbyak::Label l_0, l_end; + const auto step = vlen / sizeof(uint32_t); + + cmp(r64_work_amount, 0); + jle(l_end, T_NEAR); + + runPhilox(vmm_dst, v_key_64, v_counter_64, v_n_64); + convert(vmm_dst, vmm_dst); + const auto v_rest_mask = getVmm(); + + cmp(r64_work_amount, step); + jl(l_0, T_NEAR); + + uni_vmovups(ptr[r64_dst], vmm_dst[0]); + add(r64_dst, vlen); + sub(r64_work_amount, step); + fillRestWorkMask(v_rest_mask, r64_work_amount, m_jcp.out_data_type.size()); + vmaskmovps(ptr[r64_dst], v_rest_mask, vmm_dst[1]); + jmp(l_end, T_NEAR); + + L(l_0); + fillRestWorkMask(v_rest_mask, r64_work_amount, m_jcp.out_data_type.size()); + vmaskmovps(ptr[r64_dst], v_rest_mask, vmm_dst[0]); + + L(l_end); +} + +template +void RandomUniform::tail(const std::vector& vmm_dst) { + Xbyak::Label l_0, l_end; + const auto step = vlen / sizeof(uint32_t); + + cmp(r64_work_amount, 0); + jle(l_end, T_NEAR); + + runPhilox(vmm_dst, v_key_64, v_counter_64, v_n_64); + convert(vmm_dst, vmm_dst); + + cmp(r64_work_amount, step); + jl(l_0, T_NEAR); + + uni_vmovups(ptr[r64_dst], vmm_dst[0]); + add(r64_dst, vlen); + sub(r64_work_amount, step); + store(ptr[r64_dst], vmm_dst[1], r64_work_amount, m_jcp.out_data_type.size()); + jmp(l_end, T_NEAR); + + L(l_0); + store(ptr[r64_dst], vmm_dst[0], r64_work_amount, m_jcp.out_data_type.size()); + + L(l_end); +} + +template class RandomUniform; +template class RandomUniform; +template class RandomUniform; + +} // namespace kernel +} // namespace intel_cpu +} // namespace ov diff --git a/src/plugins/intel_cpu/src/nodes/kernels/x64/random_uniform.hpp b/src/plugins/intel_cpu/src/nodes/kernels/x64/random_uniform.hpp new file mode 100644 index 00000000000000..366be4c3a132ce --- /dev/null +++ b/src/plugins/intel_cpu/src/nodes/kernels/x64/random_uniform.hpp @@ -0,0 +1,99 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "jit_kernel_base.hpp" + +#if defined(OPENVINO_ARCH_X86_64) + +namespace ov { +namespace intel_cpu { +namespace kernel { + +struct RandomUniformCompileParams { + element::Type out_data_type = element::f32; +}; + +struct RandomUniformCallArgs { + void* dst_ptr; + const void* key_ptr; + const void* counter_ptr; + const void* n_ptr; + const void* min_ptr; + const 
void* range_ptr; + uint64_t work_amount = 0lu; +}; + +template +class RandomUniform : public JitKernel { +public: + DECLARE_CPU_JIT_AUX_FUNCTIONS(RandomUniform) + + explicit RandomUniform(const RandomUniformCompileParams& jcp); + + void generate() override; + +private: + using Vmm = typename dnnl::impl::utils::conditional3::type; + using Vmask = typename dnnl::impl::utils::conditional3::type; + + RegistersPool::Reg r64_dst; + RegistersPool::Reg r64_work_amount; + RegistersPool::Reg r64_n_inc; + RegistersPool::Reg r64_convert_0; + RegistersPool::Reg r64_convert_1; + RegistersPool::Reg r64_min; + RegistersPool::Reg r64_f64_pow_52; + + const Xbyak::Reg64 r64_params = Xbyak::Reg64(dnnl::impl::cpu::x64::abi_param_regs[0]); + + // Vector registers. + RegistersPool::Reg v_max_mul_n_64; + RegistersPool::Reg v_max_mul_c_64; + RegistersPool::Reg v_add_low_k; + RegistersPool::Reg v_add_up_k; + RegistersPool::Reg v_convert_0; + RegistersPool::Reg v_convert_1; + RegistersPool::Reg v_convert_2; + RegistersPool::Reg v_n_inc; + RegistersPool::Reg v_key_64; + RegistersPool::Reg v_counter_64; + RegistersPool::Reg v_n_64; + RegistersPool::Reg v_min; + RegistersPool::Reg v_range; + RegistersPool::Reg v_res_perm; + RegistersPool::Reg v_perm_16; + + void initVectors(); + + void process(); + + void runPhilox(const std::vector& vmm_res, const Vmm& vmm_key, const Vmm& vmm_counter, const Vmm& vmm_n); + + void calculateRound(const Vmm& vmm_k_0, const Vmm& vmm_k_1, const Vmm& vmm_c_0, const Vmm& vmm_c_1, + const Vmm& vmm_n_0, const Vmm& vmm_n_1, const Vmm& vmm_aux_0, const Vmm& vmm_aux_1); + + void raiseKey(const Vmm& vmm_k_0, const Vmm& vmm_k_1); + + void convert(const std::vector& vmm_dst, const std::vector& vmm_src); + + void tail(const std::vector& vmm_dst); + + static constexpr uint64_t ROUNDS_NUMBER = 10lu; + static constexpr uint32_t CRUSH_RESISTANCE_CONST_LOWER_VALUE = 0x9E3779B9; + static constexpr uint32_t CRUSH_RESISTANCE_CONST_UPPER_VALUE = 0xBB67AE85; + static constexpr uint64_t STATISTIC_MAXIMIZING_MULTIPLIER_N = 0xD2511F53; + static constexpr uint64_t STATISTIC_MAXIMIZING_MULTIPLIER_COUNTER = 0xCD9E8D57; +}; + +} // namespace kernel +} // namespace intel_cpu +} // namespace ov + +#endif // OPENVINO_ARCH_X86_64 diff --git a/src/plugins/intel_cpu/src/nodes/random_uniform.cpp b/src/plugins/intel_cpu/src/nodes/random_uniform.cpp new file mode 100644 index 00000000000000..77d823710c942f --- /dev/null +++ b/src/plugins/intel_cpu/src/nodes/random_uniform.cpp @@ -0,0 +1,532 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "random_uniform.hpp" + +#include "ie_parallel.hpp" +#include "ie_ngraph_utils.hpp" +#include +#include +#include "shape_inference/custom/random_uniform.hpp" + +namespace ov { +namespace intel_cpu { +namespace node { + +bool RandomUniform::isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept { + try { + if (op->get_type_info() != op::v8::RandomUniform::get_type_info_static()) { + errorMessage = "Only RandomUniform operation from the opset8 is supported by the CPU plugin."; + return false; + } + } catch (...) { + return false; + } + return true; +} + +RandomUniform::RandomUniform(const std::shared_ptr& op, const GraphContext::CPtr& context) + : Node(op, context, RandomUniformShapeInferFactory(op)) { + std::string errorMessage; + if (!isSupportedOperation(op, errorMessage)) { + THROW_CPU_NODE_ERR(errorMessage); + } + + // RandomUniform should generate new sequence each run even if all inputs are constants. 
So that method Node::IsConstant() + // doesn't return 'True' for RandomUniform with all constant inputs and the node generates new values for each inference, + // we set 'NoConst' value for 'ConstantType' in ctor. + constant = ConstantType::NoConst; + + auto rnd_op = as_type_ptr(op); + m_global_seed = rnd_op->get_global_seed(); + m_op_seed = rnd_op->get_op_seed(); + + m_output_prc = op->get_output_element_type(0); + + for (size_t i = 0lu; i < op->get_input_size(); i++) { + if (is_type(op->get_input_node_ptr(i))) { + m_const_inputs[i] = true; + } + } + + if (m_algo == STL) { + m_generator = std::default_random_engine{static_cast(m_op_seed)}; + } +} + +void RandomUniform::getSupportedDescriptors() { + if (getParentEdges().size() != 3) { + THROW_CPU_NODE_ERR("has incorrect number of input edges."); + } + if (getChildEdges().empty()) { + THROW_CPU_NODE_ERR("has incorrect number of output edges."); + } +} + +void RandomUniform::initSupportedPrimitiveDescriptors() { + auto shape_prc = getOriginalInputPrecisionAtPort(SHAPE); + if (!one_of(shape_prc, InferenceEngine::Precision::I32, InferenceEngine::Precision::I64)) { + shape_prc = InferenceEngine::Precision::I32; + } + + auto out_prc = getOriginalOutputPrecisionAtPort(0); + if (out_prc.is_float() && ((m_algo == PHILOX && + !one_of(out_prc, InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP16, InferenceEngine::Precision::BF16)) || + (m_algo == STL && !one_of(out_prc, InferenceEngine::Precision::FP32)))) { + out_prc = InferenceEngine::Precision::FP32; + } + if (!out_prc.is_float() && !one_of(out_prc, InferenceEngine::Precision::I32, InferenceEngine::Precision::I64)) { + out_prc = InferenceEngine::Precision::I32; + } + m_output_prc = InferenceEngine::details::convertPrecision(out_prc); + + addSupportedPrimDesc({{LayoutType::ncsp, shape_prc, m_const_inputs[SHAPE]}, + {LayoutType::ncsp, out_prc, m_const_inputs[MIN_VAL]}, + {LayoutType::ncsp, out_prc, m_const_inputs[MAX_VAL]}}, + {{LayoutType::ncsp, out_prc}}, + ref_any); +} + +void RandomUniform::createPrimitive() { + if (m_const_inputs[MIN_VAL]) { + initEdgeValues(m_min_val, getParentEdgeAt(MIN_VAL)->getMemoryPtr()->getData(), m_output_prc); + } + if (m_const_inputs[MAX_VAL]) { + initEdgeValues(m_max_val, getParentEdgeAt(MAX_VAL)->getMemoryPtr()->getData(), m_output_prc); + evalRange(); + } + + if (m_algo == PHILOX) { +#if defined(OPENVINO_ARCH_X86_64) + kernel::RandomUniformCompileParams jcp; + + jcp.out_data_type = m_output_prc; + + m_jit_kernel = kernel::JitKernel::createInstance(jcp); + + if (m_jit_kernel) { + if (auto selected_pd = getSelectedPrimitiveDescriptor()) { + using namespace dnnl::impl::cpu; + if (m_jit_kernel->getIsa() == x64::avx512_core) { + selected_pd->setImplementationType(jit_avx512); + } else if (m_jit_kernel->getIsa() == x64::avx2) { + selected_pd->setImplementationType(jit_avx2); + } else if (m_jit_kernel->getIsa() == x64::sse41) { + selected_pd->setImplementationType(jit_sse42); + } + } + } +#endif // OPENVINO_ARCH_X86_64 + } + + if (m_const_inputs[SHAPE]) { + Node::createPrimitive(); + } +} + +bool RandomUniform::needPrepareParams() const { + if (m_out_shape != getChildEdgeAt(0)->getMemoryPtr()->getShape().getStaticDims()) { + return true; + } + return false; +} + +void RandomUniform::prepareParams() { + m_out_shape = getChildEdgeAt(0)->getMemoryPtr()->getShape().getStaticDims(); + m_out_el_num = std::accumulate(m_out_shape.begin(), m_out_shape.end(), 1lu, std::multiplies()); + + if (m_algo == PHILOX) { + m_skip_count = m_out_el_num * SKIP_CONST; + + if 
(m_out_el_num < PHILOX_PARALLEL_EXECUTION_THRESHOLD) { + m_threads_num = 1; + } else { + m_threads_num = parallel_get_max_threads(); + } + m_thread_params.resize(m_threads_num); + + parallel_nt(m_threads_num, [&](const int ithr, const int nthr) { + auto& p = m_thread_params[ithr]; + uint64_t start = 0lu, end = 0lu; + + if (m_jit_kernel) { +#if defined(OPENVINO_ARCH_X86_64) + const auto block_size = (m_jit_kernel->getVectorLen() / m_output_prc.size()) * 2; + const auto blocks_num = (m_out_el_num + block_size - 1) / block_size; + const auto blocks_per_thr = (blocks_num + nthr - 1) / nthr; + + start = ithr * blocks_per_thr * block_size; + end = (ithr + 1) * blocks_per_thr * block_size; +#endif // OPENVINO_ARCH_X86_64 + } else { + const auto groups_num = (m_out_el_num + PHILOX_GROUP_SIZE - 1) / PHILOX_GROUP_SIZE; + const auto groups_per_thr = (groups_num + nthr - 1) / nthr; + + start = ithr * groups_per_thr * PHILOX_GROUP_SIZE; + end = (ithr + 1) * groups_per_thr * PHILOX_GROUP_SIZE; + + p.step = m_output_prc.size() > 4 ? 2 : 4; + } + + if (end > m_out_el_num) { + end = m_out_el_num; + } + if (start > end) { + start = end; + } + p.work_amount = end - start; + p.n_shift = start / PHILOX_GROUP_SIZE; + p.dst_shift = start * m_output_prc.size(); + }); + } +} + +void RandomUniform::execute(dnnl::stream strm) { + if (!m_const_inputs[MIN_VAL]) { + initEdgeValues(m_min_val, getParentEdgeAt(MIN_VAL)->getMemoryPtr()->getData(), m_output_prc); + if (m_const_inputs[MAX_VAL]) { + evalRange(); + } + } + if (!m_const_inputs[MAX_VAL]) { + initEdgeValues(m_max_val, getParentEdgeAt(MAX_VAL)->getMemoryPtr()->getData(), m_output_prc); + evalRange(); + } + + auto data = getChildEdgeAt(0)->getMemoryPtr()->getData(); + + if (m_algo == PHILOX) { + m_state = computePhilox(data, m_out_el_num, m_state); + } else if (m_algo == STL) { + computeStl(data, m_out_el_num); + } else { + THROW_CPU_NODE_ERR("unsupported algorithm."); + } +} + +void RandomUniform::executeDynamicImpl(dnnl::stream strm) { + execute(strm); +} + +////////////// PHILOX algo /////////////// + +namespace { +// Following const values are taken from the original paper: +// https://www.thesalmons.org/john/random123/papers/random123sc11.pdf +constexpr uint32_t CRUSH_RESISTANCE_CONST_LOWER_VALUE = 0x9E3779B9; +constexpr uint32_t CRUSH_RESISTANCE_CONST_UPPER_VALUE = 0xBB67AE85; +constexpr uint64_t STATISTIC_MAXIMIZING_MULTIPLIER_N = 0xD2511F53; +constexpr uint64_t STATISTIC_MAXIMIZING_MULTIPLIER_COUNTER = 0xCD9E8D57; +constexpr uint64_t ROUNDS_NUMBER = 10llu; + +inline void calculateRound(const uint32_t* key, uint32_t* counter, uint32_t* n) { + uint64_t prod_0 = STATISTIC_MAXIMIZING_MULTIPLIER_N * n[0]; + uint64_t prod_1 = STATISTIC_MAXIMIZING_MULTIPLIER_COUNTER * counter[0]; + n[0] = static_cast(prod_1 >> 32) ^ n[1] ^ key[0]; + n[1] = static_cast(prod_1); + counter[0] = static_cast(prod_0 >> 32) ^ counter[1] ^ key[1]; + counter[1] = static_cast(prod_0); +} + +inline void raiseKey(uint32_t* key) { + key[0] += CRUSH_RESISTANCE_CONST_LOWER_VALUE; + key[1] += CRUSH_RESISTANCE_CONST_UPPER_VALUE; +} + +inline void runPhilox(uint64_t key, uint64_t counter, uint64_t n, uint32_t* res) { + uint32_t* key_32 = reinterpret_cast(&key); + uint32_t* counter_32 = reinterpret_cast(&counter); + uint32_t* n_32 = reinterpret_cast(&n); + + for (size_t i = 0lu; i < ROUNDS_NUMBER; i++) { + calculateRound(key_32, counter_32, n_32); + if (i < ROUNDS_NUMBER - 1) + raiseKey(key_32); + } + + res[0] = n_32[0]; + res[1] = n_32[1]; + res[2] = counter_32[0]; + res[3] = counter_32[1]; +} + 
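+// A brief sketch of how the helpers above compose (see computePhilox() below),
+// assuming the 64-bit key/counter/n state kept by the node:
+//
+//     uint32_t res[4];
+//     runPhilox(m_global_seed, counter, n, res);
+//
+// runPhilox() splits each 64-bit argument into two 32-bit words and applies
+// ROUNDS_NUMBER rounds of calculateRound(), bumping the key with raiseKey()
+// between rounds, so every invocation yields four 32-bit random words. The
+// convertToOutputType() overloads below then map those words onto the requested
+// element type and the [min, min + range) interval, and the sequence is advanced
+// by incrementing n with carry into counter.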
+inline void convertToOutputType(const uint32_t* in, + float min, + float range, + float* out, + size_t el_to_copy) { + RandomUniform::OutputType out_val; + + for (size_t i = 0lu; i < el_to_copy; i++) { + out_val.u32 = 0x3f800000 | (in[i] & 0x7fffffu); + out[i] = (out_val.f32 - 1.f) * range + min; + } +} + +inline void convertToOutputType(const uint32_t* in, + float16 min, + float16 range, + float16* out, + size_t el_to_copy) { + RandomUniform::OutputType out_val; + + for (size_t i = 0lu; i < el_to_copy; i++) { + uint16_t x_uint16 = static_cast(in[i]); + out_val.u16 = 0x3c00 | (x_uint16 & 0x03ffu); + out[i] = (out_val.f16 - static_cast(1)) * range + min; + } +} + +inline void convertToOutputType(const uint32_t* in, + bfloat16 min, + bfloat16 range, + bfloat16* out, + size_t el_to_copy) { + RandomUniform::OutputType out_val; + + for (size_t i = 0lu; i < el_to_copy; i++) { + uint16_t x_uint16 = static_cast(in[i]); + out_val.u16 = 0x3f80 | (x_uint16 & 0x7fu); + out[i] = (out_val.bf16 - static_cast(1)) * range + min; + } +} + +inline void convertToOutputType(const uint32_t* in, + int32_t min, + int32_t range, + int32_t* out, + size_t el_to_copy) { + for (size_t i = 0lu; i < el_to_copy; i++) { + out[i] = static_cast(in[i] % range + min); + } +} + +inline void convertToOutputType(const uint32_t* in, + int64_t min, + int64_t range, + int64_t* out, + size_t el_to_copy) { + for (size_t i = 0lu; i < el_to_copy; i++) { + out[i] = static_cast(((static_cast(in[i * 2]) << 32) + in[i * 2 + 1]) % range + min); + } +} + +} // namespace + +std::pair RandomUniform::computePhilox(void* out, size_t out_el_num, const std::pair& prev_state) { + // When both seed values are equal to zero RandomUniform should generate non-deterministic sequence. + if (m_global_seed == 0lu && m_op_seed == 0lu) { + std::srand(static_cast(std::time(nullptr))); + m_global_seed = std::rand(); + } + + uint64_t n_state = prev_state.first; + uint64_t counter_state = prev_state.second; + + uint64_t counter = counter_state > 0 ? 
counter_state : m_op_seed; + + auto out_u8 = reinterpret_cast(out); + + if (m_jit_kernel) { +#if defined(OPENVINO_ARCH_X86_64) + parallel_nt(m_threads_num, [&](const int ithr, const int nthr) { + auto& p = m_thread_params[ithr]; + if (p.work_amount == 0lu) { + return; + } + auto n = n_state + p.n_shift; + + kernel::RandomUniformCallArgs args; + + args.dst_ptr = (out_u8 + p.dst_shift); + args.key_ptr = &m_global_seed; + args.counter_ptr = &counter; + args.n_ptr = &n; + args.min_ptr = &m_min_val; + args.range_ptr = &m_range_val; + args.work_amount = p.work_amount; + + (*m_jit_kernel)(&args); + }); +#endif // OPENVINO_ARCH_X86_64 + } else { + auto threadBody = [&](const int ithr, const int nthr) { + auto& p = m_thread_params[ithr]; + if (p.work_amount == 0lu) { + return; + } + auto n = n_state + p.n_shift; + auto out_cur = out_u8 + p.dst_shift; + auto work_rest = static_cast(p.work_amount); + uint32_t res[4]; + +#define EXEC_CASE(P) \ + case element::P: { \ + auto out_t = reinterpret_cast::value_type *>(out_cur); \ + for (; work_rest > 0l; work_rest -= p.step, out_t += p.step) { \ + runPhilox(m_global_seed, counter, n, res); \ + auto el_to_copy = std::min(p.step, static_cast(work_rest)); \ + convertToOutputType(res, m_min_val.P, m_range_val.P, out_t, el_to_copy); \ + if (++n == 0) { \ + counter++; \ + } \ + } \ + } break; + + switch (m_output_prc) { + EXEC_CASE(f32) + EXEC_CASE(f16) + EXEC_CASE(bf16) + EXEC_CASE(i32) + EXEC_CASE(i64) + default: THROW_CPU_NODE_ERR("Unsupported type of RandomUniform: ", m_output_prc.to_string()); + } + +#undef EXEC_CASE + }; + + parallel_nt(m_threads_num, threadBody); + } + + // Calculate counter values for next RandomUniform run. + n_state += m_skip_count; + if (n_state < m_skip_count) { + counter_state++; + } + + return { n_state, counter_state }; +} + +////////////// STL algo /////////////// +void RandomUniform::computeStl(void* out, size_t work_amount) { + switch (m_output_prc) { + case element::f32: { + generateData>( + std::uniform_real_distribution{m_min_val.f32, m_max_val.f32}, out, work_amount); + } break; + case element::i32: { + generateData>( + std::uniform_int_distribution{m_min_val.i32, m_max_val.i32}, out, work_amount); + } break; + case element::i64: { + generateData>( + std::uniform_int_distribution{m_min_val.i64, m_max_val.i64}, out, work_amount); + } break; + default: + THROW_CPU_NODE_ERR("has unsupported output type: ", m_output_prc); + } +} + +template +void RandomUniform::generateData(DISTR_TYPE distribution, void* out, size_t work_amount) { + auto dst = reinterpret_cast(out); + for (size_t i = 0; i < work_amount; i++) { + *dst = distribution(m_generator); + dst++; + } +} +////////////////////////////////// + +void RandomUniform::initEdgeValues(OutputType& dst, const void* src, const element::Type& output_type) { +#define EL_CASE(E) \ + case element::E: \ + dst.E = *reinterpret_cast::value_type *>(src); \ + break; + + switch (output_type) { + EL_CASE(f32) + EL_CASE(f16) + EL_CASE(bf16) + EL_CASE(i32) + EL_CASE(i64) + EL_CASE(f64) + default: + THROW_CPU_NODE_ERR("has unsupported output precision: ", output_type); + } + +#undef EL_CASE +} + +void RandomUniform::evalRange() { +#define EL_CASE(E) \ + case element::E: \ + m_range_val.E = m_max_val.E - m_min_val.E; \ + break; + + switch (m_output_prc) { + EL_CASE(f32) + EL_CASE(f16) + EL_CASE(bf16) + EL_CASE(i32) + EL_CASE(i64) + EL_CASE(f64) + default: + THROW_CPU_NODE_ERR("has unsupported output precision: ", m_output_prc); + } + +#undef EL_CASE +} + +std::string 
RandomUniform::getPrimitiveDescriptorType() const { + auto selectedPrimitiveDesc = getSelectedPrimitiveDescriptor(); + + impl_desc_type type = impl_desc_type::undef; + if (selectedPrimitiveDesc) { + type = selectedPrimitiveDesc->getImplementationType(); + } + + std::string str_type; + + auto add_type = [&](std::string t) { + if (!str_type.empty() && t.c_str()[0] != '_') + str_type += "_"; + str_type += t; + }; + +#define SEARCH_TYPE(_type) \ + if ((type & impl_desc_type::_type) == impl_desc_type::_type) \ + add_type(#_type) + + SEARCH_TYPE(undef); + SEARCH_TYPE(jit); + SEARCH_TYPE(ref); + + SEARCH_TYPE(avx512); + SEARCH_TYPE(avx2); + SEARCH_TYPE(sse42); + SEARCH_TYPE(any); + +#undef SEARCH_TYPE + + if (type == impl_desc_type::unknown) + str_type = "unknown"; + else if (str_type.empty()) + str_type = "undef"; + + if (selectedPrimitiveDesc) { + if (selectedPrimitiveDesc->getConfig().outConfs[0].getMemDesc()->getPrecision() != InferenceEngine::Precision::U8) { + str_type += "_" + std::string(selectedPrimitiveDesc->getConfig().outConfs[0].getMemDesc()->getPrecision().name()); + } else { + str_type += "_I8"; + } + } + + return str_type; +} + +bool RandomUniform::needShapeInfer() const { + return !m_const_inputs[SHAPE]; +} + +bool RandomUniform::isExecutable() const { + return !isInputTensorAtPortEmpty(SHAPE); +} + +bool RandomUniform::created() const { + return getType() == Type::RandomUniform; +} + +} // namespace node +} // namespace intel_cpu +} // namespace ov diff --git a/src/plugins/intel_cpu/src/nodes/random_uniform.hpp b/src/plugins/intel_cpu/src/nodes/random_uniform.hpp new file mode 100644 index 00000000000000..ecbfebdf5d79c6 --- /dev/null +++ b/src/plugins/intel_cpu/src/nodes/random_uniform.hpp @@ -0,0 +1,120 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include "kernels/x64/random_uniform.hpp" + +namespace ov { +namespace intel_cpu { +namespace node { + +class RandomUniform : public Node { +public: + union OutputType { + float f32; + float16 f16; + bfloat16 bf16; + double f64; + int32_t i32; + uint32_t u32; + uint16_t u16; + int64_t i64; + }; + + RandomUniform(const std::shared_ptr& op, const GraphContext::CPtr& context); + + void getSupportedDescriptors() override; + + void initSupportedPrimitiveDescriptors() override; + + bool needPrepareParams() const override; + + void prepareParams() override; + + void execute(dnnl::stream strm) override; + + void executeDynamicImpl(dnnl::stream strm) override; + + bool isExecutable() const override; + + void createPrimitive() override; + + bool created() const override; + + bool canBeInPlace() const override { return false; } + + static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; + + std::string getPrimitiveDescriptorType() const override; + +protected: + bool needShapeInfer() const override; + +private: + void computeStl(void* out, size_t work_amount); + + std::pair computePhilox(void* out, size_t work_amount, const std::pair& prev_state); + + template + void generateData(DISTR_TYPE distribution, void* out, size_t work_amount); + + void initOutShape(VectorDims& dst, const void* src, const element::Type& shape_type, size_t len); + + void initEdgeValues(OutputType& dst, const void* src, const element::Type& output_type); + + void evalRange(); + + enum { SHAPE = 0, MIN_VAL, MAX_VAL }; + enum AlgoType { STL, PHILOX }; + + bool m_const_inputs[3] = {false, false, false}; + + ov::element::Type m_output_prc; + uint64_t 
m_global_seed = 0lu; + uint64_t m_op_seed = 0lu; + std::pair m_state {0lu, 0lu}; + + VectorDims m_out_shape = {}; + uint64_t m_out_el_num = 1lu; + OutputType m_min_val; + OutputType m_max_val; + OutputType m_range_val; + AlgoType m_algo = PHILOX; + + std::default_random_engine m_generator; + + struct ThreadParams { + uint64_t work_amount = 0lu; + uint64_t dst_shift = 0lu; + uint64_t n_shift = 0lu; + uint64_t step = 0lu; + }; + + uint64_t m_threads_num = 0lu; + std::vector m_thread_params; + + ///// PHILOX constants ///// + + // Determines how many sequence elements of RNG sequence are skipped between runs. + // Can be any positive value, 256 is chosen for parity with Tensorflow. + static constexpr uint64_t SKIP_CONST = 256lu; + + // Philox algorithm returns 4 elements of RNG sequence per each invocation + static constexpr uint64_t PHILOX_GROUP_SIZE = 4lu; + + // Output elements number threshold to execute on one thread. + static constexpr uint64_t PHILOX_PARALLEL_EXECUTION_THRESHOLD = 1000lu; + + uint64_t m_skip_count = 0lu; + ///////////////////////////////////////////////////////////////////////////////// + + std::shared_ptr m_jit_kernel; +}; + +} // namespace node +} // namespace intel_cpu +} // namespace ov diff --git a/src/plugins/intel_cpu/src/nodes/reference.cpp b/src/plugins/intel_cpu/src/nodes/reference.cpp index b42dc99b390fb4..091e31813125cf 100644 --- a/src/plugins/intel_cpu/src/nodes/reference.cpp +++ b/src/plugins/intel_cpu/src/nodes/reference.cpp @@ -2,18 +2,12 @@ // SPDX-License-Identifier: Apache-2.0 // -#include - -#include -#include +#include "reference.h" -#include "common/blocked_desc_creator.h" #include "common/cpu_memcpy.h" +#include #include "openvino/core/shape_util.hpp" -#include "openvino/runtime/tensor.hpp" -#include "reference.h" -using namespace dnnl; using namespace InferenceEngine; using namespace InferenceEngine::details; @@ -21,21 +15,15 @@ namespace ov { namespace intel_cpu { namespace node { -Reference::Reference(const std::shared_ptr& op, const GraphContext::CPtr context, +Reference::Reference(const std::shared_ptr& op, const GraphContext::CPtr& context, const std::string& errorMessage) : - Node(op, context, NgraphShapeInferFactory(op, FULL_PORT_MASK)), ngraphOp(op), additionalErrorMessage(errorMessage) { + Node(op, context, NgraphShapeInferFactory(op, FULL_PORT_MASK)), ovCoreNode(op), additionalErrorMessage(errorMessage) { if (!op->has_evaluate()) { IE_THROW(NotImplemented) << "Cannot fallback on ngraph reference implementation (Ngraph::Node::evaluate() is not implemented)"; } + setType(Type::Reference); setTypeStr("Reference"); - - // RandomUniform should generate new sequence each run even if all inputs are constants. 
So that method Node::IsConstant() - // doesn't return 'True' for RandomUniform with all constant inputs and the node generates new values for each inference, - // we set 'NoConst' value for 'ConstantType' in ctor - if (ov::is_type(ngraphOp)) { - constant = ConstantType::NoConst; - } } void Reference::getSupportedDescriptors() {} @@ -47,13 +35,13 @@ void Reference::initSupportedPrimitiveDescriptors() { std::vector inputConfigurators; inputConfigurators.reserve(inputShapes.size()); for (size_t i = 0; i < inputShapes.size(); i++) { - inputConfigurators.emplace_back(LayoutType::ncsp, convertPrecision(ngraphOp->get_input_element_type(i)), inputShapes[i]); + inputConfigurators.emplace_back(LayoutType::ncsp, convertPrecision(ovCoreNode->get_input_element_type(i)), inputShapes[i]); } std::vector outputConfigurators; outputConfigurators.reserve(inputShapes.size()); for (size_t i = 0; i < outputShapes.size(); i++) { - outputConfigurators.emplace_back(LayoutType::ncsp, convertPrecision(ngraphOp->get_output_element_type(i)), outputShapes[i]); + outputConfigurators.emplace_back(LayoutType::ncsp, convertPrecision(ovCoreNode->get_output_element_type(i)), outputShapes[i]); } addSupportedPrimDesc(inputConfigurators, outputConfigurators, impl_desc_type::ref); @@ -64,8 +52,8 @@ void Reference::createPrimitive() {} void Reference::execute(dnnl::stream strm) { auto inputs = prepareInputs(); auto outputs = prepareOutputs(); - if (!ngraphOp->evaluate(outputs, inputs)) { - IE_THROW() << "Evaluation failed on node of type: " << std::string(ngraphOp->get_type_name()) << " name: " << getName(); + if (!ovCoreNode->evaluate(outputs, inputs)) { + THROW_CPU_NODE_ERR("evaluation failed for core operation: ", std::string(ovCoreNode->get_type_name())); } } @@ -81,18 +69,16 @@ void Reference::executeDynamicImpl(dnnl::stream strm) { for (size_t i = 0; i < outputShapes.size(); ++i) { auto mem_desc = getBaseMemDescAtOutputPort(i); if (mem_desc->isDefined()) { - outputs.emplace_back(ngraphOp->get_output_element_type(i), mem_desc->getShape().getStaticDims()); + outputs.emplace_back(ovCoreNode->get_output_element_type(i), mem_desc->getShape().getStaticDims()); } else { - outputs.emplace_back(ngraphOp->get_output_element_type(i), ov::util::make_dynamic_shape()); + outputs.emplace_back(ovCoreNode->get_output_element_type(i), ov::util::make_dynamic_shape()); } } } else { - IE_THROW(Unexpected) << - "Unexpected shape infer result status during the inference of a node with type " << - getTypeStr() << " and name " << getName(); + THROW_CPU_NODE_ERR("got unexpected shape infer result status during the inference."); } - if (!ngraphOp->evaluate(outputs, inputs)) { - IE_THROW() << "Evaluation failed on node of type: " << std::string(ngraphOp->get_type_name()) << " name: " << getName(); + if (!ovCoreNode->evaluate(outputs, inputs)) { + THROW_CPU_NODE_ERR("evaluation failed for core operation: ", std::string(ovCoreNode->get_type_name())); } if (ShapeInferStatus::skip == result.status) { std::vector newOutputDims; @@ -105,8 +91,7 @@ void Reference::executeDynamicImpl(dnnl::stream strm) { auto memory = getChildEdgesAtPort(i)[0]->getMemoryPtr(); auto& tensor = outputs[i]; if (memory->getSize() != tensor.get_byte_size()) { - IE_THROW(Unexpected) << "Output tensor data size mismatch occurred during the inference of a node with type " << - getTypeStr() << " and name " << getName() << " on output port number " << i; + THROW_CPU_NODE_ERR("output tensor data size mismatch occurred during the inference on output port number ", i); } 
cpu_memcpy(memory->getData(), tensor.data(), tensor.get_byte_size()); } @@ -125,9 +110,9 @@ ov::TensorVector Reference::prepareInputs() const { ov::TensorVector inputs; for (size_t i = 0; i < inputShapes.size(); i++) { void *srcDataPtr = getParentEdgesAtPort(i)[0]->getMemory().getData(); - ov::Shape shape = ngraphOp->get_input_partial_shape(i).rank().get_length() == 0 ? + ov::Shape shape = ovCoreNode->get_input_partial_shape(i).rank().get_length() == 0 ? ov::Shape{} : getParentEdgesAtPort(i)[0]->getMemory().getStaticDims(); - inputs.push_back(ov::Tensor(ngraphOp->get_input_element_type(i), shape, srcDataPtr)); + inputs.push_back(ov::Tensor(ovCoreNode->get_input_element_type(i), shape, srcDataPtr)); } return inputs; } @@ -136,9 +121,9 @@ ov::TensorVector Reference::prepareOutputs() const { ov::TensorVector outputs; for (size_t i = 0; i < outputShapes.size(); i++) { void *dstDataPtr = getChildEdgesAtPort(i)[0]->getMemory().getData(); - ov::Shape shape = ngraphOp->get_output_partial_shape(i).rank().get_length() == 0 ? + ov::Shape shape = ovCoreNode->get_output_partial_shape(i).rank().get_length() == 0 ? ov::Shape{} : getChildEdgesAtPort(i)[0]->getMemory().getStaticDims(); - outputs.push_back(ov::Tensor(ngraphOp->get_output_element_type(i), shape, dstDataPtr)); + outputs.push_back(ov::Tensor(ovCoreNode->get_output_element_type(i), shape, dstDataPtr)); } return outputs; } diff --git a/src/plugins/intel_cpu/src/nodes/reference.h b/src/plugins/intel_cpu/src/nodes/reference.h index 4c2a8a1310806f..c2453835229138 100644 --- a/src/plugins/intel_cpu/src/nodes/reference.h +++ b/src/plugins/intel_cpu/src/nodes/reference.h @@ -12,7 +12,7 @@ namespace node { class Reference : public Node { public: - Reference(const std::shared_ptr& op, const GraphContext::CPtr context, const std::string& errorMessage); + Reference(const std::shared_ptr& op, const GraphContext::CPtr& context, const std::string& errorMessage); void getSupportedDescriptors() override; void initSupportedPrimitiveDescriptors() override; @@ -29,7 +29,7 @@ class Reference : public Node { ov::TensorVector prepareOutputs() const; private: - const std::shared_ptr ngraphOp; + const std::shared_ptr ovCoreNode; const std::string additionalErrorMessage; }; diff --git a/src/plugins/intel_cpu/src/nodes_factory.cpp b/src/plugins/intel_cpu/src/nodes_factory.cpp index 3afe8aaa32c1d9..7add05741f04e1 100644 --- a/src/plugins/intel_cpu/src/nodes_factory.cpp +++ b/src/plugins/intel_cpu/src/nodes_factory.cpp @@ -79,6 +79,7 @@ #include "nodes/experimental_detectron_generate_proposals_single_image.h" #include "nodes/generate_proposals.h" #include "nodes/embedding_bag_packed_sum.h" +#include "nodes/random_uniform.hpp" #include "nodes/reduce.h" #include "nodes/if.h" #include "nodes/ctc_greedy_decoder.h" @@ -180,6 +181,7 @@ Node::NodesFactory::NodesFactory() INTEL_CPU_NODE(Unique, Type::Unique); INTEL_CPU_NODE(Ngram, Type::Ngram); INTEL_CPU_NODE(Interpolate, Type::Interpolate); + INTEL_CPU_NODE(RandomUniform, Type::RandomUniform); INTEL_CPU_NODE(Reduce, Type::Reduce); INTEL_CPU_NODE(Gather, Type::Gather); INTEL_CPU_NODE(NonMaxSuppression, Type::NonMaxSuppression); diff --git a/src/plugins/intel_cpu/src/shape_inference/custom/random_uniform.cpp b/src/plugins/intel_cpu/src/shape_inference/custom/random_uniform.cpp new file mode 100644 index 00000000000000..cca3c74cce86b0 --- /dev/null +++ b/src/plugins/intel_cpu/src/shape_inference/custom/random_uniform.cpp @@ -0,0 +1,47 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + 
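+// Custom CPU shape inference for RandomUniform: the output shape is read
+// directly from the memory of the first (Shape) input, which may hold either
+// i32 or i64 data; port 0 is therefore exposed as a data dependency via
+// get_port_mask() in the corresponding header.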
+#include "random_uniform.hpp" +#include + +namespace ov { +namespace intel_cpu { +namespace node { + +// TODO: remove after fixing the issue 123011 +IShapeInfer::Result RandomUniformShapeInfer::infer( + const std::vector>& input_shapes, + const std::unordered_map& data_dependency) { + VectorDims dims; + const auto& mem = data_dependency.at(0); + const auto rank = mem->getShape().getElementsCount(); + auto shape_prc = mem->getDesc().getPrecision(); + switch (shape_prc) { + case InferenceEngine::Precision::I32: { + auto data = reinterpret_cast(mem->getData()); + dims.assign(data, data + rank); + } break; + case InferenceEngine::Precision::I64: { + auto data = reinterpret_cast(mem->getData()); + dims.assign(data, data + rank); + } break; + default: + OPENVINO_THROW("Unexpected Shape input precision: ", shape_prc); + } + + return {{dims}, ShapeInferStatus::success}; +} + +RandomUniformShapeInferFactory::RandomUniformShapeInferFactory(const std::shared_ptr& op) : m_op(op) { + OPENVINO_ASSERT(ov::is_type(m_op), + "Unexpected op type in RandomUniform shape inference factory: ", m_op->get_type_name()); +} + +ShapeInferPtr RandomUniformShapeInferFactory::makeShapeInfer() const { + return std::make_shared(); +} + +} // namespace node +} // namespace intel_cpu +} // namespace ov diff --git a/src/plugins/intel_cpu/src/shape_inference/custom/random_uniform.hpp b/src/plugins/intel_cpu/src/shape_inference/custom/random_uniform.hpp new file mode 100644 index 00000000000000..ce87a966a9cbc9 --- /dev/null +++ b/src/plugins/intel_cpu/src/shape_inference/custom/random_uniform.hpp @@ -0,0 +1,37 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "shape_inference/shape_inference_cpu.hpp" +#include + +#pragma once + +namespace ov { +namespace intel_cpu { +namespace node { + +class RandomUniformShapeInfer : public ShapeInferEmptyPads { +public: + explicit RandomUniformShapeInfer() {} + IShapeInfer::Result infer( + const std::vector>& input_shapes, + const std::unordered_map& data_dependency) override; + + port_mask_t get_port_mask() const override { + return PortMask(0); + } +}; + +class RandomUniformShapeInferFactory : public ShapeInferFactory { +public: + explicit RandomUniformShapeInferFactory(const std::shared_ptr& op); + ShapeInferPtr makeShapeInfer() const override; + +private: + std::shared_ptr m_op; +}; + +} // namespace node +} // namespace intel_cpu +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp index 9faf421c26a0f9..4eb40365fa95d7 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp @@ -189,6 +189,8 @@ std::vector disabledTestPatterns() { R"(smoke_LSTMSequenceCommon.*LSTMSequenceTest.Inference.*CONVERT_TO_TI.*)", // Issue: 122094 R"(smoke_Interpolate_Basic_Down_Sample_Tail/InterpolateLayerTest.Inference.*(asymmetric|align_corners).*f16.*)", + // Need to generate sequence exactly in the i64 data type. Enable in scope of i64 enabling. 
+ R"(.*RandomUniformLayerTestCPU.*OutPrc=i64.*)", }; #if defined(OPENVINO_ARCH_X86) diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/random_uniform.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/random_uniform.cpp new file mode 100644 index 00000000000000..2f9706e7d2562e --- /dev/null +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/random_uniform.cpp @@ -0,0 +1,265 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "random_uniform.hpp" +#include "ov_models/builders.hpp" + +using namespace CPUTestUtils; +using namespace ov::test; + +namespace CPULayerTestsDefinitions { + +std::string RandomUniformLayerTestCPU::getTestCaseName(const testing::TestParamInfo& obj) { + const auto& out_shape = std::get<0>(obj.param); + const auto& min_max = std::get<1>(obj.param); + + std::ostringstream result; + + result << "IS={" << out_shape.size(); + result << "}_OS=" << out_shape; + result << "_Min=" << std::get<0>(min_max); + result << "_Max=" << std::get<1>(min_max); + result << "_ShapePrc=" << std::get<2>(obj.param); + result << "_OutPrc=" << std::get<3>(obj.param); + result << "_GlobalSeed=" << std::get<4>(obj.param); + result << "_OperationalSeed=" << std::get<5>(obj.param); + result << "_ConstIn={" << utils::bool2str(std::get<6>(obj.param)) << "," + << utils::bool2str(std::get<7>(obj.param)) << "," + << utils::bool2str(std::get<8>(obj.param)) << "}"; + + result << CPUTestsBase::getTestCaseName(std::get<9>(obj.param)); + + const auto& config = std::get<10>(obj.param); + if (!config.empty()) { + result << "_PluginConf={"; + for (const auto& conf_item : config) { + result << "_" << conf_item.first << "="; + conf_item.second.print(result); + } + result << "}"; + } + + return result.str(); +} + +void RandomUniformLayerTestCPU::SetUp() { + targetDevice = utils::DEVICE_CPU; + + const auto& params = this->GetParam(); + m_output_shape = std::get<0>(params); + const auto& min_max = std::get<1>(params); + const auto& shape_prc = std::get<2>(params); + const auto& output_prc = std::get<3>(params); + m_global_seed = std::get<4>(params); + m_operational_seed = std::get<5>(params); + const auto& const_in_1 = std::get<6>(params); + const auto& const_in_2 = std::get<7>(params); + const auto& const_in_3 = std::get<8>(params); + const auto& cpu_params = std::get<9>(params); + configuration = std::get<10>(params); + + m_min_val = std::get<0>(min_max); + m_max_val = std::get<1>(min_max); + std::tie(inFmts, outFmts, priority, selectedType) = cpu_params; + +#if defined(OV_CPU_WITH_ACL) + updateSelectedType("ref_any", output_prc, configuration); +#else + if (output_prc == ElementType::i64) { + updateSelectedType(getPrimitiveType(), ElementType::i32, configuration); + } else if (output_prc == ElementType::f64) { + updateSelectedType(getPrimitiveType(), ElementType::f32, configuration); + } else if (output_prc == ElementType::f16) { + if (InferenceEngine::with_cpu_x86_avx512_core_fp16()) { + updateSelectedType(getPrimitiveType(), ElementType::f16, configuration); + } else { + updateSelectedType(getPrimitiveType(), ElementType::f32, configuration); + } + } else if (output_prc == ElementType::bf16) { + if (InferenceEngine::with_cpu_x86_bfloat16()) { + updateSelectedType(getPrimitiveType(), ElementType::bf16, configuration); + } else { + updateSelectedType("ref_any", ElementType::bf16, configuration); + } + } else { + updateSelectedType(getPrimitiveType(), output_prc, configuration); + } +#endif + + 
std::vector in_shapes; + ov::ParameterVector in_params; + std::vector> inputs; + + if (!const_in_1) { + in_shapes.push_back({{}, {{m_output_shape.size()}}}); + in_params.push_back(std::make_shared(shape_prc, ov::PartialShape{static_cast(m_output_shape.size())})); + in_params.back()->set_friendly_name("shape"); + inputs.push_back(in_params.back()); + } else { + inputs.push_back(ngraph::builder::makeConstant(shape_prc, {m_output_shape.size()}, m_output_shape)); + } + if (!const_in_2) { + in_shapes.push_back({{}, {{1}}}); + in_params.push_back(std::make_shared(output_prc, ov::PartialShape{1})); + in_params.back()->set_friendly_name("minval"); + inputs.push_back(in_params.back()); + } else { + inputs.push_back(ngraph::builder::makeConstant(output_prc, {1}, std::vector{m_min_val})); + } + if (!const_in_3) { + in_shapes.push_back({{}, {{1}}}); + in_params.push_back(std::make_shared(output_prc, ov::PartialShape{1})); + in_params.back()->set_friendly_name("maxval"); + inputs.push_back(in_params.back()); + } else { + inputs.push_back(ngraph::builder::makeConstant(output_prc, {1}, std::vector{m_max_val})); + } + + init_input_shapes(in_shapes); + + const auto rnd_op = std::make_shared(inputs[0], inputs[1], inputs[2], output_prc, m_global_seed, m_operational_seed); + const ov::ResultVector results{std::make_shared(rnd_op)}; + + function = std::make_shared(results, in_params, "RandomUniformLayerTestCPU"); +} + +template +void fill_data(TD* dst, const TS* src, size_t len) { + for (size_t i = 0llu; i < len; i++) { + dst[i] = static_cast(src[i]); + } +} + +void RandomUniformLayerTestCPU::generate_inputs(const std::vector& targetInputStaticShapes) { + inputs.clear(); + const auto& func_inputs = function->inputs(); + + for (size_t i = 0llu; i < func_inputs.size(); ++i) { + const auto& func_input = func_inputs[i]; + const auto& name = func_input.get_node()->get_friendly_name(); + const auto& in_prc = func_input.get_element_type(); + auto tensor = ov::Tensor(in_prc, targetInputStaticShapes[i]); + +#define CASE(P, S, L) \ +case P : \ +fill_data(tensor.data::value_type>(), S, L); break; + + if (name == "shape") { + switch (in_prc) { + CASE(ElementType::i32, m_output_shape.data(), m_output_shape.size()) + CASE(ElementType::i64, m_output_shape.data(), m_output_shape.size()) + default: + OPENVINO_THROW("RandomUniform does not support precision ", in_prc, " for the Shape input."); + } + } else if (name == "minval") { + switch (in_prc) { + CASE(ElementType::f32, &m_min_val, 1) + CASE(ElementType::f16, &m_min_val, 1) + CASE(ElementType::bf16, &m_min_val, 1) + CASE(ElementType::i32, &m_min_val, 1) + CASE(ElementType::i64, &m_min_val, 1) + CASE(ElementType::f64, &m_min_val, 1) + default: + OPENVINO_THROW("RandomUniform does not support precision ", in_prc, " for the Minval input."); + } + } else if (name == "maxval") { + switch (in_prc) { + CASE(ElementType::f32, &m_max_val, 1) + CASE(ElementType::f16, &m_max_val, 1) + CASE(ElementType::bf16, &m_max_val, 1) + CASE(ElementType::i32, &m_max_val, 1) + CASE(ElementType::i64, &m_max_val, 1) + CASE(ElementType::f64, &m_max_val, 1) + default: + OPENVINO_THROW("RandomUniform does not support precision ", in_prc, " for the Maxval input."); + } + } + +#undef CASE + + inputs.insert({func_input.get_node_shared_ptr(), tensor}); + } +} + +void RandomUniformLayerTestCPU::compare(const std::vector& expected, const std::vector& actual) { + if (m_global_seed != 0lu || m_operational_seed != 0lu) { + SubgraphBaseTest::compare(expected, actual); + return; + } + + // When both seed values 
are equal to zero, RandomUniform should generate non-deterministic sequence. + // In this case will use Mean and Variance metrics. + +#define CASE(X) case X : rndUCompare::value_type>(expected[0], actual[0]); break; + + switch (expected[0].get_element_type()) { + CASE(ElementType::f32) + CASE(ElementType::i32) + CASE(ElementType::f16) + CASE(ElementType::bf16) + CASE(ElementType::i64) + CASE(ElementType::f64) + default: OPENVINO_THROW("Unsupported element type: ", expected[0].get_element_type()); + } + +#undef CASE +} + +precisions_map RandomUniformLayerTestCPU::get_ref_precisions_convert_map() { + precisions_map precisions; + + if (!InferenceEngine::with_cpu_x86_avx512_core()) { + precisions.insert({ ov::element::bf16, ov::element::f32 }); + } + if (!InferenceEngine::with_cpu_x86_avx512_core_fp16()) { + precisions.insert({ ov::element::f16, ov::element::f32 }); + } + + return precisions; +} + +inline double less_or_equal(double a, double b) { + return (b - a) >= (std::fmax(std::fabs(a), std::fabs(b)) * std::numeric_limits::epsilon()); +} + +template +void RandomUniformLayerTestCPU::rndUCompare(const ov::Tensor& expected, const ov::Tensor& actual) { + auto actual_data = actual.data(); + size_t shape_size_cnt = ov::shape_size(expected.get_shape()); + double act_mean = 0.0; + double act_variance = 0.0; + const double exp_mean = (m_max_val + m_min_val) / 2.0; + const double exp_variance = std::pow(m_max_val - m_min_val, 2) / 12.0; + + for (size_t i = 0; i < shape_size_cnt; ++i) { + auto actual_value = static_cast(actual_data[i]); + if (std::isnan(actual_value)) { + std::ostringstream out_stream; + out_stream << "Actual value is NAN on coordinate: " << i; + throw std::runtime_error(out_stream.str()); + } + act_mean += actual_value; + act_variance += std::pow(actual_value - exp_mean, 2); + } + act_mean /= shape_size_cnt; + act_variance /= shape_size_cnt; + + auto rel_mean = (exp_mean - act_mean) / (m_max_val - m_min_val); + auto rel_variance = (exp_variance - act_variance) / std::pow(m_max_val - m_min_val, 2); + + if (!(less_or_equal(rel_mean, m_mean_threshold) && less_or_equal(rel_variance, m_variance_threshold))) { + std::ostringstream out_stream; + out_stream << "rel_mean < m_mean_threshold && rel_variance < m_variance_threshold" << + "\n\t rel_mean: " << rel_mean << + "\n\t rel_variance: " << rel_variance; + throw std::runtime_error(out_stream.str()); + } +} + +TEST_P(RandomUniformLayerTestCPU, CompareWithRefs) { + run(); + CheckPluginRelatedResults(compiledModel, "RandomUniform"); +} + +} // namespace CPULayerTestsDefinitions diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/random_uniform.hpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/random_uniform.hpp new file mode 100644 index 00000000000000..1cb9f5fccc451a --- /dev/null +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/random_uniform.hpp @@ -0,0 +1,53 @@ +// Copyright (C) 2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "shared_test_classes/base/ov_subgraph.hpp" +#include "test_utils/cpu_test_utils.hpp" + +namespace CPULayerTestsDefinitions { + +typedef std::tuple< + ov::Shape, // Output shapes + std::tuple, // Min and Max values + ov::test::ElementType, // Shape precision + ov::test::ElementType, // Output precision + uint64_t, // Global seed + uint64_t, // Operational seed + bool, // Is 1st input constant + bool, // Is 2nd input constant + bool, // Is 3rd input constant + CPUTestUtils::CPUSpecificParams, // CPU 
specific params + ov::AnyMap // Additional plugin configuration +> RandomUniformLayerTestCPUParamSet; + +class RandomUniformLayerTestCPU : public testing::WithParamInterface, + public ov::test::SubgraphBaseTest, public CPUTestUtils::CPUTestsBase { +public: + static std::string getTestCaseName(const testing::TestParamInfo& obj); + +protected: + void SetUp() override; + + void generate_inputs(const std::vector& target_shapes) override; + + void compare(const std::vector& expected, const std::vector& actual) override; + + precisions_map get_ref_precisions_convert_map() override; + + template + void rndUCompare(const ov::Tensor& expected, const ov::Tensor& actual); + +private: + ov::Shape m_output_shape; + uint64_t m_global_seed; + uint64_t m_operational_seed; + double m_min_val; + double m_max_val; + static constexpr double m_mean_threshold = 0.05; + static constexpr double m_variance_threshold = 0.1; +}; + +} // namespace CPULayerTestsDefinitions diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/common/random_uniform.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/common/random_uniform.cpp new file mode 100644 index 00000000000000..f319fb6ada2719 --- /dev/null +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/common/random_uniform.cpp @@ -0,0 +1,68 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "single_layer_tests/classes/random_uniform.hpp" + +using namespace CPUTestUtils; +using namespace ov::test; + +namespace CPULayerTestsDefinitions { +namespace RandomUniform { + +static const std::vector shape_prc = { + ElementType::i32, + ElementType::i64 +}; + +static const std::vector output_shapes = { + {500}, + {4, 3, 210} +}; + +static const std::vector global_seed = { + 0, 8 +}; + +static const std::vector operational_seed = { + 0, 3, 5 +}; + +static const std::vector> min_max = { + {0, 50}, + {-50, 50}, + {-50, 0} +}; + +INSTANTIATE_TEST_SUITE_P(smoke_Param, RandomUniformLayerTestCPU, + ::testing::Combine( + ::testing::ValuesIn(output_shapes), + ::testing::ValuesIn(min_max), + ::testing::ValuesIn(shape_prc), + ::testing::Values(ElementType::f32, ElementType::i32), + ::testing::ValuesIn(global_seed), + ::testing::ValuesIn(operational_seed), + ::testing::Values(false), + ::testing::Values(false), + ::testing::Values(false), + ::testing::Values(emptyCPUSpec), + ::testing::Values(empty_plugin_config)), + RandomUniformLayerTestCPU::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_ParamConst, RandomUniformLayerTestCPU, + ::testing::Combine( + ::testing::Values(output_shapes[0]), + ::testing::Values(min_max[0]), + ::testing::Values(ElementType::i32), + ::testing::Values(ElementType::f32), + ::testing::Values(1), + ::testing::Values(0), + ::testing::Values(true, false), + ::testing::Values(true, false), + ::testing::Values(true, false), + ::testing::Values(emptyCPUSpec), + ::testing::Values(empty_plugin_config)), + RandomUniformLayerTestCPU::getTestCaseName); + +} // namespace RandomUniform +} // namespace CPULayerTestsDefinitions diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/random_uniform.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/random_uniform.cpp new file mode 100644 index 00000000000000..8fec42f382464d --- /dev/null +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/random_uniform.cpp @@ -0,0 +1,46 @@ +// Copyright (C) 2018-2023 Intel Corporation +// 
SPDX-License-Identifier: Apache-2.0 +// + +#include "single_layer_tests/classes/random_uniform.hpp" + +using namespace CPUTestUtils; +using namespace ov::test; + +namespace CPULayerTestsDefinitions { +namespace RandomUniform { + +static const std::vector output_prc_nightly = { + ElementType::f32, + ElementType::f16, + ElementType::bf16, + ElementType::i32, + ElementType::i64 +}; + +// Need to validate the Kernel corner cases. +static const std::vector output_shapes_nightly = { + {1}, {2}, {3}, {2, 2}, {5}, {2, 3}, {7}, {2, 2, 2}, {3, 3}, {2, 5}, {11}, {2, 3, 2}, {13}, {2, 7}, {3, 5}, + {4, 4}, {1, 17}, {2, 9}, {19}, {4, 5}, {21}, {11, 2}, {23, 1}, {4, 2, 3}, {5, 5}, {26}, {1, 27}, {14, 2}, + {29}, {10, 3}, {31}, {2, 8, 2}, {33}, {17, 2}, {5, 7}, {2, 3, 2, 3}, {37}, {2, 19}, {2, 20}, {41}, {42}, + {43}, {22, 2}, {3, 5, 3}, {5, 2, 5}, {1, 3, 1, 17, 1}, {26, 2}, {53}, {54}, {55}, {56}, {57}, {58}, {59}, + {2, 32}, {99}, {127}, {128}, {129}, {199}, {255}, {499}, {997}, {1753}, {2899} +}; + +INSTANTIATE_TEST_SUITE_P(nightly_Param, RandomUniformLayerTestCPU, + ::testing::Combine( + ::testing::ValuesIn(output_shapes_nightly), + ::testing::Values(std::tuple{-31, 17}), + ::testing::Values(ElementType::i32), + ::testing::ValuesIn(output_prc_nightly), + ::testing::Values(3), + ::testing::Values(1), + ::testing::Values(true, false), + ::testing::Values(true, false), + ::testing::Values(true, false), + ::testing::Values(emptyCPUSpec), + ::testing::Values(empty_plugin_config)), + RandomUniformLayerTestCPU::getTestCaseName); + +} // namespace RandomUniform +} // namespace CPULayerTestsDefinitions diff --git a/src/plugins/intel_cpu/tests/functional/test_utils/cpu_test_utils.hpp b/src/plugins/intel_cpu/tests/functional/test_utils/cpu_test_utils.hpp index d8deddfebe5d69..fff65f9e1c442f 100644 --- a/src/plugins/intel_cpu/tests/functional/test_utils/cpu_test_utils.hpp +++ b/src/plugins/intel_cpu/tests/functional/test_utils/cpu_test_utils.hpp @@ -170,6 +170,7 @@ class CPUTestsBase { // common parameters const auto emptyCPUSpec = CPUSpecificParams{{}, {}, {}, {}}; const std::map cpuEmptyPluginConfig; +const ov::AnyMap empty_plugin_config{}; const std::map cpuFP32PluginConfig = { { InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16, InferenceEngine::PluginConfigParams::NO } }; const std::map cpuBF16PluginConfig = diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/common_utils.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/common_utils.hpp index cc45a47d779d57..0bd9f4845d481b 100644 --- a/src/tests/test_utils/common_test_utils/include/common_test_utils/common_utils.hpp +++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/common_utils.hpp @@ -115,6 +115,10 @@ inline std::string set2str(const std::set& set) { return std::string("()"); } +inline std::string bool2str(const bool val) { + return val ? 
"True" : "False"; +} + template std::vector> combineParams(const std::map>& keyValueSets) { std::vector> resVec; From 222fbb1aec55febcab0f0e53fd49b9b98fb5f7fb Mon Sep 17 00:00:00 2001 From: Ekaterina Aidova Date: Wed, 18 Oct 2023 12:58:54 +0400 Subject: [PATCH 247/257] [PT FE]: support aten::fill_diagonal_, aten::fill (#20395) * [PT FE]: support aten::fill_diagonal_, aten::fill * remove xfail * Update src/frontends/pytorch/src/op/full.cpp Co-authored-by: Maxim Vafin * Update tests/model_hub_tests/torch_tests/test_hf_transformers.py --------- Co-authored-by: Maxim Vafin --- src/frontends/pytorch/src/op/full.cpp | 81 ++++++++++++++++++- src/frontends/pytorch/src/op_table.cpp | 7 +- tests/layer_tests/pytorch_tests/test_full.py | 81 ++++++++++++++++--- .../torch_tests/hf_transformers_models | 8 +- .../torch_tests/test_hf_transformers.py | 5 +- 5 files changed, 163 insertions(+), 19 deletions(-) diff --git a/src/frontends/pytorch/src/op/full.cpp b/src/frontends/pytorch/src/op/full.cpp index cf60d096555007..e8bfa1c7ce99d7 100644 --- a/src/frontends/pytorch/src/op/full.cpp +++ b/src/frontends/pytorch/src/op/full.cpp @@ -3,10 +3,19 @@ // #include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/op/add.hpp" #include "openvino/op/broadcast.hpp" #include "openvino/op/constant.hpp" #include "openvino/op/convert_like.hpp" +#include "openvino/op/divide.hpp" +#include "openvino/op/gather.hpp" +#include "openvino/op/multiply.hpp" +#include "openvino/op/power.hpp" +#include "openvino/op/range.hpp" +#include "openvino/op/reshape.hpp" +#include "openvino/op/scatter_elements_update.hpp" #include "openvino/op/shape_of.hpp" +#include "openvino/op/squeeze.hpp" #include "utils.hpp" namespace ov { @@ -71,12 +80,17 @@ OutputVector translate_full_like(const NodeContext& context) { return {base_translate_full_with_convertlike(context, sizes, value, out)}; }; -OutputVector translate_fill_(const NodeContext& context) { - num_inputs_check(context, 2, 2); +OutputVector translate_fill(const NodeContext& context) { + num_inputs_check(context, 2, 3); auto input = context.get_input(0); auto value = context.get_input(1); auto sizes = context.mark_node(std::make_shared(input, element::i32)); - return {base_translate_full_with_convertlike(context, sizes, value, input)}; + auto out = context.input_is_none(2) ? input : context.get_input(2); + auto result = base_translate_full_with_convertlike(context, sizes, value, out); + if (!context.input_is_none(2)) { + context.mutate_input(2, result); + } + return {result}; }; OutputVector translate_new_full(const NodeContext& context) { @@ -187,6 +201,67 @@ OutputVector translate_empty(const NodeContext& context) { } return {empty}; }; + +OutputVector translate_fill_diagonal(const NodeContext& context) { + // aten::fill_diagonal_(Tensor(a!) self, Scalar fill_value, bool wrap=False) -> Tensor(a!) 
+ // realization inspired by numpy: + // https://github.com/numpy/numpy/blob/c236e694d222ae6b812cb8dab54471bc4c912f0f/numpy/lib/_index_tricks_impl.py#L787-L918 + num_inputs_check(context, 3, 3); + auto input_tensor = context.get_input(0); + auto fill_value = context.get_input(1); + auto input_shape = context.mark_node(std::make_shared(input_tensor, element::i32)); + auto input_rank = input_tensor.get_partial_shape().rank(); + auto const_one = context.mark_node(v0::Constant::create(element::i32, Shape{1}, {1})); + auto const_zero = context.mark_node(v0::Constant::create(element::i32, Shape{1}, {0})); + auto const_one_s = context.mark_node(v0::Constant::create(element::i32, Shape{}, {1})); + auto const_zero_s = context.mark_node(v0::Constant::create(element::i32, Shape{}, {0})); + auto const_neg_one = context.mark_node(v0::Constant::create(element::i32, Shape{1}, {-1})); + if (input_rank.is_dynamic() || input_rank.get_length() < 2) { + FRONT_END_OP_CONVERSION_CHECK(false, "aten::fill_diagonal_ required tensor with static rank >= 2 "); + } + auto flatten_input = context.mark_node(std::make_shared(input_tensor, const_neg_one, false)); + auto wrap = context.const_input(2); + Output step; + // default value for end - number of elements in input tensor + Output end; + auto flatten_shape = context.mark_node(std::make_shared(flatten_input, element::i32)); + end = context.mark_node(std::make_shared(flatten_shape, const_neg_one, const_zero)); + auto last_dim = context.mark_node(std::make_shared(input_shape, const_neg_one, const_zero)); + if (input_rank.get_length() == 2) { + // step = a.shape[1] + 1 + step = context.mark_node(std::make_shared(last_dim, const_one_s)); + if (!wrap) { + // if not wrap. and non squared matrix, do not fill tail by cutting end to square + end = context.mark_node(std::make_shared(last_dim, last_dim)); + } + } else { + // step = 1 + (cumprod(a.shape[:-1])).sum() + // cumprod operation is not supported by ov, but with condition that >2D tensors supported only if all dims + // equals cumprod can be represented as finite geometric serial and its sum can be found by formula + // b0 * (bn * q - 1) / (q - 1), where in this particual case q = b0, bn = b0 ^ n + auto rank_minus_one = + context.mark_node(v0::Constant::create(element::i32, Shape{}, {input_rank.get_length() - 1})); + auto dim_power = context.mark_node(std::make_shared(last_dim, rank_minus_one)); + auto dim_power_minus_one = context.mark_node(std::make_shared(dim_power, const_neg_one)); + auto dim_minus_one = context.mark_node(std::make_shared(last_dim, const_neg_one)); + auto q = context.mark_node(std::make_shared(dim_power_minus_one, dim_minus_one, true)); + auto cumprod_sum = context.mark_node(std::make_shared(last_dim, q)); + step = context.mark_node(std::make_shared(const_one_s, cumprod_sum)); + // wrap parameter is not applicable in this case as supported only equal dims on pytorch side + } + step = context.mark_node(std::make_shared(step, const_zero)); + end = context.mark_node(std::make_shared(end, const_zero)); + auto indices = context.mark_node(std::make_shared(const_zero_s, end, step, element::i32)); + auto indices_shape = context.mark_node(std::make_shared(indices, element::i32)); + fill_value = context.mark_node(std::make_shared(fill_value, input_tensor)); + fill_value = context.mark_node(std::make_shared(fill_value, indices_shape)); + // fill values + auto filled_tensor = + context.mark_node(std::make_shared(flatten_input, indices, fill_value, const_zero)); + // reshape back to original shape + 
filled_tensor = context.mark_node(std::make_shared(filled_tensor, input_shape, false)); + return {filled_tensor}; +} } // namespace op } // namespace pytorch } // namespace frontend diff --git a/src/frontends/pytorch/src/op_table.cpp b/src/frontends/pytorch/src/op_table.cpp index 47969ddb57d1c6..75665ffe8d4d14 100644 --- a/src/frontends/pytorch/src/op_table.cpp +++ b/src/frontends/pytorch/src/op_table.cpp @@ -66,7 +66,8 @@ OP_CONVERTER(translate_expand_as); OP_CONVERTER(translate_eye); OP_CONVERTER(translate_fake_quantize_per_channel_affine); OP_CONVERTER(translate_fake_quantize_per_tensor_affine); -OP_CONVERTER(translate_fill_); +OP_CONVERTER(translate_fill); +OP_CONVERTER(translate_fill_diagonal); OP_CONVERTER(translate_flatten); OP_CONVERTER(translate_flip); OP_CONVERTER(translate_floor_divide); @@ -323,7 +324,9 @@ const std::map get_supported_ops_ts() { {"aten::fake_quantize_per_channel_affine", op::translate_fake_quantize_per_channel_affine}, {"aten::fake_quantize_per_tensor_affine", op::translate_fake_quantize_per_tensor_affine}, {"aten::feature_dropout", op::skip_node}, - {"aten::fill_", op::inplace_op}, + {"aten::fill", op::translate_fill}, + {"aten::fill_", op::inplace_op}, + {"aten::fill_diagonal_", op::inplace_op}, {"aten::flatten", op::quantizable_op}, {"aten::flip", op::translate_flip}, {"aten::floor", op::translate_1to1_match_1_inputs}, diff --git a/tests/layer_tests/pytorch_tests/test_full.py b/tests/layer_tests/pytorch_tests/test_full.py index 4ce42db7fa9167..c564b1bb3731b9 100644 --- a/tests/layer_tests/pytorch_tests/test_full.py +++ b/tests/layer_tests/pytorch_tests/test_full.py @@ -104,31 +104,94 @@ def test_full_out(self, shape, value, dtype, with_names, ie_device, precision, i ir_version, kwargs_to_prepare_input={'value': value}) class TestFill(PytorchLayerTest): - def _prepare_input(self, value, shape, input_dtype, value_dtype): - return (np.random.randn(*shape).astype(input_dtype), np.array(value, dtype=value_dtype),) + def _prepare_input(self, value, shape, input_dtype, value_dtype, out=False): + if not out: + return (np.random.randn(*shape).astype(input_dtype), np.array(value, dtype=value_dtype),) + return (np.random.randn(*shape).astype(input_dtype), np.array(value, dtype=value_dtype), np.zeros(shape, dtype=input_dtype)) - def create_model(self): + + def create_model(self, mode): import torch class aten_fill(torch.nn.Module): + def __init__(self, mode) -> None: + super().__init__() + if mode == "inplace": + self.forward = self.forward_inplace + if mode == "out": + self.forward = self.forward_out - def forward(self, input_t: torch.Tensor, x: float): + + def forward_inplace(self, input_t: torch.Tensor, x: float): return input_t.fill_(x) + + def forward_out(self, input_t: torch.Tensor, x: float, out: torch.Tensor): + return input_t.fill(x, out=out), out + + def forward(self, input_t: torch.Tensor, x:float): + return input_t.fill(x) + ref_net = None - model = aten_fill() + model = aten_fill(mode) - return model, ref_net, "aten::fill_" + return model, ref_net, "aten::fill_" if mode == "inplace" else "aten::fill" @pytest.mark.parametrize("shape", [[1], [1, 2], [1, 2, 3], [1, 2, 3, 4], [2, 3, 4, 5, 6]]) @pytest.mark.parametrize("value", [0, 1, -1, 0.5]) @pytest.mark.parametrize("input_dtype", ["int8", "int32", "int64", "float32", "float64"]) @pytest.mark.parametrize("value_dtype", ["int8", "int32", "int64", "float32", "float64"]) + @pytest.mark.parametrize("mode", ["", "inplace", "out"]) @pytest.mark.nightly @pytest.mark.precommit - def test_fill(self, shape, value, 
input_dtype, value_dtype, ie_device, precision, ir_version): - self._test(*self.create_model(), ie_device, precision, ir_version, - kwargs_to_prepare_input={'value': value, 'shape': shape, "input_dtype": input_dtype, "value_dtype": value_dtype}) + def test_fill(self, shape, value, input_dtype, value_dtype, mode, ie_device, precision, ir_version): + self._test(*self.create_model(mode), ie_device, precision, ir_version, + kwargs_to_prepare_input={ + 'value': value, + 'shape': shape, + "input_dtype": input_dtype, + "value_dtype": value_dtype, + "out": mode == "out" + }) + +class TestFillDiagonal(PytorchLayerTest): + def _prepare_input(self, shape, input_dtype, value, value_dtype): + return np.zeros(shape).astype(input_dtype), np.array(value, dtype=value_dtype) + + def create_model(self, shape, wrap): + import torch + + class aten_fill_diagonal(torch.nn.Module): + def __init__(self, input_shape, wrap=False) -> None: + super().__init__() + self.wrap = wrap + self.input_shape = input_shape + + def forward(self, x:torch.Tensor, y:float): + x = x.reshape(self.input_shape) + return x.fill_diagonal_(y, wrap=self.wrap), x + + ref_net = None + + model = aten_fill_diagonal(shape, wrap) + return model, "aten::fill_diagonal_", ref_net + + @pytest.mark.parametrize("shape", ([4, 4], [5, 4], [8, 4], [4, 3], [5, 5, 5], [3, 3, 3, 3], [4, 4, 4, 4, 4])) + @pytest.mark.parametrize("value", [0, 1, -1, 2.5]) + @pytest.mark.parametrize("input_dtype", ["int8", "int32", "int64", "float32", "float64"]) + @pytest.mark.parametrize("value_dtype", ["int8", "int32", "int64", "float32", "float64"]) + @pytest.mark.parametrize("wrap", [True, False]) + @pytest.mark.nightly + @pytest.mark.precommit + def test_fill_diagonal(self, shape, value, input_dtype, value_dtype, wrap, ie_device, precision, ir_version): + self._test(*self.create_model(shape, wrap), ie_device, precision, ir_version, + kwargs_to_prepare_input={ + 'value': value, + 'shape': shape, + "input_dtype": input_dtype, + "value_dtype": value_dtype + }) + class TestZero(PytorchLayerTest): def _prepare_input(self, shape, input_dtype): diff --git a/tests/model_hub_tests/torch_tests/hf_transformers_models b/tests/model_hub_tests/torch_tests/hf_transformers_models index 112aedeb60de0c..0618d98a4d9f31 100644 --- a/tests/model_hub_tests/torch_tests/hf_transformers_models +++ b/tests/model_hub_tests/torch_tests/hf_transformers_models @@ -242,7 +242,7 @@ microsoft/deberta-base,deberta microsoft/git-large-coco,git,skip,Load problem microsoft/layoutlm-base-uncased,layoutlm microsoft/layoutlmv2-base-uncased,layoutlmv2,skip,Load problem -microsoft/layoutlmv3-base,layoutlmv3,xfail,Unsupported op aten::amax aten::clip +microsoft/layoutlmv3-base,layoutlmv3 microsoft/markuplm-base,markuplm microsoft/resnet-50,resnet microsoft/speecht5_hifigan,hifigan,skip,Load problem @@ -251,7 +251,7 @@ microsoft/swinv2-tiny-patch4-window8-256,swinv2,xfail,Unsupported op aten::adapt microsoft/table-transformer-detection,table-transformer microsoft/wavlm-large,wavlm,skip,Load problem microsoft/xclip-base-patch32,xclip,skip,Load problem -microsoft/xprophetnet-large-wiki100-cased,xlm-prophetnet,xfail,Unsupported op aten::fill_diagonal_ +microsoft/xprophetnet-large-wiki100-cased,xlm-prophetnet miguelvictor/python-fromzero-lstmlm,lstmlm,skip,Load problem mingzi151/test-hf-wav2vec2bert,wav2vec2bert,skip,Load problem MIT/ast-finetuned-audioset-10-10-0.4593,audio-spectrogram-transformer,skip,Load problem @@ -348,7 +348,7 @@ SteveZhan/my-resnet50d,resnet_steve,skip,Load problem suno/bark,bark,skip,Load 
problem surajnair/r3m-50,r3m,skip,Load problem susnato/clvp_dev,clvp,skip,Load problem -Tanrei/GPTSAN-japanese,gptsan-japanese,xfail,Unsupported op aten::clip aten::index_put_ prim::TupleConstruct +Tanrei/GPTSAN-japanese,gptsan-japanese,xfail,Unsupported op aten::index_put_ prim::TupleConstruct tau/bart-large-sled-govreport,tau/sled,skip,Load problem taufeeque/best-cb-model,codebook,skip,Load problem Team-PIXEL/pixel-base,pixel,skip,Load problem @@ -357,7 +357,7 @@ thomwolf/vqgan_imagenet_f16_1024,vqgan_model,skip,Load problem thu-ml/zh-clip-vit-roberta-large-patch14,zhclip,skip,Load problem tifa-benchmark/promptcap-coco-vqa,ofa,skip,Load problem tli8hf/robertabase_snli,transformerfornli,skip,Load problem -transfo-xl-wt103,transfo-xl,xfail,Unsupported op aten::clamp_ aten::index_copy_ +transfo-xl-wt103,transfo-xl,xfail,Unsupported op aten::index_copy_ transZ/BART_shared_clean,shared_bart,skip,Load problem transZ/BART_shared_v2,shared_bart_v2,skip,Load problem transZ/misecom,misecom,skip,Load problem diff --git a/tests/model_hub_tests/torch_tests/test_hf_transformers.py b/tests/model_hub_tests/torch_tests/test_hf_transformers.py index 3a677353c86508..184e725a04f9b9 100644 --- a/tests/model_hub_tests/torch_tests/test_hf_transformers.py +++ b/tests/model_hub_tests/torch_tests/test_hf_transformers.py @@ -298,7 +298,10 @@ def teardown_method(self): ("google/tapas-large-finetuned-wtq", "tapas"), ("gpt2", "gpt2"), ("openai/clip-vit-large-patch14", "clip"), - ("RWKV/rwkv-4-169m-pile", "rwkv")]) + ("RWKV/rwkv-4-169m-pile", "rwkv"), + ("microsoft/layoutlmv3-base", "layoutlmv3"), + ("microsoft/xprophetnet-large-wiki100-cased", "xlm-prophetnet"), + ]) @pytest.mark.precommit def test_convert_model_precommit(self, name, type, ie_device): self.run(model_name=name, model_link=type, ie_device=ie_device) From 2415f0c7cffda56dfaedf88d54630badfe449545 Mon Sep 17 00:00:00 2001 From: Pawel Raasz Date: Wed, 18 Oct 2023 11:09:27 +0200 Subject: [PATCH 248/257] [core]Migrate BatchToSpace to new API (#20450) * Migrate BatchToSpace to new API * Remove not required suppression macro --- .../include/openvino/op/batch_to_space.hpp | 4 +- src/core/src/op/batch_to_space.cpp | 112 ++++++++---------- 2 files changed, 50 insertions(+), 66 deletions(-) diff --git a/src/core/include/openvino/op/batch_to_space.hpp b/src/core/include/openvino/op/batch_to_space.hpp index 6609e539087628..2dbbf018913fd3 100644 --- a/src/core/include/openvino/op/batch_to_space.hpp +++ b/src/core/include/openvino/op/batch_to_space.hpp @@ -37,9 +37,7 @@ class OPENVINO_API BatchToSpace : public Op { const Output& block_shape, const Output& crops_begin, const Output& crops_end); - OPENVINO_SUPPRESS_DEPRECATED_START - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - OPENVINO_SUPPRESS_DEPRECATED_END + bool evaluate(TensorVector& outputs, const TensorVector& inputs) const override; bool has_evaluate() const override; void validate_and_infer_types() override; diff --git a/src/core/src/op/batch_to_space.cpp b/src/core/src/op/batch_to_space.cpp index da2c2c5fa703a1..0b522b5156b017 100644 --- a/src/core/src/op/batch_to_space.cpp +++ b/src/core/src/op/batch_to_space.cpp @@ -2,33 +2,23 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/batch_to_space.hpp" - -#include -#include -#include -#include -#include -#include -#include +#include "openvino/op/batch_to_space.hpp" +#include "batch_to_space_shape_inference.hpp" #include "itt.hpp" -#include "ngraph/builder/make_constant.hpp" -#include 
"ngraph/node.hpp" -#include "ngraph/opsets/opset3.hpp" -#include "ngraph/shape.hpp" #include "openvino/op/util/precision_sensitive_attribute.hpp" #include "openvino/op/util/slice_plan.hpp" #include "openvino/reference/reshape.hpp" #include "openvino/reference/strided_slice.hpp" -using namespace std; -using namespace ngraph; +namespace ov { +namespace op { +namespace v1 { -ngraph::op::v1::BatchToSpace::BatchToSpace(const ngraph::Output& data, - const ngraph::Output& block_shape, - const ngraph::Output& crops_begin, - const ngraph::Output& crops_end) +BatchToSpace::BatchToSpace(const Output& data, + const Output& block_shape, + const Output& crops_begin, + const Output& crops_end) : Op({data, block_shape, crops_begin, crops_end}) { ov::mark_as_precision_sensitive(input(1)); ov::mark_as_precision_sensitive(input(2)); @@ -36,7 +26,7 @@ ngraph::op::v1::BatchToSpace::BatchToSpace(const ngraph::Output& d constructor_validate_and_infer_types(); } -void op::v1::BatchToSpace::validate_and_infer_types() { +void BatchToSpace::validate_and_infer_types() { OV_OP_SCOPE(v1_BatchToSpace_validate_and_infer_types); const auto& data_et = get_input_element_type(0); @@ -66,30 +56,29 @@ void op::v1::BatchToSpace::validate_and_infer_types() { set_output_type(0, data_et, output_shape); } -std::shared_ptr ngraph::op::v1::BatchToSpace::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr BatchToSpace::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v1_BatchToSpace_clone_with_new_inputs); check_new_args_count(this, new_args); - return make_shared(new_args.at(0), new_args.at(1), new_args.at(2), new_args.at(3)); + return std::make_shared(new_args.at(0), new_args.at(1), new_args.at(2), new_args.at(3)); } -bool ngraph::op::v1::BatchToSpace::visit_attributes(ngraph::AttributeVisitor& visitor) { +bool BatchToSpace::visit_attributes(AttributeVisitor& visitor) { OV_OP_SCOPE(v1_BatchToSpace_visit_attributes); return true; } -OPENVINO_SUPPRESS_DEPRECATED_START namespace { -bool batch_to_space_evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) { - auto data = inputs[0]; - const auto elem_size = data->get_element_type().size(); +bool batch_to_space_evaluate(TensorVector& outputs, const TensorVector& inputs) { + const auto& in = inputs[0]; + const auto elem_size = in.get_element_type().size(); - auto data_shape = data->get_shape(); + auto data_shape = in.get_shape(); - auto const block_values_size = shape_size(inputs[1]->get_shape()); + auto const block_values_size = shape_size(inputs[1].get_shape()); - const auto* block_values = inputs[1]->get_data_ptr(); - const auto* crops_begin_values = inputs[2]->get_data_ptr(); - const auto* crops_end_values = inputs[3]->get_data_ptr(); + const auto* block_values = inputs[1].data(); + const auto* crops_begin_values = inputs[2].data(); + const auto* crops_end_values = inputs[3].data(); ov::Shape dispersed_shape(1); dispersed_shape.insert(dispersed_shape.end(), data_shape.begin(), data_shape.end()); @@ -101,7 +90,13 @@ bool batch_to_space_evaluate(const HostTensorVector& outputs, const HostTensorVe return false; } - auto* flat_data = data->get_data_ptr(); + auto* in_first = static_cast(in.data()); + + // Copy input tensor to not overwrite evaluate's inputs tensors passed as const. + // The evaluate algorithm should be improved to avoid additional data copy. 
+ auto flat_in = Tensor(in.get_element_type(), data_shape); + auto* flat_data = static_cast(flat_in.data()); + std::memcpy(flat_data, in_first, flat_in.get_byte_size()); std::vector dispersed_data(shape_size(data_shape) * elem_size); ov::Shape post_transpose_shape(axes_order.size()); @@ -117,15 +112,15 @@ bool batch_to_space_evaluate(const HostTensorVector& outputs, const HostTensorVe dispersed_shape, elem_size); - size_t val = 1; - for (size_t axis_idx = 0; axis_idx <= block_values_size; ++axis_idx) { + for (size_t axis_idx = 0, val = 1; axis_idx <= block_values_size; ++axis_idx) { if ((block_idx + 1) == axis_idx) { axes_order[axis_idx] = 0; } else { axes_order[axis_idx] = val; - val++; + ++val; } } + for (size_t axis_idx = 0; axis_idx < axes_order.size(); ++axis_idx) { post_transpose_shape[axis_idx] = dispersed_shape[axes_order[axis_idx]]; } @@ -148,61 +143,52 @@ bool batch_to_space_evaluate(const HostTensorVector& outputs, const HostTensorVe data_shape = squeezed_shape; } - std::vector upperbounds_values(data_shape.size()); + std::vector upper_bounds_values(data_shape.size()); for (size_t i = 0; i < data_shape.size(); ++i) { - upperbounds_values[i] = data_shape[i] - crops_end_values[i]; + upper_bounds_values[i] = data_shape[i] - crops_end_values[i]; } std::vector begin_mask(data_shape.size(), 0); std::vector end_mask(data_shape.size(), 0); - std::vector begins(shape_size(inputs[2]->get_shape())); - begins.assign(crops_begin_values, crops_begin_values + shape_size(inputs[2]->get_shape())); + std::vector begins(shape_size(inputs[2].get_shape())); + begins.assign(crops_begin_values, crops_begin_values + shape_size(inputs[2].get_shape())); std::vector default_strides(begins.size(), 1); const auto slice_plan = ov::op::util::make_slice_plan(data_shape, begins, - upperbounds_values, + upper_bounds_values, default_strides, begin_mask, end_mask, AxisSet(), AxisSet(), AxisSet()); - ov::reference::strided_slice(flat_data, outputs[0]->get_data_ptr(), data_shape, slice_plan, elem_size); + ov::reference::strided_slice(flat_data, static_cast(outputs[0].data()), data_shape, slice_plan, elem_size); return true; } } // namespace -bool ngraph::op::v1::BatchToSpace::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { +bool BatchToSpace::evaluate(TensorVector& outputs, const TensorVector& inputs) const { OV_OP_SCOPE(v1_BatchToSpace_evaluate); - OPENVINO_SUPPRESS_DEPRECATED_START - OPENVINO_ASSERT(validate_host_tensor_vector(inputs, 4)); - OPENVINO_ASSERT(validate_host_tensor_vector(outputs, 1)); - OPENVINO_SUPPRESS_DEPRECATED_END - - if (outputs[0]->get_partial_shape().is_dynamic()) { - std::vector input_shapes; - input_shapes.reserve(inputs.size()); + OPENVINO_ASSERT(outputs.size() == 1); - for (size_t i = 0; i < inputs.size(); ++i) { - input_shapes.push_back(inputs[i]->get_partial_shape()); - if (input_shapes.back().is_dynamic()) { - return false; - } - } - - const auto output_shape = shape_infer(this, input_shapes, ov::make_tensor_accessor(inputs)).front().to_shape(); - - outputs[0]->set_element_type(inputs[0]->get_element_type()); - outputs[0]->set_shape(output_shape); + std::vector input_shapes; + for (const auto& in : inputs) { + input_shapes.emplace_back(in.get_shape()); } + const auto output_shape = shape_infer(this, input_shapes, ov::make_tensor_accessor(inputs)).front().to_shape(); + outputs[0].set_shape(output_shape); + return batch_to_space_evaluate(outputs, inputs); } -bool ngraph::op::v1::BatchToSpace::has_evaluate() const { +bool BatchToSpace::has_evaluate() 
const { OV_OP_SCOPE(v1_BatchToSpace_has_evaluate); return !get_input_partial_shape(0).is_dynamic() && get_input_shape(0).size() >= 2 && get_input_shape(0).size() <= shape_size(get_input_shape(1)); } +} // namespace v1 +} // namespace op +} // namespace ov From 9fb40b0007a4798c718ca67cdf89cadf89157777 Mon Sep 17 00:00:00 2001 From: Pawel Raasz Date: Wed, 18 Oct 2023 11:16:12 +0200 Subject: [PATCH 249/257] [core]Migrate LogicalNot to new API (#20438) * Migrate LogicalNot to new API * Rename file not.hpp -> logical_not.hpp * Apply code style --- src/core/include/openvino/op/logical_not.hpp | 4 +- .../openvino/reference/logical_not.hpp | 25 +++++ .../include/openvino/reference/not.hpp | 18 --- src/core/src/op/logical_not.cpp | 105 ++++++++---------- 4 files changed, 74 insertions(+), 78 deletions(-) create mode 100644 src/core/reference/include/openvino/reference/logical_not.hpp delete mode 100644 src/core/reference/include/openvino/reference/not.hpp diff --git a/src/core/include/openvino/op/logical_not.hpp b/src/core/include/openvino/op/logical_not.hpp index c5421b8db14a47..052aed0a09ad24 100644 --- a/src/core/include/openvino/op/logical_not.hpp +++ b/src/core/include/openvino/op/logical_not.hpp @@ -24,9 +24,7 @@ class OPENVINO_API LogicalNot : public Op { void validate_and_infer_types() override; std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - OPENVINO_SUPPRESS_DEPRECATED_START - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - OPENVINO_SUPPRESS_DEPRECATED_END + bool evaluate(TensorVector& outputs, const TensorVector& inputs) const override; bool has_evaluate() const override; }; } // namespace v1 diff --git a/src/core/reference/include/openvino/reference/logical_not.hpp b/src/core/reference/include/openvino/reference/logical_not.hpp new file mode 100644 index 00000000000000..ca31a824b50d5f --- /dev/null +++ b/src/core/reference/include/openvino/reference/logical_not.hpp @@ -0,0 +1,25 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include + +namespace ov { +namespace reference { + +/** + * @brief Reference implementation of LogicalNot operator. + * + * @param arg Pointer to input data. + * @param out Pointer to output data. + * @param count Number of elements in input buffer. 
+ */ +template +void logical_not(const T* arg, T* out, const size_t count) { + std::transform(arg, std::next(arg, count), out, std::logical_not()); +} +} // namespace reference +} // namespace ov diff --git a/src/core/reference/include/openvino/reference/not.hpp b/src/core/reference/include/openvino/reference/not.hpp deleted file mode 100644 index e0444a8eb73a2a..00000000000000 --- a/src/core/reference/include/openvino/reference/not.hpp +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -namespace ov { -namespace reference { -template -void logical_not(const T* arg, T* out, size_t count) { - for (size_t i = 0; i < count; i++) { - out[i] = static_cast(!(arg[i])); - } -} -} // namespace reference -} // namespace ov diff --git a/src/core/src/op/logical_not.cpp b/src/core/src/op/logical_not.cpp index 7ed4971861766a..db9f939463651a 100644 --- a/src/core/src/op/logical_not.cpp +++ b/src/core/src/op/logical_not.cpp @@ -2,22 +2,34 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/logical_not.hpp" + +#include "element_visitor.hpp" #include "itt.hpp" -#include "ngraph/op/not.hpp" -#include "ngraph/op/op.hpp" -#include "ngraph/op/util/elementwise_args.hpp" -#include "ngraph/runtime/host_tensor.hpp" -#include "ngraph/validation_util.hpp" -#include "openvino/reference/not.hpp" +#include "openvino/reference/logical_not.hpp" + +namespace ov { +namespace op { +namespace logical_not { -using namespace ngraph; -using namespace std; +struct Evaluate : element::NoAction { + using element::NoAction::visit; -op::v1::LogicalNot::LogicalNot(const Output& arg) : Op({arg}) { + template > + static result_type visit(const Tensor& in, Tensor& out, const size_t count) { + reference::logical_not(in.data(), out.data(), count); + return true; + } +}; +} // namespace logical_not + +namespace v1 { + +LogicalNot::LogicalNot(const Output& arg) : Op({arg}) { constructor_validate_and_infer_types(); } -void op::v1::LogicalNot::validate_and_infer_types() { +void LogicalNot::validate_and_infer_types() { OV_OP_SCOPE(v1_LogicalNot_validate_and_infer_types); const auto& element_type = get_input_element_type(0); // No boolean element_type validation for backward compatibility @@ -25,64 +37,43 @@ void op::v1::LogicalNot::validate_and_infer_types() { set_output_type(0, element_type, arg_pshape); } -shared_ptr op::v1::LogicalNot::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr LogicalNot::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v1_LogicalNot_clone_with_new_inputs); check_new_args_count(this, new_args); - return make_shared(new_args.at(0)); -} - -OPENVINO_SUPPRESS_DEPRECATED_START -namespace notop { -namespace { -template -inline bool evaluate(const HostTensorPtr& arg0, const HostTensorPtr& out, const size_t count) { - using T = typename element_type_traits::value_type; - ov::reference::logical_not(arg0->get_data_ptr(), out->get_data_ptr(), count); - return true; + return std::make_shared(new_args.at(0)); } -bool evaluate_not(const HostTensorPtr& arg0, const HostTensorPtr& out, const size_t count) { - bool rc = true; - out->set_unary(arg0); +bool LogicalNot::evaluate(TensorVector& outputs, const TensorVector& inputs) const { + OV_OP_SCOPE(v1_LogicalNot_evaluate); + OPENVINO_ASSERT(outputs.size() == 1); + OPENVINO_ASSERT(inputs.size() == 1); - switch (arg0->get_element_type()) { - OPENVINO_TYPE_CASE(evaluate_not, boolean, arg0, out, count); - 
OPENVINO_TYPE_CASE(evaluate_not, i32, arg0, out, count); - OPENVINO_TYPE_CASE(evaluate_not, i64, arg0, out, count); - OPENVINO_TYPE_CASE(evaluate_not, u32, arg0, out, count); - OPENVINO_TYPE_CASE(evaluate_not, u64, arg0, out, count); - OPENVINO_TYPE_CASE(evaluate_not, f16, arg0, out, count); - OPENVINO_TYPE_CASE(evaluate_not, f32, arg0, out, count); - default: - rc = false; - break; - } - return rc; -} -} // namespace -} // namespace notop + outputs[0].set_shape(inputs[0].get_shape()); -bool op::v1::LogicalNot::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - OV_OP_SCOPE(v1_LogicalNot_evaluate); - OPENVINO_SUPPRESS_DEPRECATED_START - OPENVINO_ASSERT(validate_host_tensor_vector(outputs, 1) && validate_host_tensor_vector(inputs, 1)); - OPENVINO_SUPPRESS_DEPRECATED_END - return notop::evaluate_not(inputs[0], outputs[0], inputs[0]->get_element_count()); + using namespace ov::element; + return IfTypeOf::apply( + inputs[0].get_element_type(), + inputs[0], + outputs[0], + shape_size(inputs[0].get_shape())); } -bool op::v1::LogicalNot::has_evaluate() const { +bool LogicalNot::has_evaluate() const { OV_OP_SCOPE(v1_LogicalNot_has_evaluate); switch (get_input_element_type(0)) { - case ngraph::element::boolean: - case ngraph::element::i32: - case ngraph::element::i64: - case ngraph::element::u32: - case ngraph::element::u64: - case ngraph::element::f16: - case ngraph::element::f32: + case element::boolean: + case element::f16: + case element::f32: + case element::i32: + case element::i64: + case element::u32: + case element::u64: return true; default: - break; + return false; } - return false; } + +} // namespace v1 +} // namespace op +} // namespace ov From d82cd839ce2fe3c738b42e33eb61f094833846c8 Mon Sep 17 00:00:00 2001 From: Zhang Yi Date: Wed, 18 Oct 2023 17:21:53 +0800 Subject: [PATCH 250/257] [PaddleFT]Simplify slice converter by opset8::slice (#20508) --- src/frontends/paddle/src/op/slice_ops.hpp | 68 +++---------------- .../test_models/gen_scripts/generate_slice.py | 2 +- 2 files changed, 12 insertions(+), 58 deletions(-) diff --git a/src/frontends/paddle/src/op/slice_ops.hpp b/src/frontends/paddle/src/op/slice_ops.hpp index dc2a609ba18513..1f8798bc00e91f 100644 --- a/src/frontends/paddle/src/op/slice_ops.hpp +++ b/src/frontends/paddle/src/op/slice_ops.hpp @@ -33,63 +33,16 @@ NamedOutputs slice_op(const NodeContext& node, const bool& stride_input) { Output start_idx_node = idx_node("StartsTensor", "StartsTensorList", "starts", node); Output end_idx_node = idx_node("EndsTensor", "EndsTensorList", "ends", node); Output strides_idx_node; - if (stride_input) - strides_idx_node = idx_node("StridesTensor", "StridesTensorList", "strides", node); - - // The following process is: - // Given: - // data = [ [1, 2, 3, 4], [5, 6, 7, 8], ] // shape is: [2, 4] - // axes = [0] - // starts = [1] - // ends = [2] - // Our process is: - // 1. Get 'axes': [0, 1], 'starts', 'ends' - // 2. Get data shape: [2,4] and dims: 2 - // 3. Create two tensor t1 and t2, shape is the dims from step2: 2. t1: [0, 0], t2: [INT_MAX, INT_MAX] - // 4. Use 'ScatterNDUpdate' to update some elements in t1, the updated indexes are coming from 'axes', the contents - // are coming from 'starts', t1: [1, 0]; apply the similar process to t2 - // 5. Call 'StrideSlice' with t1 and t2 - // Why using ScatterNDUpdate is that 'axes' may be discontinuous. 
- - // the shape of input, such as [2, 4] - const auto shape_node = std::make_shared(data, element::Type_t::i32); - // the input dim, such as [2] - const auto rank_node = std::make_shared(shape_node, element::i32); - const auto const_0_node = default_opset::Constant::create(element::i32, {}, {0}); - const auto const_max_node = default_opset::Constant::create(element::i32, {}, {INT_MAX}); - const auto const_1_node = default_opset::Constant::create(element::i32, {}, {1}); - // t1: [0, 0] - const auto start_node = std::make_shared(const_0_node, rank_node); - // t2: [INT_MAX, INT_MAX] - const auto end_node = std::make_shared(const_max_node, rank_node); - const auto strides_node = std::make_shared(const_1_node, rank_node); - const auto axes_node = default_opset::Constant::create(element::i32, {axes.size(), 1}, axes); - // update t1 - const auto fixed_start_node = - std::make_shared(start_node, axes_node, start_idx_node); - // update t2 - const auto fixed_end_node = std::make_shared(end_node, axes_node, end_idx_node); - std::shared_ptr stride_slice_node; if (stride_input) { - const auto fixed_strides_node = - std::make_shared(strides_node, axes_node, strides_idx_node); - - stride_slice_node = std::make_shared(data, - fixed_start_node, - fixed_end_node, - fixed_strides_node, - std::vector{0}, - std::vector{0}); + strides_idx_node = idx_node("StridesTensor", "StridesTensorList", "strides", node); } else { - stride_slice_node = std::make_shared(data, - fixed_start_node, - fixed_end_node, - std::vector{0}, - std::vector{0}); + strides_idx_node = + default_opset::Constant::create(element::i32, start_idx_node.get_shape(), std::vector{1}); } - + const auto axes_node = default_opset::Constant::create(element::i32, {axes.size()}, axes); + const auto slice_node = + std::make_shared(data, start_idx_node, end_idx_node, strides_idx_node, axes_node); const auto decrease_axis = node.get_attribute>("decrease_axis"); - if (decrease_axis.size() > 0) { PartialShape input_shape = data.get_partial_shape(); PADDLE_OP_CHECK(node, @@ -99,18 +52,19 @@ NamedOutputs slice_op(const NodeContext& node, const bool& stride_input) { // according to paddle slice_op, when all axes are decreased, output shape is [1], instead of scalar. 
// Ref: paddle/fluid/operators/slice_op.h auto decreased_node = std::make_shared( - stride_slice_node, + slice_node, std::make_shared(element::i64, Shape{1}, 1), false); return node.default_single_output_mapping({decreased_node}, {"Out"}); } + const auto squeeze_index_node = default_opset::Constant::create(element::i32, {decrease_axis.size()}, decrease_axis); - const auto decreased_node = std::make_shared(stride_slice_node, squeeze_index_node); + const auto decreased_node = std::make_shared(slice_node, squeeze_index_node); return node.default_single_output_mapping({decreased_node}, {"Out"}); + } else { + return node.default_single_output_mapping({slice_node}, {"Out"}); } - - return node.default_single_output_mapping({stride_slice_node}, {"Out"}); } } // namespace } // namespace op diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/generate_slice.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_slice.py index f89e18d7500c65..f2a6d1a8769295 100644 --- a/src/frontends/paddle/tests/test_models/gen_scripts/generate_slice.py +++ b/src/frontends/paddle/tests/test_models/gen_scripts/generate_slice.py @@ -20,7 +20,7 @@ def slice(name : str, x, axes : list, start : list, end : list): with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): node_x = paddle.static.data(name='x', shape=x.shape, dtype = data_type) - out = paddle.fluid.layers.slice(node_x, axes = axes, starts = start, ends = end) + out = paddle.slice(node_x, axes = axes, starts = start, ends = end) cpu = paddle.static.cpu_places(1) exe = paddle.static.Executor(cpu[0]) From 4574fb112c62ee9faec29507ba1dd2dadacde035 Mon Sep 17 00:00:00 2001 From: Tatiana Savina Date: Wed, 18 Oct 2023 11:27:10 +0200 Subject: [PATCH 251/257] change snippet name (#20538) --- .../preprocessing_overview/preprocessing_usecase_save.md | 4 ++-- docs/snippets/ov_preprocessing.cpp | 4 ++-- docs/snippets/ov_preprocessing.py | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide/preprocessing_overview/preprocessing_usecase_save.md b/docs/articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide/preprocessing_overview/preprocessing_usecase_save.md index 71de4a7e5cc82f..2cb7270c14daf0 100644 --- a/docs/articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide/preprocessing_overview/preprocessing_usecase_save.md +++ b/docs/articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide/preprocessing_overview/preprocessing_usecase_save.md @@ -68,14 +68,14 @@ in the model preparation script for such a case. .. doxygensnippet:: docs/snippets/ov_preprocessing.py :language: Python - :fragment: ov:preprocess:save + :fragment: ov:preprocess:save_model .. tab-item:: C++ :sync: cpp .. doxygensnippet:: docs/snippets/ov_preprocessing.cpp :language: cpp - :fragment: ov:preprocess:save + :fragment: ov:preprocess:save_model Application Code - Load Model to Target Device diff --git a/docs/snippets/ov_preprocessing.cpp b/docs/snippets/ov_preprocessing.cpp index f559a7a5a1aef4..176953f46691f1 100644 --- a/docs/snippets/ov_preprocessing.cpp +++ b/docs/snippets/ov_preprocessing.cpp @@ -165,7 +165,7 @@ int main() { //! [ov:preprocess:save_headers] void save_example() { - //! [ov:preprocess:save] + //! 
[ov:preprocess:save_model] // ======== Step 0: read original model ========= ov::Core core; std::shared_ptr model = core.read_model("/path/to/some_model.onnx"); @@ -200,7 +200,7 @@ void save_example() { std::string xml = "/path/to/some_model_saved.xml"; std::string bin = "/path/to/some_model_saved.bin"; ov::serialize(model, xml, bin); - //! [ov:preprocess:save] + //! [ov:preprocess:save_model] } diff --git a/docs/snippets/ov_preprocessing.py b/docs/snippets/ov_preprocessing.py index 23cd30548115ad..8a8f4ce212b4f7 100644 --- a/docs/snippets/ov_preprocessing.py +++ b/docs/snippets/ov_preprocessing.py @@ -184,7 +184,7 @@ def custom_abs(output: Output): model_path = get_path_to_model() serialized_model_path = get_path_to_model() -# ! [ov:preprocess:save] +# ! [ov:preprocess:save_model] # ======== Step 0: read original model ========= core = Core() model = core.read_model(model=model_path) @@ -219,7 +219,7 @@ def custom_abs(output: Output): # ======== Step 3: Save the model ================ serialize(model, serialized_model_path) -# ! [ov:preprocess:save] +# ! [ov:preprocess:save_model] path_to_cache_dir = get_temp_dir() From 90ad4c618ded8ec273a11e410c5f57f2cbab60db Mon Sep 17 00:00:00 2001 From: Vladimir Paramuzov Date: Wed, 18 Oct 2023 15:56:35 +0400 Subject: [PATCH 252/257] [GPU] Grouped decompression scale/zp support (#20491) --- .../graph_optimizer/prepare_quantization.cpp | 39 +++ .../src/graph/impls/ocl/fully_connected.cpp | 7 - .../intel_gpu/src/graph/layout_optimizer.cpp | 2 +- .../fully_connected_gpu_bf_tiled.cl | 60 ++++- .../fully_connected_gpu_bfyx_ref.cl | 32 ++- .../fully_connected_kernel_base.cpp | 12 + .../convert_fc_to_compressed.cpp | 85 ++++-- .../transformations/convert_matmul_to_fc.cpp | 2 +- .../dynamic/matmul_weights_decompression.cpp | 241 ++++++++++++------ .../test_cases/fully_connected_gpu_test.cpp | 71 +++--- 10 files changed, 389 insertions(+), 162 deletions(-) diff --git a/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_quantization.cpp b/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_quantization.cpp index d1b125aa8f1df5..f55d99b6a5fa80 100644 --- a/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_quantization.cpp +++ b/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_quantization.cpp @@ -2,6 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "fully_connected_inst.h" #include "pooling_inst.h" #include "quantize_inst.h" #include "reorder_inst.h" @@ -847,6 +848,42 @@ bool prepare_quantization::optimize_quantize(program &p, quantize_node& quantize return true; } +static void optimize_weights_decompression_parameters(fully_connected_node& fc_node, program& p) { + auto fc_prim = fc_node.get_primitive(); + if (!fc_prim->compressed_weights) + return; + + auto reorder_bfyx_to_fbyx = [&](size_t dep_id) { + auto& dep = fc_node.get_dependency(dep_id); + auto target_layout = dep.get_output_layout(); + target_layout.format = format::fbyx; + auto reorder_prim = std::make_shared(dep.id() + "_reorder", dep.id(), target_layout); + p.add_intermediate(reorder_prim, fc_node, dep_id, true); + fc_node.get_dependency(dep_id).recalc_output_layout(false); + }; + + auto need_reorder = [&](size_t dep_id) { + auto dep_layout = fc_node.get_input_layout(dep_id); + auto dep_pshape = dep_layout.get_partial_shape(); + + auto groups_count = dep_pshape[dep_pshape.size() - 1].get_length(); + + return groups_count > 1; + }; + + auto decompression_scale_idx = !fc_node.bias_term() ? 
2 : 3; + if (need_reorder(decompression_scale_idx)) { + reorder_bfyx_to_fbyx(decompression_scale_idx); + } + + if (!fc_prim->decompression_zero_point.empty()) { + auto decompression_zp_idx = decompression_scale_idx + 1; + if (need_reorder(decompression_zp_idx)) { + reorder_bfyx_to_fbyx(decompression_zp_idx); + } + } +} + void prepare_quantization::run(program& p) { auto itr = p.get_processing_order().begin(); while (itr != p.get_processing_order().end()) { @@ -859,6 +896,8 @@ void prepare_quantization::run(program& p) { remove_fake_reorders(p, node->as()); } else if (node->is_type()) { prepare_asymmetric_quantization(p, node->as()); + } else if (node->is_type()) { + optimize_weights_decompression_parameters(node->as(), p); } } } diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/fully_connected.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/fully_connected.cpp index 43ce081d2f69ea..19007a481579f6 100644 --- a/src/plugins/intel_gpu/src/graph/impls/ocl/fully_connected.cpp +++ b/src/plugins/intel_gpu/src/graph/impls/ocl/fully_connected.cpp @@ -110,20 +110,13 @@ struct fully_connected_impl : typed_primitive_impl_ocl { bool has_scale = !primitive->decompression_scale.empty(); size_t offset = primitive->bias.empty() ? 2 : 3; - const auto& weights_pshape = input1_layout.get_partial_shape(); if (has_scale) { auto scale_layout = input_layouts[offset++]; - if (input1_pshape.size() != 2) { - scale_layout.set_partial_shape(reshape_to_2d(scale_layout.get_partial_shape(), weights_pshape[0], primitive->weights_rank)); - } layouts.push_back(scale_layout); } if (has_zp) { auto zp_layout = input_layouts[offset]; - if (input1_pshape.size() != 2) { - zp_layout.set_partial_shape(reshape_to_2d(zp_layout.get_partial_shape(), weights_pshape[0], primitive->weights_rank)); - } layouts.push_back(zp_layout); } diff --git a/src/plugins/intel_gpu/src/graph/layout_optimizer.cpp b/src/plugins/intel_gpu/src/graph/layout_optimizer.cpp index f5f6c1ac16d82a..69b1e12fa3b4ae 100644 --- a/src/plugins/intel_gpu/src/graph/layout_optimizer.cpp +++ b/src/plugins/intel_gpu/src/graph/layout_optimizer.cpp @@ -307,7 +307,7 @@ bool layout_optimizer::can_fuse_reorder(program_node& prev, program_node& next, (fmt_prev == format::b_fs_yx_fsv4 && prev_output_layout.feature() % 32 == 0 && prev_output_layout.spatial(0) == 1 && - prev_output_layout.spatial(1) == 1))) + prev_output_layout.spatial(1) == 1)) && is_input_reorder(prev, next)) return true; if (next.is_type() && fmt_prev == format::b_fs_yx_fsv16 && fmt_next == format::b_fs_yx_fsv4 && is_input_idx(0)) diff --git a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/fully_connected_gpu_bf_tiled.cl b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/fully_connected_gpu_bf_tiled.cl index d4992801a80447..f6dacec4a73c80 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/fully_connected_gpu_bf_tiled.cl +++ b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/fully_connected_gpu_bf_tiled.cl @@ -120,7 +120,7 @@ KERNEL(fc)( uint input_offset = out_b * TILE_IN_B_PITCH + INPUT0_OFFSET; uint weights_offset = out_f * INPUT_ELEMENTS_COUNT; -#if COMPRESSED_WEIGHTS +#if COMPRESSED_WEIGHTS && DECOMPRESSION_SCALE_GROUPS_NUM == 1 #if DECOMPRESSION_SCALE_LENGTH > 1 && DECOMPRESSION_SCALE_LENGTH % SIMD == 0 ACCUMULATOR_VEC_TYPE d_scale = BLOCK_READN(ACCUMULATOR_TYPE, TILE_OFM, decompression_scale, out_f); #elif DECOMPRESSION_SCALE_LENGTH > 1 && DECOMPRESSION_SCALE_LENGTH % SIMD != 0 @@ -134,9 +134,11 @@ KERNEL(fc)( ACCUMULATOR_VEC_TYPE d_scale = decompression_scale[0]; #endif - 
#if !DECOMPRESSION_ZP_TERM - ACCUMULATOR_VEC_TYPE d_zp = 0; - #elif DECOMPRESSION_ZP_LENGTH > 1 && DECOMPRESSION_ZP_LENGTH % SIMD == 0 + ACCUMULATOR_TYPE* d_scales = (ACCUMULATOR_TYPE*)(&d_scale); +#endif + +#if COMPRESSED_WEIGHTS && DECOMPRESSION_ZP_TERM && DECOMPRESSION_ZP_GROUPS_NUM == 1 + #if DECOMPRESSION_ZP_LENGTH > 1 && DECOMPRESSION_ZP_LENGTH % SIMD == 0 ACCUMULATOR_VEC_TYPE d_zp = BLOCK_READN(ACCUMULATOR_TYPE, TILE_OFM, decompression_zp, out_f); #elif DECOMPRESSION_ZP_LENGTH > 1 && DECOMPRESSION_ZP_LENGTH % SIMD != 0 ACCUMULATOR_VEC_TYPE d_zp = 0; @@ -148,9 +150,7 @@ KERNEL(fc)( #else ACCUMULATOR_VEC_TYPE d_zp = decompression_zp[0]; #endif - - ACCUMULATOR_TYPE* ds = (ACCUMULATOR_TYPE*)(&d_scale); - ACCUMULATOR_TYPE* dzp = (ACCUMULATOR_TYPE*)(&d_zp); + ACCUMULATOR_TYPE* d_zps = (ACCUMULATOR_TYPE*)(&d_zp); #endif #if REALIGN_FP16_OFFSET @@ -193,7 +193,28 @@ KERNEL(fc)( ACCUMULATOR_TYPE* w = (ACCUMULATOR_TYPE*)(&wei); unroll_for(uint kii = 0; kii < TILE_K; ++kii) { unroll_for(uint fi = 0; fi < TILE_OFM; ++fi) { - w[kii * TILE_OFM + fi] = (w[kii * TILE_OFM + fi] - dzp[fi]) * ds[fi]; + const uint w_idx = kii * TILE_OFM + fi; + const uint offset_ofm = out_f + fi*SIMD + sglid; + #if DECOMPRESSION_SCALE_GROUPS_NUM > 1 + const uint scale_offset = (offset_ofm % DECOMPRESSION_SCALE_BATCH_NUM) * DECOMPRESSION_SCALE_BATCH_PITCH + + ((kii + ki*TILE_K + ni*TILE_IFM*SIMD) / DECOMPRESSION_SCALE_GROUP_SIZE)*DECOMPRESSION_SCALE_FEATURE_PITCH; + ACCUMULATOR_TYPE ds = decompression_scale[scale_offset]; + #else + ACCUMULATOR_TYPE ds = d_scales[fi]; + #endif + + #if DECOMPRESSION_ZP_TERM + #if DECOMPRESSION_ZP_GROUPS_NUM > 1 + const uint zp_offset = (offset_ofm % DECOMPRESSION_ZP_BATCH_NUM) * DECOMPRESSION_ZP_BATCH_PITCH + + ((kii + ki*TILE_K + ni*TILE_IFM*SIMD) / DECOMPRESSION_ZP_GROUP_SIZE) * DECOMPRESSION_ZP_FEATURE_PITCH; + ACCUMULATOR_TYPE dzp = decompression_zp[zp_offset]; + #else + ACCUMULATOR_TYPE dzp = d_zps[fi]; + #endif + #else + ACCUMULATOR_TYPE dzp = ACCUMULATOR_VAL_ZERO; + #endif + w[w_idx] = (w[w_idx] - dzp) * ds; } } #endif @@ -230,7 +251,28 @@ KERNEL(fc)( ACCUMULATOR_TYPE* w = (ACCUMULATOR_TYPE*)(&wei); unroll_for(uint kii = 0; kii < TILE_K; ++kii) { unroll_for(uint fi = 0; fi < TILE_OFM; ++fi) { - w[kii * TILE_OFM + fi] = (w[kii * TILE_OFM + fi] - dzp[fi]) * ds[fi]; + const uint w_idx = kii * TILE_OFM + fi; + uint offset_ofm = out_f + fi*SIMD + get_sub_group_local_id(); + #if DECOMPRESSION_SCALE_GROUPS_NUM > 1 + const uint scale_offset = (offset_ofm % DECOMPRESSION_SCALE_BATCH_NUM) * DECOMPRESSION_SCALE_BATCH_PITCH + + ((kii + ki*TILE_K + ni*TILE_IFM*SIMD) / DECOMPRESSION_SCALE_GROUP_SIZE)*DECOMPRESSION_SCALE_FEATURE_PITCH; + ACCUMULATOR_TYPE ds = decompression_scale[scale_offset]; + #else + ACCUMULATOR_TYPE ds = d_scales[fi]; + #endif + + #if DECOMPRESSION_ZP_TERM + #if DECOMPRESSION_ZP_GROUPS_NUM > 1 + const uint zp_offset = (offset_ofm % DECOMPRESSION_ZP_BATCH_NUM) * DECOMPRESSION_ZP_BATCH_PITCH + + ((kii + ki*TILE_K + ni*TILE_IFM*SIMD) / DECOMPRESSION_ZP_GROUP_SIZE) * DECOMPRESSION_ZP_FEATURE_PITCH; + ACCUMULATOR_TYPE dzp = decompression_zp[zp_offset]; + #else + ACCUMULATOR_TYPE dzp = d_zps[fi]; + #endif + #else + ACCUMULATOR_TYPE dzp = ACCUMULATOR_VAL_ZERO; + #endif + w[w_idx] = (w[w_idx] - dzp) * ds; } } #endif diff --git a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/fully_connected_gpu_bfyx_ref.cl b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/fully_connected_gpu_bfyx_ref.cl index 72e8d6d7d3d855..6374e65c4f5fcc 100644 --- 
a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/fully_connected_gpu_bfyx_ref.cl +++ b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/fully_connected_gpu_bfyx_ref.cl @@ -36,18 +36,24 @@ KERNEL(fc)( for (uint x = 0; x < INPUT0_SIZE_X; ++x) { const uint input0_idx = INPUT0_GET_INDEX(b, ofm, y, x); - const uint filter_idx = GET_FILTER_INDEX(FILTER, 0, oym, y, 0, 0); #if COMPRESSED_WEIGHTS - ACCUMULATOR_TYPE filter_compressed = TO_ACCUMULATOR_TYPE(weights[filter_idx]); #if DECOMPRESSION_ZP_TERM - ACCUMULATOR_TYPE zp = TO_ACCUMULATOR_TYPE(decompression_zp[DECOMPRESSION_ZP_GET_INDEX_SAFE(0, oym, 0, 0)]); + const uint zp_offset = DECOMPRESSION_ZP_GET_INDEX_SAFE(oym, y / DECOMPRESSION_ZP_GROUP_SIZE, 0, 0); + ACCUMULATOR_TYPE zp = TO_ACCUMULATOR_TYPE(decompression_zp[zp_offset]); #else ACCUMULATOR_TYPE zp = ACCUMULATOR_VAL_ZERO; #endif - DECOMPRESSION_SCALE_TYPE scale = decompression_scale[DECOMPRESSION_SCALE_GET_INDEX_SAFE(0, oym, 0, 0)]; - ACCUMULATOR_TYPE filter_val = (TO_ACCUMULATOR_TYPE(filter_compressed) - TO_ACCUMULATOR_TYPE(zp)) * scale; + const uint decomp_offset = DECOMPRESSION_SCALE_GET_INDEX_SAFE(oym, y / DECOMPRESSION_SCALE_GROUP_SIZE, 0, 0); + DECOMPRESSION_SCALE_TYPE scale = decompression_scale[decomp_offset]; + #endif + + #if COMPRESSED_WEIGHTS_INT8 + const uint filter_idx = GET_FILTER_INDEX(FILTER, 0, oym, y, 0, 0); + ACCUMULATOR_TYPE filter_compressed = TO_ACCUMULATOR_TYPE(weights[filter_idx]); + ACCUMULATOR_TYPE filter_val = (filter_compressed - zp) * scale; dotProd += (ACCUMULATOR_TYPE)(input[input0_idx]) * (ACCUMULATOR_TYPE)(filter_val); #else + const uint filter_idx = GET_FILTER_INDEX(FILTER, 0, oym, y, 0, 0); dotProd += (ACCUMULATOR_TYPE)(input[input0_idx]) * (ACCUMULATOR_TYPE)(weights[filter_idx]); #endif } @@ -67,19 +73,25 @@ KERNEL(fc)( for (uint x = 0; x < INPUT0_SIZE_X; ++x) { const uint input0_idx = INPUT0_GET_INDEX(b, ifm, y, x); - const uint filter_idx = GET_FILTER_INDEX(FILTER, 0, ofm, ifm, y, x); #if COMPRESSED_WEIGHTS - FILTER_TYPE filter_compressed = weights[filter_idx]; #if DECOMPRESSION_ZP_TERM - ACCUMULATOR_TYPE zp = decompression_zp[DECOMPRESSION_ZP_GET_INDEX_SAFE(0, ofm, 0, 0)]; + const uint zp_offset = DECOMPRESSION_ZP_GET_INDEX_SAFE(ofm, ifm / DECOMPRESSION_ZP_GROUP_SIZE, 0, 0); + ACCUMULATOR_TYPE zp = TO_ACCUMULATOR_TYPE(decompression_zp[zp_offset]); #else ACCUMULATOR_TYPE zp = ACCUMULATOR_VAL_ZERO; #endif + const uint decomp_offset = DECOMPRESSION_SCALE_GET_INDEX_SAFE(ofm, ifm / DECOMPRESSION_SCALE_GROUP_SIZE, 0, 0); + DECOMPRESSION_SCALE_TYPE scale = decompression_scale[decomp_offset]; + #endif - DECOMPRESSION_SCALE_TYPE scale = decompression_scale[DECOMPRESSION_SCALE_GET_INDEX_SAFE(0, ofm, 0, 0)]; - ACCUMULATOR_TYPE filter_val = (TO_ACCUMULATOR_TYPE(filter_compressed) - TO_ACCUMULATOR_TYPE(zp)) * scale; + + #if COMPRESSED_WEIGHTS_INT8 + const uint filter_idx = GET_FILTER_INDEX(FILTER, 0, ofm, ifm, y, x); + FILTER_TYPE filter_compressed = weights[filter_idx]; + ACCUMULATOR_TYPE filter_val = (TO_ACCUMULATOR_TYPE(filter_compressed) - zp) * scale; dotProd += (ACCUMULATOR_TYPE)(input[input0_idx]) * (ACCUMULATOR_TYPE)(filter_val); #else + const uint filter_idx = GET_FILTER_INDEX(FILTER, 0, ofm, ifm, y, x); dotProd += (ACCUMULATOR_TYPE)(input[input0_idx]) * (ACCUMULATOR_TYPE)(weights[filter_idx]); #endif } diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/fully_connected/fully_connected_kernel_base.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/fully_connected/fully_connected_kernel_base.cpp index 
3e9eb35cdaaff0..a75d35469837f7 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/fully_connected/fully_connected_kernel_base.cpp +++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/fully_connected/fully_connected_kernel_base.cpp @@ -24,11 +24,23 @@ JitConstants FullyConnectedKernelBase::GetJitConstants(const fully_connected_par if (params.compressed) { jit.AddConstants({MakeJitConstant("COMPRESSED_WEIGHTS", 1)}); + if (params.weights.GetDType() == WeightsType::INT8 || params.weights.GetDType() == WeightsType::UINT8) { + jit.AddConstants({MakeJitConstant("COMPRESSED_WEIGHTS_INT8", 1)}); + } + + const size_t scale_groups_num = params.decompression_scale.Feature().v; + const size_t scale_group_size = params.weights.IFM().v / params.decompression_scale.Feature().v; jit.AddConstants({MakeJitConstant("DECOMPRESSION_SCALE_TERM", 1)}); jit.AddConstants({MakeJitConstant("DECOMPRESSION_SCALE", params.decompression_scale)}); + jit.AddConstants({MakeJitConstant("DECOMPRESSION_SCALE_GROUPS_NUM", scale_groups_num)}); + jit.AddConstants({MakeJitConstant("DECOMPRESSION_SCALE_GROUP_SIZE", scale_group_size)}); if (params.has_decompression_zp) { + const size_t zp_groups_num = params.decompression_zero_point.Feature().v; + const size_t zp_group_size = params.weights.IFM().v / params.decompression_zero_point.Feature().v; jit.AddConstants({MakeJitConstant("DECOMPRESSION_ZP_TERM", 1)}); jit.AddConstants({MakeJitConstant("DECOMPRESSION_ZP", params.decompression_zero_point)}); + jit.AddConstants({MakeJitConstant("DECOMPRESSION_ZP_GROUPS_NUM", zp_groups_num)}); + jit.AddConstants({MakeJitConstant("DECOMPRESSION_ZP_GROUP_SIZE", zp_group_size)}); } } diff --git a/src/plugins/intel_gpu/src/plugin/transformations/convert_fc_to_compressed.cpp b/src/plugins/intel_gpu/src/plugin/transformations/convert_fc_to_compressed.cpp index a1c4d60b81977c..0ff0e1fd0bf258 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations/convert_fc_to_compressed.cpp +++ b/src/plugins/intel_gpu/src/plugin/transformations/convert_fc_to_compressed.cpp @@ -3,16 +3,19 @@ // #include "convert_fc_to_compressed.hpp" +#include #include "intel_gpu/op/fully_connected.hpp" #include "intel_gpu/op/fully_connected_compressed.hpp" +#include "openvino/op/constant.hpp" #include "openvino/op/subtract.hpp" #include "openvino/op/matmul.hpp" #include "openvino/op/convert.hpp" #include "openvino/op/transpose.hpp" #include "openvino/op/reshape.hpp" #include "openvino/core/rt_info.hpp" +#include "openvino/pass/pattern/op/pattern.hpp" #include "openvino/pass/pattern/op/wrap_type.hpp" #include "openvino/pass/pattern/op/or.hpp" #include "transformations/utils/utils.hpp" @@ -23,7 +26,19 @@ namespace intel_gpu { ConvertFullyConnectedToFullyConnectedCompressed::ConvertFullyConnectedToFullyConnectedCompressed() { using namespace ov::pass::pattern; - auto weights_m = wrap_type(consumers_count(1)); + auto compressed_constant = [](const ov::Output& output) { + return (output.get_element_type() == ov::element::u8 || + output.get_element_type() == ov::element::i8) && + output.get_target_inputs().size() == 1; + }; + + auto reshape_3d_to_2d = [](const ov::Output& output) { + auto in_ps = output.get_node()->get_input_partial_shape(0); + auto out_ps = output.get_node()->get_output_partial_shape(0); + return in_ps.rank().is_static() && out_ps.rank().is_static() && in_ps.size() == 3 && out_ps.size() == 2; + }; + + auto weights_m = wrap_type(compressed_constant); auto convert_m = wrap_type({weights_m}); auto sub_const_m = wrap_type(consumers_count(1)); @@ -34,11 
+49,15 @@ ConvertFullyConnectedToFullyConnectedCompressed::ConvertFullyConnectedToFullyCon auto mul_no_sub_m = wrap_type({convert_m, mul_const_m}); auto mul_m = std::make_shared(OutputVector{mul_with_sub_m, mul_no_sub_m}); + auto reshape_const_m = wrap_type(); + auto reshape_m = wrap_type({mul_m, reshape_const_m}, reshape_3d_to_2d); + + auto transpose_input = std::make_shared(OutputVector{reshape_m, mul_m}); auto transpose_const_m = wrap_type(); - auto transpose_m = wrap_type({mul_m, transpose_const_m}); - auto weights_input_m = std::make_shared(ov::OutputVector{mul_m, transpose_m}); + auto transpose_m = wrap_type({transpose_input, transpose_const_m}); auto data_m = any_input(); + auto weights_input_m = std::make_shared(ov::OutputVector{reshape_m, transpose_m, mul_m}); auto fully_connected_m = wrap_type({data_m, weights_input_m}); ov::matcher_pass_callback callback = [=](ov::pass::pattern::Matcher& m) { @@ -52,53 +71,73 @@ ConvertFullyConnectedToFullyConnectedCompressed::ConvertFullyConnectedToFullyCon return false; } + bool has_transpose = pattern_map.count(transpose_m); + auto scale_shape = pattern_map.at(mul_const_m).get_shape(); + bool grouped = std::count_if(scale_shape.begin(), scale_shape.end(), [](size_t d) { return d > 1; }) > 1; + + auto reshape_const_to_2d = [has_transpose, grouped](std::shared_ptr node) { + auto constant = std::dynamic_pointer_cast(node); + OPENVINO_ASSERT(constant != nullptr); + ov::Shape current_shape = constant->get_shape(); + if (current_shape.size() == 2) + return constant; + OPENVINO_ASSERT(current_shape.size() == 3); + + auto new_shape = (has_transpose || !grouped) ? ov::Shape{current_shape[0] * current_shape[1], current_shape[2]} + : ov::Shape{current_shape[0], current_shape[1] * current_shape[2]}; + + return std::make_shared(*constant, new_shape); + }; + const auto& fc_input_a = fc->get_input_node_shared_ptr(0); - const auto& scale = pattern_map.at(mul_const_m).get_node_shared_ptr(); + const auto& scale = reshape_const_to_2d(pattern_map.at(mul_const_m).get_node_shared_ptr()); std::shared_ptr optional_zero_point = nullptr; - ov::NodeVector nodes_to_copy_info{pattern_map.at(fully_connected_m).get_node_shared_ptr(), - pattern_map.at(convert_m).get_node_shared_ptr()}; - if (pattern_map.count(mul_no_sub_m)) { - nodes_to_copy_info.push_back(pattern_map.at(mul_no_sub_m).get_node_shared_ptr()); - } - if (pattern_map.count(mul_with_sub_m)) { - nodes_to_copy_info.push_back(pattern_map.at(mul_with_sub_m).get_node_shared_ptr()); - } - const bool with_zero_point = pattern_map.count(subtract_m) > 0; if (with_zero_point) { - optional_zero_point = pattern_map.at(sub_const_m).get_node_shared_ptr(); - nodes_to_copy_info.push_back(subtract_m); + optional_zero_point = reshape_const_to_2d(pattern_map.at(sub_const_m).get_node_shared_ptr()); } - std::shared_ptr fc_input_b = pattern_map.at(weights_m).get_node_shared_ptr(); - if (pattern_map.count(transpose_m)) { + std::shared_ptr fc_input_b = reshape_const_to_2d(pattern_map.at(weights_m).get_node_shared_ptr()); + std::shared_ptr fc_input_scale = scale; + std::shared_ptr fc_input_zp = optional_zero_point; + if (has_transpose) { const auto& transpose = pattern_map.at(transpose_m).get_node_shared_ptr(); - const auto& transpose_const = pattern_map.at(transpose_const_m).get_node_shared_ptr(); + std::shared_ptr transpose_const = pattern_map.at(transpose_const_m).get_node_shared_ptr(); + if (ov::shape_size(transpose_const->get_shape()) != fc_input_b->get_output_partial_shape(0).size()) { + std::vector 
new_order(fc_input_b->get_output_partial_shape(0).size()); + std::iota(new_order.begin(), new_order.end(), 0); + std::swap(new_order[new_order.size() - 1], new_order[new_order.size() - 2]); + transpose_const = std::make_shared(ov::element::i32, ov::Shape{new_order.size()}, new_order); + } + fc_input_b = transpose->clone_with_new_inputs({ fc_input_b->output(0), transpose_const }); + fc_input_scale = transpose->clone_with_new_inputs({ scale->output(0), transpose_const }); + if (with_zero_point) + fc_input_zp = transpose->clone_with_new_inputs({ optional_zero_point->output(0), transpose_const }); } std::shared_ptr new_fc = nullptr; if (with_zero_point) { new_fc = std::make_shared(fc_input_a, fc_input_b, - scale, - optional_zero_point, + fc_input_scale, + fc_input_zp, fc->get_output_type()); } else { new_fc = std::make_shared(fc_input_a, fc_input_b, - scale, + fc_input_scale, fc->get_output_type()); } new_fc->set_friendly_name(fc->get_friendly_name()); - ov::copy_runtime_info(nodes_to_copy_info, new_fc); + ov::copy_runtime_info(m.get_matched_nodes(), new_fc); ov::replace_node(fc, new_fc); return true; }; - auto m = std::make_shared(fully_connected_m); + auto m = std::make_shared(fully_connected_m, "ConvertFullyConnectedToFullyConnectedCompressed"); this->register_matcher(m, callback); } diff --git a/src/plugins/intel_gpu/src/plugin/transformations/convert_matmul_to_fc.cpp b/src/plugins/intel_gpu/src/plugin/transformations/convert_matmul_to_fc.cpp index a30c88e7d1492d..2caf3cd4d69850 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations/convert_matmul_to_fc.cpp +++ b/src/plugins/intel_gpu/src/plugin/transformations/convert_matmul_to_fc.cpp @@ -160,7 +160,7 @@ ConvertMatMulToFullyConnected::ConvertMatMulToFullyConnected() { return true; }; - auto m = std::make_shared(matmul_m); + auto m = std::make_shared(matmul_m, "ConvertMatMulToFullyConnected"); this->register_matcher(m, callback); } diff --git a/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/matmul_weights_decompression.cpp b/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/matmul_weights_decompression.cpp index 313015da3406ba..75bdb9f0ec71a7 100644 --- a/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/matmul_weights_decompression.cpp +++ b/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/matmul_weights_decompression.cpp @@ -2,19 +2,21 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ov_models/builders.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/matmul.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "transformations/rt_info/decompression.hpp" -using namespace ngraph; +using namespace ov; using namespace ov::test; namespace SubgraphTestsDefinitions { /* - * Subtract_const(U8) + * Subtract_const(U8/NF4/U4) * / - * Weights(U8) Convert(F32) + * Weights(U8/NF4/U4) Convert(F32) * | / * Convert(F32) Reshape(optional) * \ / Multiply_const(F32) @@ -29,7 +31,20 @@ namespace SubgraphTestsDefinitions { * | * Bias */ -using MatmulWeightsDecompressionParams = std::tuple, // input shapes + +struct ShapeParams { + ShapeParams() = default; + ShapeParams(InputShape data_shape, ov::Shape weights_shape, int weights_group_size = -1) + : data_shape(std::move(data_shape)), + weights_shape(std::move(weights_shape)), + weights_group_size(weights_group_size) {} + + InputShape data_shape; + ov::Shape weights_shape; + // Decompression group size. 
If the value is equal to -1, ordinary decompression is used + int weights_group_size; +}; +using MatmulWeightsDecompressionParams = std::tuple, // class MatmulWeightsDecompression : public testing::WithParamInterface, public SubgraphBaseTest { public: static std::string get_test_case_name(testing::TestParamInfo obj) { - std::vector inputShapes; + ShapeParams shape_params; ov::test::ElementType weights_precision; ov::test::ElementType activations_precision; bool transpose; @@ -48,7 +63,7 @@ class MatmulWeightsDecompression : public testing::WithParamInterface additional_config; - std::tie(inputShapes, + std::tie(shape_params, weights_precision, activations_precision, transpose, @@ -57,20 +72,9 @@ class MatmulWeightsDecompression : public testing::WithParamInterface init_subgraph(std::vector& inputShapes, - const ov::element::Type data_precision, - const ov::element::Type weights_precision, - const bool transpose_weights, - const bool add_subtract, - const bool reshape_on_decompression) { - ov::ParameterVector params{std::make_shared(data_precision, inputShapes[0])}; + std::shared_ptr init_subgraph(const ov::PartialShape& data_shape, + const ov::Shape& weights_shape, + const int group_size, + const ov::element::Type data_precision, + const ov::element::Type weights_precision, + const bool transpose_weights, + const bool add_subtract, + const bool reshape_on_decompression) { + ov::ParameterVector params{std::make_shared(data_precision, data_shape)}; + const auto weights_subgraph = init_compressed_weights_subgraph(weights_shape, + group_size, + data_precision, + weights_precision, + transpose_weights, + add_subtract, + reshape_on_decompression); + + auto mat_mul = std::make_shared(params[0], weights_subgraph); + return std::make_shared(NodeVector{mat_mul}, params, "MatmulWeightsDecompression"); + } + + std::shared_ptr init_compressed_weights_subgraph(const ov::Shape& weights_shape, + const int group_size, + const ov::element::Type data_precision, + const ov::element::Type weights_precision, + const bool transpose_weights, + const bool add_subtract, + const bool reshape_on_decompression_constant) { auto transpose_if_necessary = [&](const ov::Shape& shape) { - if (!transpose_weights) - return shape; - auto transposed_shape = shape; - std::swap(*transposed_shape.rbegin(), *(transposed_shape.rbegin() + 1)); - return transposed_shape; + auto result_shape = shape; + if (transpose_weights) + std::swap(*result_shape.rbegin(), *(result_shape.rbegin() + 1)); + return result_shape; }; - auto weights_shape = transpose_if_necessary(inputShapes[1].to_shape()); - auto weights = ngraph::builder::makeConstant(weights_precision, weights_shape, {}, true); + const bool group_decompression = group_size != -1; + // Weights has shape [I, O], where + // I - input channels + // O - output channels + // In case of group decompression, input channels dimension is split into 2: I -> [N, G], where + // N - number of groups + // G - group size + auto transformed_weights_shape = transpose_if_necessary(weights_shape); + if (group_decompression) { + OPENVINO_ASSERT(weights_shape[0] % group_size == 0, + "Weights output channels count (", + weights_shape[0], + ") must be divisible by decompression group size (", + group_size, + ")."); + auto in_channel_idx = transpose_weights ? 
transformed_weights_shape.size() - 1 : transformed_weights_shape.size() - 2; + transformed_weights_shape[in_channel_idx] = weights_shape[0] / group_size; + transformed_weights_shape.insert(transformed_weights_shape.begin() + in_channel_idx + 1, group_size); + } + auto weights_tensor = ov::test::utils::create_and_fill_tensor(weights_precision, transformed_weights_shape); + auto weights = std::make_shared(weights_tensor); weights->set_friendly_name("Compressed_weights"); auto weights_convert = std::make_shared(weights, data_precision); std::shared_ptr mul_parent = weights_convert; - auto output_channels = transpose_weights ? *(weights_shape.rbegin() + 1) : *weights_shape.rbegin(); - auto scaleshift_target_shape = transpose_if_necessary(ov::Shape{1, output_channels}); - auto scaleshift_const_shape = reshape_on_decompression ? ov::Shape{output_channels} : scaleshift_target_shape; + auto output_channels = *weights_shape.rbegin(); + + // Decompression constants shape: + // Ordinary decompression: [O, 1] + // Group decompression: [O, N, 1] + ov::Shape scaleshift_target_shape{output_channels}; + scaleshift_target_shape.insert(scaleshift_target_shape.begin(), group_decompression ? weights_shape[0] / group_size : 1); + scaleshift_target_shape = transpose_if_necessary(scaleshift_target_shape); + if (group_decompression) { + auto in_channel_idx = transpose_weights ? scaleshift_target_shape.size() - 1 : scaleshift_target_shape.size() - 2; + scaleshift_target_shape.insert(scaleshift_target_shape.begin() + in_channel_idx + 1, 1); + } + + auto scaleshift_const_shape = scaleshift_target_shape; + if (reshape_on_decompression_constant) + scaleshift_const_shape.erase(std::remove(scaleshift_const_shape.begin(), scaleshift_const_shape.end(), 1), scaleshift_const_shape.end()); if (add_subtract) { - auto shift_const = ngraph::builder::makeConstant(weights_precision, scaleshift_const_shape, {}, true); + auto shift_tensor = ov::test::utils::create_and_fill_tensor(weights_precision, scaleshift_const_shape); + auto shift_const = std::make_shared(shift_tensor); std::shared_ptr shift_convert = std::make_shared(shift_const, data_precision); - if (reshape_on_decompression) { + if (reshape_on_decompression_constant) { auto shift_reshape_const = ov::opset10::Constant::create(ov::element::i32, {scaleshift_target_shape.size()}, scaleshift_target_shape); auto shift_reshape = std::make_shared(shift_convert, shift_reshape_const, false); shift_convert = shift_reshape; @@ -122,32 +179,36 @@ class MatmulWeightsDecompression : public testing::WithParamInterface(weights_convert, shift_convert); } - std::shared_ptr scale_const = ngraph::builder::makeConstant(data_precision, scaleshift_const_shape, {}, true); - if (reshape_on_decompression) { + auto scale_tensor = ov::test::utils::create_and_fill_tensor(data_precision, scaleshift_const_shape, 1, -0.5, 10000); + std::shared_ptr scale_const = std::make_shared(scale_tensor); + if (reshape_on_decompression_constant) { auto scale_reshape_const = ov::opset10::Constant::create(ov::element::i32, {scaleshift_target_shape.size()}, scaleshift_target_shape); auto scale_reshape = std::make_shared(scale_const, scale_reshape_const, false); scale_const = scale_reshape; } - auto multiply = std::make_shared(mul_parent, scale_const); + std::shared_ptr last_node = std::make_shared(mul_parent, scale_const); - std::shared_ptr matmul_weights = multiply; + if (group_decompression) { + auto reshape_target_shape = transpose_weights ? 
std::vector{-1, static_cast(weights_shape[0])} + : std::vector{static_cast(weights_shape[0]), -1}; + auto target_shape_node = ov::opset10::Constant::create(ov::element::i32, {reshape_target_shape.size()}, reshape_target_shape); + last_node = std::make_shared(last_node, target_shape_node, false); + } if (transpose_weights) { - const size_t rank = matmul_weights->get_output_partial_shape(0).size(); + const size_t rank = last_node->get_output_partial_shape(0).size(); std::vector order(rank); std::iota(order.begin(), order.end(), 0); std::swap(*order.rbegin(), *(order.rbegin() + 1)); auto transpose_constant = ov::opset10::Constant::create(ov::element::i32, {rank}, order); - auto transpose = std::make_shared(matmul_weights, transpose_constant); - matmul_weights = transpose; + last_node = std::make_shared(last_node, transpose_constant); } - auto matMul = builder::makeMatMul(params[0], matmul_weights); - return std::make_shared(NodeVector{matMul}, params, "MatmulWeightsDecompression"); + return last_node; } void SetUp() override { targetDevice = ov::test::utils::DEVICE_GPU; - std::vector inputShapes; + ShapeParams shape_params; ov::test::ElementType weights_precision; ov::test::ElementType activations_precision; bool transpose_weights; @@ -155,7 +216,7 @@ class MatmulWeightsDecompression : public testing::WithParamInterface additional_config; - std::tie(inputShapes, + std::tie(shape_params, weights_precision, activations_precision, transpose_weights, @@ -164,14 +225,47 @@ class MatmulWeightsDecompression : public testing::WithParamInterface 200) so fp16 representation & math error is larger than default threshold + if (weights_input_channels > 2048) { + abs_threshold = 4.0f; + } else { + abs_threshold = 1.0f; + } + } + } + + void generate_inputs(const std::vector& target_input_static_shapes) override { + inputs.clear(); + const auto& model_inputs = function->inputs(); + for (size_t i = 0; i < model_inputs.size(); ++i) { + const auto& model_input = model_inputs[i]; + ov::Tensor tensor = ov::test::utils::create_and_fill_tensor(model_input.get_element_type(), + target_input_static_shapes[i], + 2, + -1, + 10000); + inputs.insert({model_input.get_node_shared_ptr(), tensor}); + } } - void checkResults() { + void check_results() { const auto& test_param = GetParam(); ov::test::ElementType weights_precision = std::get<1>(test_param); for (const auto& n : compiledModel.get_runtime_model()->get_ordered_ops()) { @@ -185,24 +279,20 @@ class MatmulWeightsDecompression : public testing::WithParamInterface activations_precisions = {ov::element::f32, ov::element::f16}; const std::vector weights_precisions = {ov::element::u8}; -const std::vector> input_shapes_basic = { - {{{-1, -1, -1}, {{1, 4, 16}, {10, 16, 16}}}, {{}, {{16, 32}}}}, - {{{}, {{10, 40, 496}}}, {{}, {{1, 496, 240}}}}, - {{{}, {{1, 4, 48}}}, {{}, {{48, 256}}}}, - {{{}, {{11, 339, 377}}}, {{}, {{377, 335}}}}, - {{{}, {{1, 4, 32}}}, {{}, {{32, 256}}}}, - {{{}, {{1, 4, 512}}}, {{}, {{512, 256}}}}, - {{{}, {{1, 16, 32}}}, {{}, {{32, 64}}}}, - {{{}, {{2, 4, 32}}}, {{}, {{32, 65}}}}, - {{{}, {{3, 12, 768}}}, {{}, {{768, 1024}}}}, - {{{}, {{11, 339, 577}}}, {{}, {{577, 335}}}}, +const std::vector input_shapes_basic = { + {{{-1, -1, -1}, {{1, 4, 16}, {10, 16, 16}}}, {16, 32}}, + {{{}, {{1, 4, 16}}}, {16, 32}, 2ul}, + {{{}, {{1, 4, 16}}}, {1, 16, 32}}, + {{{}, {{10, 40, 496}}}, {1, 496, 240}}, + {{{}, {{1, 4, 48}}}, {48, 256}}, + {{{}, {{11, 339, 377}}}, {377, 335}} }; INSTANTIATE_TEST_SUITE_P(smoke_MatMulCompressedWeights_basic, @@ -216,15 +306,16 @@ 
INSTANTIATE_TEST_SUITE_P(smoke_MatMulCompressedWeights_basic, ::testing::Values(std::map())), MatmulWeightsDecompression::get_test_case_name); -const std::vector> input_shapes_corner_cases_basic = { - {{{-1, -1, -1}, {{1, 4, 16}}}, {{}, {{1, 16, 32}}}}, - {{{}, {{1, 4, 16}}}, {{}, {{1, 16, 32}}}}, - {{{-1, -1, -1}, {{1, 4, 16}}}, {{}, {{16, 32}}}}, - {{{-1, -1, -1, -1}, {{1, 1, 4, 16}}}, {{}, {{1, 1, 16, 32}}}}, - {{{}, {{1, 1, 4, 16}}}, {{}, {{1, 1, 16, 32}}}}, +const std::vector input_shapes_corner_cases_basic = { + {{{-1, -1, -1}, {{1, 4, 16}}}, {1, 16, 32}}, + {{{-1, -1, -1}, {{1, 4, 16}}}, {16, 32}}, + {{{-1, -1, 16}, {{1, 4, 16}}}, {16, 32}, 4}, }; -const std::vector> input_shapes_corner_cases_big = { - {{{-1, -1, -1}, {{10, 40, 480}, {11, 40, 480}}}, {{}, {{1, 480, 256}}}}, +const std::vector input_shapes_corner_cases_big = { + {{{-1, -1, -1}, {{10, 40, 480}, {11, 40, 480}}}, {1, 480, 256}}, + {{{-1, -1, -1}, {{1, 1, 4096}}}, {4096, 4096}, 128}, + {{{-1, -1, -1}, {{1, 1, 4096}}}, {4096, 4096}}, + {{{-1, 4096}, {{1, 4096}}}, {4096, 4096}, 128}, }; const std::vector transpose_weights = {true, false}; @@ -242,7 +333,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_MatMulCompressedWeights_corner_cases_basic, ::testing::Values(std::map{})), MatmulWeightsDecompression::get_test_case_name); -INSTANTIATE_TEST_SUITE_P(smoke_MatMulCompressedWeights_corner_cases_big, +INSTANTIATE_TEST_SUITE_P(MatMulCompressedWeights_corner_cases_big, MatmulWeightsDecompression, ::testing::Combine(::testing::ValuesIn(input_shapes_corner_cases_big), ::testing::ValuesIn(weights_precisions), diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/fully_connected_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/fully_connected_gpu_test.cpp index 71301447bb28b9..dc23440c48af67 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/fully_connected_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/fully_connected_gpu_test.cpp @@ -663,21 +663,22 @@ TEST(fully_connected_gpu, compressed_scale_zp_bias) { auto& engine = get_test_engine(); auto input_mem = engine.allocate_memory({ {1, 2, 4}, data_types::f32, format::bfyx }); - auto weights_mem = engine.allocate_memory({ {8, 4}, data_types::f32, format::bfyx }); + auto weights_mem = engine.allocate_memory({ {8, 4}, data_types::u8, format::bfyx }); auto bias_mem = engine.allocate_memory({ {1, 1, 8}, data_types::f32, format::bfyx }); - auto scale_mem = engine.allocate_memory({ {1, 1, 8}, data_types::f32, format::bfyx }); - auto zp_mem = engine.allocate_memory({ {1, 1, 8}, data_types::f32, format::bfyx }); + auto scale_mem = engine.allocate_memory({ {8, 1}, data_types::f32, format::bfyx }); + auto zp_mem = engine.allocate_memory({ {8, 1}, data_types::f32, format::bfyx }); set_values(input_mem, { -0.5f, 2.0f, 0.5f, 1.0f, 0.5f, -2.0f, -0.5f, -1.0f }); - set_values(weights_mem, { 1.5f, 1.0f, 0.5f, -1.0f, - 0.0f, 0.5f, 0.5f, -0.5f, - -2.0f, -0.5f, 1.0f, 1.5f, - -2.0f, -0.5f, 1.0f, 1.5f, - 2.0f, 0.5f, -1.0f, -1.5f, - 2.0f, 0.5f, -1.0f, -1.5f, - -1.5f, -1.0f, -0.5f, 1.0f, - 0.0f, -0.5f, 0.5f, 0.5f }); + set_values(weights_mem, { 1, 2, 3, 4, + 5, 6, 7, 8, + 9, 10, 11, 12, + 13, 14, 15, 0, + 15, 14, 13, 12, + 11, 10, 9, 8, + 7, 6, 5, 4, + 3, 2, 1, 0}); + set_values(bias_mem, { 1.0f, -2.0f, 3.0f, -4.0f, 5.0f, -6.0f, 7.0f, 2.0f }); set_values(scale_mem, { 2.0f, 4.0f, -2.0f, -4.0f, 0.5f, -0.5f, 2.0f, 2.0f }); @@ -709,8 +710,7 @@ TEST(fully_connected_gpu, compressed_scale_zp_bias) { ov::PartialShape expected_shape{1, 2, 8}; ASSERT_EQ(expected_shape, 
output_mem->get_layout().get_partial_shape()); - std::vector expected_result = {-4.0f, -23.0f, 11.0f, 0.0f, -2.0f, -3.5f, -30.0f, -10.5f, - 6.0f, 19.0f, -5.0f, -8.0f, 12.0f, -8.5f, 44.0f, 14.5f}; + std::vector expected_result = {13.f, 58.f, -51.f, -108.f, 18.5f, -18.f, 1.f, -4.f, -11.f, -62.f, 57.f, 100.f, -8.5f, 6.f, 13.f, 8.f, }; for (size_t i = 0; i < expected_result.size(); i++) { ASSERT_EQ(expected_result[i], output_ptr[i]) << "i = " << i; @@ -721,20 +721,20 @@ TEST(fully_connected_gpu, compressed_scale_bias) { auto& engine = get_test_engine(); auto input_mem = engine.allocate_memory({ {1, 2, 4}, data_types::f32, format::bfyx }); - auto weights_mem = engine.allocate_memory({ {8, 4}, data_types::f32, format::bfyx }); + auto weights_mem = engine.allocate_memory({ {8, 4}, data_types::u8, format::bfyx }); auto bias_mem = engine.allocate_memory({ {1, 1, 8}, data_types::f32, format::bfyx }); auto scale_mem = engine.allocate_memory({ {1, 1, 8}, data_types::f32, format::bfyx }); set_values(input_mem, { -0.5f, 2.0f, 0.5f, 1.0f, 0.5f, -2.0f, -0.5f, -1.0f }); - set_values(weights_mem, { 1.5f, 1.0f, 0.5f, -1.0f, - 0.0f, 0.5f, 0.5f, -0.5f, - -2.0f, -0.5f, 1.0f, 1.5f, - -2.0f, -0.5f, 1.0f, 1.5f, - 2.0f, 0.5f, -1.0f, -1.5f, - 2.0f, 0.5f, -1.0f, -1.5f, - -1.5f, -1.0f, -0.5f, 1.0f, - 0.0f, -0.5f, 0.5f, 0.5f }); + set_values(weights_mem, { 1, 2, 3, 4, + 5, 6, 7, 8, + 9, 10, 11, 12, + 13, 14, 15, 0, + 15, 14, 13, 12, + 11, 10, 9, 8, + 7, 6, 5, 4, + 3, 2, 1, 0}); set_values(bias_mem, { 1.0f, -2.0f, 3.0f, -4.0f, 5.0f, -6.0f, 7.0f, -8.0f }); set_values(scale_mem, { 2.0f, 4.0f, -2.0f, -4.0f, 0.5f, -0.5f, 2.0f, 1.0f }); @@ -764,8 +764,7 @@ TEST(fully_connected_gpu, compressed_scale_bias) { ov::PartialShape expected_shape{1, 2, 8}; ASSERT_EQ(expected_shape, output_mem->get_layout().get_partial_shape()); - std::vector expected_result = {2.0f, 1.0f, -1.0f, -12.0f, 4.0f, -5.0f, 6.0f, -8.25f, - 0.0f, -5.0f, 7.0f, 4.0f, 6.0f, -7.0f, 8.0f, -7.75f}; + std::vector expected_result = {19.f, 40.f, 69.f, 54.f, 83.f, 48.f, 37.f, -2.f, -17.f, -44.f, -63.f, -62.f, -73.f, -60.f, -23.f, -14.f }; for (size_t i = 0; i < expected_result.size(); i++) { ASSERT_EQ(expected_result[i], output_ptr[i]) << "i = " << i; @@ -776,19 +775,19 @@ TEST(fully_connected_gpu, compressed_scale_fp16) { auto& engine = get_test_engine(); auto input_mem = engine.allocate_memory({ { 2, 4}, data_types::f16, format::bfyx }); - auto weights_mem = engine.allocate_memory({ {8, 4}, data_types::f16, format::bfyx }); - auto scale_mem = engine.allocate_memory({ {1, 8}, data_types::f16, format::bfyx }); + auto weights_mem = engine.allocate_memory({ {8, 4}, data_types::u8, format::bfyx }); + auto scale_mem = engine.allocate_memory({ {8, 1}, data_types::f16, format::bfyx }); set_values(input_mem, { ov::float16(-0.5f), ov::float16(2.0f), ov::float16(0.5f), ov::float16(1.0f), ov::float16(0.5f), ov::float16(-2.0f), ov::float16(-0.5f), ov::float16(-1.0f) }); - set_values(weights_mem, {ov::float16( 1.5f), ov::float16( 1.0f), ov::float16( 0.5f), ov::float16(-1.0f), - ov::float16( 0.0f), ov::float16( 0.5f), ov::float16( 0.5f), ov::float16(-0.5f), - ov::float16(-2.0f), ov::float16(-0.5f), ov::float16( 1.0f), ov::float16( 1.5f), - ov::float16(-2.0f), ov::float16(-0.5f), ov::float16( 1.0f), ov::float16( 1.5f), - ov::float16( 2.0f), ov::float16( 0.5f), ov::float16(-1.0f), ov::float16(-1.5f), - ov::float16( 2.0f), ov::float16( 0.5f), ov::float16(-1.0f), ov::float16(-1.5f), - ov::float16(-1.5f), ov::float16(-1.0f), ov::float16(-0.5f), ov::float16( 1.0f), - ov::float16( 0.0f), 
ov::float16(-0.5f), ov::float16(0.5f), ov::float16( 0.5f) }); + set_values(weights_mem, { 1, 2, 3, 4, + 5, 6, 7, 8, + 9, 10, 11, 12, + 13, 14, 15, 0, + 15, 14, 13, 12, + 11, 10, 9, 8, + 7, 6, 5, 4, + 3, 2, 1, 0}); set_values(scale_mem, {ov::float16(2.0f), ov::float16(4.0f), ov::float16(-2.0f), ov::float16(-4.0f), ov::float16(0.5f), ov::float16(-0.5f), ov::float16(2.0f), ov::float16(2.0f)}); @@ -817,8 +816,8 @@ TEST(fully_connected_gpu, compressed_scale_fp16) { ASSERT_EQ(expected_shape, output_mem->get_layout().get_partial_shape()); std::vector expected_result = { - ov::float16(1.0f), ov::float16( 3.0f), ov::float16(-4.0f), ov::float16(-8.0f), ov::float16(-1.0f), ov::float16( 1.0f), ov::float16(-1.0f), ov::float16(-0.5f), - ov::float16(-1.0f), ov::float16(-3.0f), ov::float16( 4.0f), ov::float16( 8.0f), ov::float16( 1.0f), ov::float16(-1.0f), ov::float16( 1.0f), ov::float16( 0.5f)}; + ov::float16(18), ov::float16(84), ov::float16(-66), ov::float16(-116), ov::float16(19.5), ov::float16(-13.5), ov::float16(30), ov::float16(6), + ov::float16(-18), ov::float16(-84), ov::float16(66), ov::float16(116), ov::float16(-19.5), ov::float16(13.5), ov::float16(-30), ov::float16(-6) }; for (size_t i = 0; i < expected_result.size(); i++) { ASSERT_FLOAT_EQ(expected_result[i], output_ptr[i]) << "i = " << i; From 6e97b91a774a993b8a1995991336c74aa5198565 Mon Sep 17 00:00:00 2001 From: Sergey Shlyapnikov Date: Wed, 18 Oct 2023 16:12:15 +0400 Subject: [PATCH 253/257] [GPU] UsmHostTensor implementation (#20518) Co-authored-by: Vladimir Paramuzov --- .../intel_gpu/plugin/remote_allocators.hpp | 43 ---- .../intel_gpu/plugin/remote_tensor.hpp | 8 +- .../intel_gpu/plugin/sync_infer_request.hpp | 4 +- .../intel_gpu/plugin/usm_host_tensor.hpp | 42 ++++ .../src/plugin/remote_allocators.cpp | 37 --- .../intel_gpu/src/plugin/remote_context.cpp | 5 +- .../intel_gpu/src/plugin/remote_tensor.cpp | 90 ++++++-- .../src/plugin/sync_infer_request.cpp | 97 ++++---- .../intel_gpu/src/plugin/usm_host_tensor.cpp | 50 ++++ .../tests/common/subgraphs_builders.hpp | 50 ++++ .../ov_infer_request/iteration_chaining.cpp | 23 ++ .../subgraph_tests/dynamic/kv_cache.cpp | 217 ++++++++++++++++++ 12 files changed, 516 insertions(+), 150 deletions(-) delete mode 100644 src/plugins/intel_gpu/include/intel_gpu/plugin/remote_allocators.hpp create mode 100644 src/plugins/intel_gpu/include/intel_gpu/plugin/usm_host_tensor.hpp delete mode 100644 src/plugins/intel_gpu/src/plugin/remote_allocators.cpp create mode 100644 src/plugins/intel_gpu/src/plugin/usm_host_tensor.cpp create mode 100644 src/plugins/intel_gpu/tests/common/subgraphs_builders.hpp create mode 100644 src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/iteration_chaining.cpp create mode 100644 src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/kv_cache.cpp diff --git a/src/plugins/intel_gpu/include/intel_gpu/plugin/remote_allocators.hpp b/src/plugins/intel_gpu/include/intel_gpu/plugin/remote_allocators.hpp deleted file mode 100644 index 877c2c707f1791..00000000000000 --- a/src/plugins/intel_gpu/include/intel_gpu/plugin/remote_allocators.hpp +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright (C) 2022 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "openvino/runtime/so_ptr.hpp" - -#include - -namespace ov { -namespace intel_gpu { - -class RemoteTensorImpl; -class RemoteContextImpl; - -class USMHostAllocator final { -private: - ov::SoPtr _usm_host_tensor = { nullptr, nullptr }; - std::shared_ptr _context = 
nullptr; - -public: - using Ptr = std::shared_ptr; - - explicit USMHostAllocator(std::shared_ptr context) : _context(context) { } - - /** - * @brief Allocates memory - * @param size The size in bytes to allocate - * @return Handle to the allocated resource - */ - void* allocate(const size_t bytes, const size_t alignment = alignof(max_align_t)) noexcept; - /** - * @brief Releases handle and all associated memory resources which invalidates the handle. - * @return false if handle cannot be released, otherwise - true. - */ - bool deallocate(void* handle, const size_t bytes, size_t alignment = alignof(max_align_t)) noexcept; - - bool is_equal(const USMHostAllocator& other) const; -}; - -} // namespace intel_gpu -} // namespace ov diff --git a/src/plugins/intel_gpu/include/intel_gpu/plugin/remote_tensor.hpp b/src/plugins/intel_gpu/include/intel_gpu/plugin/remote_tensor.hpp index 74a07bbcbf38bf..f7f72cc77a16a3 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/plugin/remote_tensor.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/plugin/remote_tensor.hpp @@ -15,6 +15,7 @@ #endif #include "openvino/runtime/iremote_tensor.hpp" +#include "intel_gpu/runtime/memory_caps.hpp" #include "intel_gpu/runtime/memory.hpp" #include "intel_gpu/runtime/engine.hpp" #include "intel_gpu/plugin/common_utils.hpp" @@ -56,6 +57,8 @@ class RemoteTensorImpl : public ov::IRemoteTensor { cldnn::memory::ptr get_memory() const; cldnn::memory::ptr get_original_memory() const; + void set_memory(cldnn::memory::ptr memory, size_t actual_size); + std::shared_ptr get_context() const; private: @@ -76,8 +79,11 @@ class RemoteTensorImpl : public ov::IRemoteTensor { size_t m_hash = 0; bool supports_caching() const; + void update_hash(); void update_strides(); - void init_properties(); + void update_properties(); + + static TensorType allocation_type_to_tensor_type(cldnn::allocation_type t); }; } // namespace intel_gpu diff --git a/src/plugins/intel_gpu/include/intel_gpu/plugin/sync_infer_request.hpp b/src/plugins/intel_gpu/include/intel_gpu/plugin/sync_infer_request.hpp index 1fd6d035dd48af..3050846e2c2354 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/plugin/sync_infer_request.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/plugin/sync_infer_request.hpp @@ -90,9 +90,7 @@ class SyncInferRequest : public ov::ISyncInferRequest { bool need_lockable_mem) const; std::shared_ptr reinterpret_device_tensor(std::shared_ptr tensor, const ov::Shape new_shape) const; std::shared_ptr create_host_tensor(const ov::PartialShape& port_shape, const ov::element::Type& port_element_type) const; - std::shared_ptr create_device_tensor(const ov::Shape& pshape, ov::element::Type element_type, - bool need_lockable_memory = false, void* mem_ptr = nullptr) const; - std::shared_ptr create_shared_device_tensor(const ov::Shape& pshape, ov::element::Type element_type, void* usm_host_mem) const; + std::shared_ptr create_device_tensor(const ov::PartialShape& pshape, ov::element::Type element_type, bool need_lockable_memory = false) const; void allocate_inputs(); void allocate_outputs(); diff --git a/src/plugins/intel_gpu/include/intel_gpu/plugin/usm_host_tensor.hpp b/src/plugins/intel_gpu/include/intel_gpu/plugin/usm_host_tensor.hpp new file mode 100644 index 00000000000000..d410fa046651e5 --- /dev/null +++ b/src/plugins/intel_gpu/include/intel_gpu/plugin/usm_host_tensor.hpp @@ -0,0 +1,42 @@ +// Copyright (C) 2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/runtime/itensor.hpp" + +#include + 
+namespace ov { +namespace intel_gpu { + +class RemoteContextImpl; +class RemoteTensorImpl; + +class USMHostTensor : public ov::ITensor { +public: + USMHostTensor(std::shared_ptr context, const element::Type element_type, const Shape& shape); + explicit USMHostTensor(std::shared_ptr tensor); + + ~USMHostTensor() override = default; + + void* data(const element::Type& element_type) const override; + const element::Type& get_element_type() const override; + + const Shape& get_shape() const override; + + const Strides& get_strides() const override; + + void set_shape(ov::Shape new_shape) override; + + void set_memory(std::shared_ptr tensor); + + std::shared_ptr get_impl() const; + +private: + std::shared_ptr m_impl; +}; + +} // namespace intel_gpu +} // namespace ov diff --git a/src/plugins/intel_gpu/src/plugin/remote_allocators.cpp b/src/plugins/intel_gpu/src/plugin/remote_allocators.cpp deleted file mode 100644 index fe9ff746f04283..00000000000000 --- a/src/plugins/intel_gpu/src/plugin/remote_allocators.cpp +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "intel_gpu/plugin/remote_allocators.hpp" -#include "intel_gpu/plugin/remote_tensor.hpp" -#include "intel_gpu/plugin/remote_context.hpp" -#include - -namespace ov { -namespace intel_gpu { - -void* USMHostAllocator::allocate(const size_t bytes, const size_t /* alignment */) noexcept { - try { - ov::AnyMap params = { ov::intel_gpu::shared_mem_type(ov::intel_gpu::SharedMemType::USM_HOST_BUFFER) }; - _usm_host_tensor = _context->create_tensor(ov::element::u8, {bytes}, params); - if (auto casted = std::dynamic_pointer_cast(_usm_host_tensor._ptr)) { - return casted->get_original_memory()->get_internal_params().mem; - } - return nullptr; - } catch (std::exception&) { - return nullptr; - } -} - -bool USMHostAllocator::deallocate(void* /* handle */, const size_t /* bytes */, size_t /* alignment */) noexcept { - try { - _usm_host_tensor = {nullptr, nullptr}; - } catch (std::exception&) { } - return true; -} - -bool USMHostAllocator::is_equal(const USMHostAllocator& other) const { - return other._usm_host_tensor != nullptr && _usm_host_tensor != nullptr && other._usm_host_tensor._ptr == _usm_host_tensor._ptr; -} -} // namespace intel_gpu -} // namespace ov diff --git a/src/plugins/intel_gpu/src/plugin/remote_context.cpp b/src/plugins/intel_gpu/src/plugin/remote_context.cpp index 1b932226881db3..e4aefa00bb0f0f 100644 --- a/src/plugins/intel_gpu/src/plugin/remote_context.cpp +++ b/src/plugins/intel_gpu/src/plugin/remote_context.cpp @@ -6,7 +6,7 @@ #include "openvino/runtime/make_tensor.hpp" #include "intel_gpu/plugin/remote_context.hpp" #include "intel_gpu/plugin/remote_tensor.hpp" -#include "intel_gpu/plugin/remote_allocators.hpp" +#include "intel_gpu/plugin/usm_host_tensor.hpp" #include "intel_gpu/runtime/itt.hpp" #include "intel_gpu/runtime/device_query.hpp" #include @@ -111,8 +111,7 @@ std::shared_ptr RemoteContextImpl::get_this_shared_ptr() { ov::SoPtr RemoteContextImpl::create_host_tensor(const ov::element::Type type, const ov::Shape& shape) { if (m_engine->use_unified_shared_memory()) { - USMHostAllocator allocator(get_this_shared_ptr()); - return { ov::make_tensor(type, shape, allocator), nullptr }; + return { std::make_shared(get_this_shared_ptr(), type, shape), nullptr }; } else { return { ov::make_tensor(type, shape), nullptr }; } diff --git a/src/plugins/intel_gpu/src/plugin/remote_tensor.cpp b/src/plugins/intel_gpu/src/plugin/remote_tensor.cpp index 
a7c68cd8f81107..cd1011ea153bfe 100644 --- a/src/plugins/intel_gpu/src/plugin/remote_tensor.cpp +++ b/src/plugins/intel_gpu/src/plugin/remote_tensor.cpp @@ -2,17 +2,29 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "intel_gpu/plugin/common_utils.hpp" #include "intel_gpu/plugin/remote_context.hpp" #include "intel_gpu/plugin/remote_tensor.hpp" -#include "intel_gpu/plugin/remote_allocators.hpp" #include "intel_gpu/plugin/plugin.hpp" #include "intel_gpu/runtime/itt.hpp" +#include "intel_gpu/runtime/memory_caps.hpp" #include namespace ov { namespace intel_gpu { +TensorType RemoteTensorImpl::allocation_type_to_tensor_type(cldnn::allocation_type t) { + switch (t) { + case cldnn::allocation_type::cl_mem: return TensorType::BT_BUF_INTERNAL; + case cldnn::allocation_type::usm_host: return TensorType::BT_USM_HOST_INTERNAL; + case cldnn::allocation_type::usm_device: return TensorType::BT_USM_DEVICE_INTERNAL; + default: return TensorType::BT_EMPTY; + } + + return TensorType::BT_EMPTY; +} + RemoteTensorImpl::RemoteTensorImpl(RemoteContextImpl::Ptr context, const ov::Shape& shape, const ov::element::Type& element_type, @@ -28,20 +40,8 @@ RemoteTensorImpl::RemoteTensorImpl(RemoteContextImpl::Ptr context, , m_mem(mem) , m_surf(surf) , m_plane(plane) { - if (supports_caching()) { - m_hash = cldnn::hash_combine(0, m_mem); - m_hash = cldnn::hash_combine(m_hash, m_surf); - m_hash = cldnn::hash_combine(m_hash, plane); - m_hash = cldnn::hash_combine(m_hash, m_shape.size()); - m_hash = cldnn::hash_combine(m_hash, element_type.hash()); - for (const auto& d : m_shape) { - m_hash = cldnn::hash_combine(m_hash, d); - } - } - - update_strides(); + update_hash(); allocate(); - init_properties(); } RemoteTensorImpl::~RemoteTensorImpl() { @@ -82,12 +82,15 @@ const AnyMap& RemoteTensorImpl::get_properties() const { m_shape = shape; if (ov::shape_size(shape) > m_memory_object->count()) { - OPENVINO_ASSERT(!is_shared(), "Cannot call setShape for Tensor created on top of preallocated memory if shape was increased."); + GPU_DEBUG_TRACE_DETAIL << "Remote realloc" << std::endl; + OPENVINO_ASSERT(!is_shared(), "Cannot call set_shape for Tensor created on top of preallocated memory if shape was increased."); if (!deallocate()) { - OPENVINO_THROW("Cannot deallocate tensor while an attempt to enlarge tensor area in setShape."); + OPENVINO_THROW("Cannot deallocate tensor while an attempt to enlarge tensor area in set_shape."); } allocate(); + } else { + update_strides(); } } @@ -108,23 +111,39 @@ void RemoteTensorImpl::allocate() { if (enable_caching) { m_memory_object = context->try_get_cached_memory(m_hash); - if (m_memory_object) + if (m_memory_object) { + update_properties(); + update_strides(); return; + } } auto& engine = context->get_engine(); + // Currently, clDeviceMemAllocINTEL returns memory address allocated to other input blob if the current blob is empty + // W/A for this issue: + // Allocate with non-empty shape and then reinterprete with original shape + auto shape_copy = m_shape; + for (auto &i : shape_copy) { + if (i == 0) + i = 1; + } + + m_layout.set_partial_shape(shape_copy); + + const bool reset = false; + switch (m_mem_type) { case TensorType::BT_BUF_INTERNAL: { - m_memory_object = engine.allocate_memory(m_layout, cldnn::allocation_type::cl_mem); + m_memory_object = engine.allocate_memory(m_layout, cldnn::allocation_type::cl_mem, reset); break; } case TensorType::BT_USM_HOST_INTERNAL: { - m_memory_object = engine.allocate_memory(m_layout, cldnn::allocation_type::usm_host); + m_memory_object = 
engine.allocate_memory(m_layout, cldnn::allocation_type::usm_host, reset); break; } case TensorType::BT_USM_DEVICE_INTERNAL: { - m_memory_object = engine.allocate_memory(m_layout, cldnn::allocation_type::usm_device); + m_memory_object = engine.allocate_memory(m_layout, cldnn::allocation_type::usm_device, reset); break; } case TensorType::BT_BUF_SHARED: { @@ -161,6 +180,9 @@ void RemoteTensorImpl::allocate() { m_memory_object.reset(); } + update_properties(); + update_strides(); + if (enable_caching) context->add_to_cache(m_hash, m_memory_object); } @@ -181,6 +203,19 @@ bool RemoteTensorImpl::supports_caching() const { return is_shared(); } +void RemoteTensorImpl::update_hash() { + if (supports_caching()) { + m_hash = cldnn::hash_combine(0, m_mem); + m_hash = cldnn::hash_combine(m_hash, m_surf); + m_hash = cldnn::hash_combine(m_hash, m_plane); + m_hash = cldnn::hash_combine(m_hash, m_shape.size()); + m_hash = cldnn::hash_combine(m_hash, m_element_type.hash()); + for (const auto& d : m_shape) { + m_hash = cldnn::hash_combine(m_hash, d); + } + } +} + bool RemoteTensorImpl::is_surface() const noexcept { return m_mem_type == TensorType::BT_SURF_SHARED || m_mem_type == TensorType::BT_IMG_SHARED || @@ -196,11 +231,24 @@ cldnn::memory::ptr RemoteTensorImpl::get_original_memory() const { return m_memory_object; } +void RemoteTensorImpl::set_memory(cldnn::memory::ptr memory, size_t actual_size) { + auto engine = m_memory_object->get_engine(); + m_layout = memory->get_layout(); + m_shape = m_layout.get_shape(); + + auto actual_layout = m_layout; + actual_layout.set_partial_shape({ov::Dimension(actual_size)}); + m_memory_object = engine->reinterpret_buffer(*memory, actual_layout); + + update_properties(); + update_strides(); +} + std::shared_ptr RemoteTensorImpl::get_context() const { return m_context; } -void RemoteTensorImpl::init_properties() { +void RemoteTensorImpl::update_properties() { OPENVINO_ASSERT(is_allocated(), "[GPU] Can't initialize RemoteTensorImpl parameters as memory was not allocated"); auto params = m_memory_object->get_internal_params(); diff --git a/src/plugins/intel_gpu/src/plugin/sync_infer_request.cpp b/src/plugins/intel_gpu/src/plugin/sync_infer_request.cpp index 61ac1424c7649e..9c097d222fdc1b 100644 --- a/src/plugins/intel_gpu/src/plugin/sync_infer_request.cpp +++ b/src/plugins/intel_gpu/src/plugin/sync_infer_request.cpp @@ -2,6 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "intel_gpu/plugin/usm_host_tensor.hpp" #include "openvino/runtime/make_tensor.hpp" #include "openvino/core/preprocess/input_tensor_info.hpp" #include "openvino/core/parallel.hpp" @@ -10,7 +11,6 @@ #include "intel_gpu/plugin/sync_infer_request.hpp" #include "intel_gpu/plugin/remote_context.hpp" -#include "intel_gpu/plugin/remote_allocators.hpp" #include "intel_gpu/plugin/remote_tensor.hpp" #include "intel_gpu/plugin/compiled_model.hpp" #include "intel_gpu/plugin/variable_state.hpp" @@ -41,6 +41,15 @@ inline bool can_use_usm_host(const cldnn::engine& engine) { return can_use_usm; } +inline ov::Shape get_tensor_shape(const ov::PartialShape& pshape) { + ov::Shape res(pshape.size()); + for (size_t i = 0; i < pshape.size(); i++) { + res[i] = pshape[i].is_dynamic() ? 
0 : pshape[i].get_length(); + } + + return res; +} + inline std::string get_port_name(const ov::Output& port, const bool is_legacy_api) { std::string name; // TODO: Should use tensor name as the port name, but many legacy tests still use legacy name @@ -72,7 +81,7 @@ void convert_and_copy(const void* src_ptr, ov::element::Type src_et, void* dst_p return; if (src_et == dst_et) { - std::memcpy(dst_ptr, src_ptr, size); + std::memcpy(dst_ptr, src_ptr, size * src_et.size()); return; } @@ -425,6 +434,7 @@ void SyncInferRequest::wait() { OV_ITT_SCOPED_TASK(itt::domains::intel_gpu_plugin, "SyncInferRequest::wait::reinterpret_memory"); OPENVINO_ASSERT(!output_memory->get_layout().data_padding, "[GPU] Unexpected padding in output buffer"); output_memory = m_graph->get_engine().reinterpret_buffer(*output_memory, output_layout); + GPU_DEBUG_TRACE_DETAIL << name << " model output: " << output_memory->buffer_ptr() << std::endl; } OPENVINO_ASSERT(m_user_outputs.count(name) > 0, "[GPU] Output ", name, " is not found in output tensors map"); @@ -433,6 +443,12 @@ void SyncInferRequest::wait() { auto remote_ptr = std::dynamic_pointer_cast(output_tensor); bool is_remote = remote_ptr != nullptr; + if (is_remote) { + GPU_DEBUG_TRACE_DETAIL << name << " handle output tensor (remote): " << remote_ptr->get_original_memory()->buffer_ptr() << std::endl; + } else { + GPU_DEBUG_TRACE_DETAIL << name << " handle output tensor (host): " << output_tensor->data() << std::endl; + } + bool need_output_update = output_layout.bytes_count() == 0 || (output_memory && output_tensor->get_byte_size() != output_memory->size()); if (need_output_update) { OV_ITT_SCOPED_TASK(itt::domains::intel_gpu_plugin, "SyncInferRequest::wait::update_output"); @@ -444,6 +460,19 @@ void SyncInferRequest::wait() { OPENVINO_ASSERT(ov::shape_size(port.get_shape()) == ov::shape_size(mem_shape), "[GPU] Unexpected elements count for output tensor"); mem_shape = port.get_shape(); } + if (port.get_partial_shape().is_dynamic()) { + bool need_reallocate = true; + auto usm_host_tensor = std::dynamic_pointer_cast(output_tensor); + if (usm_host_tensor && output_memory) + need_reallocate = usm_host_tensor->get_impl()->get_original_memory()->size() < output_memory->size(); + + if (need_reallocate) { + auto& shape_predictor = m_graph->get_network()->get_shape_predictor(); + auto actual_memory_shape = predict_shape(name, mem_shape, output_tensor->get_element_type(), shape_predictor); + output_tensor->set_shape(actual_memory_shape); + } + } + output_tensor->set_shape(mem_shape); } @@ -453,6 +482,8 @@ void SyncInferRequest::wait() { auto dst_ptr = static_cast(output_tensor->data()); bool same_mem = same_host_mem(output_memory, dst_ptr); if (!same_mem && output_memory->size()) { + GPU_DEBUG_TRACE_DETAIL << name << " copy from: " << output_memory->buffer_ptr() << " to " + << (!is_remote ? 
output_tensor->data() : remote_ptr->get_original_memory()->buffer_ptr()) << std::endl; if (auto ev = copy_output_data(output_memory, *output_tensor)) { copy_events.push_back(ev); } @@ -492,22 +523,13 @@ void SyncInferRequest::setup_stream_graph() { std::shared_ptr SyncInferRequest::create_host_tensor(const ov::PartialShape& port_shape, const ov::element::Type& port_element_type) const { OV_ITT_SCOPED_TASK(itt::domains::intel_gpu_plugin, "SyncInferRequest::create_host_tensor"); - // Disable USM usage as USMHostAllocator may fail for attempt to allocate 0 bytes - // If we add WA for such case to avoid driver call, then deallocate method will return false and Blob::setShape call will throw an exception - bool use_usm = m_graph->get_engine().use_unified_shared_memory() && !port_shape.is_dynamic(); - - auto shape = port_shape.is_static() ? port_shape.to_shape() : ov::Shape(port_shape.size(), 0); - auto usm_allocator = USMHostAllocator(m_context); - return use_usm ? ov::make_tensor(port_element_type, shape, usm_allocator) - : ov::make_tensor(port_element_type, shape); + return m_context->create_host_tensor(port_element_type, get_tensor_shape(port_shape))._ptr; } -std::shared_ptr SyncInferRequest::create_device_tensor(const ov::Shape& shape, ov::element::Type element_type, - bool need_lockable_memory, void* mem_ptr) const { +std::shared_ptr SyncInferRequest::create_device_tensor(const ov::PartialShape& port_shape, ov::element::Type element_type, + bool need_lockable_memory) const { TensorType tensor_type = TensorType::BT_EMPTY; - if (mem_ptr) { - tensor_type = TensorType::BT_USM_SHARED; - } else if (m_graph->get_engine().use_unified_shared_memory()) { + if (m_graph->get_engine().use_unified_shared_memory()) { tensor_type = need_lockable_memory ? TensorType::BT_USM_HOST_INTERNAL : TensorType::BT_USM_DEVICE_INTERNAL; } else { tensor_type = TensorType::BT_BUF_INTERNAL; @@ -517,24 +539,10 @@ std::shared_ptr SyncInferRequest::create_device_tensor(const ov::Sh if (!can_use_usm_host(m_graph->get_engine()) && need_lockable_memory) tensor_type = TensorType::BT_BUF_INTERNAL; - // Currently, clDeviceMemAllocINTEL returns memory address allocated to other input blob if the current blob is empty - // W/A for this issue: - // Allocate with non-empty shape and then reinterprete with original shape - auto shape_copy = shape; - for (auto &i : shape_copy) { - if (i == 0) - i = 1; - } - return std::make_shared(m_context, - shape_copy, + get_tensor_shape(port_shape), element_type, - tensor_type, - mem_ptr); -} - -std::shared_ptr SyncInferRequest::create_shared_device_tensor(const ov::Shape& shape, ov::element::Type element_type, void* usm_host_mem) const { - return create_device_tensor(shape, element_type, false, usm_host_mem); + tensor_type); } TensorWrapper SyncInferRequest::create_or_share_device_tensor(const TensorWrapper& user_tensor_wrapper, @@ -546,17 +554,12 @@ TensorWrapper SyncInferRequest::create_or_share_device_tensor(const TensorWrappe auto tensor_shape = user_tensor->get_shape(); bool is_dynamic = port_pshape.is_dynamic(); OPENVINO_ASSERT(std::dynamic_pointer_cast(user_tensor) == nullptr, "[GPU] Unexpected remote tensor"); - auto input_ptr = user_tensor->data(); - const auto alloc_type = m_graph->get_engine().detect_usm_allocation_type(input_ptr); - const auto is_usm_host = alloc_type == cldnn::allocation_type::usm_host; - bool can_share = is_usm_host && - !is_convert_required(user_tensor->get_element_type(), element_type) && + auto usm_host_tensor = std::dynamic_pointer_cast(user_tensor); + bool 
can_share = usm_host_tensor != nullptr && !is_convert_required(user_tensor->get_element_type(), element_type) && can_use_usm_host(m_graph->get_engine()); if (can_share) { - // For USM case we create host blob using custom USM host allocator - // and then create shared device blob on top of this buffer - return { create_shared_device_tensor(tensor_shape, element_type, input_ptr), user_tensor_wrapper.owner }; + return { usm_host_tensor->get_impl(), user_tensor_wrapper.owner }; } auto actual_memory_shape = tensor_shape; @@ -689,13 +692,17 @@ std::vector SyncInferRequest::prepare_batched_input(const std std::vector SyncInferRequest::prepare_input(const std::string& name, const ov::Output& port, const TensorWrapper& user_tensor_wrapper) { - OV_ITT_SCOPED_TASK(itt::domains::intel_gpu_plugin, "SyncInferRequest::prepare_input"); + OV_ITT_SCOPED_TASK(itt::domains::intel_gpu_plugin, openvino::itt::handle("SyncInferRequest::prepare_input: " + name)); auto pshape = port.get_partial_shape(); auto is_dynamic = pshape.is_dynamic(); auto user_tensor = user_tensor_wrapper.ptr; auto element_type = user_tensor->get_element_type(); + auto remote_ptr = std::dynamic_pointer_cast(user_tensor); + auto usm_host_ptr = std::dynamic_pointer_cast(user_tensor); bool is_remote = remote_ptr != nullptr; + bool is_usm_host_tensor = usm_host_ptr != nullptr; + GPU_DEBUG_TRACE_DETAIL << "Prepare input for " << name << " ( is_remote ? " << is_remote << ")" << std::endl; GPU_DEBUG_TRACE_DETAIL << " port shape : " << pshape.to_string() << std::endl; GPU_DEBUG_TRACE_DETAIL << " user_tensor shape: " << user_tensor->get_shape().to_string() << std::endl; @@ -713,12 +720,16 @@ std::vector SyncInferRequest::prepare_input(const std::string user_tensor->get_shape(), ") are incompatible"); + auto device_tensor_et = convert_to_supported_device_type(element_type); + bool convert_needed = is_convert_required(element_type, device_tensor_et); + if (is_remote) { m_plugin_inputs[name] = user_tensor_wrapper; + } else if (is_usm_host_tensor && !convert_needed) { + m_plugin_inputs[name] = {usm_host_ptr->get_impl(), user_tensor_wrapper.owner}; + is_remote = true; } - auto device_tensor_et = convert_to_supported_device_type(element_type); - bool convert_needed = is_convert_required(element_type, device_tensor_et); bool update_device_tensor = m_plugin_inputs.count(name) == 0 || (m_plugin_inputs[name].owner == TensorOwner::USER && !is_remote); if (update_device_tensor) { @@ -780,6 +791,7 @@ std::vector SyncInferRequest::prepare_input(const std::string } } + GPU_DEBUG_TRACE_DETAIL << name << " prepare input: " << memory->buffer_ptr() << std::endl; const cldnn::primitive_id internal_name = "parameter:" + name; network->set_input_data(internal_name, memory); @@ -839,6 +851,7 @@ std::vector SyncInferRequest::prepare_output(const std::strin auto output_tensor = std::dynamic_pointer_cast(m_plugin_outputs.at(name).ptr); auto output_memory = output_tensor->get_memory(); + GPU_DEBUG_TRACE_DETAIL << name << " prepare output: " << output_memory->buffer_ptr() << std::endl; return network->set_output_memory(internal_name, output_memory); } diff --git a/src/plugins/intel_gpu/src/plugin/usm_host_tensor.cpp b/src/plugins/intel_gpu/src/plugin/usm_host_tensor.cpp new file mode 100644 index 00000000000000..bcb0877b521f20 --- /dev/null +++ b/src/plugins/intel_gpu/src/plugin/usm_host_tensor.cpp @@ -0,0 +1,50 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "intel_gpu/plugin/usm_host_tensor.hpp" +#include 
"intel_gpu/plugin/remote_tensor.hpp" +#include "intel_gpu/plugin/remote_context.hpp" +#include "openvino/runtime/make_tensor.hpp" +#include + +namespace ov { +namespace intel_gpu { + +USMHostTensor::USMHostTensor(std::shared_ptr context, const element::Type element_type, const Shape& shape) + : m_impl(std::make_shared(context, shape, element_type, TensorType::BT_USM_HOST_INTERNAL)) {} + +USMHostTensor::USMHostTensor(std::shared_ptr tensor) + : m_impl(tensor) {} + +void* USMHostTensor::data(const element::Type& element_type) const { + return m_impl->get_original_memory()->buffer_ptr(); +} + +const element::Type& USMHostTensor::get_element_type() const { + return m_impl->get_element_type(); +} + +const Shape& USMHostTensor::get_shape() const { + return m_impl->get_shape(); +} + +const Strides& USMHostTensor::get_strides() const { + return m_impl->get_strides(); +} + +void USMHostTensor::set_shape(ov::Shape new_shape) { + m_impl->set_shape(new_shape); +} + +void USMHostTensor::set_memory(std::shared_ptr tensor) { + OPENVINO_ASSERT(tensor->get_original_memory()->get_allocation_type() == cldnn::allocation_type::usm_host, "[GPU] Unexpected allocation type"); + m_impl = tensor; +} + +std::shared_ptr USMHostTensor::get_impl() const { + return m_impl; +} + +} // namespace intel_gpu +} // namespace ov diff --git a/src/plugins/intel_gpu/tests/common/subgraphs_builders.hpp b/src/plugins/intel_gpu/tests/common/subgraphs_builders.hpp new file mode 100644 index 00000000000000..dea703cf7104b2 --- /dev/null +++ b/src/plugins/intel_gpu/tests/common/subgraphs_builders.hpp @@ -0,0 +1,50 @@ +// Copyright (C) 2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include "openvino/core/dimension.hpp" +#include "openvino/core/model.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/transpose.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/parameter.hpp" +#include "openvino/op/matmul.hpp" +#include "openvino/op/convert.hpp" +#include "openvino/op/concat.hpp" + +namespace tests { + +inline std::shared_ptr make_llm_kv_cache_pattern(ov::Dimension batch = ov::Dimension::dynamic(), + ov::Dimension n_heads = ov::Dimension::dynamic(), + ov::Dimension n_features = ov::Dimension::dynamic(), + ov::element::Type_t element_type = ov::element::f32) { + ov::PartialShape kv_cache_size = {batch, n_heads, -1, n_features}; + ov::PartialShape new_token_size = {batch, -1, n_heads, n_features}; + ov::PartialShape matmul_in_size = {batch, n_heads, -1, -1}; + + auto in_kv_prev = std::make_shared(element_type, kv_cache_size); + in_kv_prev->set_friendly_name("past_key_values"); + auto in_new_token = std::make_shared(element_type, new_token_size); + in_new_token->set_friendly_name("new_token_input"); + auto in_matmul = std::make_shared(element_type, matmul_in_size); + in_matmul->set_friendly_name("in_matmul"); + + auto transpose_const = ov::op::v0::Constant::create(ov::element::i32, {new_token_size.size()}, {0, 2, 1, 3}); + auto transpose = std::make_shared(in_new_token, transpose_const); + auto concat = std::make_shared(ov::OutputVector{in_kv_prev, transpose}, 2); + auto convert = std::make_shared(concat, element_type); + auto kv_present = std::make_shared(convert); + kv_present->set_friendly_name("present_key_values"); + auto matmul = std::make_shared(in_matmul, concat, false, false); + auto matmul_out = std::make_shared(matmul); + matmul_out->set_friendly_name("matmul_out"); + + ov::ParameterVector params{in_kv_prev, in_new_token, in_matmul}; + ov::ResultVector 
results{kv_present, matmul_out}; + return std::make_shared(results, params, "LLM-KV-Cache"); +} + +} // namespace tests diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/iteration_chaining.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/iteration_chaining.cpp new file mode 100644 index 00000000000000..4bcef9a7bedbe0 --- /dev/null +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/iteration_chaining.cpp @@ -0,0 +1,23 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#include +#include "behavior/ov_infer_request/iteration_chaining.hpp" +#include "common_test_utils/test_constants.hpp" +#include "openvino/runtime/properties.hpp" + +using namespace ov::test::behavior; + +namespace { + +const std::vector configs = { + { ov::hint::inference_precision(ov::element::f32) } +}; + +INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVIterationChaining, + ::testing::Combine( + ::testing::Values(ov::test::utils::DEVICE_GPU), + ::testing::ValuesIn(configs)), + OVIterationChaining::getTestCaseName); + +} // namespace diff --git a/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/kv_cache.cpp b/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/kv_cache.cpp new file mode 100644 index 00000000000000..a32e97d8e8e0fc --- /dev/null +++ b/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/kv_cache.cpp @@ -0,0 +1,217 @@ +// Copyright (C) 2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "common_test_utils/ov_tensor_utils.hpp" +#include "openvino/core/node_vector.hpp" +#include "openvino/core/partial_shape.hpp" +#include "openvino/core/preprocess/pre_post_process.hpp" +#include "openvino/op/concat.hpp" +#include "openvino/op/matmul.hpp" +#include "openvino/op/parameter.hpp" +#include "openvino/op/transpose.hpp" +#include "shared_test_classes/base/layer_test_utils.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" +#include "shared_test_classes/base/utils/compare_results.hpp" +#include "transformations/rt_info/decompression.hpp" +#include "subgraphs_builders.hpp" + +using namespace ov::test; + +namespace SubgraphTestsDefinitions { + +using KVCacheTestParams = std::tuple, // input shapes + ov::element::Type, // in/out precision + std::map>; // additional config + +class KVCacheTest : public testing::WithParamInterface, public SubgraphBaseTest { +public: + static std::string get_test_case_name(testing::TestParamInfo obj) { + std::vector input_shapes; + ov::element::Type element_type; + std::map additional_config; + + std::tie(input_shapes, element_type, additional_config) = obj.param; + + std::ostringstream result; + for (const auto& shape : input_shapes) { + result << ov::test::utils::partialShape2str({shape.first}) << "_"; + } + result << "TS="; + for (const auto& shape : input_shapes) { + result << "("; + if (!shape.second.empty()) { + auto itr = shape.second.begin(); + do { + result << ov::test::utils::vec2str(*itr); + } while (++itr != shape.second.end() && result << "_"); + } + result << ")_"; + } + result << "precision=" << element_type << "_"; + result << "config=("; + for (const auto& configEntry : additional_config) { + result << configEntry.first << ", " << configEntry.second << ":"; + } + result << ")"; + + return result.str(); + } + +protected: + void SetUp() override { + targetDevice = ov::test::utils::DEVICE_GPU; + + std::vector input_shapes; + 
ov::element::Type element_type; + std::map additional_config; + + std::tie(input_shapes, element_type, additional_config) = GetParam(); + + configuration.insert(additional_config.begin(), additional_config.end()); + init_input_shapes(input_shapes); + + inType = outType = element_type; + + function = tests::make_llm_kv_cache_pattern(inputDynamicShapes[0][0], inputDynamicShapes[0][1], inputDynamicShapes[0][3], element_type); + } +}; + +TEST_P(KVCacheTest, CompareWithRefs) { + SKIP_IF_CURRENT_TEST_IS_DISABLED() + run(); +} + +namespace { + +const std::vector precisions = {ov::element::f32, ov::element::f16}; + +const std::vector> input_shapes_basic = { + { + {{-1, 32, -1, 80}, { {1, 32, 0, 80}, {1, 32, 20, 80} }}, + {{-1, -1, 32, 80}, { {1, 20, 32, 80}, {1, 1, 32, 80} }}, + {{-1, 32, -1, -1}, { {1, 32, 1, 20}, {1, 32, 1, 21} }} + }, +}; + +INSTANTIATE_TEST_SUITE_P(smoke_GPU_Dynamic, + KVCacheTest, + ::testing::Combine(::testing::ValuesIn(input_shapes_basic), + ::testing::ValuesIn(precisions), + ::testing::Values(std::map())), + KVCacheTest::get_test_case_name); +} // namespace + +TEST(KVCacheTest, smoke_multipleIterations) { +#if defined(ANDROID) + GTEST_SKIP(); +#endif + auto core = ov::Core(); + + const size_t batch = 1; + const size_t n_heads = 32; + const size_t n_features = 80; + const size_t context_size = 20; + size_t cache_size = 0; + + ov::element::Type element_type = ov::element::f16; + + auto model = tests::make_llm_kv_cache_pattern(batch, n_heads, n_features, element_type); + auto compiled_model = core.compile_model(model, ov::test::utils::DEVICE_GPU, ov::hint::inference_precision(ov::element::f16)); + + auto input0 = model->get_parameters().at(0); + auto input1 = model->get_parameters().at(1); + auto input2 = model->get_parameters().at(2); + auto output0 = model->get_results().at(0); + auto output1 = model->get_results().at(1); + + auto infer_request = compiled_model.create_infer_request(); + auto input0_tensor_remote_io = infer_request.get_tensor(input0); + auto input1_tensor_remote_io = infer_request.get_tensor(input1); + auto input2_tensor_remote_io = infer_request.get_tensor(input2); + auto output0_tensor_remote_io = infer_request.get_tensor(output0); + auto output1_tensor_remote_io = infer_request.get_tensor(output1); + + auto compare_tensors = [&model](const std::vector expected, const std::vector& actual) { + ASSERT_EQ(expected.size(), actual.size()); + ASSERT_EQ(expected.size(), model->get_results().size()); + auto compareMap = ov::test::utils::getCompareMap(); + const auto& results = model->get_results(); + for (size_t j = 0; j < results.size(); j++) { + const auto result = results[j]; + for (size_t i = 0; i < result->get_input_size(); ++i) { + std::shared_ptr inputNode = result->get_input_node_shared_ptr(i); + if (std::dynamic_pointer_cast(inputNode)) { + std::shared_ptr nextNodePtr = inputNode->get_input_node_shared_ptr(0); + if (!ngraph::is_type(nextNodePtr)) { + inputNode = nextNodePtr; + } + } + auto it = compareMap.find(inputNode->get_type_info()); + ASSERT_NE(it, compareMap.end()); + it->second(inputNode, i, expected[j], actual[j], 1e-4f, 1e-4f); + } + } + }; + + { + const ov::Shape kv_cache_size_initial = {batch, n_heads, cache_size, n_features}; + const ov::Shape new_token_size_initial = {batch, context_size, n_heads, n_features}; + const ov::Shape matmul_in_size_initial = {batch, n_heads, context_size, context_size}; + + auto new_token_data = ov::test::utils::create_and_fill_tensor(element_type, new_token_size_initial); + auto matmul_data = 
ov::test::utils::create_and_fill_tensor(element_type, matmul_in_size_initial); + + auto kv_cache_input = infer_request.get_tensor(input0); + kv_cache_input.set_shape(kv_cache_size_initial); + + auto ref_model = model->clone(); + ngraph::helpers::resize_function(ref_model, {kv_cache_input.get_shape(), new_token_data.get_shape(), matmul_data.get_shape()}); + auto results = ngraph::helpers::interpretFunction(ref_model, {{input0, kv_cache_input}, {input1, new_token_data}, {input2, matmul_data}}); + + infer_request.set_tensor(input0, kv_cache_input); + infer_request.set_tensor(input1, new_token_data); + infer_request.set_tensor(input2, matmul_data); + + infer_request.infer(); + + compare_tensors(results, {infer_request.get_tensor(output0), infer_request.get_tensor(output1)}); + + cache_size += context_size; + } + + const size_t input_tokens = 1; + const size_t niters = 10; + const ov::Shape new_token_size = {batch, input_tokens, n_heads, n_features}; + size_t context_length = cache_size + input_tokens; + for (size_t i = 0; i < niters; i++, context_length += input_tokens) { + ov::Shape matmul_in_size_loop = {batch, n_heads, input_tokens, context_length}; + auto new_token_data = ov::test::utils::create_and_fill_tensor(element_type, new_token_size); + auto matmul_data = ov::test::utils::create_and_fill_tensor(element_type, matmul_in_size_loop); + + auto kv_cache_input = infer_request.get_tensor(output0); + auto kv_shape = kv_cache_input.get_shape(); + + auto ref_model = model->clone(); + ngraph::helpers::resize_function(ref_model, {kv_shape, new_token_data.get_shape(), matmul_data.get_shape()}); + auto results = ngraph::helpers::interpretFunction(ref_model, {{input0, kv_cache_input}, {input1, new_token_data}, {input2, matmul_data}}); + + auto new_token_input = infer_request.get_tensor(input1); + new_token_input.set_shape(new_token_data.get_shape()); + auto matmul_input = infer_request.get_tensor(input2); + matmul_input.set_shape(matmul_data.get_shape()); + + new_token_data.copy_to(new_token_input); + matmul_data.copy_to(matmul_input); + + infer_request.set_tensor(input0, kv_cache_input); + infer_request.set_tensor(input1, new_token_input); + infer_request.set_tensor(input2, matmul_input); + + infer_request.infer(); + + compare_tensors(results, {infer_request.get_tensor(output0), infer_request.get_tensor(output1)}); + } +} + +} // namespace SubgraphTestsDefinitions From 6c396fac9dacd8c62e41f77e288f5b6327730630 Mon Sep 17 00:00:00 2001 From: Sergey Shlyapnikov Date: Wed, 18 Oct 2023 16:28:27 +0400 Subject: [PATCH 254/257] [GPU] Deferred events deallocation and tuning for FC bf_tiled kernel (#20519) --- src/plugins/intel_gpu/include/intel_gpu/graph/network.hpp | 2 ++ src/plugins/intel_gpu/src/graph/network.cpp | 8 +++++++- .../fully_connected/fully_connected_kernel_bf_tiled.cpp | 3 +++ 3 files changed, 12 insertions(+), 1 deletion(-) diff --git a/src/plugins/intel_gpu/include/intel_gpu/graph/network.hpp b/src/plugins/intel_gpu/include/intel_gpu/graph/network.hpp index 1474543428a7b7..ab5d6b5e0af140 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/graph/network.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/graph/network.hpp @@ -283,6 +283,8 @@ struct network { size_t _weights_cache_capacity = 1; std::unordered_map _events; + // This map is used to temporarily hold events that will be deallocated later + std::unordered_map _old_events; output_chains_map _output_chains; std::unique_ptr _shape_predictor; diff --git a/src/plugins/intel_gpu/src/graph/network.cpp 
b/src/plugins/intel_gpu/src/graph/network.cpp index 51b988076d18f3..240db96d5b4988 100644 --- a/src/plugins/intel_gpu/src/graph/network.cpp +++ b/src/plugins/intel_gpu/src/graph/network.cpp @@ -758,7 +758,10 @@ void network::reset_execution(bool wait) { get_stream().wait_for_events(events); } } - _events.clear(); + + // Move events to temporarily map to deallocate them at the end of network::execute() call for better overlapping with + // kernels execution, since it may take significant time for high amount of events + _old_events = std::move(_events); } event::ptr network::set_input_data(const primitive_id& id, memory::ptr data) { @@ -1457,6 +1460,9 @@ void network::execute_impl(const std::vector& events) { // In scenarios with a big number of very small networks it can provide performance drop. get_stream().flush(); + // Deallocate events from the previos iteration + _old_events.clear(); + GPU_DEBUG_IF(debug_config->dump_runtime_memory_pool > 0) { get_memory_pool().dump(get_id()); } diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/fully_connected/fully_connected_kernel_bf_tiled.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/fully_connected/fully_connected_kernel_bf_tiled.cpp index 6b0407f6580cad..c272124627db23 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/fully_connected/fully_connected_kernel_bf_tiled.cpp +++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/fully_connected/fully_connected_kernel_bf_tiled.cpp @@ -241,6 +241,9 @@ FullyConnected_bf_tiled::GetAutoTuneParams(const fully_connected_params& params, .Case(tune_params(8, std::min(max_tile_ofm, 2u), 1, 1, 1, 1, EXE_MODE_AGE_BASED)); } + if (params.compressed && batch == 1) + selector.Case(tune_params(1, std::min(max_tile_ofm, 2u), 4, 2, 1, 1, EXE_MODE_AGE_BASED)); + selector.Case([&](const fully_connected_params&) -> tune_params { tune_params result(8, std::min(max_tile_ofm, 2u), 1, 2, 1, 1, EXE_MODE_DEFAULT); From f2549f2d5903de90f3702cf1454a1604d7e207ff Mon Sep 17 00:00:00 2001 From: Pawel Raasz Date: Wed, 18 Oct 2023 15:02:18 +0200 Subject: [PATCH 255/257] [core]Migrate VariadicSplit op to new API (#20416) * Migrate VariadicSlice to new API - refactor to reduce bin size * Move `get_tensors_partial_shapes` to dev API * Use get_tensors_partial_shapes in VariadicSplit * Remove `visit_attributes` is same as base --- .../include/openvino/op/variadic_split.hpp | 10 +- src/core/src/op/variadic_split.cpp | 167 ++++++++---------- 2 files changed, 75 insertions(+), 102 deletions(-) diff --git a/src/core/include/openvino/op/variadic_split.hpp b/src/core/include/openvino/op/variadic_split.hpp index 2d6f751d48d3ba..49cb5dcc714502 100644 --- a/src/core/include/openvino/op/variadic_split.hpp +++ b/src/core/include/openvino/op/variadic_split.hpp @@ -29,25 +29,17 @@ class OPENVINO_API VariadicSplit : public Op { /// outputs. 
The sum of split_lengths must match data.shape[axis] VariadicSplit(const Output& data, const Output& axis, const Output& split_lengths); - bool visit_attributes(AttributeVisitor& visitor) override; - void validate_and_infer_types() override; std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; size_t get_default_output_index() const override { return no_default_index(); } - OPENVINO_SUPPRESS_DEPRECATED_START - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - OPENVINO_SUPPRESS_DEPRECATED_END + bool evaluate(TensorVector& outputs, const TensorVector& inputs) const override; bool evaluate_lower(TensorVector& outputs) const override; bool evaluate_upper(TensorVector& outputs) const override; bool has_evaluate() const override; bool evaluate_label(TensorLabelVector& output_labels) const override; - -private: - bool evaluate_variadic_split(const HostTensorVector& outputs, const HostTensorVector& inputs) const; - bool has_axis_and_splits_bound_set() const; }; } // namespace v1 } // namespace op diff --git a/src/core/src/op/variadic_split.cpp b/src/core/src/op/variadic_split.cpp index ab94af3fd8345f..38b309325fe16a 100644 --- a/src/core/src/op/variadic_split.cpp +++ b/src/core/src/op/variadic_split.cpp @@ -2,33 +2,69 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/variadic_split.hpp" +#include "openvino/op/variadic_split.hpp" #include #include "bound_evaluate.hpp" #include "compare.hpp" #include "itt.hpp" -#include "ngraph/validation_util.hpp" +#include "openvino/core/validation_util.hpp" #include "openvino/reference/slice.hpp" #include "variadic_split_shape_inference.hpp" -using namespace std; -using namespace ngraph; +namespace ov { +namespace op { +namespace variadic_split { +namespace { -op::v1::VariadicSplit::VariadicSplit(const Output& data, - const Output& axis, - const Output& split_lengths) - : Op({data, axis, split_lengths}) { - constructor_validate_and_infer_types(); +bool has_axis_and_splits_bound_set(const Node* const node) { + return have_node_inputs_bounds_set(node, 1, 2); } -bool ngraph::op::v1::VariadicSplit::visit_attributes(AttributeVisitor& visitor) { - OV_OP_SCOPE(v1_VariadicSplit_visit_attributes); +bool evaluate(TensorVector& outputs, const TensorVector& inputs) { + const auto& data_tensor = inputs[0]; + const auto& axis_tensor = inputs[1]; + const auto axis = + ov::util::normalize(get_tensor_data_as(axis_tensor).front(), data_tensor.get_shape().size()); + + ov::Coordinate upper_bounds(data_tensor.get_shape()); + ov::Coordinate lower_bounds(upper_bounds.size()); + upper_bounds[axis] = 0; + + const Strides default_strides(upper_bounds.size(), 1); + constexpr auto is_zero_dim = ov::cmp::Equal(0); + + for (auto& output : outputs) { + const auto& out_shape = output.get_shape(); + upper_bounds[axis] += out_shape[axis]; + + if (std::none_of(out_shape.cbegin(), out_shape.cend(), is_zero_dim)) { + reference::slice(static_cast(data_tensor.data()), + static_cast(output.data()), + data_tensor.get_shape(), + lower_bounds, + upper_bounds, + default_strides, + out_shape, + data_tensor.get_element_type().size()); + } + + lower_bounds[axis] = upper_bounds[axis]; + } + return true; } +} // namespace +} // namespace variadic_split -void ngraph::op::v1::VariadicSplit::validate_and_infer_types() { +namespace v1 { +VariadicSplit::VariadicSplit(const Output& data, const Output& axis, const Output& split_lengths) + : Op({data, axis, split_lengths}) { + constructor_validate_and_infer_types(); +} + +void 
VariadicSplit::validate_and_infer_types() { OV_OP_SCOPE(v1_VariadicSplit_validate_and_infer_types); for (size_t i = 0; i < get_input_size(); ++i) { set_input_is_relevant_to_value(i); @@ -45,107 +81,52 @@ void ngraph::op::v1::VariadicSplit::validate_and_infer_types() { } } -shared_ptr op::v1::VariadicSplit::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr VariadicSplit::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v1_VariadicSplit_clone_with_new_inputs); check_new_args_count(this, new_args); - return make_shared(new_args.at(0), new_args.at(1), new_args.at(2)); + return std::make_shared(new_args.at(0), new_args.at(1), new_args.at(2)); } -OPENVINO_SUPPRESS_DEPRECATED_START -namespace variadic_split { -namespace { -inline bool evaluate(const HostTensorPtr& in, - const HostTensorPtr& out, - const Coordinate& lower_bounds, - const Coordinate& upper_bounds) { - const auto& output_shape = out->get_shape(); - const auto has_nonzero_dims = std::none_of(output_shape.begin(), output_shape.end(), ov::cmp::Equal(0)); - - if (has_nonzero_dims) { - ov::reference::slice(in->get_data_ptr(), - out->get_data_ptr(), - in->get_shape(), - lower_bounds, - upper_bounds, - Strides(lower_bounds.size(), 1), - out->get_shape(), - in->get_element_type().size()); - return true; - } - return false; -} -} // namespace -} // namespace variadic_split - -bool op::v1::VariadicSplit::evaluate_variadic_split(const HostTensorVector& inputs, - const HostTensorVector& outputs) const { - const auto& data_tensor = inputs[0]; - const auto& axis_tensor = inputs[1]; - const auto& split_lengths_tensor = inputs[2]; - OPENVINO_ASSERT(axis_tensor->get_element_type().is_integral_number(), - "axis element type is not integral data type"); - OPENVINO_ASSERT(split_lengths_tensor->get_element_type().is_integral_number(), - "split_lengths element type is not integral data type"); - - OPENVINO_SUPPRESS_DEPRECATED_START - int64_t axis = host_tensor_2_vector(axis_tensor)[0]; - axis = ngraph::normalize_axis(this, axis, data_tensor->get_partial_shape().rank()); - OPENVINO_SUPPRESS_DEPRECATED_END +bool VariadicSplit::evaluate(TensorVector& outputs, const TensorVector& inputs) const { + OV_OP_SCOPE(v1_VariadicSplit_evaluate); - std::vector input_shapes = {data_tensor->get_partial_shape(), - axis_tensor->get_partial_shape(), - split_lengths_tensor->get_partial_shape()}; - auto output_shapes = shape_infer(this, input_shapes, make_tensor_accessor(inputs)); + if (inputs[1].get_element_type().is_integral_number() && inputs[2].get_element_type().is_integral_number()) { + const auto output_shapes = + shape_infer(this, ov::util::get_tensors_partial_shapes(inputs), make_tensor_accessor(inputs)); + OPENVINO_ASSERT(outputs.size() == output_shapes.size()); - const auto data_shape = data_tensor->get_shape(); - std::vector lower_bounds(data_shape.size(), 0); - std::vector upper_bounds = data_shape; - upper_bounds[axis] = 0; + auto out_partial_shape = output_shapes.cbegin(); + for (auto& output : outputs) { + output.set_shape(out_partial_shape->to_shape()); + ++out_partial_shape; + } - size_t split_pos = 0; - for (const auto& output : outputs) { - ov::Shape output_shape = output_shapes[split_pos++].get_shape(); - upper_bounds[axis] += output_shape[axis]; - output->set_shape(output_shape); - variadic_split::evaluate(data_tensor, output, lower_bounds, upper_bounds); - lower_bounds.at(axis) = upper_bounds.at(axis); + return variadic_split::evaluate(outputs, inputs); + } else { + return false; } - - return true; -} 
-bool op::v1::VariadicSplit::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - OV_OP_SCOPE(v1_VariadicSplit_evaluate); - return evaluate_variadic_split(inputs, outputs); } -bool op::v1::VariadicSplit::has_evaluate() const { +bool VariadicSplit::has_evaluate() const { OV_OP_SCOPE(v1_VariadicSplit_has_evaluate); return get_input_element_type(1).is_integral_number() && get_input_element_type(2).is_integral_number(); } -bool op::v1::VariadicSplit::has_axis_and_splits_bound_set() const { - for (size_t i = 1; i < get_input_size(); ++i) { - if (!get_input_tensor(i).has_and_set_bound()) { - return false; - } - } - return true; -} - -bool op::v1::VariadicSplit::evaluate_lower(ov::TensorVector& output_values) const { +bool VariadicSplit::evaluate_lower(TensorVector& output_values) const { OV_OP_SCOPE(v1_Split_evaluate_lower); - - return has_axis_and_splits_bound_set() && default_lower_bound_evaluator(this, output_values); + return variadic_split::has_axis_and_splits_bound_set(this) && default_lower_bound_evaluator(this, output_values); } -bool op::v1::VariadicSplit::evaluate_upper(ov::TensorVector& output_values) const { +bool VariadicSplit::evaluate_upper(TensorVector& output_values) const { OV_OP_SCOPE(v1_Split_evaluate_upper); - - return has_axis_and_splits_bound_set() && default_upper_bound_evaluator(this, output_values); + return variadic_split::has_axis_and_splits_bound_set(this) && default_upper_bound_evaluator(this, output_values); } -bool op::v1::VariadicSplit::evaluate_label(TensorLabelVector& output_labels) const { +bool VariadicSplit::evaluate_label(TensorLabelVector& output_labels) const { OPENVINO_SUPPRESS_DEPRECATED_START - return has_axis_and_splits_bound_set() && default_label_evaluator(this, output_labels); + return variadic_split::has_axis_and_splits_bound_set(this) && default_label_evaluator(this, output_labels); OPENVINO_SUPPRESS_DEPRECATED_END } +} // namespace v1 +} // namespace op +} // namespace ov From 491454103ea2f29b242587c6084c19868a879a82 Mon Sep 17 00:00:00 2001 From: Anastasia Kuporosova Date: Wed, 18 Oct 2023 17:23:52 +0300 Subject: [PATCH 256/257] [Docs] fix failure of python snippet (#20516) * [Docs] fix failure of python snippet * trigger python snippets * fix ga * Update .github/workflows/linux.yml Co-authored-by: Mikhail Ryzhov * Update .github/workflows/linux.yml Co-authored-by: Mikhail Ryzhov * return back fix --------- Co-authored-by: Chen Peter Co-authored-by: Mikhail Ryzhov --- .github/workflows/linux.yml | 24 ++++++++++++++---------- docs/snippets/ov_auto.py | 3 +-- 2 files changed, 15 insertions(+), 12 deletions(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 59e021e24eb153..733dfed4c09d14 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -5,17 +5,21 @@ on: - cron: '0 0 * * 3,6' workflow_dispatch: pull_request: - paths-ignore: - - '**/docs/**' - - 'docs/**' - - '**/**.md' - - '**.md' + paths: + - '**' + - '!**/docs/**' + - '!docs/**' + - 'docs/snippets/**' + - '!**/**.md' + - '!**.md' push: - paths-ignore: - - '**/docs/**' - - 'docs/**' - - '**/**.md' - - '**.md' + paths: + - '**' + - '!docs/**' + - '!**/docs/**' + - 'docs/snippets/**' + - '!**/**.md' + - '!**.md' branches: - master - 'releases/**' diff --git a/docs/snippets/ov_auto.py b/docs/snippets/ov_auto.py index 8c2721a355d1b3..47d8d877ecda24 100644 --- a/docs/snippets/ov_auto.py +++ b/docs/snippets/ov_auto.py @@ -8,7 +8,6 @@ import openvino.properties.device as device import openvino.properties.hint as 
hints import openvino.properties.streams as streams -import openvino.properties.enable_profiling as enable_profiling #! [py_ov_property_import_header] import openvino.properties.log as log @@ -167,7 +166,7 @@ def part5(): cpu_config = { hints.performance_mode: hints.PerformanceMode.LATENCY, streams.num: 8, - enable_profiling: True + properties.enable_profiling: True } compiled_model = core.compile_model( model=model, From cf9791e3e03983f08fe21d9812d4746516a56d5a Mon Sep 17 00:00:00 2001 From: Mikhail Ryzhov Date: Wed, 18 Oct 2023 18:43:46 +0200 Subject: [PATCH 257/257] [GHA] Temporary disable win workflows (#20568) * moved win workflow to nightly * reset redundant files * removed redundant files --- .github/workflows/windows.yml | 31 +++++++++++++++++-------------- 1 file changed, 17 insertions(+), 14 deletions(-) diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 6aed320376c21e..6ce891e6767698 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -1,6 +1,9 @@ name: Windows (VS 2022, Python 3.11) on: - workflow_dispatch: + schedule: + # at 00:00 on workdays + - cron: '0 0 * * 1,2,3,4,5' +# workflow_dispatch: # pull_request: # paths-ignore: # - '**/docs/**' @@ -9,16 +12,16 @@ on: # - '**.md' # - '**/layer_tests_summary/**' # - '**/conformance/**' - push: - paths-ignore: - - '**/docs/**' - - 'docs/**' - - '**/**.md' - - '**.md' - - '**/layer_tests_summary/**' - - '**/conformance/**' - branches: - - master +# push: +# paths-ignore: +# - '**/docs/**' +# - 'docs/**' +# - '**/**.md' +# - '**.md' +# - '**/layer_tests_summary/**' +# - '**/conformance/**' +# branches: +# - master concurrency: # github.ref is not unique in post-commit @@ -336,7 +339,7 @@ jobs: shell: cmd run: | python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt - + :: requires 'unit_tests' from 'tools/mo' set PYTHONPATH=${{ env.OPENVINO_REPO }}\tools\mo;${{ env.LAYER_TESTS_INSTALL_DIR }};%PYTHONPATH% python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/onnx_tests -m "not launch_only_if_manually_specified and precommit" --junitxml=${INSTALL_TEST_DIR}/TEST-onnx.xml @@ -404,7 +407,7 @@ jobs: shell: cmd run: | python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt - + :: TODO: remove setupvars.bat from here; currently, it's used for 'test_utils' installed in '/python/openvino' call "${{ env.INSTALL_DIR }}\\setupvars.bat" && python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/mo_python_api_tests --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-test_mo_convert.xml env: @@ -415,7 +418,7 @@ jobs: shell: cmd run: | python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt - + :: TODO: remove setupvars.sh from here; currently, it's used for 'test_utils' installed in '/python/openvino' call "${{ env.INSTALL_DIR }}\\setupvars.bat" && python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/ovc_python_api_tests --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-test_ovc_convert.xml env: